8214259: Implementation: JEP 189: Shenandoah: A Low-Pause-Time Garbage Collector (Experimental)
author rkennke
Mon, 10 Dec 2018 15:47:44 +0100
changeset 52925 9c18c9d839d3
parent 52924 420ff459906f
child 52926 38bee05fb0e4
8214259: Implementation: JEP 189: Shenandoah: A Low-Pause-Time Garbage Collector (Experimental) Reviewed-by: kvn, roland, shade, coleenp, lmesnik, pliden, jgeorge, ihse, erikj Contributed-by: Christine Flood <chf@redhat.com>, Aleksey Shipilev <shade@redhat.com>, Roland Westrelin <rwestrel@redhat.com>, Zhengyu Gu <zgu@redhat.com>, Andrew Haley <aph@redhat.com>, Andrew Dinn <adinn@redhat.com>, Mario Torre <mtorre@redhat.com>, Roman Kennke <rkennke@redhat.com>
make/autoconf/hotspot.m4
make/hotspot/gensrc/GensrcAdlc.gmk
make/hotspot/lib/JvmFeatures.gmk
make/hotspot/lib/JvmOverrideFiles.gmk
src/hotspot/cpu/aarch64/aarch64.ad
src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetC1_aarch64.cpp
src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad
src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetC1_x86.cpp
src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad
src/hotspot/share/adlc/formssel.cpp
src/hotspot/share/ci/ciInstanceKlass.cpp
src/hotspot/share/ci/ciInstanceKlass.hpp
src/hotspot/share/code/codeCache.hpp
src/hotspot/share/gc/shared/barrierSetConfig.hpp
src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
src/hotspot/share/gc/shared/c2/barrierSetC2.hpp
src/hotspot/share/gc/shared/collectedHeap.hpp
src/hotspot/share/gc/shared/gcCause.cpp
src/hotspot/share/gc/shared/gcCause.hpp
src/hotspot/share/gc/shared/gcConfig.cpp
src/hotspot/share/gc/shared/gcConfiguration.cpp
src/hotspot/share/gc/shared/gcName.hpp
src/hotspot/share/gc/shared/gc_globals.hpp
src/hotspot/share/gc/shared/referenceProcessor.cpp
src/hotspot/share/gc/shared/vmStructs_gc.hpp
src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp
src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp
src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp
src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp
src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp
src/hotspot/share/gc/shenandoah/shenandoahAllocTracker.cpp
src/hotspot/share/gc/shenandoah/shenandoahAllocTracker.hpp
src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp
src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp
src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahBarrierSetAssembler.hpp
src/hotspot/share/gc/shenandoah/shenandoahBrooksPointer.hpp
src/hotspot/share/gc/shenandoah/shenandoahBrooksPointer.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp
src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp
src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp
src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp
src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp
src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp
src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp
src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeapLock.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeuristics.hpp
src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp
src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.hpp
src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp
src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp
src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp
src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp
src/hotspot/share/gc/shenandoah/shenandoahMetrics.cpp
src/hotspot/share/gc/shenandoah/shenandoahMetrics.hpp
src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.cpp
src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.hpp
src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp
src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp
src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp
src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
src/hotspot/share/gc/shenandoah/shenandoahPacer.hpp
src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp
src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp
src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.cpp
src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.hpp
src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp
src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.cpp
src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.hpp
src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp
src/hotspot/share/gc/shenandoah/shenandoahStringDedup.hpp
src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.cpp
src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp
src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
src/hotspot/share/gc/shenandoah/shenandoahTimingTracker.cpp
src/hotspot/share/gc/shenandoah/shenandoahTimingTracker.hpp
src/hotspot/share/gc/shenandoah/shenandoahTracer.hpp
src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp
src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp
src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp
src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp
src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp
src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp
src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp
src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp
src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp
src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp
src/hotspot/share/memory/metaspace.hpp
src/hotspot/share/opto/arraycopynode.cpp
src/hotspot/share/opto/cfgnode.hpp
src/hotspot/share/opto/classes.cpp
src/hotspot/share/opto/classes.hpp
src/hotspot/share/opto/compile.cpp
src/hotspot/share/opto/compile.hpp
src/hotspot/share/opto/lcm.cpp
src/hotspot/share/opto/library_call.cpp
src/hotspot/share/opto/loopPredicate.cpp
src/hotspot/share/opto/loopTransform.cpp
src/hotspot/share/opto/loopnode.cpp
src/hotspot/share/opto/loopnode.hpp
src/hotspot/share/opto/loopopts.cpp
src/hotspot/share/opto/macro.cpp
src/hotspot/share/opto/node.hpp
src/hotspot/share/opto/type.cpp
src/hotspot/share/opto/type.hpp
src/hotspot/share/runtime/fieldDescriptor.hpp
src/hotspot/share/runtime/mutexLocker.cpp
src/hotspot/share/runtime/vmOperations.hpp
src/hotspot/share/utilities/globalDefinitions.hpp
src/hotspot/share/utilities/macros.hpp
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahHeap.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahHeapRegion.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMOps.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java
test/hotspot/jtreg/TEST.ROOT
test/hotspot/jtreg/TEST.groups
test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java
test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesShenandoah.java
test/hotspot/jtreg/gc/CriticalNativeArgs.java
test/hotspot/jtreg/gc/TestFullGCCount.java
test/hotspot/jtreg/gc/TestHumongousReferenceObject.java
test/hotspot/jtreg/gc/TestSystemGC.java
test/hotspot/jtreg/gc/arguments/TestAlignmentToUseLargePages.java
test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java
test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java
test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java
test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java
test/hotspot/jtreg/gc/arguments/TestShrinkHeapInSteps.java
test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java
test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java
test/hotspot/jtreg/gc/arguments/TestUseCompressedOopsErgo.java
test/hotspot/jtreg/gc/arguments/TestVerifyBeforeAndAfterGCFlags.java
test/hotspot/jtreg/gc/class_unloading/TestClassUnloadingDisabled.java
test/hotspot/jtreg/gc/epsilon/CriticalNativeArgs.java
test/hotspot/jtreg/gc/epsilon/CriticalNativeStress.java
test/hotspot/jtreg/gc/epsilon/libCriticalNative.c
test/hotspot/jtreg/gc/ergonomics/TestDynamicNumberOfGCThreads.java
test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java
test/hotspot/jtreg/gc/libCriticalNative.c
test/hotspot/jtreg/gc/logging/TestGCId.java
test/hotspot/jtreg/gc/metaspace/TestMetaspacePerfCounters.java
test/hotspot/jtreg/gc/shenandoah/TestAllocHumongousFragment.java
test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java
test/hotspot/jtreg/gc/shenandoah/TestAllocObjectArrays.java
test/hotspot/jtreg/gc/shenandoah/TestAllocObjects.java
test/hotspot/jtreg/gc/shenandoah/TestArrayCopyCheckCast.java
test/hotspot/jtreg/gc/shenandoah/TestArrayCopyStress.java
test/hotspot/jtreg/gc/shenandoah/TestElasticTLAB.java
test/hotspot/jtreg/gc/shenandoah/TestEvilSyncBug.java
test/hotspot/jtreg/gc/shenandoah/TestGCThreadGroups.java
test/hotspot/jtreg/gc/shenandoah/TestHeapUncommit.java
test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java
test/hotspot/jtreg/gc/shenandoah/TestLargeObjectAlignment.java
test/hotspot/jtreg/gc/shenandoah/TestLotsOfCycles.java
test/hotspot/jtreg/gc/shenandoah/TestParallelRefprocSanity.java
test/hotspot/jtreg/gc/shenandoah/TestPeriodicGC.java
test/hotspot/jtreg/gc/shenandoah/TestRefprocSanity.java
test/hotspot/jtreg/gc/shenandoah/TestRegionSampling.java
test/hotspot/jtreg/gc/shenandoah/TestRetainObjects.java
test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java
test/hotspot/jtreg/gc/shenandoah/TestSmallHeap.java
test/hotspot/jtreg/gc/shenandoah/TestStringDedup.java
test/hotspot/jtreg/gc/shenandoah/TestStringDedupStress.java
test/hotspot/jtreg/gc/shenandoah/TestStringInternCleanup.java
test/hotspot/jtreg/gc/shenandoah/TestVerifyJCStress.java
test/hotspot/jtreg/gc/shenandoah/TestVerifyLevels.java
test/hotspot/jtreg/gc/shenandoah/TestWithLogLevel.java
test/hotspot/jtreg/gc/shenandoah/TestWrongArrayMember.java
test/hotspot/jtreg/gc/shenandoah/compiler/TestC1ArrayCopyNPE.java
test/hotspot/jtreg/gc/shenandoah/compiler/TestC1VectorizedMismatch.java
test/hotspot/jtreg/gc/shenandoah/compiler/TestCommonGCLoads.java
test/hotspot/jtreg/gc/shenandoah/compiler/TestExpandedWBLostNullCheckDep.java
test/hotspot/jtreg/gc/shenandoah/compiler/TestMaybeNullUnsafeAccess.java
test/hotspot/jtreg/gc/shenandoah/compiler/TestNullCheck.java
test/hotspot/jtreg/gc/shenandoah/compiler/TestReferenceCAS.java
test/hotspot/jtreg/gc/shenandoah/compiler/TestWriteBarrierClearControl.java
test/hotspot/jtreg/gc/shenandoah/jni/TestJNICritical.java
test/hotspot/jtreg/gc/shenandoah/jni/TestJNIGlobalRefs.java
test/hotspot/jtreg/gc/shenandoah/jni/TestPinnedGarbage.java
test/hotspot/jtreg/gc/shenandoah/jni/libTestJNICritical.c
test/hotspot/jtreg/gc/shenandoah/jni/libTestJNIGlobalRefs.c
test/hotspot/jtreg/gc/shenandoah/jni/libTestPinnedGarbage.c
test/hotspot/jtreg/gc/shenandoah/jvmti/TestHeapDump.java
test/hotspot/jtreg/gc/shenandoah/jvmti/libTestHeapDump.c
test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java
test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryMXBeans.java
test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryPools.java
test/hotspot/jtreg/gc/shenandoah/mxbeans/TestPauseNotifications.java
test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargeObj.java
test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargerThanHeap.java
test/hotspot/jtreg/gc/shenandoah/oom/TestAllocSmallObj.java
test/hotspot/jtreg/gc/shenandoah/oom/TestClassLoaderLeak.java
test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java
test/hotspot/jtreg/gc/shenandoah/options/TestAlwaysPreTouch.java
test/hotspot/jtreg/gc/shenandoah/options/TestArgumentRanges.java
test/hotspot/jtreg/gc/shenandoah/options/TestClassUnloadingArguments.java
test/hotspot/jtreg/gc/shenandoah/options/TestCodeCacheRootStyles.java
test/hotspot/jtreg/gc/shenandoah/options/TestEnabled.java
test/hotspot/jtreg/gc/shenandoah/options/TestExplicitGC.java
test/hotspot/jtreg/gc/shenandoah/options/TestExplicitGCNoConcurrent.java
test/hotspot/jtreg/gc/shenandoah/options/TestHeuristicsUnlock.java
test/hotspot/jtreg/gc/shenandoah/options/TestHumongousThresholdArgs.java
test/hotspot/jtreg/gc/shenandoah/options/TestLoopMiningArguments.java
test/hotspot/jtreg/gc/shenandoah/options/TestObjectAlignment.java
test/hotspot/jtreg/gc/shenandoah/options/TestPacing.java
test/hotspot/jtreg/gc/shenandoah/options/TestParallelRegionStride.java
test/hotspot/jtreg/gc/shenandoah/options/TestRegionSizeArgs.java
test/hotspot/jtreg/gc/shenandoah/options/TestSelectiveBarrierFlags.java
test/hotspot/jtreg/gc/shenandoah/options/TestSingleThreaded.java
test/hotspot/jtreg/gc/shenandoah/options/TestWrongBarrierDisable.java
test/hotspot/jtreg/gc/startup_warnings/TestShenandoah.java
test/hotspot/jtreg/gc/stress/CriticalNativeStress.java
test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithShenandoah.java
test/hotspot/jtreg/gc/stress/gclocker/TestGCLockerWithShenandoah.java
test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithShenandoah.java
test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithShenandoah.java
test/hotspot/jtreg/gc/survivorAlignment/TestAllocationInEden.java
test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromEdenToTenured.java
test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java
test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java
test/hotspot/jtreg/gc/survivorAlignment/TestPromotionToSurvivor.java
test/hotspot/jtreg/gc/whitebox/TestWBGC.java
test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java
test/hotspot/jtreg/runtime/MemberName/MemberNameLeak.java
test/hotspot/jtreg/serviceability/sa/ClhsdbJhisto.java
test/hotspot/jtreg/serviceability/sa/TestHeapDumpForLargeArray.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcCapacityTest.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest01.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest02.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest03.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcNewTest.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest01.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest02.java
test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java
test/lib/sun/hotspot/gc/GC.java
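
Before the diffs, the load-bearing idea: Shenandoah as integrated here keeps a forwarding ("Brooks") pointer in an extra heap word directly in front of every object, and the read/write/CAS barriers below resolve objects through it (the real accessors live in shenandoahBrooksPointer.hpp above). A minimal C++ sketch of the convention, with illustrative names only:

    // Illustrative only; the real code uses ShenandoahBrooksPointer::byte_offset()
    // (the word just before the object) and byte_size() (one heap word).
    inline oop* brooks_ptr_addr(oop obj) {
      return (oop*)((HeapWord*)obj - 1);   // forwarding word sits one word before obj
    }
    inline oop resolve(oop obj) {
      return *brooks_ptr_addr(obj);        // one hop to the object's current copy
    }
    // An object that has not been evacuated forwards to itself, so resolve()
    // is safe to apply unconditionally.
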
--- a/make/autoconf/hotspot.m4	Mon Dec 10 17:34:49 2018 +0300
+++ b/make/autoconf/hotspot.m4	Mon Dec 10 15:47:44 2018 +0100
@@ -25,7 +25,7 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-    graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc zgc nmt cds \
+    graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
     static-build link-time-opt aot jfr"
 
 # Deprecated JVM features (these are ignored, but with a warning)
@@ -325,6 +325,15 @@
     fi
   fi
 
+  # Only enable Shenandoah on supported arches
+  AC_MSG_CHECKING([if shenandoah can be built])
+  if test "x$OPENJDK_TARGET_CPU_ARCH" = "xx86" || test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
+    AC_MSG_RESULT([yes])
+  else
+    DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES shenandoahgc"
+    AC_MSG_RESULT([no, platform not supported])
+  fi
+
   # Only enable ZGC on supported platforms
   AC_MSG_CHECKING([if zgc can be built])
   if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
@@ -336,7 +345,7 @@
 
   # Disable unsupported GCs for Zero
   if HOTSPOT_CHECK_JVM_VARIANT(zero); then
-    DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES epsilongc g1gc zgc"
+    DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES epsilongc g1gc zgc shenandoahgc"
   fi
 
   # Turn on additional features based on other parts of configure
@@ -470,7 +479,7 @@
   fi
 
   # All variants but minimal (and custom) get these features
-  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc jni-check jvmti management nmt services vm-structs zgc"
+  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
 
   # Disable CDS on AIX.
   if test "x$OPENJDK_TARGET_OS" = "xaix"; then
--- a/make/hotspot/gensrc/GensrcAdlc.gmk	Mon Dec 10 17:34:49 2018 +0300
+++ b/make/hotspot/gensrc/GensrcAdlc.gmk	Mon Dec 10 15:47:44 2018 +0100
@@ -136,6 +136,12 @@
       $d/os_cpu/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH).ad \
     )))
 
+  ifeq ($(call check-jvm-feature, shenandoahgc), true)
+    AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
+        $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/shenandoah/shenandoah_$(HOTSPOT_TARGET_CPU).ad \
+      )))
+  endif
+
   SINGLE_AD_SRCFILE := $(ADLC_SUPPORT_DIR)/all-ad-src.ad
 
   INSERT_FILENAME_AWK_SCRIPT := \
--- a/make/hotspot/lib/JvmFeatures.gmk	Mon Dec 10 17:34:49 2018 +0300
+++ b/make/hotspot/lib/JvmFeatures.gmk	Mon Dec 10 15:47:44 2018 +0100
@@ -166,6 +166,13 @@
   JVM_EXCLUDE_PATTERNS += gc/z
 endif
 
+ifneq ($(call check-jvm-feature, shenandoahgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_SHENANDOAHGC=0
+  JVM_EXCLUDE_PATTERNS += gc/shenandoah
+else
+  JVM_CFLAGS_FEATURES += -DSUPPORT_BARRIER_ON_PRIMITIVES -DSUPPORT_NOT_TO_SPACE_INVARIANT
+endif
+
 ifneq ($(call check-jvm-feature, jfr), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_JFR=0
   JVM_EXCLUDE_PATTERNS += jfr
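
The -DINCLUDE_SHENANDOAHGC=0 define above plugs into the usual INCLUDE_* switch in utilities/macros.hpp (also touched by this changeset), letting shared code compile the collector out entirely. A sketch of that pattern, written out here rather than copied from the real header:

    // Mirrors the INCLUDE_* / *_ONLY convention in utilities/macros.hpp (sketch):
    #ifndef INCLUDE_SHENANDOAHGC
    #define INCLUDE_SHENANDOAHGC 1             // present unless the feature is off
    #endif
    #if INCLUDE_SHENANDOAHGC
    #define SHENANDOAHGC_ONLY(code) code       // keeps Shenandoah-only code
    #else
    #define SHENANDOAHGC_ONLY(code)            // compiles it away
    #endif
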
--- a/make/hotspot/lib/JvmOverrideFiles.gmk	Mon Dec 10 17:34:49 2018 +0300
+++ b/make/hotspot/lib/JvmOverrideFiles.gmk	Mon Dec 10 15:47:44 2018 +0100
@@ -36,6 +36,11 @@
   BUILD_LIBJVM_assembler_x86.cpp_CXXFLAGS := -Wno-maybe-uninitialized
   BUILD_LIBJVM_cardTableBarrierSetAssembler_x86.cpp_CXXFLAGS := -Wno-maybe-uninitialized
   BUILD_LIBJVM_interp_masm_x86.cpp_CXXFLAGS := -Wno-uninitialized
+  ifeq ($(DEBUG_LEVEL), release)
+    # Need extra inlining to collapse all marking code into the hot marking loop
+    BUILD_LIBJVM_shenandoahConcurrentMark.cpp_CXXFLAGS := --param inline-unit-growth=1000
+    BUILD_LIBJVM_shenandoahTraversalGC.cpp_CXXFLAGS := --param inline-unit-growth=1000
+  endif
 endif
 
 LIBJVM_FDLIBM_COPY_OPT_FLAG := $(CXX_O_FLAG_NONE)
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Mon Dec 10 15:47:44 2018 +0100
@@ -1,6 +1,6 @@
 //
 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2014, Red Hat Inc. All rights reserved.
+// Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -1272,6 +1272,8 @@
     case Op_CompareAndSwapL:
     case Op_CompareAndSwapP:
     case Op_CompareAndSwapN:
+    case Op_ShenandoahCompareAndSwapP:
+    case Op_ShenandoahCompareAndSwapN:
     case Op_CompareAndSwapB:
     case Op_CompareAndSwapS:
     case Op_GetAndSetI:
@@ -1293,6 +1295,10 @@
     case Op_WeakCompareAndSwapL:
     case Op_WeakCompareAndSwapP:
     case Op_WeakCompareAndSwapN:
+    case Op_ShenandoahWeakCompareAndSwapP:
+    case Op_ShenandoahWeakCompareAndSwapN:
+    case Op_ShenandoahCompareAndExchangeP:
+    case Op_ShenandoahCompareAndExchangeN:
       return maybe_volatile;
     default:
       return false;
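
The new Op_Shenandoah* opcodes exist because an oop CAS can fail spuriously under Shenandoah: memory may hold a from-space copy while the compiler supplied a to-space expected value. The retry logic that cmpxchg_oop implements further down amounts to the following C++ sketch (atomic_cas and resolve are stand-ins, not HotSpot API):

    // Semantics only, not HotSpot code.
    oop shenandoah_cas(volatile oop* addr, oop expected, oop new_val) {
      for (;;) {
        oop witness = atomic_cas(addr, expected, new_val);  // raw hardware CAS
        if (witness == expected) return witness;            // plain success
        if (resolve(witness) != resolve(expected))
          return witness;                                   // genuinely different object
        expected = witness;   // same object, other copy: retry against memory value
      }
    }
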
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,671 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahRuntime.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
+#endif
+
+#define __ masm->
+
+address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
+
+void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+                                                       Register addr, Register count, RegSet saved_regs) {
+  if (is_oop) {
+    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
+    if (!dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
+      __ push(saved_regs, sp);
+      if (count == c_rarg0) {
+        if (addr == c_rarg1) {
+          // exactly backwards!!
+          __ mov(rscratch1, c_rarg0);
+          __ mov(c_rarg0, c_rarg1);
+          __ mov(c_rarg1, rscratch1);
+        } else {
+          __ mov(c_rarg1, count);
+          __ mov(c_rarg0, addr);
+        }
+      } else {
+        __ mov(c_rarg0, addr);
+        __ mov(c_rarg1, count);
+      }
+      if (UseCompressedOops) {
+        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
+      } else {
+        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
+      }
+      __ pop(saved_regs, sp);
+    }
+  }
+}
+
+void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+                                                       Register start, Register end, Register scratch, RegSet saved_regs) {
+  if (is_oop) {
+    __ push(saved_regs, sp);
+    // must compute element count unless barrier set interface is changed (other platforms supply count)
+    assert_different_registers(start, end, scratch);
+    __ lea(scratch, Address(end, BytesPerHeapOop));
+    __ sub(scratch, scratch, start);               // subtract start to get #bytes
+    __ lsr(scratch, scratch, LogBytesPerHeapOop);  // convert to element count
+    __ mov(c_rarg0, start);
+    __ mov(c_rarg1, scratch);
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
+    __ pop(saved_regs, sp);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
+                                                                 Register obj,
+                                                                 Register pre_val,
+                                                                 Register thread,
+                                                                 Register tmp,
+                                                                 bool tosca_live,
+                                                                 bool expand_call) {
+  if (ShenandoahSATBBarrier) {
+    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
+                                                           Register obj,
+                                                           Register pre_val,
+                                                           Register thread,
+                                                           Register tmp,
+                                                           bool tosca_live,
+                                                           bool expand_call) {
+  // If expand_call is true then we expand the call_VM_leaf macro
+  // directly to skip generating the check by
+  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
+
+  assert(thread == rthread, "must be");
+
+  Label done;
+  Label runtime;
+
+  assert_different_registers(obj, pre_val, tmp, rscratch1);
+  assert(pre_val != noreg && tmp != noreg, "expecting a register");
+
+  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
+  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+
+  // Is marking active?
+  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+    __ ldrw(tmp, in_progress);
+  } else {
+    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+    __ ldrb(tmp, in_progress);
+  }
+  __ cbzw(tmp, done);
+
+  // Do we need to load the previous value?
+  if (obj != noreg) {
+    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
+  }
+
+  // Is the previous value null?
+  __ cbz(pre_val, done);
+
+  // Can we store original value in the thread's buffer?
+  // Is index == 0?
+  // (The index field is typed as size_t.)
+
+  __ ldr(tmp, index);                      // tmp := *index_adr
+  __ cbz(tmp, runtime);                    // tmp == 0?
+                                           // If yes, goto runtime
+
+  __ sub(tmp, tmp, wordSize);              // tmp := tmp - wordSize
+  __ str(tmp, index);                      // *index_adr := tmp
+  __ ldr(rscratch1, buffer);
+  __ add(tmp, tmp, rscratch1);             // tmp := tmp + *buffer_adr
+
+  // Record the previous value
+  __ str(pre_val, Address(tmp, 0));
+  __ b(done);
+
+  __ bind(runtime);
+  // save the live input values
+  RegSet saved = RegSet::of(pre_val);
+  if (tosca_live) saved += RegSet::of(r0);
+  if (obj != noreg) saved += RegSet::of(obj);
+
+  __ push(saved, sp);
+
+  // Calling the runtime using the regular call_VM_leaf mechanism generates
+  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
+  // that checks that *(rfp+frame::interpreter_frame_last_sp) == NULL.
+  //
+  // If we are generating the pre-barrier without a frame (e.g. in the
+  // intrinsified Reference.get() routine) then rfp might be pointing to
+  // the caller frame and so this check will most likely fail at runtime.
+  //
+  // Expanding the call directly bypasses the generation of the check.
+  // So when we do not have a full interpreter frame on the stack,
+  // expand_call should be passed true.
+
+  if (expand_call) {
+    assert(pre_val != c_rarg1, "smashed arg");
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
+  } else {
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
+  }
+
+  __ pop(saved, sp);
+
+  __ bind(done);
+}
+
+void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
+  if (ShenandoahReadBarrier) {
+    read_barrier_impl(masm, dst);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
+  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
+  Label is_null;
+  __ cbz(dst, is_null);
+  read_barrier_not_null_impl(masm, dst);
+  __ bind(is_null);
+}
+
+void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
+  if (ShenandoahReadBarrier) {
+    read_barrier_not_null_impl(masm, dst);
+  }
+}
+
+
+void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
+  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
+  __ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
+}
+
+void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
+  if (ShenandoahWriteBarrier) {
+    write_barrier_impl(masm, dst);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
+  assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
+  assert(dst != rscratch1, "need rscratch1");
+  assert(dst != rscratch2, "need rscratch2");
+
+  Label done;
+
+  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+  __ ldrb(rscratch1, gc_state);
+
+  // Check for heap stability
+  __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
+  __ tst(rscratch1, rscratch2);
+  __ br(Assembler::EQ, done);
+
+  // Heap is unstable, need to perform the read-barrier even if WB is inactive
+  __ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
+
+  // Check for evacuation-in-progress and jump to WB slow-path if needed
+  __ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
+  __ tst(rscratch1, rscratch2);
+  __ br(Assembler::EQ, done);
+
+  RegSet to_save = RegSet::of(r0);
+  if (dst != r0) {
+    __ push(to_save, sp);
+    __ mov(r0, dst);
+  }
+
+  __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));
+
+  if (dst != r0) {
+    __ mov(dst, r0);
+    __ pop(to_save, sp);
+  }
+
+  __ bind(done);
+}
+
+void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
+  if (ShenandoahStoreValEnqueueBarrier) {
+    Label is_null;
+    __ cbz(dst, is_null);
+    write_barrier_impl(masm, dst);
+    __ bind(is_null);
+    // Save possibly live regs.
+    RegSet live_regs = RegSet::range(r0, r4) - dst;
+    __ push(live_regs, sp);
+    __ strd(v0, __ pre(sp, 2 * -wordSize));
+
+    satb_write_barrier_pre(masm, noreg, dst, rthread, tmp, true, false);
+
+    // Restore possibly live regs.
+    __ ldrd(v0, __ post(sp, 2 * wordSize));
+    __ pop(live_regs, sp);
+  }
+  if (ShenandoahStoreValReadBarrier) {
+    read_barrier_impl(masm, dst);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
+  bool on_oop = type == T_OBJECT || type == T_ARRAY;
+  bool in_heap = (decorators & IN_HEAP) != 0;
+  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
+  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
+  bool on_reference = on_weak || on_phantom;
+
+  if (in_heap) {
+    read_barrier_not_null(masm, src.base());
+  }
+
+  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+  if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
+    __ enter();
+    satb_write_barrier_pre(masm /* masm */,
+                           noreg /* obj */,
+                           dst /* pre_val */,
+                           rthread /* thread */,
+                           tmp1 /* tmp */,
+                           true /* tosca_live */,
+                           true /* expand_call */);
+    __ leave();
+  }
+}
+
+void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                             Address dst, Register val, Register tmp1, Register tmp2) {
+  bool on_oop = type == T_OBJECT || type == T_ARRAY;
+  bool in_heap = (decorators & IN_HEAP) != 0;
+  if (in_heap) {
+    write_barrier(masm, dst.base());
+  }
+  if (!on_oop) {
+    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
+    return;
+  }
+
+  // flatten object address if needed
+  if (dst.index() == noreg && dst.offset() == 0) {
+    if (dst.base() != r3) {
+      __ mov(r3, dst.base());
+    }
+  } else {
+    __ lea(r3, dst);
+  }
+
+  shenandoah_write_barrier_pre(masm,
+                               r3 /* obj */,
+                               tmp2 /* pre_val */,
+                               rthread /* thread */,
+                               tmp1  /* tmp */,
+                               val != noreg /* tosca_live */,
+                               false /* expand_call */);
+
+  if (val == noreg) {
+    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg);
+  } else {
+    storeval_barrier(masm, val, tmp1);
+    // G1 barrier needs uncompressed oop for region cross check.
+    Register new_val = val;
+    if (UseCompressedOops) {
+      new_val = rscratch2;
+      __ mov(new_val, val);
+    }
+    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg);
+  }
+
+}
+
+void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
+  __ cmp(op1, op2);
+  if (ShenandoahAcmpBarrier) {
+    Label done;
+    __ br(Assembler::EQ, done);
+    // The object may have been evacuated, but we won't see it without a
+    // membar here.
+    __ membar(Assembler::LoadStore | Assembler::LoadLoad);
+    read_barrier(masm, op1);
+    read_barrier(masm, op2);
+    __ cmp(op1, op2);
+    __ bind(done);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
+                                                  Register var_size_in_bytes,
+                                                  int con_size_in_bytes,
+                                                  Register t1,
+                                                  Register t2,
+                                                  Label& slow_case) {
+
+  assert_different_registers(obj, t2);
+  assert_different_registers(obj, var_size_in_bytes);
+  Register end = t2;
+
+  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
+  if (var_size_in_bytes == noreg) {
+    __ lea(end, Address(obj, (int) (con_size_in_bytes + ShenandoahBrooksPointer::byte_size())));
+  } else {
+    __ add(var_size_in_bytes, var_size_in_bytes, ShenandoahBrooksPointer::byte_size());
+    __ lea(end, Address(obj, var_size_in_bytes));
+  }
+  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
+  __ cmp(end, rscratch1);
+  __ br(Assembler::HI, slow_case);
+
+  // update the tlab top pointer
+  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));
+
+  __ add(obj, obj, ShenandoahBrooksPointer::byte_size());
+  __ str(obj, Address(obj, ShenandoahBrooksPointer::byte_offset()));
+
+  // recover var_size_in_bytes if necessary
+  if (var_size_in_bytes == end) {
+    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
+  bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
+  bool is_write = (decorators & ACCESS_WRITE) != 0;
+  if (is_write) {
+    if (oop_not_null) {
+      write_barrier(masm, obj);
+    } else {
+      Label done;
+      __ cbz(obj, done);
+      write_barrier(masm, obj);
+      __ bind(done);
+    }
+  } else {
+    if (oop_not_null) {
+      read_barrier_not_null(masm, obj);
+    } else {
+      read_barrier(masm, obj);
+    }
+  }
+}
+
+void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
+                                                bool acquire, bool release, bool weak, bool encode,
+                                                Register tmp1, Register tmp2, Register tmp3,
+                                                Register result) {
+
+  if (!ShenandoahCASBarrier) {
+    if (UseCompressedOops) {
+      if (encode) {
+        __ encode_heap_oop(tmp1, expected);
+        expected = tmp1;
+        __ encode_heap_oop(tmp3, new_val);
+        new_val = tmp3;
+      }
+      __ cmpxchg(addr, expected, new_val, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
+      __ membar(__ AnyAny);
+    } else {
+      __ cmpxchg(addr, expected, new_val, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
+      __ membar(__ AnyAny);
+    }
+    return;
+  }
+
+  if (encode) {
+    storeval_barrier(masm, new_val, tmp3);
+  }
+
+  if (UseCompressedOops) {
+    if (encode) {
+      __ encode_heap_oop(tmp1, expected);
+      expected = tmp1;
+      __ encode_heap_oop(tmp2, new_val);
+      new_val = tmp2;
+    }
+  }
+  bool is_cae = (result != noreg);
+  bool is_narrow = UseCompressedOops;
+  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
+  if (!is_cae) result = rscratch1;
+
+  assert_different_registers(addr, expected, new_val, result, tmp3);
+
+  Label retry, done, fail;
+
+  // CAS, using LL/SC pair.
+  __ bind(retry);
+  __ load_exclusive(result, addr, size, acquire);
+  if (is_narrow) {
+    __ cmpw(result, expected);
+  } else {
+    __ cmp(result, expected);
+  }
+  __ br(Assembler::NE, fail);
+  __ store_exclusive(tmp3, new_val, addr, size, release);
+  if (weak) {
+    __ cmpw(tmp3, 0u); // If the store fails, return NE to our caller
+  } else {
+    __ cbnzw(tmp3, retry);
+  }
+  __ b(done);
+
+  __ bind(fail);
+  // Check if rb(expected)==rb(result)
+  // Shuffle registers so that we have memory value ready for next expected.
+  __ mov(tmp3, expected);
+  __ mov(expected, result);
+  if (is_narrow) {
+    __ decode_heap_oop(result, result);
+    __ decode_heap_oop(tmp3, tmp3);
+  }
+  read_barrier_impl(masm, result);
+  read_barrier_impl(masm, tmp3);
+  __ cmp(result, tmp3);
+  // Retry with expected now being the value we just loaded from addr.
+  __ br(Assembler::EQ, retry);
+  if (is_narrow && is_cae) {
+    // For cmp-and-exchange and narrow oops, we need to restore
+    // the compressed old-value. We moved it to 'expected' a few lines up.
+    __ mov(result, expected);
+  }
+  __ bind(done);
+
+}
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
+  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+  // At this point we know that marking is in progress.
+  // If do_load() is true then we have to emit the
+  // load of the previous value; otherwise it has already
+  // been loaded into _pre_val.
+
+  __ bind(*stub->entry());
+
+  assert(stub->pre_val()->is_register(), "Precondition.");
+
+  Register pre_val_reg = stub->pre_val()->as_register();
+
+  if (stub->do_load()) {
+    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+  }
+  __ cbz(pre_val_reg, *stub->continuation());
+  ce->store_parameter(stub->pre_val()->as_register(), 0);
+  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
+  __ b(*stub->continuation());
+}
+
+void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {
+
+  Register obj = stub->obj()->as_register();
+  Register res = stub->result()->as_register();
+
+  Label done;
+
+  __ bind(*stub->entry());
+
+  if (res != obj) {
+    __ mov(res, obj);
+  }
+  // Check for null.
+  if (stub->needs_null_check()) {
+    __ cbz(res, done);
+  }
+
+  write_barrier(ce->masm(), res);
+
+  __ bind(done);
+  __ b(*stub->continuation());
+}
+
+#undef __
+
+#define __ sasm->
+
+void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+  __ prologue("shenandoah_pre_barrier", false);
+
+  // arg0 : previous value of memory
+
+  BarrierSet* bs = BarrierSet::barrier_set();
+
+  const Register pre_val = r0;
+  const Register thread = rthread;
+  const Register tmp = rscratch1;
+
+  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+
+  Label done;
+  Label runtime;
+
+  // Is marking still active?
+  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+  __ ldrb(tmp, gc_state);
+  __ mov(rscratch2, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
+  __ tst(tmp, rscratch2);
+  __ br(Assembler::EQ, done);
+
+  // Can we store original value in the thread's buffer?
+  __ ldr(tmp, queue_index);
+  __ cbz(tmp, runtime);
+
+  __ sub(tmp, tmp, wordSize);
+  __ str(tmp, queue_index);
+  __ ldr(rscratch2, buffer);
+  __ add(tmp, tmp, rscratch2);
+  __ load_parameter(0, rscratch2);
+  __ str(rscratch2, Address(tmp, 0));
+  __ b(done);
+
+  __ bind(runtime);
+  __ push_call_clobbered_registers();
+  __ load_parameter(0, pre_val);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
+  __ pop_call_clobbered_registers();
+  __ bind(done);
+
+  __ epilogue();
+}
+
+#undef __
+
+#endif // COMPILER1
+
+address ShenandoahBarrierSetAssembler::shenandoah_wb() {
+  assert(_shenandoah_wb != NULL, "need write barrier stub");
+  return _shenandoah_wb;
+}
+
+#define __ cgen->assembler()->
+
+// Shenandoah write barrier.
+//
+// Input:
+//   r0: OOP to evacuate.  Not null.
+//
+// Output:
+//   r0: Pointer to evacuated OOP.
+//
+// Trash rscratch1, rscratch2.  Preserve everything else.
+address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) {
+
+  __ align(6);
+  StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
+  address start = __ pc();
+
+  Label work;
+  __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
+  __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
+  __ ldrb(rscratch2, Address(rscratch2, rscratch1));
+  __ tbnz(rscratch2, 0, work);
+  __ ret(lr);
+  __ bind(work);
+
+  Register obj = r0;
+
+  __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+  __ push_call_clobbered_registers();
+
+  __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT));
+  __ blrt(lr, 1, 0, MacroAssembler::ret_type_integral);
+  __ mov(rscratch1, obj);
+  __ pop_call_clobbered_registers();
+  __ mov(obj, rscratch1);
+
+  __ leave(); // required for proper stackwalking of RuntimeStub frame
+  __ ret(lr);
+
+  return start;
+}
+
+#undef __
+
+void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
+  if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
+    int stub_code_size = 2048;
+    ResourceMark rm;
+    BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
+    CodeBuffer buf(bb);
+    StubCodeGenerator cgen(&buf);
+    _shenandoah_wb = generate_shenandoah_wb(&cgen);
+  }
+}
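
Taken together, the write barrier above boils down to a short per-oop fast path; in C++ sketch form (simplified, names illustrative):

    // What write_barrier_impl's generated code checks, per oop:
    oop write_barrier(oop obj, Thread* thread) {
      uint8_t state = gc_state_byte(thread);        // ShenandoahThreadLocalData::gc_state
      if ((state & (HAS_FORWARDED | EVACUATION | TRAVERSAL)) == 0)
        return obj;                                 // heap stable: barrier is a no-op
      obj = resolve(obj);                           // read barrier via Brooks pointer
      if ((state & (EVACUATION | TRAVERSAL)) == 0)
        return obj;                                 // forwarded, but no evacuation running
      return shenandoah_wb_slow(obj);               // shenandoah_wb stub: evacuate to to-space
    }
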
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP
+#define CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+#ifdef COMPILER1
+class LIR_Assembler;
+class ShenandoahPreBarrierStub;
+class ShenandoahWriteBarrierStub;
+class StubAssembler;
+class StubCodeGenerator;
+#endif
+
+class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
+private:
+
+  static address _shenandoah_wb;
+
+  void satb_write_barrier_pre(MacroAssembler* masm,
+                              Register obj,
+                              Register pre_val,
+                              Register thread,
+                              Register tmp,
+                              bool tosca_live,
+                              bool expand_call);
+  void shenandoah_write_barrier_pre(MacroAssembler* masm,
+                                    Register obj,
+                                    Register pre_val,
+                                    Register thread,
+                                    Register tmp,
+                                    bool tosca_live,
+                                    bool expand_call);
+
+  void read_barrier(MacroAssembler* masm, Register dst);
+  void read_barrier_impl(MacroAssembler* masm, Register dst);
+  void read_barrier_not_null(MacroAssembler* masm, Register dst);
+  void read_barrier_not_null_impl(MacroAssembler* masm, Register dst);
+  void write_barrier(MacroAssembler* masm, Register dst);
+  void write_barrier_impl(MacroAssembler* masm, Register dst);
+  void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp);
+  void asm_acmp_barrier(MacroAssembler* masm, Register op1, Register op2);
+
+  address generate_shenandoah_wb(StubCodeGenerator* cgen);
+
+public:
+  static address shenandoah_wb();
+
+#ifdef COMPILER1
+  void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
+  void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub);
+  void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+#endif
+
+  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+                                  Register addr, Register count, RegSet saved_regs);
+  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+                                  Register start, Register end, Register tmp, RegSet saved_regs);
+  virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                       Register dst, Address src, Register tmp1, Register tmp_thread);
+  virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                        Address dst, Register val, Register tmp1, Register tmp2);
+  virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2);
+  virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj);
+  virtual void tlab_allocate(MacroAssembler* masm, Register obj,
+                             Register var_size_in_bytes,
+                             int con_size_in_bytes,
+                             Register t1,
+                             Register t2,
+                             Label& slow_case);
+
+  void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
+                   bool acquire, bool release, bool weak, bool encode,
+                   Register tmp1, Register tmp2, Register tmp3 = rscratch2,
+                   Register result = noreg);
+
+  virtual void barrier_stubs_init();
+};
+
+#endif // CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP
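
One convention in cmpxchg_oop's signature deserves a callout: leaving result as the default noreg requests plain compare-and-swap with success reported through the flags (EQ), while passing a real register requests compare-and-exchange, with the witnessed old value delivered in that register. Hypothetical call sites matching the declaration above:

    // Illustrative; bsasm stands in for a ShenandoahBarrierSetAssembler*.
    bsasm->cmpxchg_oop(masm, addr, expected, new_val,
                       /*acquire*/ false, /*release*/ true, /*weak*/ false,
                       /*encode*/ true, tmp1, tmp2);              // CAS: branch on EQ
    bsasm->cmpxchg_oop(masm, addr, expected, new_val,
                       /*acquire*/ true, /*release*/ true, /*weak*/ false,
                       /*encode*/ false, tmp1, tmp2, tmp3, r0);   // CAE: old value in r0
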
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetC1_aarch64.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
+
+void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
+  Register addr = _addr->as_register_lo();
+  Register newval = _new_value->as_register();
+  Register cmpval = _cmp_value->as_register();
+  Register tmp1 = _tmp1->as_register();
+  Register tmp2 = _tmp2->as_register();
+  ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ true, tmp1, tmp2);
+}
+
+#ifdef ASSERT
+#define __ gen->lir(__FILE__, __LINE__)->
+#else
+#define __ gen->lir()->
+#endif
+
+LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+  BasicType bt = access.type();
+  if (access.is_oop()) {
+    LIRGenerator *gen = access.gen();
+    if (ShenandoahSATBBarrier) {
+      pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(),
+                  LIR_OprFact::illegalOpr /* pre_val */);
+    }
+    if (ShenandoahCASBarrier) {
+      cmp_value.load_item();
+      new_value.load_item();
+
+      LIR_Opr t1 = gen->new_register(T_OBJECT);
+      LIR_Opr t2 = gen->new_register(T_OBJECT);
+      LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base();
+
+      __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2,
+                                                   LIR_OprFact::illegalOpr));
+
+      LIR_Opr result = gen->new_register(T_INT);
+      __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
+               result, T_INT);
+      return result;
+    }
+  }
+  return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
+}
+
+LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
+  LIRGenerator* gen = access.gen();
+  BasicType type = access.type();
+
+  LIR_Opr result = gen->new_register(type);
+  value.load_item();
+  LIR_Opr value_opr = value.result();
+
+  if (access.is_oop()) {
+    value_opr = storeval_barrier(access.gen(), value_opr, access.access_emit_info(), access.decorators());
+  }
+
+  assert(type == T_INT || type == T_OBJECT || type == T_ARRAY LP64_ONLY( || type == T_LONG ), "unexpected type");
+  LIR_Opr tmp = gen->new_register(T_INT);
+  __ xchg(access.resolved_addr(), value_opr, result, tmp);
+
+  if (access.is_oop()) {
+    if (ShenandoahSATBBarrier) {
+      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
+                  result /* pre_val */);
+    }
+  }
+
+  return result;
+}
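
Both C1 paths above feed the overwritten reference into the SATB pre-barrier (for atomic_xchg_at_resolved, the xchg result itself is the previous value). The invariant being preserved, sketched in C++ with illustrative helpers:

    // SATB (snapshot-at-the-beginning), simplified: while marking is active,
    // record any reference about to be overwritten so the marker still
    // traverses the object graph as it stood when marking began.
    void satb_pre_barrier(oop old_val) {
      if (marking_is_active() && old_val != NULL)   // names illustrative
        satb_enqueue(old_val);                      // thread-local SATB queue
    }
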
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,281 @@
+//
+// Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+//
+
+source_hpp %{
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+%}
+
+encode %{
+  enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
+    MacroAssembler _masm(&cbuf);
+    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
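+    // 'mem' is matched by 'indirect': a plain base register with no index or
+    // displacement, which is the only address shape cmpxchg_oop accepts here.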
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+                                                   /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
+  %}
+
+  enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
+    MacroAssembler _masm(&cbuf);
+    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+                                                   /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
+  %}
+%}
+
+instruct shenandoahRB(iRegPNoSp dst, iRegP src, rFlagsReg cr) %{
+  match(Set dst (ShenandoahReadBarrier src));
+  format %{ "shenandoah_rb $dst,$src" %}
+  ins_encode %{
+    Register s = $src$$Register;
+    Register d = $dst$$Register;
+    __ ldr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+
+  match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+
+  effect(TEMP tmp, KILL cr);
+
+  format %{
+    "cmpxchg_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
+    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+
+  ins_encode(aarch64_enc_cmpxchg_oop_shenandoah(mem, oldval, newval, tmp),
+             aarch64_enc_cset_eq(res));
+
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
+
+  match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+
+  effect(TEMP tmp, KILL cr);
+
+  format %{
+    "cmpxchgw_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
+    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+
+  ins_encode %{
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
+    __ cset($res$$Register, Assembler::EQ);
+  %}
+
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+
+  predicate(needs_acquiring_load_exclusive(n));
+  match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
+  ins_cost(VOLATILE_REF_COST);
+
+  effect(TEMP tmp, KILL cr);
+
+  format %{
+    "cmpxchg_acq_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
+    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+
+  ins_encode(aarch64_enc_cmpxchg_acq_oop_shenandoah(mem, oldval, newval, tmp),
+             aarch64_enc_cset_eq(res));
+
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
+
+  predicate(needs_acquiring_load_exclusive(n));
+  match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval)));
+  ins_cost(VOLATILE_REF_COST);
+
+  effect(TEMP tmp, KILL cr);
+
+  format %{
+    "cmpxchgw_acq_shenandoah_narrow_oop $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval with temp $tmp"
+    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+
+  ins_encode %{
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
+    __ cset($res$$Register, Assembler::EQ);
+  %}
+
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
+  match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(TEMP_DEF res, TEMP tmp, KILL cr);
+  format %{
+    "cmpxchgw_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
+  %}
+  ins_encode %{
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+                                                   /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+  match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(TEMP_DEF res, TEMP tmp, KILL cr);
+  format %{
+    "cmpxchg_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
+  %}
+  ins_encode %{
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+                                                   /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangeNAcq_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
+  predicate(needs_acquiring_load_exclusive(n));
+  match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval)));
+  ins_cost(VOLATILE_REF_COST);
+  effect(TEMP_DEF res, TEMP tmp, KILL cr);
+  format %{
+    "cmpxchgw_acq_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
+  %}
+  ins_encode %{
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+                                                   /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangePAcq_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+  predicate(needs_acquiring_load_exclusive(n));
+  match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval)));
+  ins_cost(VOLATILE_REF_COST);
+  effect(TEMP_DEF res, TEMP tmp, KILL cr);
+  format %{
+    "cmpxchg_acq_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
+  %}
+  ins_encode %{
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+                                                   /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
+  match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(TEMP tmp, KILL cr);
+  format %{
+    "cmpxchgw_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+                                                   /*acquire*/ false, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+  match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(TEMP tmp, KILL cr);
+  format %{
+    "cmpxchg_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+                                                   /*acquire*/ false, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
+  predicate(needs_acquiring_load_exclusive(n));
+  match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval)));
+  ins_cost(VOLATILE_REF_COST);
+  effect(TEMP tmp, KILL cr);
+  format %{
+    "cmpxchgw_acq_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+                                                   /*acquire*/ true, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+  predicate(needs_acquiring_load_exclusive(n));
+  match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval)));
+  ins_cost(VOLATILE_REF_COST);
+  effect(TEMP tmp, KILL cr);
+  format %{
+    "cmpxchg_acq_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    Register tmp = $tmp$$Register;
+    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+                                                   /*acquire*/ true, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,1041 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahRuntime.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
+#endif
+
+#define __ masm->
+
+address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
+
+void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                                       Register src, Register dst, Register count) {
+
+  bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
+  bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
+  bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
+  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
+
+  if (type == T_OBJECT || type == T_ARRAY) {
+#ifdef _LP64
+    if (!checkcast && !obj_int) {
+      // Save count for barrier
+      __ movptr(r11, count);
+    } else if (disjoint && obj_int) {
+      // Save dst in r11 in the disjoint case
+      __ movq(r11, dst);
+    }
+#else
+    if (disjoint) {
+      __ mov(rdx, dst);          // save 'to'
+    }
+#endif
+
+    if (!dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
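+      // SATB pre-barrier over the destination range: log the oops that are
+      // about to be overwritten so concurrent marking does not lose them.
+      // An uninitialized destination holds no prior oops and is skipped.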
+      Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+#ifndef _LP64
+      __ push(thread);
+      __ get_thread(thread);
+#endif
+
+      Label filtered;
+      Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
+      // Is marking active?
+      if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+        __ cmpl(in_progress, 0);
+      } else {
+        assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+        __ cmpb(in_progress, 0);
+      }
+
+      NOT_LP64(__ pop(thread);)
+
+      __ jcc(Assembler::equal, filtered);
+
+      __ pusha();                      // push registers
+#ifdef _LP64
+      if (count == c_rarg0) {
+        if (dst == c_rarg1) {
+          // exactly backwards!!
+          __ xchgptr(c_rarg1, c_rarg0);
+        } else {
+          __ movptr(c_rarg1, count);
+          __ movptr(c_rarg0, dst);
+        }
+      } else {
+        __ movptr(c_rarg0, dst);
+        __ movptr(c_rarg1, count);
+      }
+      if (UseCompressedOops) {
+        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
+      } else {
+        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
+      }
+#else
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry),
+                      dst, count);
+#endif
+      __ popa();
+      __ bind(filtered);
+    }
+  }
+}
+
+void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                                       Register src, Register dst, Register count) {
+  bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
+  bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
+  bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
+  Register tmp = rax;
+
+  if (type == T_OBJECT || type == T_ARRAY) {
+#ifdef _LP64
+    if (!checkcast && !obj_int) {
+      // Save count for barrier
+      count = r11;
+    } else if (disjoint && obj_int) {
+      // Use the saved dst in the disjoint case
+      dst = r11;
+    } else if (checkcast) {
+      tmp = rscratch1;
+    }
+#else
+    if (disjoint) {
+      __ mov(dst, rdx); // restore 'to'
+    }
+#endif
+
+    __ pusha();             // push registers (overkill)
+#ifdef _LP64
+    if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
+      assert_different_registers(c_rarg1, dst);
+      __ mov(c_rarg1, count);
+      __ mov(c_rarg0, dst);
+    } else {
+      assert_different_registers(c_rarg0, count);
+      __ mov(c_rarg0, dst);
+      __ mov(c_rarg1, count);
+    }
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
+#else
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry),
+                    dst, count);
+#endif
+    __ popa();
+  }
+}
+
+void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
+                                                                 Register obj,
+                                                                 Register pre_val,
+                                                                 Register thread,
+                                                                 Register tmp,
+                                                                 bool tosca_live,
+                                                                 bool expand_call) {
+
+  if (ShenandoahSATBBarrier) {
+    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
+                                                           Register obj,
+                                                           Register pre_val,
+                                                           Register thread,
+                                                           Register tmp,
+                                                           bool tosca_live,
+                                                           bool expand_call) {
+  // If expand_call is true then we expand the call_VM_leaf macro
+  // directly to skip generating the check by
+  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
+
+#ifdef _LP64
+  assert(thread == r15_thread, "must be");
+#endif // _LP64
+
+  Label done;
+  Label runtime;
+
+  assert(pre_val != noreg, "check this code");
+
+  if (obj != noreg) {
+    assert_different_registers(obj, pre_val, tmp);
+    assert(pre_val != rax, "check this code");
+  }
+
+  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
+  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
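+  // SATB queue sketch: 'index' holds the number of free bytes left in the
+  // buffer; a slot is claimed by decrementing it, and index == 0 means the
+  // buffer is full and must be flushed via the runtime slow path below.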
+
+  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+  __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
+  __ jcc(Assembler::zero, done);
+
+  // Do we need to load the previous value?
+  if (obj != noreg) {
+    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
+  }
+
+  // Is the previous value null?
+  __ cmpptr(pre_val, (int32_t) NULL_WORD);
+  __ jcc(Assembler::equal, done);
+
+  // Can we store original value in the thread's buffer?
+  // Is index == 0?
+  // (The index field is typed as size_t.)
+
+  __ movptr(tmp, index);                   // tmp := *index_adr
+  __ cmpptr(tmp, 0);                       // tmp == 0?
+  __ jcc(Assembler::equal, runtime);       // If yes, goto runtime
+
+  __ subptr(tmp, wordSize);                // tmp := tmp - wordSize
+  __ movptr(index, tmp);                   // *index_adr := tmp
+  __ addptr(tmp, buffer);                  // tmp := tmp + *buffer_adr
+
+  // Record the previous value
+  __ movptr(Address(tmp, 0), pre_val);
+  __ jmp(done);
+
+  __ bind(runtime);
+  // save the live input values
+  if (tosca_live) __ push(rax);
+
+  if (obj != noreg && obj != rax)
+    __ push(obj);
+
+  if (pre_val != rax)
+    __ push(pre_val);
+
+  // Calling the runtime using the regular call_VM_leaf mechanism generates
+  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
+  // that checks that *(ebp + frame::interpreter_frame_last_sp) == NULL.
+  //
+  // If we are generating the pre-barrier without a frame (e.g. in the
+  // intrinsified Reference.get() routine) then ebp might be pointing to
+  // the caller frame and so this check will most likely fail at runtime.
+  //
+  // Expanding the call directly bypasses the generation of the check.
+  // So when we do not have a full interpreter frame on the stack,
+  // expand_call should be passed true.
+
+  NOT_LP64( __ push(thread); )
+
+#ifdef _LP64
+  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
+  // pre_val be c_rarg1 (where the call prologue would copy thread argument).
+  // Note: this should not accidentally smash thread, because thread is always r15.
+  assert(thread != c_rarg0, "smashed arg");
+  if (c_rarg0 != pre_val) {
+    __ mov(c_rarg0, pre_val);
+  }
+#endif
+
+  if (expand_call) {
+    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
+#ifdef _LP64
+    if (c_rarg1 != thread) {
+      __ mov(c_rarg1, thread);
+    }
+    // Already moved pre_val into c_rarg0 above
+#else
+    __ push(thread);
+    __ push(pre_val);
+#endif
+    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), 2);
+  } else {
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), LP64_ONLY(c_rarg0) NOT_LP64(pre_val), thread);
+  }
+
+  NOT_LP64( __ pop(thread); )
+
+  // save the live input values
+  if (pre_val != rax)
+    __ pop(pre_val);
+
+  if (obj != noreg && obj != rax)
+    __ pop(obj);
+
+  if (tosca_live) __ pop(rax);
+
+  __ bind(done);
+}
+
+void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
+  if (ShenandoahReadBarrier) {
+    read_barrier_impl(masm, dst);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
+  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
+  Label is_null;
+  __ testptr(dst, dst);
+  __ jcc(Assembler::zero, is_null);
+  read_barrier_not_null_impl(masm, dst);
+  __ bind(is_null);
+}
+
+void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
+  if (ShenandoahReadBarrier) {
+    read_barrier_not_null_impl(masm, dst);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
+  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
+  __ movptr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
+}
+
+void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
+  if (ShenandoahWriteBarrier) {
+    write_barrier_impl(masm, dst);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
+  assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
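+  // Fast-path sketch of what is emitted below (LP64 only):
+  //   if (gc_state & (HAS_FORWARDED | EVACUATION | TRAVERSAL)) {
+  //     dst = *(dst + brooks_ptr_offset);       // resolve via read barrier
+  //     if (gc_state & (EVACUATION | TRAVERSAL))
+  //       dst = shenandoah_wb(dst);             // slow path: copy/forward
+  //   }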
+#ifdef _LP64
+  Label done;
+
+  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
+  __ jccb(Assembler::zero, done);
+
+  // Heap is unstable, need to perform the read-barrier even if WB is inactive
+  read_barrier_not_null(masm, dst);
+
+  __ testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
+  __ jccb(Assembler::zero, done);
+
+  if (dst != rax) {
+    __ xchgptr(dst, rax); // Move obj into rax and save rax into obj.
+  }
+
+  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));
+
+  if (dst != rax) {
+    __ xchgptr(rax, dst); // Swap back obj with rax.
+  }
+
+  __ bind(done);
+#else
+  Unimplemented();
+#endif
+}
+
+void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
+  if (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier) {
+    storeval_barrier_impl(masm, dst, tmp);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) {
+  assert(UseShenandoahGC && (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled");
+
+  if (dst == noreg) return;
+
+#ifdef _LP64
+  if (ShenandoahStoreValEnqueueBarrier) {
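+    // Enqueue barrier sketch: resolve a non-null value through the write
+    // barrier, then log it to the SATB queue so concurrent marking keeps
+    // the stored value alive.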
+    Label is_null;
+    __ testptr(dst, dst);
+    __ jcc(Assembler::zero, is_null);
+    write_barrier_impl(masm, dst);
+    __ bind(is_null);
+
+    // The set of registers to be saved+restored is the same as in the write-barrier above.
+    // Those are the commonly used registers in the interpreter.
+    __ pusha();
+    // __ push_callee_saved_registers();
+    __ subptr(rsp, 2 * Interpreter::stackElementSize);
+    __ movdbl(Address(rsp, 0), xmm0);
+
+    satb_write_barrier_pre(masm, noreg, dst, r15_thread, tmp, true, false);
+    __ movdbl(xmm0, Address(rsp, 0));
+    __ addptr(rsp, 2 * Interpreter::stackElementSize);
+    //__ pop_callee_saved_registers();
+    __ popa();
+  }
+  if (ShenandoahStoreValReadBarrier) {
+    read_barrier_impl(masm, dst);
+  }
+#else
+  Unimplemented();
+#endif
+}
+
+void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+             Register dst, Address src, Register tmp1, Register tmp_thread) {
+  bool on_oop = type == T_OBJECT || type == T_ARRAY;
+  bool in_heap = (decorators & IN_HEAP) != 0;
+  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
+  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
+  bool on_reference = on_weak || on_phantom;
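+  // Loads through Reference.get() must keep the loaded referent alive for
+  // the concurrent marker, hence the SATB logging of the result below.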
+  if (in_heap) {
+    read_barrier_not_null(masm, src.base());
+  }
+  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+  if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
+    const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
+    NOT_LP64(__ get_thread(thread));
+
+    // Generate the SATB pre-barrier code to log the value of
+    // the referent field in an SATB buffer.
+    shenandoah_write_barrier_pre(masm /* masm */,
+                                 noreg /* obj */,
+                                 dst /* pre_val */,
+                                 thread /* thread */,
+                                 tmp1 /* tmp */,
+                                 true /* tosca_live */,
+                                 true /* expand_call */);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+              Address dst, Register val, Register tmp1, Register tmp2) {
+
+  bool in_heap = (decorators & IN_HEAP) != 0;
+  bool as_normal = (decorators & AS_NORMAL) != 0;
+  if (in_heap) {
+    write_barrier(masm, dst.base());
+  }
+  if (type == T_OBJECT || type == T_ARRAY) {
+    bool needs_pre_barrier = as_normal;
+
+    Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
+    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
+    // Flatten the object address if needed.
+    // We do this regardless of whether the barrier needs a precise address,
+    // because we need the registers either way.
+    if (dst.index() == noreg && dst.disp() == 0) {
+      if (dst.base() != tmp1) {
+        __ movptr(tmp1, dst.base());
+      }
+    } else {
+      __ lea(tmp1, dst);
+    }
+
+#ifndef _LP64
+    InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
+#endif
+
+    NOT_LP64(__ get_thread(rcx));
+    NOT_LP64(imasm->save_bcp());
+
+    if (needs_pre_barrier) {
+      shenandoah_write_barrier_pre(masm /*masm*/,
+                                   tmp1 /* obj */,
+                                   tmp2 /* pre_val */,
+                                   rthread /* thread */,
+                                   tmp3  /* tmp */,
+                                   val != noreg /* tosca_live */,
+                                   false /* expand_call */);
+    }
+    if (val == noreg) {
+      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
+    } else {
+      storeval_barrier(masm, val, tmp3);
+      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
+    }
+    NOT_LP64(imasm->restore_bcp());
+  } else {
+    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
+  }
+}
+
+#ifndef _LP64
+void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm,
+                                               Address obj1, jobject obj2) {
+  Unimplemented();
+}
+
+void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm,
+                                               Register obj1, jobject obj2) {
+  Unimplemented();
+}
+#endif
+
+void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
+  __ cmpptr(op1, op2);
+  if (ShenandoahAcmpBarrier) {
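+    // A raw pointer mismatch may still be comparing the from-space and
+    // to-space copies of the same object; resolve both through the read
+    // barrier and compare again before deciding.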
+    Label done;
+    __ jccb(Assembler::equal, done);
+    read_barrier(masm, op1);
+    read_barrier(masm, op2);
+    __ cmpptr(op1, op2);
+    __ bind(done);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register src1, Address src2) {
+  __ cmpptr(src1, src2);
+  if (ShenandoahAcmpBarrier) {
+    Label done;
+    __ jccb(Assembler::equal, done);
+    __ movptr(rscratch2, src2);
+    read_barrier(masm, src1);
+    read_barrier(masm, rscratch2);
+    __ cmpptr(src1, rscratch2);
+    __ bind(done);
+  }
+}
+
+void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
+                                                  Register thread, Register obj,
+                                                  Register var_size_in_bytes,
+                                                  int con_size_in_bytes,
+                                                  Register t1, Register t2,
+                                                  Label& slow_case) {
+  assert_different_registers(obj, t1, t2);
+  assert_different_registers(obj, var_size_in_bytes, t1);
+  Register end = t2;
+  if (!thread->is_valid()) {
+#ifdef _LP64
+    thread = r15_thread;
+#else
+    assert(t1->is_valid(), "need temp reg");
+    thread = t1;
+    __ get_thread(thread);
+#endif
+  }
+
+  __ verify_tlab();
+
+  __ movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
+  if (var_size_in_bytes == noreg) {
+    __ lea(end, Address(obj, con_size_in_bytes + ShenandoahBrooksPointer::byte_size()));
+  } else {
+    __ addptr(var_size_in_bytes, ShenandoahBrooksPointer::byte_size());
+    __ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
+  }
+  __ cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
+  __ jcc(Assembler::above, slow_case);
+
+  // update the tlab top pointer
+  __ movptr(Address(thread, JavaThread::tlab_top_offset()), end);
+
+  // Initialize brooks pointer
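+  // Resulting layout sketch: [fwd ptr (1 word)][object ...]; obj is bumped
+  // past the fwd-ptr word, which is then initialized to point back at obj,
+  // i.e. the new object starts out as its own forwardee.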
+#ifdef _LP64
+  __ incrementq(obj, ShenandoahBrooksPointer::byte_size());
+#else
+  __ incrementl(obj, ShenandoahBrooksPointer::byte_size());
+#endif
+  __ movptr(Address(obj, ShenandoahBrooksPointer::byte_offset()), obj);
+
+  // recover var_size_in_bytes if necessary
+  if (var_size_in_bytes == end) {
+    __ subptr(var_size_in_bytes, obj);
+  }
+  __ verify_tlab();
+}
+
+void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
+  bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
+  bool is_write = (decorators & ACCESS_WRITE) != 0;
+  if (is_write) {
+    if (oop_not_null) {
+      write_barrier(masm, obj);
+    } else {
+      Label done;
+      __ testptr(obj, obj);
+      __ jcc(Assembler::zero, done);
+      write_barrier(masm, obj);
+      __ bind(done);
+    }
+  } else {
+    if (oop_not_null) {
+      read_barrier_not_null(masm, obj);
+    } else {
+      read_barrier(masm, obj);
+    }
+  }
+}
+
+// Special Shenandoah CAS implementation that handles false negatives
+// due to concurrent evacuation.
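+//
+// A pseudocode sketch of the retry protocol implemented below (the numbered
+// steps), where resolve() reads through the Brooks forwarding pointer:
+//
+//   if (CAS(addr, oldval, newval)) return success;            // Step 1
+//   witness = *addr;                                          // left in rax
+//   if (resolve(witness) != resolve(oldval)) return failure;  // Step 2
+//   while (resolve(witness) == resolve(oldval)) {             // Step 3
+//     if (CAS(addr, witness, newval)) return success;
+//     witness = *addr;
+//   }
+//   return failure;                                           // Step 4 reads flags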
+#ifndef _LP64
+void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
+                                                Register res, Address addr, Register oldval, Register newval,
+                                                bool exchange, bool encode, Register tmp1, Register tmp2) {
+  // Shenandoah has no 32-bit version for this.
+  Unimplemented();
+}
+#else
+void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
+                                                Register res, Address addr, Register oldval, Register newval,
+                                                bool exchange, bool encode, Register tmp1, Register tmp2) {
+  if (!ShenandoahCASBarrier) {
+#ifdef _LP64
+    if (UseCompressedOops) {
+      if (encode) {
+        __ encode_heap_oop(oldval);
+        __ mov(rscratch1, newval);
+        __ encode_heap_oop(rscratch1);
+        newval = rscratch1;
+      }
+      if (os::is_MP()) {
+        __ lock();
+      }
+      // oldval (rax) is implicitly used by this instruction
+      __ cmpxchgl(newval, addr);
+    } else
+#endif
+      {
+        if (os::is_MP()) {
+          __ lock();
+        }
+        __ cmpxchgptr(newval, addr);
+      }
+
+    if (!exchange) {
+      assert(res != NULL, "need result register");
+      __ setb(Assembler::equal, res);
+      __ movzbl(res, res);
+    }
+    return;
+  }
+
+  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
+  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
+
+  Label retry, done;
+
+  // Apply storeval barrier to newval.
+  if (encode) {
+    storeval_barrier(masm, newval, tmp1);
+  }
+
+  if (UseCompressedOops) {
+    if (encode) {
+      __ encode_heap_oop(oldval);
+      __ mov(rscratch1, newval);
+      __ encode_heap_oop(rscratch1);
+      newval = rscratch1;
+    }
+  }
+
+  // Remember oldval for retry logic below
+  if (UseCompressedOops) {
+    __ movl(tmp1, oldval);
+  } else {
+    __ movptr(tmp1, oldval);
+  }
+
+  // Step 1. Try to CAS with given arguments. If successful, then we are done,
+  // and can safely return.
+  if (os::is_MP()) __ lock();
+  if (UseCompressedOops) {
+    __ cmpxchgl(newval, addr);
+  } else {
+    __ cmpxchgptr(newval, addr);
+  }
+  __ jcc(Assembler::equal, done, true);
+
+  // Step 2. CAS had failed. This may be a false negative.
+  //
+  // The trouble comes when we compare the to-space pointer with the from-space
+  // pointer to the same object. To resolve this, it will suffice to read both
+  // oldval and the value from memory through the read barriers -- this will give
+  // both to-space pointers. If they mismatch, then it was a legitimate failure.
+  //
+  if (UseCompressedOops) {
+    __ decode_heap_oop(tmp1);
+  }
+  read_barrier_impl(masm, tmp1);
+
+  if (UseCompressedOops) {
+    __ movl(tmp2, oldval);
+    __ decode_heap_oop(tmp2);
+  } else {
+    __ movptr(tmp2, oldval);
+  }
+  read_barrier_impl(masm, tmp2);
+
+  __ cmpptr(tmp1, tmp2);
+  __ jcc(Assembler::notEqual, done, true);
+
+  // Step 3. Try to CAS again with resolved to-space pointers.
+  //
+  // Corner case: it may happen that somebody stored the from-space pointer
+  // to memory while we were preparing for retry. Therefore, we can fail again
+  // on retry, and so need to do this in loop, always re-reading the failure
+  // witness through the read barrier.
+  __ bind(retry);
+  if (os::is_MP()) __ lock();
+  if (UseCompressedOops) {
+    __ cmpxchgl(newval, addr);
+  } else {
+    __ cmpxchgptr(newval, addr);
+  }
+  __ jcc(Assembler::equal, done, true);
+
+  if (UseCompressedOops) {
+    __ movl(tmp2, oldval);
+    __ decode_heap_oop(tmp2);
+  } else {
+    __ movptr(tmp2, oldval);
+  }
+  read_barrier_impl(masm, tmp2);
+
+  __ cmpptr(tmp1, tmp2);
+  __ jcc(Assembler::equal, retry, true);
+
+  // Step 4. If we need a boolean result out of CAS, check the flag again,
+  // and promote the result. Note that we handle the flag from both the CAS
+  // itself and from the retry loop.
+  __ bind(done);
+  if (!exchange) {
+    assert(res != NULL, "need result register");
+    __ setb(Assembler::equal, res);
+    __ movzbl(res, res);
+  }
+}
+#endif // _LP64
+
+void ShenandoahBarrierSetAssembler::save_vector_registers(MacroAssembler* masm) {
+  int num_xmm_regs = LP64_ONLY(16) NOT_LP64(8);
+  if (UseAVX > 2) {
+    num_xmm_regs = LP64_ONLY(32) NOT_LP64(8);
+  }
+
+  if (UseSSE == 1)  {
+    __ subptr(rsp, sizeof(jdouble)*8);
+    for (int n = 0; n < 8; n++) {
+      __ movflt(Address(rsp, n*sizeof(jdouble)), as_XMMRegister(n));
+    }
+  } else if (UseSSE >= 2)  {
+    if (UseAVX > 2) {
+      __ push(rbx);
+      __ movl(rbx, 0xffff);
+      __ kmovwl(k1, rbx);
+      __ pop(rbx);
+    }
+#ifdef COMPILER2
+    if (MaxVectorSize > 16) {
+      if (UseAVX > 2) {
+        // Save upper half of ZMM registers
+        __ subptr(rsp, 32*num_xmm_regs);
+        for (int n = 0; n < num_xmm_regs; n++) {
+          __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
+        }
+      }
+      assert(UseAVX > 0, "256 bit vectors are supported only with AVX");
+      // Save upper half of YMM registers
+      __ subptr(rsp, 16*num_xmm_regs);
+      for (int n = 0; n < num_xmm_regs; n++) {
+        __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
+      }
+    }
+#endif
+    // Save whole 128bit (16 bytes) XMM registers
+    __ subptr(rsp, 16*num_xmm_regs);
+#ifdef _LP64
+    if (VM_Version::supports_evex()) {
+      for (int n = 0; n < num_xmm_regs; n++) {
+        __ vextractf32x4(Address(rsp, n*16), as_XMMRegister(n), 0);
+      }
+    } else {
+      for (int n = 0; n < num_xmm_regs; n++) {
+        __ movdqu(Address(rsp, n*16), as_XMMRegister(n));
+      }
+    }
+#else
+    for (int n = 0; n < num_xmm_regs; n++) {
+      __ movdqu(Address(rsp, n*16), as_XMMRegister(n));
+    }
+#endif
+  }
+}
+
+void ShenandoahBarrierSetAssembler::restore_vector_registers(MacroAssembler* masm) {
+  int num_xmm_regs = LP64_ONLY(16) NOT_LP64(8);
+  if (UseAVX > 2) {
+    num_xmm_regs = LP64_ONLY(32) NOT_LP64(8);
+  }
+  if (UseSSE == 1)  {
+    for (int n = 0; n < 8; n++) {
+      __ movflt(as_XMMRegister(n), Address(rsp, n*sizeof(jdouble)));
+    }
+    __ addptr(rsp, sizeof(jdouble)*8);
+  } else if (UseSSE >= 2)  {
+    // Restore whole 128bit (16 bytes) XMM registers
+#ifdef _LP64
+    if (VM_Version::supports_evex()) {
+      for (int n = 0; n < num_xmm_regs; n++) {
+        __ vinsertf32x4(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, n*16), 0);
+      }
+    } else {
+      for (int n = 0; n < num_xmm_regs; n++) {
+        __ movdqu(as_XMMRegister(n), Address(rsp, n*16));
+      }
+    }
+#else
+    for (int n = 0; n < num_xmm_regs; n++) {
+      __ movdqu(as_XMMRegister(n), Address(rsp, n*16));
+    }
+#endif
+    __ addptr(rsp, 16*num_xmm_regs);
+
+#ifdef COMPILER2
+    if (MaxVectorSize > 16) {
+      // Restore upper half of YMM registers.
+      for (int n = 0; n < num_xmm_regs; n++) {
+        __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
+      }
+      __ addptr(rsp, 16*num_xmm_regs);
+      if (UseAVX > 2) {
+        for (int n = 0; n < num_xmm_regs; n++) {
+          __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
+        }
+        __ addptr(rsp, 32*num_xmm_regs);
+      }
+    }
+#endif
+  }
+}
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
+  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+  // At this point we know that marking is in progress.
+  // If do_load() is true then we have to emit the
+  // load of the previous value; otherwise it has already
+  // been loaded into _pre_val.
+
+  __ bind(*stub->entry());
+  assert(stub->pre_val()->is_register(), "Precondition.");
+
+  Register pre_val_reg = stub->pre_val()->as_register();
+
+  if (stub->do_load()) {
+    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+  }
+
+  __ cmpptr(pre_val_reg, (int32_t)NULL_WORD);
+  __ jcc(Assembler::equal, *stub->continuation());
+  ce->store_parameter(stub->pre_val()->as_register(), 0);
+  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
+  __ jmp(*stub->continuation());
+}
+
+void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {
+  __ bind(*stub->entry());
+
+  Label done;
+  Register obj = stub->obj()->as_register();
+  Register res = stub->result()->as_register();
+
+  if (res != obj) {
+    __ mov(res, obj);
+  }
+
+  // Check for null.
+  if (stub->needs_null_check()) {
+    __ testptr(res, res);
+    __ jcc(Assembler::zero, done);
+  }
+
+  write_barrier(ce->masm(), res);
+
+  __ bind(done);
+  __ jmp(*stub->continuation());
+}
+
+#undef __
+
+#define __ sasm->
+
+void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+  __ prologue("shenandoah_pre_barrier", false);
+  // arg0 : previous value of memory
+
+  __ push(rax);
+  __ push(rdx);
+
+  const Register pre_val = rax;
+  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+  const Register tmp = rdx;
+
+  NOT_LP64(__ get_thread(thread);)
+
+  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+
+  Label done;
+  Label runtime;
+
+  // Is SATB still active?
+  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+  __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
+  __ jcc(Assembler::zero, done);
+
+  // Can we store original value in the thread's buffer?
+
+  __ movptr(tmp, queue_index);
+  __ testptr(tmp, tmp);
+  __ jcc(Assembler::zero, runtime);
+  __ subptr(tmp, wordSize);
+  __ movptr(queue_index, tmp);
+  __ addptr(tmp, buffer);
+
+  // prev_val (rax)
+  __ load_parameter(0, pre_val);
+  __ movptr(Address(tmp, 0), pre_val);
+  __ jmp(done);
+
+  __ bind(runtime);
+
+  __ save_live_registers_no_oop_map(true);
+
+  // load the pre-value
+  __ load_parameter(0, rcx);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), rcx, thread);
+
+  __ restore_live_registers(true);
+
+  __ bind(done);
+
+  __ pop(rdx);
+  __ pop(rax);
+
+  __ epilogue();
+}
+
+#undef __
+
+#endif // COMPILER1
+
+address ShenandoahBarrierSetAssembler::shenandoah_wb() {
+  assert(_shenandoah_wb != NULL, "need write barrier stub");
+  return _shenandoah_wb;
+}
+
+#define __ cgen->assembler()->
+
+address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) {
+  __ align(CodeEntryAlignment);
+  StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
+  address start = __ pc();
+
+#ifdef _LP64
+  Label not_done;
+
+  // We use RDI, which also serves as the argument register for the slow call.
+  // RAX always holds the src object ptr, except after the slow call,
+  // when it holds the result.
+  // R8 and RCX are used as temporary registers.
+  __ push(rdi);
+  __ push(r8);
+
+  // Check for object being in the collection set.
+  // TODO: Can we use only 1 register here?
+  // The source object arrives here in rax.
+  // live: rax
+  // live: rdi
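+  // In-cset fast test sketch: cset_map[obj >> region_size_shift] != 0,
+  // one byte per heap region; objects outside the collection set return
+  // unchanged through the short path just below.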
+  __ mov(rdi, rax);
+  __ shrptr(rdi, ShenandoahHeapRegion::region_size_bytes_shift_jint());
+  // live: r8
+  __ movptr(r8, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
+  __ movbool(r8, Address(r8, rdi, Address::times_1));
+  // unlive: rdi
+  __ testbool(r8);
+  // unlive: r8
+  __ jccb(Assembler::notZero, not_done);
+
+  __ pop(r8);
+  __ pop(rdi);
+  __ ret(0);
+
+  __ bind(not_done);
+
+  __ push(rcx);
+  __ push(rdx);
+  __ push(rdi);
+  __ push(rsi);
+  __ push(r8);
+  __ push(r9);
+  __ push(r10);
+  __ push(r11);
+  __ push(r12);
+  __ push(r13);
+  __ push(r14);
+  __ push(r15);
+  save_vector_registers(cgen->assembler());
+  __ movptr(rdi, rax);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT), rdi);
+  restore_vector_registers(cgen->assembler());
+  __ pop(r15);
+  __ pop(r14);
+  __ pop(r13);
+  __ pop(r12);
+  __ pop(r11);
+  __ pop(r10);
+  __ pop(r9);
+  __ pop(r8);
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(rdx);
+  __ pop(rcx);
+
+  __ pop(r8);
+  __ pop(rdi);
+  __ ret(0);
+#else
+  ShouldNotReachHere();
+#endif
+  return start;
+}
+
+#undef __
+
+void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
+  if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
+    int stub_code_size = 4096;
+    ResourceMark rm;
+    BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
+    CodeBuffer buf(bb);
+    StubCodeGenerator cgen(&buf);
+    _shenandoah_wb = generate_shenandoah_wb(&cgen);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP
+#define CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+#ifdef COMPILER1
+class LIR_Assembler;
+class ShenandoahPreBarrierStub;
+class ShenandoahWriteBarrierStub;
+class StubAssembler;
+class StubCodeGenerator;
+#endif
+
+class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
+private:
+
+  static address _shenandoah_wb;
+
+  void satb_write_barrier_pre(MacroAssembler* masm,
+                              Register obj,
+                              Register pre_val,
+                              Register thread,
+                              Register tmp,
+                              bool tosca_live,
+                              bool expand_call);
+
+  void shenandoah_write_barrier_pre(MacroAssembler* masm,
+                                    Register obj,
+                                    Register pre_val,
+                                    Register thread,
+                                    Register tmp,
+                                    bool tosca_live,
+                                    bool expand_call);
+
+  void read_barrier(MacroAssembler* masm, Register dst);
+  void read_barrier_impl(MacroAssembler* masm, Register dst);
+
+  void read_barrier_not_null(MacroAssembler* masm, Register dst);
+  void read_barrier_not_null_impl(MacroAssembler* masm, Register dst);
+
+  void write_barrier(MacroAssembler* masm, Register dst);
+  void write_barrier_impl(MacroAssembler* masm, Register dst);
+
+  void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp);
+  void storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp);
+
+  address generate_shenandoah_wb(StubCodeGenerator* cgen);
+
+  void save_vector_registers(MacroAssembler* masm);
+  void restore_vector_registers(MacroAssembler* masm);
+
+public:
+  static address shenandoah_wb();
+
+#ifdef COMPILER1
+  void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
+  void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub);
+  void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+#endif
+
+  void cmpxchg_oop(MacroAssembler* masm,
+                   Register res, Address addr, Register oldval, Register newval,
+                   bool exchange, bool encode, Register tmp1, Register tmp2);
+  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                  Register src, Register dst, Register count);
+  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                  Register src, Register dst, Register count);
+  virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                       Register dst, Address src, Register tmp1, Register tmp_thread);
+  virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                        Address dst, Register val, Register tmp1, Register tmp2);
+
+#ifndef _LP64
+  virtual void obj_equals(MacroAssembler* masm,
+                          Address obj1, jobject obj2);
+  virtual void obj_equals(MacroAssembler* masm,
+                          Register obj1, jobject obj2);
+#endif
+
+  virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2);
+  virtual void obj_equals(MacroAssembler* masm, Register src1, Address src2);
+
+  virtual void tlab_allocate(MacroAssembler* masm,
+                             Register thread, Register obj,
+                             Register var_size_in_bytes,
+                             int con_size_in_bytes,
+                             Register t1, Register t2,
+                             Label& slow_case);
+
+  virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj);
+
+  virtual void barrier_stubs_init();
+
+};
+
+#endif // CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetC1_x86.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
+
+void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
+  Register addr = _addr->as_register_lo();
+  Register newval = _new_value->as_register();
+  Register cmpval = _cmp_value->as_register();
+  Register tmp1 = _tmp1->as_register();
+  Register tmp2 = _tmp2->as_register();
+  assert(cmpval == rax, "wrong register");
+  assert(newval != NULL, "new val must be register");
+  assert(cmpval != newval, "cmp and new values must be in different registers");
+  assert(cmpval != addr, "cmp and addr must be in different registers");
+  assert(newval != addr, "new value and addr must be in different registers");
+  ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), NULL, Address(addr, 0), cmpval, newval, /*exchange*/ true, /*encode*/ true, tmp1, tmp2);
+}
+
+#ifdef ASSERT
+#define __ gen->lir(__FILE__, __LINE__)->
+#else
+#define __ gen->lir()->
+#endif
+
+LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+
+  if (access.is_oop()) {
+    LIRGenerator* gen = access.gen();
+    if (ShenandoahSATBBarrier) {
+      pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(),
+                  LIR_OprFact::illegalOpr /* pre_val */);
+    }
+    if (ShenandoahCASBarrier) {
+      cmp_value.load_item_force(FrameMap::rax_oop_opr);
+      new_value.load_item();
+
+      LIR_Opr t1 = gen->new_register(T_OBJECT);
+      LIR_Opr t2 = gen->new_register(T_OBJECT);
+      LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base();
+
+      __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, LIR_OprFact::illegalOpr));
+
+      LIR_Opr result = gen->new_register(T_INT);
+      __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
+               result, T_INT);
+      return result;
+    }
+  }
+  return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
+}
+
+LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
+  LIRGenerator* gen = access.gen();
+  BasicType type = access.type();
+
+  LIR_Opr result = gen->new_register(type);
+  value.load_item();
+  LIR_Opr value_opr = value.result();
+
+  if (access.is_oop()) {
+    value_opr = storeval_barrier(access.gen(), value_opr, access.access_emit_info(), access.decorators());
+  }
+
+  // LIR only gives us a 2-operand form of xchg/xadd, where the result
+  // register doubles as an input, so move the value into place first.
+  __ move(value_opr, result);
+
+  assert(type == T_INT || type == T_OBJECT || type == T_ARRAY LP64_ONLY( || type == T_LONG ), "unexpected type");
+  __ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr);
+
+  if (access.is_oop()) {
+    if (ShenandoahSATBBarrier) {
+      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
+                  result /* pre_val */);
+    }
+  }
+
+  return result;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,152 @@
+//
+// Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+//
+
+source_hpp %{
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+%}
+
+instruct shenandoahRB(rRegP dst, rRegP src, rFlagsReg cr) %{
+  match(Set dst (ShenandoahReadBarrier src));
+  effect(DEF dst, USE src);
+  ins_cost(125); // XXX
+  format %{ "shenandoah_rb $dst, $src" %}
+  ins_encode %{
+    Register d = $dst$$Register;
+    Register s = $src$$Register;
+    __ movptr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
+instruct shenandoahRBNarrow(rRegP dst, rRegN src) %{
+  predicate(UseCompressedOops && (Universe::narrow_oop_shift() == 0));
+  match(Set dst (ShenandoahReadBarrier (DecodeN src)));
+  effect(DEF dst, USE src);
+  ins_cost(125); // XXX
+  format %{ "shenandoah_rb $dst, $src" %}
+  ins_encode %{
+    Register d = $dst$$Register;
+    Register s = $src$$Register;
+    __ movptr(d, Address(r12, s, Address::times_1, ShenandoahBrooksPointer::byte_offset()));
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
+instruct shenandoahRBNarrowShift(rRegP dst, rRegN src) %{
+  predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
+  match(Set dst (ShenandoahReadBarrier (DecodeN src)));
+  effect(DEF dst, USE src);
+  ins_cost(125); // XXX
+  format %{ "shenandoah_rb $dst, $src" %}
+  ins_encode %{
+    Register d = $dst$$Register;
+    Register s = $src$$Register;
+    __ movptr(d, Address(r12, s, Address::times_8, ShenandoahBrooksPointer::byte_offset()));
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
+instruct compareAndSwapP_shenandoah(rRegI res,
+                                    memory mem_ptr,
+                                    rRegP tmp1, rRegP tmp2,
+                                    rax_RegP oldval, rRegP newval,
+                                    rFlagsReg cr)
+%{
+  predicate(VM_Version::supports_cx8());
+  match(Set res (ShenandoahCompareAndSwapP mem_ptr (Binary oldval newval)));
+  match(Set res (ShenandoahWeakCompareAndSwapP mem_ptr (Binary oldval newval)));
+  effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval);
+
+  format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
+
+  ins_encode %{
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
+                                                   $res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
+                                                   false, // swap
+                                                   false, $tmp1$$Register, $tmp2$$Register
+                                                   );
+  %}
+  ins_pipe( pipe_cmpxchg );
+%}
+
+instruct compareAndSwapN_shenandoah(rRegI res,
+                                    memory mem_ptr,
+                                    rRegP tmp1, rRegP tmp2,
+                                    rax_RegN oldval, rRegN newval,
+                                    rFlagsReg cr) %{
+  match(Set res (ShenandoahCompareAndSwapN mem_ptr (Binary oldval newval)));
+  match(Set res (ShenandoahWeakCompareAndSwapN mem_ptr (Binary oldval newval)));
+  effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval);
+
+  format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
+
+  ins_encode %{
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
+                                                   $res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
+                                                   false, // swap
+                                                   false, $tmp1$$Register, $tmp2$$Register
+                                                   );
+  %}
+  ins_pipe( pipe_cmpxchg );
+%}
+
+instruct compareAndExchangeN_shenandoah(memory mem_ptr,
+                                        rax_RegN oldval, rRegN newval,
+                                        rRegP tmp1, rRegP tmp2,
+                                        rFlagsReg cr) %{
+  match(Set oldval (ShenandoahCompareAndExchangeN mem_ptr (Binary oldval newval)));
+  effect(TEMP tmp1, TEMP tmp2, KILL cr);
+
+  format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
+
+  ins_encode %{
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
+                                                   NULL, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
+                                                   true, // exchange
+                                                   false, $tmp1$$Register, $tmp2$$Register
+                                                   );
+  %}
+  ins_pipe( pipe_cmpxchg );
+%}
+
+instruct compareAndExchangeP_shenandoah(memory mem_ptr,
+                                        rax_RegP oldval, rRegP newval,
+                                        rRegP tmp1, rRegP tmp2,
+                                        rFlagsReg cr)
+%{
+  predicate(VM_Version::supports_cx8());
+  match(Set oldval (ShenandoahCompareAndExchangeP mem_ptr (Binary oldval newval)));
+  effect(KILL cr, TEMP tmp1, TEMP tmp2);
+  ins_cost(1000);
+
+  format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
+
+  ins_encode %{
+    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
+                                                   NULL, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
+                                                   true,  // exchange
+                                                   false, $tmp1$$Register, $tmp2$$Register
+                                                   );
+  %}
+  ins_pipe( pipe_cmpxchg );
+%}
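(Each read-barrier instruct above lowers to one dependent load through the Brooks forwarding pointer; the narrow variants fold the oop decode into the addressing mode using r12 as the heap base. Assuming the forwarding word sits immediately before the object, i.e. byte_offset() is -8 on 64-bit, the plain-pointer case is equivalent to:)

  struct Obj;

  static inline Obj* read_barrier(const Obj* obj) {
    // movptr(dst, Address(src, ShenandoahBrooksPointer::byte_offset()))
    return *reinterpret_cast<Obj* const*>(
        reinterpret_cast<const char*>(obj) - 8 /* assumed byte_offset() */);
  }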
--- a/src/hotspot/share/adlc/formssel.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/adlc/formssel.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -775,7 +775,10 @@
        !strcmp(_matrule->_rChild->_opType,"GetAndSetP")   ||
        !strcmp(_matrule->_rChild->_opType,"GetAndSetN")   ||
        !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeP") ||
-       !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN")))  return true;
+       !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN") ||
+       !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
+       !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
+       !strcmp(_matrule->_rChild->_opType,"ShenandoahReadBarrier")))  return true;
   else if ( is_ideal_load() == Form::idealP )                return true;
   else if ( is_ideal_store() != Form::none  )                return true;
 
@@ -3498,10 +3501,12 @@
     "CompareAndSwapB", "CompareAndSwapS", "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
     "WeakCompareAndSwapB", "WeakCompareAndSwapS", "WeakCompareAndSwapI", "WeakCompareAndSwapL", "WeakCompareAndSwapP", "WeakCompareAndSwapN",
     "CompareAndExchangeB", "CompareAndExchangeS", "CompareAndExchangeI", "CompareAndExchangeL", "CompareAndExchangeP", "CompareAndExchangeN",
+    "ShenandoahCompareAndSwapN", "ShenandoahCompareAndSwapP", "ShenandoahWeakCompareAndSwapP", "ShenandoahWeakCompareAndSwapN", "ShenandoahCompareAndExchangeP", "ShenandoahCompareAndExchangeN",
     "StoreCM",
     "ClearArray",
     "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
     "GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
+    "ShenandoahReadBarrier",
     "LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
   };
   int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -742,3 +742,27 @@
     ik->do_local_static_fields(&sffp);
   }
 }
+
+#ifdef ASSERT
+bool ciInstanceKlass::debug_final_field_at(int offset) {
+  GUARDED_VM_ENTRY(
+    InstanceKlass* ik = get_instanceKlass();
+    fieldDescriptor fd;
+    if (ik->find_field_from_offset(offset, false, &fd)) {
+      return fd.is_final();
+    }
+  );
+  return false;
+}
+
+bool ciInstanceKlass::debug_stable_field_at(int offset) {
+  GUARDED_VM_ENTRY(
+    InstanceKlass* ik = get_instanceKlass();
+    fieldDescriptor fd;
+    if (ik->find_field_from_offset(offset, false, &fd)) {
+      return fd.is_stable();
+    }
+  );
+  return false;
+}
+#endif
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -274,6 +274,11 @@
 
   // Dump the current state of this klass for compilation replay.
   virtual void dump_replay_data(outputStream* out);
+
+#ifdef ASSERT
+  bool debug_final_field_at(int offset);
+  bool debug_stable_field_at(int offset);
+#endif
 };
 
 #endif // SHARE_VM_CI_CIINSTANCEKLASS_HPP
--- a/src/hotspot/share/code/codeCache.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/code/codeCache.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -75,6 +75,7 @@
 class ExceptionCache;
 class KlassDepChange;
 class OopClosure;
+class ShenandoahParallelCodeHeapIterator;
 
 class CodeCache : AllStatic {
   friend class VMStructs;
@@ -82,6 +83,7 @@
   template <class T, class Filter> friend class CodeBlobIterator;
   friend class WhiteBox;
   friend class CodeCacheLoader;
+  friend class ShenandoahParallelCodeHeapIterator;
  private:
   // CodeHeaps of the cache
   static GrowableArray<CodeHeap*>* _heaps;
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -32,6 +32,7 @@
   f(CardTableBarrierSet)                             \
   EPSILONGC_ONLY(f(EpsilonBarrierSet))               \
   G1GC_ONLY(f(G1BarrierSet))                         \
+  SHENANDOAHGC_ONLY(f(ShenandoahBarrierSet))         \
   ZGC_ONLY(f(ZBarrierSet))
 
 #define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f)          \
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -36,6 +36,9 @@
 #if INCLUDE_G1GC
 #include "gc/g1/g1BarrierSet.inline.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#endif
 #if INCLUDE_ZGC
 #include "gc/z/zBarrierSet.inline.hpp"
 #endif
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -314,6 +314,8 @@
   virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const {}
 
   virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const { return NULL; }
+  virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const { return false; }
+  virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const { return false; }
 };
 
 #endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -90,6 +90,7 @@
 //     CMSHeap
 //   G1CollectedHeap
 //   ParallelScavengeHeap
+//   ShenandoahHeap
 //   ZCollectedHeap
 //
 class CollectedHeap : public CHeapObj<mtInternal> {
@@ -176,7 +177,8 @@
     CMS,
     G1,
     Epsilon,
-    Z
+    Z,
+    Shenandoah
   };
 
   static inline size_t filler_array_max_size() {
--- a/src/hotspot/share/gc/shared/gcCause.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/gcCause.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -111,6 +111,21 @@
     case _dcmd_gc_run:
       return "Diagnostic Command";
 
+    case _shenandoah_allocation_failure_evac:
+      return "Allocation Failure During Evacuation";
+
+    case _shenandoah_stop_vm:
+      return "Stopping VM";
+
+    case _shenandoah_concurrent_gc:
+      return "Concurrent GC";
+
+    case _shenandoah_traversal_gc:
+      return "Traversal GC";
+
+    case _shenandoah_upgrade_to_full_gc:
+      return "Upgrade To Full GC";
+
     case _z_timer:
       return "Timer";
 
--- a/src/hotspot/share/gc/shared/gcCause.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/gcCause.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -80,6 +80,12 @@
 
     _dcmd_gc_run,
 
+    _shenandoah_stop_vm,
+    _shenandoah_allocation_failure_evac,
+    _shenandoah_concurrent_gc,
+    _shenandoah_traversal_gc,
+    _shenandoah_upgrade_to_full_gc,
+
     _z_timer,
     _z_warmup,
     _z_allocation_rate,
@@ -123,7 +129,8 @@
    // _allocation_failure is the generic cause of a collection for allocation failure
    // _adaptive_size_policy is for a collection done before a full GC
     return (cause == GCCause::_allocation_failure ||
-            cause == GCCause::_adaptive_size_policy);
+            cause == GCCause::_adaptive_size_policy ||
+            cause == GCCause::_shenandoah_allocation_failure_evac);
   }
 
   // Return a string describing the GCCause.
--- a/src/hotspot/share/gc/shared/gcConfig.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -43,6 +43,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serialArguments.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoahArguments.hpp"
+#endif
 #if INCLUDE_ZGC
 #include "gc/z/zArguments.hpp"
 #endif
@@ -57,23 +60,25 @@
       _flag(flag), _name(name), _arguments(arguments), _hs_err_name(hs_err_name) {}
 };
 
-     CMSGC_ONLY(static CMSArguments      cmsArguments;)
- EPSILONGC_ONLY(static EpsilonArguments  epsilonArguments;)
-      G1GC_ONLY(static G1Arguments       g1Arguments;)
-PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
-  SERIALGC_ONLY(static SerialArguments   serialArguments;)
-       ZGC_ONLY(static ZArguments        zArguments;)
+       CMSGC_ONLY(static CMSArguments      cmsArguments;)
+   EPSILONGC_ONLY(static EpsilonArguments  epsilonArguments;)
+        G1GC_ONLY(static G1Arguments       g1Arguments;)
+  PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
+    SERIALGC_ONLY(static SerialArguments   serialArguments;)
+SHENANDOAHGC_ONLY(static ShenandoahArguments shenandoahArguments;)
+         ZGC_ONLY(static ZArguments        zArguments;)
 
 // Table of supported GCs, for translating between command
 // line flag, CollectedHeap::Name and GCArguments instance.
 static const SupportedGC SupportedGCs[] = {
-       CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,      cmsArguments,      "concurrent mark sweep gc"))
-   EPSILONGC_ONLY_ARG(SupportedGC(UseEpsilonGC,       CollectedHeap::Epsilon,  epsilonArguments,  "epsilon gc"))
-        G1GC_ONLY_ARG(SupportedGC(UseG1GC,            CollectedHeap::G1,       g1Arguments,       "g1 gc"))
-  PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments, "parallel gc"))
-  PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments, "parallel gc"))
-    SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments,   "serial gc"))
-         ZGC_ONLY_ARG(SupportedGC(UseZGC,             CollectedHeap::Z,        zArguments,        "z gc"))
+       CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,        cmsArguments,        "concurrent mark sweep gc"))
+   EPSILONGC_ONLY_ARG(SupportedGC(UseEpsilonGC,       CollectedHeap::Epsilon,    epsilonArguments,    "epsilon gc"))
+        G1GC_ONLY_ARG(SupportedGC(UseG1GC,            CollectedHeap::G1,         g1Arguments,         "g1 gc"))
+  PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC,      CollectedHeap::Parallel,   parallelArguments,   "parallel gc"))
+  PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel,   parallelArguments,   "parallel gc"))
+    SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC,        CollectedHeap::Serial,     serialArguments,     "serial gc"))
+SHENANDOAHGC_ONLY_ARG(SupportedGC(UseShenandoahGC,    CollectedHeap::Shenandoah, shenandoahArguments, "shenandoah gc"))
+         ZGC_ONLY_ARG(SupportedGC(UseZGC,             CollectedHeap::Z,          zArguments,          "z gc"))
 };
 
 #define FOR_EACH_SUPPORTED_GC(var)                                          \
@@ -90,14 +95,15 @@
 bool GCConfig::_gc_selected_ergonomically = false;
 
 void GCConfig::fail_if_unsupported_gc_is_selected() {
-  NOT_CMSGC(     FAIL_IF_SELECTED(UseConcMarkSweepGC, true));
-  NOT_EPSILONGC( FAIL_IF_SELECTED(UseEpsilonGC,       true));
-  NOT_G1GC(      FAIL_IF_SELECTED(UseG1GC,            true));
-  NOT_PARALLELGC(FAIL_IF_SELECTED(UseParallelGC,      true));
-  NOT_PARALLELGC(FAIL_IF_SELECTED(UseParallelOldGC,   true));
-  NOT_SERIALGC(  FAIL_IF_SELECTED(UseSerialGC,        true));
-  NOT_SERIALGC(  FAIL_IF_SELECTED(UseParallelOldGC,   false));
-  NOT_ZGC(       FAIL_IF_SELECTED(UseZGC,             true));
+  NOT_CMSGC(       FAIL_IF_SELECTED(UseConcMarkSweepGC, true));
+  NOT_EPSILONGC(   FAIL_IF_SELECTED(UseEpsilonGC,       true));
+  NOT_G1GC(        FAIL_IF_SELECTED(UseG1GC,            true));
+  NOT_PARALLELGC(  FAIL_IF_SELECTED(UseParallelGC,      true));
+  NOT_PARALLELGC(  FAIL_IF_SELECTED(UseParallelOldGC,   true));
+  NOT_SERIALGC(    FAIL_IF_SELECTED(UseSerialGC,        true));
+  NOT_SERIALGC(    FAIL_IF_SELECTED(UseParallelOldGC,   false));
+  NOT_SHENANDOAHGC(FAIL_IF_SELECTED(UseShenandoahGC,    true));
+  NOT_ZGC(         FAIL_IF_SELECTED(UseZGC,             true));
 }
 
 void GCConfig::select_gc_ergonomically() {
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -43,7 +43,7 @@
     return ParNew;
   }
 
-  if (UseZGC) {
+  if (UseZGC || UseShenandoahGC) {
     return NA;
   }
 
@@ -67,6 +67,10 @@
     return Z;
   }
 
+  if (UseShenandoahGC) {
+    return Shenandoah;
+  }
+
   return SerialOld;
 }
 
--- a/src/hotspot/share/gc/shared/gcName.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/gcName.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
   G1Old,
   G1Full,
   Z,
+  Shenandoah,
   NA,
   GCNameEndSentinel
 };
@@ -58,6 +59,7 @@
       case G1Old: return "G1Old";
       case G1Full: return "G1Full";
       case Z: return "Z";
+      case Shenandoah: return "Shenandoah";
       case NA: return "N/A";
       default: ShouldNotReachHere(); return NULL;
     }
--- a/src/hotspot/share/gc/shared/gc_globals.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -41,6 +41,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serial_globals.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoah_globals.hpp"
+#endif
 #if INCLUDE_ZGC
 #include "gc/z/z_globals.hpp"
 #endif
@@ -140,6 +143,22 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
+  SHENANDOAHGC_ONLY(GC_SHENANDOAH_FLAGS(                                    \
+    develop,                                                                \
+    develop_pd,                                                             \
+    product,                                                                \
+    product_pd,                                                             \
+    diagnostic,                                                             \
+    diagnostic_pd,                                                          \
+    experimental,                                                           \
+    notproduct,                                                             \
+    manageable,                                                             \
+    product_rw,                                                             \
+    lp64_product,                                                           \
+    range,                                                                  \
+    constraint,                                                             \
+    writeable))                                                             \
+                                                                            \
   ZGC_ONLY(GC_Z_FLAGS(                                                      \
     develop,                                                                \
     develop_pd,                                                             \
@@ -179,6 +198,9 @@
   experimental(bool, UseZGC, false,                                         \
           "Use the Z garbage collector")                                    \
                                                                             \
+  experimental(bool, UseShenandoahGC, false,                                \
+          "Use the Shenandoah garbage collector")                           \
+                                                                            \
   product(uint, ParallelGCThreads, 0,                                       \
           "Number of parallel threads parallel gc will use")                \
           constraint(ParallelGCThreadsConstraintFunc,AfterErgo)             \
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -1156,7 +1156,7 @@
       // Check assumption that an object is not potentially
       // discovered twice except by concurrent collectors that potentially
       // trace the same Reference object twice.
-      assert(UseConcMarkSweepGC || UseG1GC,
+      assert(UseConcMarkSweepGC || UseG1GC || UseShenandoahGC,
              "Only possible with a concurrent marking collector");
       return true;
     }
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -50,6 +50,9 @@
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/vmStructs_serial.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/vmStructs_shenandoah.hpp"
+#endif
 #if INCLUDE_ZGC
 #include "gc/z/vmStructs_z.hpp"
 #endif
@@ -73,6 +76,9 @@
   SERIALGC_ONLY(VM_STRUCTS_SERIALGC(nonstatic_field,                                                                                 \
                                     volatile_nonstatic_field,                                                                        \
                                     static_field))                                                                                   \
+  SHENANDOAHGC_ONLY(VM_STRUCTS_SHENANDOAH(nonstatic_field,                                                                           \
+                               volatile_nonstatic_field,                                                                             \
+                               static_field))                                                                                        \
   ZGC_ONLY(VM_STRUCTS_ZGC(nonstatic_field,                                                                                           \
                           volatile_nonstatic_field,                                                                                  \
                           static_field))                                                                                             \
@@ -178,6 +184,9 @@
   SERIALGC_ONLY(VM_TYPES_SERIALGC(declare_type,                           \
                                   declare_toplevel_type,                  \
                                   declare_integer_type))                  \
+  SHENANDOAHGC_ONLY(VM_TYPES_SHENANDOAH(declare_type,                     \
+                             declare_toplevel_type,                       \
+                             declare_integer_type))                       \
   ZGC_ONLY(VM_TYPES_ZGC(declare_type,                                     \
                         declare_toplevel_type,                            \
                         declare_integer_type))                            \
@@ -253,6 +262,8 @@
                                               declare_constant_with_value)) \
   SERIALGC_ONLY(VM_INT_CONSTANTS_SERIALGC(declare_constant,                 \
                                           declare_constant_with_value))     \
+  SHENANDOAHGC_ONLY(VM_INT_CONSTANTS_SHENANDOAH(declare_constant,           \
+                                     declare_constant_with_value))          \
   ZGC_ONLY(VM_INT_CONSTANTS_ZGC(declare_constant,                           \
                                 declare_constant_with_value))               \
                                                                             \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_IR.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
+
+#ifndef PATCHED_ADDR
+#define PATCHED_ADDR  (max_jint)
+#endif
+
+#ifdef ASSERT
+#define __ gen->lir(__FILE__, __LINE__)->
+#else
+#define __ gen->lir()->
+#endif
+
+void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
+  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->gen_pre_barrier_stub(ce, this);
+}
+
+void ShenandoahWriteBarrierStub::emit_code(LIR_Assembler* ce) {
+  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->gen_write_barrier_stub(ce, this);
+}
+
+void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
+  // First we test whether marking is in progress.
+  BasicType flag_type;
+  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
+  bool do_load = pre_val == LIR_OprFact::illegalOpr;
+  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+    flag_type = T_INT;
+  } else {
+    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
+              "Assumption");
+  // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, e.g. ARM,
+    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
+    flag_type = T_BOOLEAN;
+  }
+  LIR_Opr thrd = gen->getThreadPointer();
+  LIR_Address* mark_active_flag_addr =
+    new LIR_Address(thrd,
+                    in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()),
+                    flag_type);
+  // Read the marking-in-progress flag.
+  LIR_Opr flag_val = gen->new_register(T_INT);
+  __ load(mark_active_flag_addr, flag_val);
+  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
+
+  LIR_PatchCode pre_val_patch_code = lir_patch_none;
+
+  CodeStub* slow;
+
+  if (do_load) {
+    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
+    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
+
+    if (patch)
+      pre_val_patch_code = lir_patch_normal;
+
+    pre_val = gen->new_register(T_OBJECT);
+
+    if (!addr_opr->is_address()) {
+      assert(addr_opr->is_register(), "must be");
+      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
+    }
+    slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : NULL);
+  } else {
+    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
+    assert(pre_val->is_register(), "must be");
+    assert(pre_val->type() == T_OBJECT, "must be an object");
+
+    slow = new ShenandoahPreBarrierStub(pre_val);
+  }
+
+  __ branch(lir_cond_notEqual, T_INT, slow);
+  __ branch_destination(slow->continuation());
+}
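(In other words, the LIR above reduces to one thread-local flag test guarding an out-of-line stub. A standalone model; JavaThreadModel and satb_enqueue are illustrative stand-ins, not HotSpot types:)

  struct Obj;
  struct JavaThreadModel { int satb_active; };  // models satb_mark_queue_active
  static void satb_enqueue(Obj* pre_val) {}     // models ShenandoahPreBarrierStub

  static void pre_barrier_model(JavaThreadModel* t, Obj** addr, Obj* pre_val, bool do_load) {
    if (t->satb_active != 0) {        // the load + cmp emitted above
      if (do_load) pre_val = *addr;   // stub loads the previous value itself
      if (pre_val != nullptr) satb_enqueue(pre_val);
    }
  }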
+
+LIR_Opr ShenandoahBarrierSetC1::read_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
+  if (UseShenandoahGC && ShenandoahReadBarrier) {
+    return read_barrier_impl(gen, obj, info, need_null_check);
+  } else {
+    return obj;
+  }
+}
+
+LIR_Opr ShenandoahBarrierSetC1::read_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
+  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier), "Should be enabled");
+  LabelObj* done = new LabelObj();
+  LIR_Opr result = gen->new_register(T_OBJECT);
+  __ move(obj, result);
+  if (need_null_check) {
+    __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
+    __ branch(lir_cond_equal, T_LONG, done->label());
+  }
+  LIR_Address* brooks_ptr_address = gen->generate_address(result, ShenandoahBrooksPointer::byte_offset(), T_ADDRESS);
+  __ load(brooks_ptr_address, result, info ? new CodeEmitInfo(info) : NULL, lir_patch_none);
+
+  __ branch_destination(done->label());
+  return result;
+}
+
+LIR_Opr ShenandoahBarrierSetC1::write_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
+  if (UseShenandoahGC && ShenandoahWriteBarrier) {
+    return write_barrier_impl(gen, obj, info, need_null_check);
+  } else {
+    return obj;
+  }
+}
+
+LIR_Opr ShenandoahBarrierSetC1::write_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
+  assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
+
+  obj = ensure_in_register(gen, obj);
+  assert(obj->is_register(), "must be a register at this point");
+  LIR_Opr result = gen->new_register(T_OBJECT);
+  __ move(obj, result);
+
+  LIR_Opr thrd = gen->getThreadPointer();
+  LIR_Address* active_flag_addr =
+    new LIR_Address(thrd,
+                    in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
+                    T_BYTE);
+  // Read and check the gc-state-flag.
+  LIR_Opr flag_val = gen->new_register(T_INT);
+  __ load(active_flag_addr, flag_val);
+  LIR_Opr mask = LIR_OprFact::intConst(ShenandoahHeap::HAS_FORWARDED |
+                                       ShenandoahHeap::EVACUATION |
+                                       ShenandoahHeap::TRAVERSAL);
+  LIR_Opr mask_reg = gen->new_register(T_INT);
+  __ move(mask, mask_reg);
+
+  if (TwoOperandLIRForm) {
+    __ logical_and(flag_val, mask_reg, flag_val);
+  } else {
+    LIR_Opr masked_flag = gen->new_register(T_INT);
+    __ logical_and(flag_val, mask_reg, masked_flag);
+    flag_val = masked_flag;
+  }
+  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
+
+  CodeStub* slow = new ShenandoahWriteBarrierStub(obj, result, info ? new CodeEmitInfo(info) : NULL, need_null_check);
+  __ branch(lir_cond_notEqual, T_INT, slow);
+  __ branch_destination(slow->continuation());
+
+  return result;
+}
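(The fast path emitted above is just a byte load of the thread-local gc-state and a mask test; only when a relevant phase bit is set does control reach ShenandoahWriteBarrierStub. A model with illustrative bit values — the real masks live in ShenandoahHeap:)

  struct Obj;
  struct ThreadModel { unsigned char gc_state; };
  enum { HAS_FORWARDED_BIT = 1, EVACUATION_BIT = 2, TRAVERSAL_BIT = 4 };  // illustrative
  static Obj* slow_path(Obj* obj) { return obj; }  // models the stub's copy/resolve

  static Obj* write_barrier_model(ThreadModel* t, Obj* obj) {
    if ((t->gc_state & (HAS_FORWARDED_BIT | EVACUATION_BIT | TRAVERSAL_BIT)) != 0) {
      return slow_path(obj);   // out-of-line: evacuate or resolve the object
    }
    return obj;                // no active phase: the object is its own copy
  }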
+
+LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj) {
+  if (!obj->is_register()) {
+    LIR_Opr obj_reg = gen->new_register(T_OBJECT);
+    if (obj->is_constant()) {
+      __ move(obj, obj_reg);
+    } else {
+      __ leal(obj, obj_reg);
+    }
+    obj = obj_reg;
+  }
+  return obj;
+}
+
+LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
+  bool need_null_check = (decorators & IS_NOT_NULL) == 0;
+  if (ShenandoahStoreValEnqueueBarrier) {
+    obj = write_barrier_impl(gen, obj, info, need_null_check);
+    pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
+  }
+  if (ShenandoahStoreValReadBarrier) {
+    obj = read_barrier_impl(gen, obj, info, true /*need_null_check*/);
+  }
+  return obj;
+}
+
+LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
+  DecoratorSet decorators = access.decorators();
+  bool is_array = (decorators & IS_ARRAY) != 0;
+  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
+
+  bool is_write = (decorators & ACCESS_WRITE) != 0;
+  bool needs_null_check = (decorators & IS_NOT_NULL) == 0;
+
+  LIR_Opr base = access.base().item().result();
+  LIR_Opr offset = access.offset().opr();
+  LIRGenerator* gen = access.gen();
+
+  if (is_write) {
+    base = write_barrier(gen, base, access.access_emit_info(), needs_null_check);
+  } else {
+    base = read_barrier(gen, base, access.access_emit_info(), needs_null_check);
+  }
+
+  LIR_Opr addr_opr;
+  if (is_array) {
+    addr_opr = LIR_OprFact::address(gen->emit_array_address(base, offset, access.type()));
+  } else if (needs_patching) {
+    // we need to patch the offset in the instruction so don't allow
+    // generate_address to try to be smart about emitting the -1.
+    // Otherwise the patching code won't know how to find the
+    // instruction to patch.
+    addr_opr = LIR_OprFact::address(new LIR_Address(base, PATCHED_ADDR, access.type()));
+  } else {
+    addr_opr = LIR_OprFact::address(gen->generate_address(base, offset, 0, 0, access.type()));
+  }
+
+  if (resolve_in_register) {
+    LIR_Opr resolved_addr = gen->new_pointer_register();
+    __ leal(addr_opr, resolved_addr);
+    resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
+    return resolved_addr;
+  } else {
+    return addr_opr;
+  }
+}
+
+void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
+  if (access.is_oop()) {
+    if (ShenandoahSATBBarrier) {
+      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
+    }
+    value = storeval_barrier(access.gen(), value, access.access_emit_info(), access.decorators());
+  }
+  BarrierSetC1::store_at_resolved(access, value);
+}
+
+void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
+  BarrierSetC1::load_at_resolved(access, result);
+
+  if (ShenandoahKeepAliveBarrier) {
+    DecoratorSet decorators = access.decorators();
+    bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
+    bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
+    bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
+    LIRGenerator *gen = access.gen();
+    if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
+      // Register the value in the referent field with the pre-barrier
+      LabelObj *Lcont_anonymous;
+      if (is_anonymous) {
+        Lcont_anonymous = new LabelObj();
+        generate_referent_check(access, Lcont_anonymous);
+      }
+      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr /* addr_opr */,
+                  result /* pre_val */);
+      if (is_anonymous) {
+        __ branch_destination(Lcont_anonymous->label());
+      }
+    }
+  }
+}
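(The keep-alive shape added above matters for Reference.get() and friends: a referent loaded through a weak field is fed back into the pre-barrier so concurrent marking treats it as live. Schematically, with stand-in helpers:)

  struct Obj;
  static void satb_enqueue(Obj* v) {}  // stand-in for pre_barrier(..., result)

  static Obj* load_referent_model(Obj* const* weak_slot) {
    Obj* result = *weak_slot;     // BarrierSetC1::load_at_resolved
    if (result != nullptr) {
      satb_enqueue(result);       // ShenandoahKeepAliveBarrier keep-alive
    }
    return result;
  }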
+
+LIR_Opr ShenandoahBarrierSetC1::atomic_add_at_resolved(LIRAccess& access, LIRItem& value) {
+  return BarrierSetC1::atomic_add_at_resolved(access, value);
+}
+
+LIR_Opr ShenandoahBarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) {
+  bool is_write = decorators & ACCESS_WRITE;
+  if (is_write) {
+    return write_barrier(gen, obj, NULL, (decorators & IS_NOT_NULL) == 0);
+  } else {
+    return read_barrier(gen, obj, NULL, (decorators & IS_NOT_NULL) == 0);
+  }
+}
+
+class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
+  virtual OopMapSet* generate_code(StubAssembler* sasm) {
+    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+    bs->generate_c1_pre_barrier_runtime_stub(sasm);
+    return NULL;
+  }
+};
+
+void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
+  C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
+  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+                                                              "shenandoah_pre_barrier_slow",
+                                                              false, &pre_code_gen_cl);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_C1_SHENANDOAHBARRIERSETC1_HPP
+#define SHARE_VM_GC_SHENANDOAH_C1_SHENANDOAHBARRIERSETC1_HPP
+
+#include "c1/c1_CodeStubs.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
+
+class ShenandoahPreBarrierStub: public CodeStub {
+  friend class ShenandoahBarrierSetC1;
+private:
+  bool _do_load;
+  LIR_Opr _addr;
+  LIR_Opr _pre_val;
+  LIR_PatchCode _patch_code;
+  CodeEmitInfo* _info;
+
+public:
+  // Version that _does_ generate a load of the previous value from addr.
+  // addr (the address of the field to be read) must be a LIR_Address;
+  // pre_val (a temporary register) must be a register.
+  ShenandoahPreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
+    _do_load(true), _addr(addr), _pre_val(pre_val),
+    _patch_code(patch_code), _info(info)
+  {
+    assert(_pre_val->is_register(), "should be temporary register");
+    assert(_addr->is_address(), "should be the address of the field");
+  }
+
+  // Version that _does not_ generate load of the previous value; the
+  // previous value is assumed to have already been loaded into pre_val.
+  ShenandoahPreBarrierStub(LIR_Opr pre_val) :
+    _do_load(false), _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val),
+    _patch_code(lir_patch_none), _info(NULL)
+  {
+    assert(_pre_val->is_register(), "should be a register");
+  }
+
+  LIR_Opr addr() const { return _addr; }
+  LIR_Opr pre_val() const { return _pre_val; }
+  LIR_PatchCode patch_code() const { return _patch_code; }
+  CodeEmitInfo* info() const { return _info; }
+  bool do_load() const { return _do_load; }
+
+  virtual void emit_code(LIR_Assembler* e);
+  virtual void visit(LIR_OpVisitState* visitor) {
+    if (_do_load) {
+      // don't pass in the code emit info since it's processed in the fast
+      // path
+      if (_info != NULL)
+        visitor->do_slow_case(_info);
+      else
+        visitor->do_slow_case();
+
+      visitor->do_input(_addr);
+      visitor->do_temp(_pre_val);
+    } else {
+      visitor->do_slow_case();
+      visitor->do_input(_pre_val);
+    }
+  }
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const { out->print("ShenandoahPreBarrierStub"); }
+#endif // PRODUCT
+};
+
+class ShenandoahWriteBarrierStub: public CodeStub {
+  friend class ShenandoahBarrierSetC1;
+private:
+  LIR_Opr _obj;
+  LIR_Opr _result;
+  CodeEmitInfo* _info;
+  bool _needs_null_check;
+
+public:
+  ShenandoahWriteBarrierStub(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, bool needs_null_check) :
+    _obj(obj), _result(result), _info(info), _needs_null_check(needs_null_check)
+  {
+    assert(_obj->is_register(), "should be register");
+    assert(_result->is_register(), "should be register");
+  }
+
+  LIR_Opr obj() const { return _obj; }
+  LIR_Opr result() const { return _result; }
+  CodeEmitInfo* info() const { return _info; }
+  bool needs_null_check() const { return _needs_null_check; }
+
+  virtual void emit_code(LIR_Assembler* e);
+  virtual void visit(LIR_OpVisitState* visitor) {
+    visitor->do_slow_case();
+    visitor->do_input(_obj);
+    visitor->do_temp(_result);
+  }
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const { out->print("ShenandoahWriteBarrierStub"); }
+#endif // PRODUCT
+};
+
+class LIR_OpShenandoahCompareAndSwap : public LIR_Op {
+ friend class LIR_OpVisitState;
+
+private:
+  LIR_Opr _addr;
+  LIR_Opr _cmp_value;
+  LIR_Opr _new_value;
+  LIR_Opr _tmp1;
+  LIR_Opr _tmp2;
+
+public:
+  LIR_OpShenandoahCompareAndSwap(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
+                                 LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
+    : LIR_Op(lir_none, result, NULL)  // no info
+    , _addr(addr)
+    , _cmp_value(cmp_value)
+    , _new_value(new_value)
+    , _tmp1(t1)
+    , _tmp2(t2)                                  { }
+
+  LIR_Opr addr()        const                    { return _addr;  }
+  LIR_Opr cmp_value()   const                    { return _cmp_value; }
+  LIR_Opr new_value()   const                    { return _new_value; }
+  LIR_Opr tmp1()        const                    { return _tmp1;      }
+  LIR_Opr tmp2()        const                    { return _tmp2;      }
+
+  virtual void visit(LIR_OpVisitState* state) {
+      assert(_addr->is_valid(),      "used");
+      assert(_cmp_value->is_valid(), "used");
+      assert(_new_value->is_valid(), "used");
+      if (_info)                    state->do_info(_info);
+                                    state->do_input(_addr);
+                                    state->do_temp(_addr);
+                                    state->do_input(_cmp_value);
+                                    state->do_temp(_cmp_value);
+                                    state->do_input(_new_value);
+                                    state->do_temp(_new_value);
+      if (_tmp1->is_valid())        state->do_temp(_tmp1);
+      if (_tmp2->is_valid())        state->do_temp(_tmp2);
+      if (_result->is_valid())      state->do_output(_result);
+  }
+
+  virtual void emit_code(LIR_Assembler* masm);
+
+  virtual void print_instr(outputStream* out) const {
+    addr()->print(out);      out->print(" ");
+    cmp_value()->print(out); out->print(" ");
+    new_value()->print(out); out->print(" ");
+    tmp1()->print(out);      out->print(" ");
+    tmp2()->print(out);      out->print(" ");
+  }
+#ifndef PRODUCT
+  virtual const char* name() const {
+    return "shenandoah_cas_obj";
+  }
+#endif // PRODUCT
+};
+
+class ShenandoahBarrierSetC1 : public BarrierSetC1 {
+private:
+  CodeBlob* _pre_barrier_c1_runtime_code_blob;
+
+  void pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val);
+
+  LIR_Opr read_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
+  LIR_Opr write_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
+  LIR_Opr storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators);
+
+  LIR_Opr read_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
+  LIR_Opr write_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
+
+  LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj);
+
+public:
+  CodeBlob* pre_barrier_c1_runtime_code_blob() { return _pre_barrier_c1_runtime_code_blob; }
+
+protected:
+  virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register);
+
+  virtual void store_at_resolved(LIRAccess& access, LIR_Opr value);
+  virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
+
+  virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
+
+  virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
+  virtual LIR_Opr atomic_add_at_resolved(LIRAccess& access, LIRItem& value);
+
+public:
+  virtual LIR_Opr resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj);
+
+  virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_C1_SHENANDOAHBARRIERSETC1_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,1545 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahRuntime.hpp"
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#include "gc/shenandoah/c2/shenandoahSupport.hpp"
+#include "opto/arraycopynode.hpp"
+#include "opto/escape.hpp"
+#include "opto/graphKit.hpp"
+#include "opto/idealKit.hpp"
+#include "opto/macro.hpp"
+#include "opto/movenode.hpp"
+#include "opto/narrowptrnode.hpp"
+#include "opto/rootnode.hpp"
+
+ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
+  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
+}
+
+ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
+  : _shenandoah_barriers(new (comp_arena) GrowableArray<ShenandoahWriteBarrierNode*>(comp_arena, 8,  0, NULL)) {
+}
+
+int ShenandoahBarrierSetC2State::shenandoah_barriers_count() const {
+  return _shenandoah_barriers->length();
+}
+
+ShenandoahWriteBarrierNode* ShenandoahBarrierSetC2State::shenandoah_barrier(int idx) const {
+  return _shenandoah_barriers->at(idx);
+}
+
+void ShenandoahBarrierSetC2State::add_shenandoah_barrier(ShenandoahWriteBarrierNode * n) {
+  assert(!_shenandoah_barriers->contains(n), "duplicate entry in barrier list");
+  _shenandoah_barriers->append(n);
+}
+
+void ShenandoahBarrierSetC2State::remove_shenandoah_barrier(ShenandoahWriteBarrierNode * n) {
+  if (_shenandoah_barriers->contains(n)) {
+    _shenandoah_barriers->remove(n);
+  }
+}
+
+#define __ kit->
+
+Node* ShenandoahBarrierSetC2::shenandoah_read_barrier(GraphKit* kit, Node* obj) const {
+  if (ShenandoahReadBarrier) {
+    obj = shenandoah_read_barrier_impl(kit, obj, false, true, true);
+  }
+  return obj;
+}
+
+Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
+  if (ShenandoahStoreValEnqueueBarrier) {
+    obj = shenandoah_write_barrier(kit, obj);
+    obj = shenandoah_enqueue_barrier(kit, obj);
+  }
+  if (ShenandoahStoreValReadBarrier) {
+    obj = shenandoah_read_barrier_impl(kit, obj, true, false, false);
+  }
+  return obj;
+}
+
+Node* ShenandoahBarrierSetC2::shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const {
+  const Type* obj_type = obj->bottom_type();
+  if (obj_type->higher_equal(TypePtr::NULL_PTR)) {
+    return obj;
+  }
+  const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
+  Node* mem = use_mem ? __ memory(adr_type) : __ immutable_memory();
+
+  if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, mem, allow_fromspace)) {
+    // We know it is null, no barrier needed.
+    return obj;
+  }
+
+  if (obj_type->meet(TypePtr::NULL_PTR) == obj_type->remove_speculative()) {
+
+    // We don't know if it's null or not. Need null-check.
+    enum { _not_null_path = 1, _null_path, PATH_LIMIT };
+    RegionNode* region = new RegionNode(PATH_LIMIT);
+    Node*       phi    = new PhiNode(region, obj_type);
+    Node* null_ctrl = __ top();
+    Node* not_null_obj = __ null_check_oop(obj, &null_ctrl);
+
+    region->init_req(_null_path, null_ctrl);
+    phi   ->init_req(_null_path, __ zerocon(T_OBJECT));
+
+    Node* ctrl = use_ctrl ? __ control() : NULL;
+    ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, not_null_obj, allow_fromspace);
+    Node* n = __ gvn().transform(rb);
+
+    region->init_req(_not_null_path, __ control());
+    phi   ->init_req(_not_null_path, n);
+
+    __ set_control(__ gvn().transform(region));
+    __ record_for_igvn(region);
+    return __ gvn().transform(phi);
+
+  } else {
+    // We know it is not null. Simple barrier is sufficient.
+    Node* ctrl = use_ctrl ? __ control() : NULL;
+    ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, obj, allow_fromspace);
+    Node* n = __ gvn().transform(rb);
+    __ record_for_igvn(n);
+    return n;
+  }
+}
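(The region/phi construction above is just a null split: the dependent forwarding-pointer load is emitted only on the non-null path, and the two results are merged. Source-level equivalent; load_brooks_ptr_model stands in for the ShenandoahReadBarrierNode load:)

  struct Obj;
  static Obj* load_brooks_ptr_model(Obj* obj) { return obj; }  // stand-in

  static Obj* read_barrier_maybe_null(Obj* obj) {
    if (obj == nullptr) {
      return nullptr;                 // _null_path: phi input is zerocon(T_OBJECT)
    }
    return load_brooks_ptr_model(obj);  // _not_null_path: barrier the checked oop
  }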
+
+Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const {
+  ShenandoahWriteBarrierNode* wb = new ShenandoahWriteBarrierNode(kit->C, kit->control(), kit->memory(adr_type), obj);
+  Node* n = __ gvn().transform(wb);
+  if (n == wb) { // New barrier needs memory projection.
+    Node* proj = __ gvn().transform(new ShenandoahWBMemProjNode(n));
+    __ set_memory(proj, adr_type);
+  }
+  return n;
+}
+
+Node* ShenandoahBarrierSetC2::shenandoah_write_barrier(GraphKit* kit, Node* obj) const {
+  if (ShenandoahWriteBarrier) {
+    obj = shenandoah_write_barrier_impl(kit, obj);
+  }
+  return obj;
+}
+
+Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const {
+  if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, NULL, true)) {
+    return obj;
+  }
+  const Type* obj_type = obj->bottom_type();
+  const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
+  Node* n = shenandoah_write_barrier_helper(kit, obj, adr_type);
+  __ record_for_igvn(n);
+  return n;
+}
+
+bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
+                                                         BasicType bt, uint adr_idx) const {
+  intptr_t offset = 0;
+  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
+  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
+
+  if (offset == Type::OffsetBot) {
+    return false; // cannot unalias unless there are precise offsets
+  }
+
+  if (alloc == NULL) {
+    return false; // No allocation found
+  }
+
+  intptr_t size_in_bytes = type2aelembytes(bt);
+
+  Node* mem = __ memory(adr_idx); // start searching here...
+
+  for (int cnt = 0; cnt < 50; cnt++) {
+
+    if (mem->is_Store()) {
+
+      Node* st_adr = mem->in(MemNode::Address);
+      intptr_t st_offset = 0;
+      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
+
+      if (st_base == NULL) {
+        break; // inscrutable pointer
+      }
+
+      // We have found a store with the same base and offset as ours, so break
+      if (st_base == base && st_offset == offset) {
+        break;
+      }
+
+      if (st_offset != offset && st_offset != Type::OffsetBot) {
+        const int MAX_STORE = BytesPerLong;
+        if (st_offset >= offset + size_in_bytes ||
+            st_offset <= offset - MAX_STORE ||
+            st_offset <= offset - mem->as_Store()->memory_size()) {
+          // Success:  The offsets are provably independent.
+          // (You may ask, why not just test st_offset != offset and be done?
+          // The answer is that stores of different sizes can co-exist
+          // in the same sequence of RawMem effects.  We sometimes initialize
+          // a whole 'tile' of array elements with a single jint or jlong.)
+          mem = mem->in(MemNode::Memory);
+          continue; // advance through independent store memory
+        }
+      }
+
+      if (st_base != base
+          && MemNode::detect_ptr_independence(base, alloc, st_base,
+                                              AllocateNode::Ideal_allocation(st_base, phase),
+                                              phase)) {
+        // Success:  The bases are provably independent.
+        mem = mem->in(MemNode::Memory);
+        continue; // advance through independent store memory
+      }
+    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
+
+      InitializeNode* st_init = mem->in(0)->as_Initialize();
+      AllocateNode* st_alloc = st_init->allocation();
+
+      // Make sure that we are looking at the same allocation site.
+      // The alloc variable is guaranteed not to be null here by the earlier check.
+      if (alloc == st_alloc) {
+        // Check that the initialization is storing NULL, so that no previous
+        // store has been moved up to directly write a reference
+        Node* captured_store = st_init->find_captured_store(offset,
+                                                            type2aelembytes(T_OBJECT),
+                                                            phase);
+        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
+          return true;
+        }
+      }
+    }
+
+    // Unless there is an explicit 'continue', we must bail out here,
+    // because 'mem' is an inscrutable memory state (e.g., a call).
+    break;
+  }
+
+  return false;
+}
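(The memory walk above licenses the elision in satb_write_barrier_pre(): when the previous value is provably the initial NULL of a compilation-local allocation, there is nothing for SATB to enqueue. An illustrative shape it targets:)

  struct Obj { Obj* field = nullptr; };

  static void store_to_fresh(Obj* x) {
    Obj* o = new Obj();  // allocation found via AllocateNode::Ideal_allocation
    o->field = x;        // previous value is provably the initial nullptr,
                         // so the SATB pre-barrier can be removed here
  }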
+
+#undef __
+#define __ ideal.
+
+void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
+                                                    bool do_load,
+                                                    Node* obj,
+                                                    Node* adr,
+                                                    uint alias_idx,
+                                                    Node* val,
+                                                    const TypeOopPtr* val_type,
+                                                    Node* pre_val,
+                                                    BasicType bt) const {
+  // Some sanity checks
+  // Note: val is unused in this routine.
+
+  if (do_load) {
+    // We need to generate the load of the previous value
+    assert(obj != NULL, "must have a base");
+    assert(adr != NULL, "where are we loading from?");
+    assert(pre_val == NULL, "loaded already?");
+    assert(val_type != NULL, "need a type");
+
+    if (ReduceInitialCardMarks
+        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
+      return;
+    }
+
+  } else {
+    // In this case both val_type and alias_idx are unused.
+    assert(pre_val != NULL, "must be loaded already");
+    // Nothing to be done if pre_val is null.
+    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
+    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
+  }
+  assert(bt == T_OBJECT, "or we shouldn't be here");
+
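+  // Informal sketch of the IR emitted below (illustrative pseudo-code, not
+  // actual identifiers; the SATB queue fields live in thread-local data):
+  //
+  //   if (thread->gc_state & MARKING) {                  // unlikely
+  //     pre_val = *adr;                                  // only if do_load
+  //     if (pre_val != NULL) {
+  //       if (thread->satb_index != 0) {                 // queue not full
+  //         thread->satb_index -= sizeof(intptr_t);      // index is in bytes
+  //         *(thread->satb_buffer + thread->satb_index) = pre_val;
+  //       } else {
+  //         write_ref_field_pre_entry(pre_val, thread);  // leaf runtime call
+  //       }
+  //     }
+  //   }
+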
+  IdealKit ideal(kit, true);
+
+  Node* tls = __ thread(); // ThreadLocalStorage
+
+  Node* no_base = __ top();
+  Node* zero  = __ ConI(0);
+  Node* zeroX = __ ConX(0);
+
+  float likely  = PROB_LIKELY(0.999);
+  float unlikely  = PROB_UNLIKELY(0.999);
+
+  // Offsets into the thread
+  const int index_offset   = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
+  const int buffer_offset  = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
+
+  // Now the actual pointers into the thread
+  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
+  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
+
+  // Now some of the values
+  Node* marking;
+  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
+  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
+  marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
+  assert(ShenandoahWriteBarrierNode::is_gc_state_load(ld), "Should match the shape");
+
+  // if (marking)
+  __ if_then(marking, BoolTest::ne, zero, unlikely); {
+    BasicType index_bt = TypeX_X->basic_type();
+    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
+    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
+
+    if (do_load) {
+      // load original value
+      // alias_idx correct??
+      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
+    }
+
+    // if (pre_val != NULL)
+    __ if_then(pre_val, BoolTest::ne, kit->null()); {
+      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
+
+      // is the queue for this thread full?
+      __ if_then(index, BoolTest::ne, zeroX, likely); {
+
+        // decrement the index
+        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
+
+        // Now get the buffer location we will log the previous value into and store it
+        Node *log_addr = __ AddP(no_base, buffer, next_index);
+        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
+        // update the index
+        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
+
+      } __ else_(); {
+
+        // logging buffer is full, call the runtime
+        const TypeFunc *tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
+        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
+      } __ end_if();  // (index != 0)
+    } __ end_if();  // (pre_val != NULL)
+  } __ end_if();  // (marking)
+
+  // Final sync IdealKit and GraphKit.
+  kit->final_sync(ideal);
+
+  if (ShenandoahSATBBarrier && adr != NULL) {
+    Node* c = kit->control();
+    Node* call = c->in(1)->in(1)->in(1)->in(0);
+    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
+    call->add_req(adr);
+  }
+}
+
+bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
+  return call->is_CallLeaf() &&
+         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry);
+}
+
+bool ShenandoahBarrierSetC2::is_shenandoah_wb_call(Node* call) {
+  return call->is_CallLeaf() &&
+         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT);
+}
+
+bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
+  if (n->Opcode() != Op_If) {
+    return false;
+  }
+
+  Node* bol = n->in(1);
+  assert(bol->is_Bool(), "");
+  Node* cmpx = bol->in(1);
+  if (bol->as_Bool()->_test._test == BoolTest::ne &&
+      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
+      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
+      cmpx->in(1)->in(2)->is_Con() &&
+      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
+    return true;
+  }
+
+  return false;
+}
+
+bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
+  if (!n->is_Load()) return false;
+  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
+  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
+         && n->in(2)->in(3)->is_Con()
+         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
+}
+
+void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
+                                                          bool do_load,
+                                                          Node* obj,
+                                                          Node* adr,
+                                                          uint alias_idx,
+                                                          Node* val,
+                                                          const TypeOopPtr* val_type,
+                                                          Node* pre_val,
+                                                          BasicType bt) const {
+  if (ShenandoahSATBBarrier) {
+    IdealKit ideal(kit);
+    kit->sync_kit(ideal);
+
+    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);
+
+    ideal.sync_kit(kit);
+    kit->final_sync(ideal);
+  }
+}
+
+Node* ShenandoahBarrierSetC2::shenandoah_enqueue_barrier(GraphKit* kit, Node* pre_val) const {
+  return kit->gvn().transform(new ShenandoahEnqueueBarrierNode(pre_val));
+}
+
+// Helper that guards and inserts a pre-barrier.
+void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
+                                                Node* pre_val, bool need_mem_bar) const {
+  // We could be accessing the referent field of a reference object. If so,
+  // when SATB barriers are enabled, we need to log the value in the referent
+  // field in an SATB buffer.
+  // This routine performs some compile-time filters and generates suitable
+  // runtime filters that guard the pre-barrier code.
+  // It also adds a memory barrier for non-volatile loads from the referent
+  // field, to prevent commoning of loads across safepoints.
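+  //
+  // For illustration (an assumed example, not from this change): code that
+  // reads Reference.referent reflectively ends up here, e.g.
+  //
+  //   Field f = Reference.class.getDeclaredField("referent");
+  //   f.setAccessible(true);
+  //   Object o = f.get(someRef);   // load guarded by the filters below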
+
+  // Some compile time checks.
+
+  // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
+  const TypeX* otype = offset->find_intptr_t_type();
+  if (otype != NULL && otype->is_con() &&
+      otype->get_con() != java_lang_ref_Reference::referent_offset) {
+    // Constant offset, but not the referent offset, so just return.
+    return;
+  }
+
+  // We only need to generate the runtime guards for instances.
+  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
+  if (btype != NULL) {
+    if (btype->isa_aryptr()) {
+      // Array type so nothing to do
+      return;
+    }
+
+    const TypeInstPtr* itype = btype->isa_instptr();
+    if (itype != NULL) {
+      // Can the klass of base_oop be statically determined to be
+      // _not_ a sub-class of Reference and _not_ Object?
+      ciKlass* klass = itype->klass();
+      if (klass->is_loaded() &&
+          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
+          !kit->env()->Object_klass()->is_subtype_of(klass)) {
+        return;
+      }
+    }
+  }
+
+  // The compile time filters did not reject base_oop/offset so
+  // we need to generate the following runtime filters
+  //
+  // if (offset == java_lang_ref_Reference::referent_offset) {
+  //   if (instance_of(base, java.lang.ref.Reference)) {
+  //     pre_barrier(_, pre_val, ...);
+  //   }
+  // }
+
+  float likely   = PROB_LIKELY(  0.999);
+  float unlikely = PROB_UNLIKELY(0.999);
+
+  IdealKit ideal(kit);
+
+  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
+
+  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
+      // Update GraphKit memory and control from IdealKit.
+      kit->sync_kit(ideal);
+
+      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
+      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);
+
+      // Update IdealKit memory and control from GraphKit.
+      __ sync_kit(kit);
+
+      Node* one = __ ConI(1);
+      // is_instof == 0 if base_oop == NULL
+      __ if_then(is_instof, BoolTest::eq, one, unlikely); {
+
+        // Update GraphKit from IdealKit.
+        kit->sync_kit(ideal);
+
+        // Use the pre-barrier to record the value in the referent field
+        satb_write_barrier_pre(kit, false /* do_load */,
+                               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
+                               pre_val /* pre_val */,
+                               T_OBJECT);
+        if (need_mem_bar) {
+          // Add a memory barrier to prevent reads of this field from being
+          // commoned across safepoints, since the GC can change its value.
+          kit->insert_mem_bar(Op_MemBarCPUOrder);
+        }
+        // Update IdealKit from GraphKit.
+        __ sync_kit(kit);
+
+      } __ end_if(); // (is_instof == 1)
+  } __ end_if(); // offset == referent_offset
+
+  // Final sync IdealKit and GraphKit.
+  kit->final_sync(ideal);
+}
+
+#undef __
+
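+// Describes the slow-path entry taken when the thread-local SATB buffer is
+// full. In C terms the signature is roughly (a sketch, matching the domain
+// built below):
+//
+//   void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread);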
+const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
+  const Type **fields = TypeTuple::fields(2);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
+  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
+  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
+
+  // create result type (range)
+  fields = TypeTuple::fields(0);
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
+
+  return TypeFunc::make(domain, range);
+}
+
+const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
+  const Type **fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
+  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
+
+  // create result type (range)
+  fields = TypeTuple::fields(0);
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
+
+  return TypeFunc::make(domain, range);
+}
+
+const TypeFunc* ShenandoahBarrierSetC2::shenandoah_write_barrier_Type() {
+  const Type **fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
+  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
+
+  // create result type (range)
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
+
+  return TypeFunc::make(domain, range);
+}
+
+void ShenandoahBarrierSetC2::resolve_address(C2Access& access) const {
+  const TypePtr* adr_type = access.addr().type();
+
+  if ((access.decorators() & IN_NATIVE) == 0 && (adr_type->isa_instptr() || adr_type->isa_aryptr())) {
+    int off = adr_type->is_ptr()->offset();
+    int base_off = adr_type->isa_instptr() ? instanceOopDesc::base_offset_in_bytes() :
+      arrayOopDesc::base_offset_in_bytes(adr_type->is_aryptr()->elem()->array_element_basic_type());
+    assert(off != Type::OffsetTop, "unexpected offset");
+    if (off == Type::OffsetBot || off >= base_off) {
+      DecoratorSet decorators = access.decorators();
+      bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
+      GraphKit* kit = NULL;
+      if (access.is_parse_access()) {
+        C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+        kit = parse_access.kit();
+      }
+      Node* adr = access.addr().node();
+      assert(adr->is_AddP(), "unexpected address shape");
+      Node* base = adr->in(AddPNode::Base);
+
+      if (is_write) {
+        if (kit != NULL) {
+          base = shenandoah_write_barrier(kit, base);
+        } else {
+          assert(access.is_opt_access(), "either parse or opt access");
+          assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for clone");
+        }
+      } else {
+        if (adr_type->isa_instptr()) {
+          Compile* C = access.gvn().C;
+          ciField* field = C->alias_type(adr_type)->field();
+
+          // Insert read barrier for Shenandoah.
+          if (field != NULL &&
+              ((ShenandoahOptimizeStaticFinals   && field->is_static()  && field->is_final()) ||
+               (ShenandoahOptimizeInstanceFinals && !field->is_static() && field->is_final()) ||
+               (ShenandoahOptimizeStableFinals   && field->is_stable()))) {
+            // Skip the barrier for special fields
+          } else {
+            if (kit != NULL) {
+              base = shenandoah_read_barrier(kit, base);
+            } else {
+              assert(access.is_opt_access(), "either parse or opt access");
+              assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy");
+            }
+          }
+        } else {
+          if (kit != NULL) {
+            base = shenandoah_read_barrier(kit, base);
+          } else {
+            assert(access.is_opt_access(), "either parse or opt access");
+            assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy");
+          }
+        }
+      }
+      if (base != adr->in(AddPNode::Base)) {
+        assert(kit != NULL, "no barrier should have been added");
+
+        Node* address = adr->in(AddPNode::Address);
+
+        if (address->is_AddP()) {
+          assert(address->in(AddPNode::Base) == adr->in(AddPNode::Base), "unexpected address shape");
+          assert(!address->in(AddPNode::Address)->is_AddP(), "unexpected address shape");
+          assert(address->in(AddPNode::Address) == adr->in(AddPNode::Base), "unexpected address shape");
+          address = address->clone();
+          address->set_req(AddPNode::Base, base);
+          address->set_req(AddPNode::Address, base);
+          address = kit->gvn().transform(address);
+        } else {
+          assert(address == adr->in(AddPNode::Base), "unexpected address shape");
+          address = base;
+        }
+        adr = adr->clone();
+        adr->set_req(AddPNode::Base, base);
+        adr->set_req(AddPNode::Address, address);
+        adr = kit->gvn().transform(adr);
+        access.addr().set_node(adr);
+      }
+    }
+  }
+}
+
+Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
+  DecoratorSet decorators = access.decorators();
+
+  const TypePtr* adr_type = access.addr().type();
+  Node* adr = access.addr().node();
+
+  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
+  bool on_heap = (decorators & IN_HEAP) != 0;
+
+  if (!access.is_oop() || (!on_heap && !anonymous)) {
+    return BarrierSetC2::store_at_resolved(access, val);
+  }
+
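+  // For heap oop stores parsed here we emit, in order: a storeval barrier on
+  // the new value, then the SATB pre-barrier (which also loads the previous
+  // value), and finally the store itself via the shared BarrierSetC2 path.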
+  if (access.is_parse_access()) {
+    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+    GraphKit* kit = parse_access.kit();
+
+    uint adr_idx = kit->C->get_alias_index(adr_type);
+    assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
+    Node* value = val.node();
+    value = shenandoah_storeval_barrier(kit, value);
+    val.set_node(value);
+    shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
+                                 static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
+  } else {
+    assert(access.is_opt_access(), "only for optimization passes");
+    assert(((decorators & C2_TIGHLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
+    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
+    PhaseGVN& gvn = opt_access.gvn();
+    MergeMemNode* mm = opt_access.mem();
+
+    if (ShenandoahStoreValReadBarrier) {
+      RegionNode* region = new RegionNode(3);
+      const Type* v_t = gvn.type(val.node());
+      Node* phi = new PhiNode(region, v_t->isa_oopptr() ? v_t->is_oopptr()->cast_to_nonconst() : v_t);
+      Node* cmp = gvn.transform(new CmpPNode(val.node(), gvn.zerocon(T_OBJECT)));
+      Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::ne));
+      IfNode* iff = new IfNode(opt_access.ctl(), bol, PROB_LIKELY_MAG(3), COUNT_UNKNOWN);
+
+      gvn.transform(iff);
+      if (gvn.is_IterGVN()) {
+        gvn.is_IterGVN()->_worklist.push(iff);
+      } else {
+        gvn.record_for_igvn(iff);
+      }
+
+      Node* null_true = gvn.transform(new IfFalseNode(iff));
+      Node* null_false = gvn.transform(new IfTrueNode(iff));
+      region->init_req(1, null_true);
+      region->init_req(2, null_false);
+      phi->init_req(1, gvn.zerocon(T_OBJECT));
+      Node* cast = new CastPPNode(val.node(), gvn.type(val.node())->join_speculative(TypePtr::NOTNULL));
+      cast->set_req(0, null_false);
+      cast = gvn.transform(cast);
+      Node* rb = gvn.transform(new ShenandoahReadBarrierNode(null_false, gvn.C->immutable_memory(), cast, false));
+      phi->init_req(2, rb);
+      opt_access.set_ctl(gvn.transform(region));
+      val.set_node(gvn.transform(phi));
+    }
+    if (ShenandoahStoreValEnqueueBarrier) {
+      const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(gvn.type(val.node()));
+      int alias = gvn.C->get_alias_index(adr_type);
+      Node* wb = new ShenandoahWriteBarrierNode(gvn.C, opt_access.ctl(), mm->memory_at(alias), val.node());
+      Node* wb_transformed = gvn.transform(wb);
+      Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(wb_transformed));
+      if (wb_transformed == wb) {
+        Node* proj = gvn.transform(new ShenandoahWBMemProjNode(wb));
+        mm->set_memory_at(alias, proj);
+      }
+      val.set_node(enqueue);
+    }
+  }
+  return BarrierSetC2::store_at_resolved(access, val);
+}
+
+Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
+  DecoratorSet decorators = access.decorators();
+
+  Node* adr = access.addr().node();
+  Node* obj = access.base();
+
+  bool mismatched = (decorators & C2_MISMATCHED) != 0;
+  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
+  bool on_heap = (decorators & IN_HEAP) != 0;
+  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
+  bool is_unordered = (decorators & MO_UNORDERED) != 0;
+  bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;
+
+  Node* top = Compile::current()->top();
+
+  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
+  Node* load = BarrierSetC2::load_at_resolved(access, val_type);
+
+  // If we are reading the value of the referent field of a Reference
+  // object (either by using Unsafe directly or through reflection)
+  // then, if SATB is enabled, we need to record the referent in an
+  // SATB log buffer using the pre-barrier mechanism.
+  // We also need a memory barrier to prevent reads of this field from being
+  // commoned across safepoints, since the GC can change its value.
+  bool need_read_barrier = ShenandoahKeepAliveBarrier &&
+    (on_heap && (on_weak || (unknown && offset != top && obj != top)));
+
+  if (!access.is_oop() || !need_read_barrier) {
+    return load;
+  }
+
+  assert(access.is_parse_access(), "entry not supported at optimization time");
+  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+  GraphKit* kit = parse_access.kit();
+
+  if (on_weak) {
+    // Use the pre-barrier to record the value in the referent field
+    satb_write_barrier_pre(kit, false /* do_load */,
+                           NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
+                           load /* pre_val */, T_OBJECT);
+    // Add a memory barrier to prevent reads of this field from being
+    // commoned across safepoints, since the GC can change its value.
+    kit->insert_mem_bar(Op_MemBarCPUOrder);
+  } else if (unknown) {
+    // We pass !need_cpu_mem_bar as need_mem_bar: if a CPU memory barrier is
+    // emitted here anyway, the pre-barrier does not need its own.
+    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
+  }
+
+  return load;
+}
+
+Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
+                                                   Node* new_val, const Type* value_type) const {
+  GraphKit* kit = access.kit();
+  if (access.is_oop()) {
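+    // SATB needs the value that is about to be overwritten. Logging
+    // expected_val is a safe choice: when the CAS succeeds it is exactly the
+    // previous value, and when it fails the extra log entry is merely
+    // conservative. (Informal rationale, mirroring the G1 pre-barrier.)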
+    new_val = shenandoah_storeval_barrier(kit, new_val);
+    shenandoah_write_barrier_pre(kit, false /* do_load */,
+                                 NULL, NULL, max_juint, NULL, NULL,
+                                 expected_val /* pre_val */, T_OBJECT);
+
+    MemNode::MemOrd mo = access.mem_node_mo();
+    Node* mem = access.memory();
+    Node* adr = access.addr().node();
+    const TypePtr* adr_type = access.addr().type();
+    Node* load_store = NULL;
+
+#ifdef _LP64
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
+      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
+      load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
+    } else
+#endif
+    {
+      load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
+    }
+
+    access.set_raw_access(load_store);
+    pin_atomic_op(access);
+
+#ifdef _LP64
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+      return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
+    }
+#endif
+    return load_store;
+  }
+  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
+}
+
+Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
+                                                              Node* new_val, const Type* value_type) const {
+  GraphKit* kit = access.kit();
+  if (access.is_oop()) {
+    new_val = shenandoah_storeval_barrier(kit, new_val);
+    shenandoah_write_barrier_pre(kit, false /* do_load */,
+                                 NULL, NULL, max_juint, NULL, NULL,
+                                 expected_val /* pre_val */, T_OBJECT);
+    DecoratorSet decorators = access.decorators();
+    MemNode::MemOrd mo = access.mem_node_mo();
+    Node* mem = access.memory();
+    bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
+    Node* load_store = NULL;
+    Node* adr = access.addr().node();
+#ifdef _LP64
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
+      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
+      if (is_weak_cas) {
+        load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
+      } else {
+        load_store = kit->gvn().transform(new ShenandoahCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
+      }
+    } else
+#endif
+    {
+      if (is_weak_cas) {
+        load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
+      } else {
+        load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
+      }
+    }
+    access.set_raw_access(load_store);
+    pin_atomic_op(access);
+    return load_store;
+  }
+  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
+}
+
+Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* val, const Type* value_type) const {
+  GraphKit* kit = access.kit();
+  if (access.is_oop()) {
+    val = shenandoah_storeval_barrier(kit, val);
+  }
+  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
+  if (access.is_oop()) {
+    shenandoah_write_barrier_pre(kit, false /* do_load */,
+                                 NULL, NULL, max_juint, NULL, NULL,
+                                 result /* pre_val */, T_OBJECT);
+  }
+  return result;
+}
+
+void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
+  assert(!src->is_AddP(), "unexpected input");
+  src = shenandoah_read_barrier(kit, src);
+  BarrierSetC2::clone(kit, src, dst, size, is_array);
+}
+
+Node* ShenandoahBarrierSetC2::resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const {
+  bool is_write = (decorators & ACCESS_WRITE) != 0;
+  if (is_write) {
+    return shenandoah_write_barrier(kit, n);
+  } else {
+    return shenandoah_read_barrier(kit, n);
+  }
+}
+
+Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
+                                           Node*& i_o, Node*& needgc_ctrl,
+                                           Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
+                                           intx prefetch_lines) const {
+  PhaseIterGVN& igvn = macro->igvn();
+
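+  // Resulting layout (informal sketch): the Brooks forwarding pointer is
+  // placed immediately before the object, and the returned oop points just
+  // past it:
+  //
+  //   fast_oop - byte_size():  [fwd pointer]  --> initialized to fast_oop
+  //   fast_oop:                [mark][klass][fields / array data ...]
+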
+  // Allocate extra space for the Shenandoah Brooks (forwarding) pointer.
+  size_in_bytes = new AddXNode(size_in_bytes, igvn.MakeConX(ShenandoahBrooksPointer::byte_size()));
+  macro->transform_later(size_in_bytes);
+
+  Node* fast_oop = BarrierSetC2::obj_allocate(macro, ctrl, mem, toobig_false, size_in_bytes,
+                                              i_o, needgc_ctrl, fast_oop_ctrl, fast_oop_rawmem,
+                                              prefetch_lines);
+
+  // Bump the object start past the Shenandoah Brooks pointer.
+  fast_oop = new AddPNode(macro->top(), fast_oop, igvn.MakeConX(ShenandoahBrooksPointer::byte_size()));
+  macro->transform_later(fast_oop);
+
+  // Initialize the Shenandoah Brooks pointer to point to the object itself.
+  fast_oop_rawmem = macro->make_store(fast_oop_ctrl, fast_oop_rawmem, fast_oop, ShenandoahBrooksPointer::byte_offset(), fast_oop, T_OBJECT);
+
+  return fast_oop;
+}
+
+// Support for GC barriers emitted during parsing
+bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
+  if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
+    return false;
+  }
+  CallLeafNode *call = node->as_CallLeaf();
+  if (call->_name == NULL) {
+    return false;
+  }
+
+  return strcmp(call->_name, "shenandoah_clone_barrier") == 0 ||
+         strcmp(call->_name, "shenandoah_cas_obj") == 0 ||
+         strcmp(call->_name, "shenandoah_wb_pre") == 0;
+}
+
+Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
+  return ShenandoahBarrierNode::skip_through_barrier(c);
+}
+
+bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
+  return !ShenandoahWriteBarrierNode::expand(C, igvn);
+}
+
+bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
+  if (mode == LoopOptsShenandoahExpand) {
+    assert(UseShenandoahGC, "only for shenandoah");
+    ShenandoahWriteBarrierNode::pin_and_expand(phase);
+    return true;
+  } else if (mode == LoopOptsShenandoahPostExpand) {
+    assert(UseShenandoahGC, "only for shenandoah");
+    visited.Clear();
+    ShenandoahWriteBarrierNode::optimize_after_expansion(visited, nstack, worklist, phase);
+    return true;
+  }
+  GrowableArray<MemoryGraphFixer*> memory_graph_fixers;
+  ShenandoahWriteBarrierNode::optimize_before_expansion(phase, memory_graph_fixers, false);
+  return false;
+}
+
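+// Informal summary of the decision below: only oop copies need barriers.
+// With a tightly coupled allocation, barriers are needed only for non-clone
+// copies outside the Optimization phase. Otherwise, the Optimization phase
+// needs them only when ShenandoahStoreValEnqueueBarrier is off, and later
+// phases always do.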
+bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
+  bool is_oop = type == T_OBJECT || type == T_ARRAY;
+  if (!is_oop) {
+    return false;
+  }
+
+  if (tightly_coupled_alloc) {
+    if (phase == Optimization) {
+      return false;
+    }
+    return !is_clone;
+  }
+  if (phase == Optimization) {
+    return !ShenandoahStoreValEnqueueBarrier;
+  }
+  return true;
+}
+
+bool ShenandoahBarrierSetC2::clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIterGVN& igvn) {
+  Node* src = ac->in(ArrayCopyNode::Src);
+  const TypeOopPtr* src_type = igvn.type(src)->is_oopptr();
+  if (src_type->isa_instptr() != NULL) {
+    ciInstanceKlass* ik = src_type->klass()->as_instance_klass();
+    if ((src_type->klass_is_exact() || (!ik->is_interface() && !ik->has_subklass())) && !ik->has_injected_fields()) {
+      if (ik->has_object_fields()) {
+        return true;
+      } else {
+        if (!src_type->klass_is_exact()) {
+          igvn.C->dependencies()->assert_leaf_type(ik);
+        }
+      }
+    } else {
+      return true;
+    }
+  } else if (src_type->isa_aryptr()) {
+    BasicType src_elem  = src_type->klass()->as_array_klass()->element_type()->basic_type();
+    if (src_elem == T_OBJECT || src_elem == T_ARRAY) {
+      return true;
+    }
+  } else {
+    return true;
+  }
+  return false;
+}
+
+void ShenandoahBarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const {
+  assert(ac->is_clonebasic(), "no other kind of arraycopy here");
+
+  if (!clone_needs_postbarrier(ac, igvn)) {
+    BarrierSetC2::clone_barrier_at_expansion(ac, call, igvn);
+    return;
+  }
+
+  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
+  Node* c = new ProjNode(call, TypeFunc::Control);
+  c = igvn.transform(c);
+  Node* m = new ProjNode(call, TypeFunc::Memory);
+  m = igvn.transform(m);
+
+  Node* dest = ac->in(ArrayCopyNode::Dest);
+  assert(dest->is_AddP(), "bad input");
+  Node* barrier_call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
+                                        CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
+                                        "shenandoah_clone_barrier", raw_adr_type);
+  barrier_call->init_req(TypeFunc::Control, c);
+  barrier_call->init_req(TypeFunc::I_O    , igvn.C->top());
+  barrier_call->init_req(TypeFunc::Memory , m);
+  barrier_call->init_req(TypeFunc::ReturnAdr, igvn.C->top());
+  barrier_call->init_req(TypeFunc::FramePtr, igvn.C->top());
+  barrier_call->init_req(TypeFunc::Parms+0, dest->in(AddPNode::Base));
+
+  barrier_call = igvn.transform(barrier_call);
+  c = new ProjNode(barrier_call, TypeFunc::Control);
+  c = igvn.transform(c);
+  m = new ProjNode(barrier_call, TypeFunc::Memory);
+  m = igvn.transform(m);
+
+  Node* out_c = ac->proj_out(TypeFunc::Control);
+  Node* out_m = ac->proj_out(TypeFunc::Memory);
+  igvn.replace_node(out_c, c);
+  igvn.replace_node(out_m, m);
+}
+
+// Support for macro expanded GC barriers
+void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
+  if (node->Opcode() == Op_ShenandoahWriteBarrier) {
+    state()->add_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
+  }
+}
+
+void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
+  if (node->Opcode() == Op_ShenandoahWriteBarrier) {
+    state()->remove_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
+  }
+}
+
+void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
+  if (is_shenandoah_wb_pre_call(n)) {
+    shenandoah_eliminate_wb_pre(n, &macro->igvn());
+  }
+}
+
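+// The pre-barrier emitted by satb_write_barrier_pre leaves a recognizable
+// shape behind: the call's control projection feeds a chain of two-input
+// merge Regions (one per IdealKit diamond), with the marking test If above
+// them. Replacing that test's Cmp with a constant "equal" result lets the
+// whole barrier fold away; the extra address edge appended in
+// satb_write_barrier_pre is dropped as well.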
+void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
+  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
+  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
+  c = c->unique_ctrl_out();
+  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
+  c = c->unique_ctrl_out();
+  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
+  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
+  assert(iff->is_If(), "expect test");
+  if (!is_shenandoah_marking_if(igvn, iff)) {
+    c = c->unique_ctrl_out();
+    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
+    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
+    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
+  }
+  Node* cmpx = iff->in(1)->in(1);
+  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
+  igvn->rehash_node_delayed(call);
+  call->del_req(call->req()-1);
+}
+
+void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {
+  if (node->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(node)) {
+    igvn->add_users_to_worklist(node);
+  }
+}
+
+void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {
+  for (uint i = 0; i < useful.size(); i++) {
+    Node* n = useful.at(i);
+    if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
+      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+        C->record_for_igvn(n->fast_out(i));
+      }
+    }
+  }
+  for (int i = state()->shenandoah_barriers_count()-1; i >= 0; i--) {
+    ShenandoahWriteBarrierNode* n = state()->shenandoah_barrier(i);
+    if (!useful.member(n)) {
+      state()->remove_shenandoah_barrier(n);
+    }
+  }
+}
+
+bool ShenandoahBarrierSetC2::has_special_unique_user(const Node* node) const {
+  assert(node->outcnt() == 1, "match only for unique out");
+  Node* n = node->unique_out();
+  return node->Opcode() == Op_ShenandoahWriteBarrier && n->Opcode() == Op_ShenandoahWBMemProj;
+}
+
+void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}
+
+void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
+  return new(comp_arena) ShenandoahBarrierSetC2State(comp_arena);
+}
+
+ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
+  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
+}
+
+// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
+// expanded later, then now is the time to do so.
+bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }
+
+#ifdef ASSERT
+void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
+  if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeExpand) {
+    ShenandoahBarrierNode::verify(Compile::current()->root());
+  } else if (phase == BarrierSetC2::BeforeCodeGen) {
+    // Verify Shenandoah pre-barriers
+    const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
+
+    ResourceArea *area = Thread::current()->resource_area();
+    Unique_Node_List visited(area);
+    Node_List worklist(area);
+    // We're going to walk control flow backwards starting from the Root
+    worklist.push(compile->root());
+    while (worklist.size() > 0) {
+      Node *x = worklist.pop();
+      if (x == NULL || x == compile->top()) continue;
+      if (visited.member(x)) {
+        continue;
+      } else {
+        visited.push(x);
+      }
+
+      if (x->is_Region()) {
+        for (uint i = 1; i < x->req(); i++) {
+          worklist.push(x->in(i));
+        }
+      } else {
+        worklist.push(x->in(0));
+        // We are looking for the pattern:
+        //                            /->ThreadLocal
+        // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
+        //              \->ConI(0)
+        // We want to verify that the If and the LoadB have the same control,
+        // see ShenandoahBarrierSetC2::satb_write_barrier_pre().
+        if (x->is_If()) {
+          IfNode *iff = x->as_If();
+          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
+            CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
+            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
+                && cmp->in(1)->is_Load()) {
+              LoadNode *load = cmp->in(1)->as_Load();
+              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
+                  && load->in(2)->in(3)->is_Con()
+                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {
+
+                Node *if_ctrl = iff->in(0);
+                Node *load_ctrl = load->in(0);
+
+                if (if_ctrl != load_ctrl) {
+                  // Skip possible CProj->NeverBranch in infinite loops
+                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
+                      && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
+                    if_ctrl = if_ctrl->in(0)->in(0);
+                  }
+                }
+                assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+#endif
+
+Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const {
+  if (is_shenandoah_wb_pre_call(n)) {
+    uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
+    if (n->req() > cnt) {
+      Node* addp = n->in(cnt);
+      if (has_only_shenandoah_wb_pre_uses(addp)) {
+        n->del_req(cnt);
+        if (can_reshape) {
+          phase->is_IterGVN()->_worklist.push(addp);
+        }
+        return n;
+      }
+    }
+  }
+  if (n->Opcode() == Op_CmpP) {
+    Node* in1 = n->in(1);
+    Node* in2 = n->in(2);
+    if (in1->bottom_type() == TypePtr::NULL_PTR) {
+      in2 = step_over_gc_barrier(in2);
+    }
+    if (in2->bottom_type() == TypePtr::NULL_PTR) {
+      in1 = step_over_gc_barrier(in1);
+    }
+    PhaseIterGVN* igvn = phase->is_IterGVN();
+    if (in1 != n->in(1)) {
+      if (igvn != NULL) {
+        n->set_req_X(1, in1, igvn);
+      } else {
+        n->set_req(1, in1);
+      }
+      assert(in2 == n->in(2), "only one change");
+      return n;
+    }
+    if (in2 != n->in(2)) {
+      if (igvn != NULL) {
+        n->set_req_X(2, in2, igvn);
+      } else {
+        n->set_req(2, in2);
+      }
+      return n;
+    }
+  } else if (can_reshape &&
+             n->Opcode() == Op_If &&
+             ShenandoahWriteBarrierNode::is_heap_stable_test(n) &&
+             n->in(0) != NULL) {
+    Node* dom = n->in(0);
+    Node* prev_dom = n;
+    int op = n->Opcode();
+    int dist = 16;
+    // Search up the dominator tree for another heap stable test
+    while (dom->Opcode() != op    ||  // Not same opcode?
+           !ShenandoahWriteBarrierNode::is_heap_stable_test(dom) ||  // Not a heap stable test?
+           prev_dom->in(0) != dom) {  // One path of test does not dominate?
+      if (dist < 0) return NULL;
+
+      dist--;
+      prev_dom = dom;
+      dom = IfNode::up_one_dom(dom);
+      if (!dom) return NULL;
+    }
+
+    // Check that we did not follow a loop back to ourselves
+    if (n == dom) {
+      return NULL;
+    }
+
+    return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN());
+  }
+
+  return NULL;
+}
+
+Node* ShenandoahBarrierSetC2::identity_node(PhaseGVN* phase, Node* n) const {
+  if (n->is_Load()) {
+    Node *mem = n->in(MemNode::Memory);
+    Node *value = n->as_Load()->can_see_stored_value(mem, phase);
+    if (value) {
+      PhaseIterGVN *igvn = phase->is_IterGVN();
+      if (igvn != NULL &&
+          value->is_Phi() &&
+          value->req() > 2 &&
+          value->in(1) != NULL &&
+          value->in(1)->is_ShenandoahBarrier()) {
+        if (igvn->_worklist.member(value) ||
+            igvn->_worklist.member(value->in(0)) ||
+            (value->in(0)->in(1) != NULL &&
+             value->in(0)->in(1)->is_IfProj() &&
+             (igvn->_worklist.member(value->in(0)->in(1)) ||
+              (value->in(0)->in(1)->in(0) != NULL &&
+               igvn->_worklist.member(value->in(0)->in(1)->in(0)))))) {
+          igvn->_worklist.push(n);
+          return n;
+        }
+      }
+      // (This works even when value is a Con, but LoadNode::Value
+      // usually runs first, producing the singleton type of the Con.)
+      Node *value_no_barrier = step_over_gc_barrier(value->Opcode() == Op_EncodeP ? value->in(1) : value);
+      if (value->Opcode() == Op_EncodeP) {
+        if (value_no_barrier != value->in(1)) {
+          Node *encode = value->clone();
+          encode->set_req(1, value_no_barrier);
+          encode = phase->transform(encode);
+          return encode;
+        }
+      } else {
+        return value_no_barrier;
+      }
+    }
+  }
+  return n;
+}
+
+bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node* u = n->fast_out(i);
+    if (!is_shenandoah_wb_pre_call(u)) {
+      return false;
+    }
+  }
+  return n->outcnt() > 0;
+}
+
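+// Flatten all Brooks-pointer accesses onto one alias class per base kind
+// (array vs. instance), so that loads and stores of the forwarding pointer
+// share a single memory slice regardless of the precise base type.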
+bool ShenandoahBarrierSetC2::flatten_gc_alias_type(const TypePtr*& adr_type) const {
+  int offset = adr_type->offset();
+  if (offset == ShenandoahBrooksPointer::byte_offset()) {
+    if (adr_type->isa_aryptr()) {
+      adr_type = TypeAryPtr::make(adr_type->ptr(), adr_type->isa_aryptr()->ary(), adr_type->isa_aryptr()->klass(), false, offset);
+    } else if (adr_type->isa_instptr()) {
+      adr_type = TypeInstPtr::make(adr_type->ptr(), ciEnv::current()->Object_klass(), false, NULL, offset);
+    }
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
+  switch (opcode) {
+    case Op_CallLeaf:
+    case Op_CallLeafNoFP: {
+      assert (n->is_Call(), "");
+      CallNode *call = n->as_Call();
+      if (ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(call)) {
+        uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
+        if (call->req() > cnt) {
+          assert(call->req() == cnt + 1, "only one extra input");
+          Node *addp = call->in(cnt);
+          assert(!ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(addp), "useless address computation?");
+          call->del_req(cnt);
+        }
+      }
+      return false;
+    }
+    case Op_ShenandoahCompareAndSwapP:
+    case Op_ShenandoahCompareAndSwapN:
+    case Op_ShenandoahWeakCompareAndSwapN:
+    case Op_ShenandoahWeakCompareAndSwapP:
+    case Op_ShenandoahCompareAndExchangeP:
+    case Op_ShenandoahCompareAndExchangeN:
+#ifdef ASSERT
+      if (VerifyOptoOopOffsets) {
+        MemNode* mem = n->as_Mem();
+        // Check to see if address types have grounded out somehow.
+        const TypeInstPtr* tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
+        if (tp != NULL) {
+          ciInstanceKlass* k = tp->klass()->as_instance_klass();
+          assert(k->contains_field_offset(tp->offset()), "oop offset is not sane");
+        }
+      }
+#endif
+      return true;
+    case Op_ShenandoahReadBarrier:
+      return true;
+    case Op_ShenandoahWriteBarrier:
+      assert(false, "should have been expanded already");
+      return true;
+    default:
+      return false;
+  }
+}
+
+#ifdef ASSERT
+bool ShenandoahBarrierSetC2::verify_gc_alias_type(const TypePtr* adr_type, int offset) const {
+  if (offset == ShenandoahBrooksPointer::byte_offset() &&
+      (adr_type->base() == Type::AryPtr || adr_type->base() == Type::OopPtr)) {
+    return true;
+  } else {
+    return false;
+  }
+}
+#endif
+
+bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
+  switch (opcode) {
+    case Op_ShenandoahCompareAndExchangeP:
+    case Op_ShenandoahCompareAndExchangeN:
+      conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
+      // fallthrough
+    case Op_ShenandoahWeakCompareAndSwapP:
+    case Op_ShenandoahWeakCompareAndSwapN:
+    case Op_ShenandoahCompareAndSwapP:
+    case Op_ShenandoahCompareAndSwapN:
+      conn_graph->add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
+      return true;
+    case Op_StoreP: {
+      Node* adr = n->in(MemNode::Address);
+      const Type* adr_type = gvn->type(adr);
+      // Pointer stores in Shenandoah barriers look like unsafe accesses.
+      // Ignore such stores so that non-escaping allocations can still be
+      // scalar-replaced.
+      if (adr_type->isa_rawptr() && adr->is_AddP()) {
+        Node* base = conn_graph->get_addp_base(adr);
+        if (base->Opcode() == Op_LoadP &&
+          base->in(MemNode::Address)->is_AddP()) {
+          adr = base->in(MemNode::Address);
+          Node* tls = conn_graph->get_addp_base(adr);
+          if (tls->Opcode() == Op_ThreadLocal) {
+             int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
+             const int buf_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
+             if (offs == buf_offset) {
+               return true; // Pre barrier previous oop value store.
+             }
+          }
+        }
+      }
+      return false;
+    }
+    case Op_ShenandoahReadBarrier:
+    case Op_ShenandoahWriteBarrier:
+      // Barriers 'pass through' their argument: what goes in comes out,
+      // and it does not escape.
+      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), delayed_worklist);
+      break;
+    case Op_ShenandoahEnqueueBarrier:
+      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
+      break;
+    default:
+      // Nothing
+      break;
+  }
+  return false;
+}
+
+bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const {
+  switch (opcode) {
+    case Op_ShenandoahCompareAndExchangeP:
+    case Op_ShenandoahCompareAndExchangeN: {
+      Node *adr = n->in(MemNode::Address);
+      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
+      // fallthrough
+    }
+    case Op_ShenandoahCompareAndSwapP:
+    case Op_ShenandoahCompareAndSwapN:
+    case Op_ShenandoahWeakCompareAndSwapP:
+    case Op_ShenandoahWeakCompareAndSwapN:
+      return conn_graph->add_final_edges_unsafe_access(n, opcode);
+    case Op_ShenandoahReadBarrier:
+    case Op_ShenandoahWriteBarrier:
+      // Barriers 'pass through' their argument: what goes in comes out,
+      // and it does not escape.
+      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), NULL);
+      return true;
+    case Op_ShenandoahEnqueueBarrier:
+      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
+      return true;
+    default:
+      // Nothing
+      break;
+  }
+  return false;
+}
+
+bool ShenandoahBarrierSetC2::escape_has_out_with_unsafe_object(Node* n) const {
+  return n->has_out_with(Op_ShenandoahCompareAndExchangeP) || n->has_out_with(Op_ShenandoahCompareAndExchangeN) ||
+         n->has_out_with(Op_ShenandoahCompareAndSwapP, Op_ShenandoahCompareAndSwapN, Op_ShenandoahWeakCompareAndSwapP, Op_ShenandoahWeakCompareAndSwapN);
+}
+
+bool ShenandoahBarrierSetC2::escape_is_barrier_node(Node* n) const {
+  return n->is_ShenandoahBarrier();
+}
+
+bool ShenandoahBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const {
+  switch (opcode) {
+    case Op_ShenandoahReadBarrier:
+      if (n->in(ShenandoahBarrierNode::ValueIn)->is_DecodeNarrowPtr()) {
+        matcher->set_shared(n->in(ShenandoahBarrierNode::ValueIn)->in(1));
+      }
+      matcher->set_shared(n);
+      return true;
+    default:
+      break;
+  }
+  return false;
+}
+
+bool ShenandoahBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
+  switch (opcode) {
+    case Op_ShenandoahCompareAndExchangeP:
+    case Op_ShenandoahCompareAndExchangeN:
+    case Op_ShenandoahWeakCompareAndSwapP:
+    case Op_ShenandoahWeakCompareAndSwapN:
+    case Op_ShenandoahCompareAndSwapP:
+    case Op_ShenandoahCompareAndSwapN: {   // Convert ternary into a binary tree
+      Node* newval = n->in(MemNode::ValueIn);
+      Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
+      Node* pair = new BinaryNode(oldval, newval);
+      n->set_req(MemNode::ValueIn, pair);
+      n->del_req(LoadStoreConditionalNode::ExpectedIn);
+      return true;
+    }
+    default:
+      break;
+  }
+  return false;
+}
+
+bool ShenandoahBarrierSetC2::matcher_is_store_load_barrier(Node* x, uint xop) const {
+  return xop == Op_ShenandoahCompareAndExchangeP ||
+         xop == Op_ShenandoahCompareAndExchangeN ||
+         xop == Op_ShenandoahWeakCompareAndSwapP ||
+         xop == Op_ShenandoahWeakCompareAndSwapN ||
+         xop == Op_ShenandoahCompareAndSwapN ||
+         xop == Op_ShenandoahCompareAndSwapP;
+}
+
+void ShenandoahBarrierSetC2::igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const {
+  if (use->is_ShenandoahBarrier()) {
+    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+      Node* u = use->fast_out(i2);
+      if (u->Opcode() == Op_CmpP) {
+        igvn->_worklist.push(u);
+      }
+    }
+  }
+}
+
+void ShenandoahBarrierSetC2::ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const {
+  if (use->is_ShenandoahBarrier()) {
+    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+      Node* p = use->fast_out(i2);
+      if (p->Opcode() == Op_AddP) {
+        for (DUIterator_Fast i3max, i3 = p->fast_outs(i3max); i3 < i3max; i3++) {
+          Node* q = p->fast_out(i3);
+          if (q->is_Load()) {
+            if (q->bottom_type() != ccp->type(q)) {
+              worklist.push(q);
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+Node* ShenandoahBarrierSetC2::split_if_pre(PhaseIdealLoop* phase, Node* n) const {
+  if (n->Opcode() == Op_ShenandoahReadBarrier) {
+    ((ShenandoahReadBarrierNode*)n)->try_move(phase);
+  } else if (n->Opcode() == Op_ShenandoahWriteBarrier) {
+    return ((ShenandoahWriteBarrierNode*)n)->try_split_thru_phi(phase);
+  }
+
+  return NULL;
+}
+
+bool ShenandoahBarrierSetC2::build_loop_late_post(PhaseIdealLoop* phase, Node* n) const {
+  return ShenandoahBarrierNode::build_loop_late_post(phase, n);
+}
+
+bool ShenandoahBarrierSetC2::sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const {
+  if (n->is_ShenandoahBarrier()) {
+    return x->as_ShenandoahBarrier()->sink_node(phase, x_ctrl, n_ctrl);
+  }
+  if (n->is_MergeMem()) {
+    // PhaseIdealLoop::split_if_with_blocks_post() would call
+    //   _igvn._worklist.yank(x);
+    // which sometimes creates chains of MergeMem that some of the
+    // Shenandoah-specific code does not support.
+    phase->register_new_node(x, x_ctrl);
+    return true;
+  }
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
+#define SHARE_VM_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
+
+#include "gc/shared/c2/barrierSetC2.hpp"
+#include "gc/shenandoah/c2/shenandoahSupport.hpp"
+#include "utilities/growableArray.hpp"
+
+class ShenandoahBarrierSetC2State : public ResourceObj {
+private:
+  GrowableArray<ShenandoahWriteBarrierNode*>* _shenandoah_barriers;
+
+public:
+  ShenandoahBarrierSetC2State(Arena* comp_arena);
+  int shenandoah_barriers_count() const;
+  ShenandoahWriteBarrierNode* shenandoah_barrier(int idx) const;
+  void add_shenandoah_barrier(ShenandoahWriteBarrierNode * n);
+  void remove_shenandoah_barrier(ShenandoahWriteBarrierNode * n);
+};
+
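+// C2 entry points for emitting and managing Shenandoah's read, write,
+// SATB pre- and storeval barriers (informal overview; see the matching
+// .cpp file for the actual IR shapes).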
+class ShenandoahBarrierSetC2 : public BarrierSetC2 {
+private:
+  void shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const;
+
+  bool satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
+                                   BasicType bt, uint adr_idx) const;
+  void satb_write_barrier_pre(GraphKit* kit, bool do_load,
+                              Node* obj,
+                              Node* adr,
+                              uint alias_idx,
+                              Node* val,
+                              const TypeOopPtr* val_type,
+                              Node* pre_val,
+                              BasicType bt) const;
+
+  void shenandoah_write_barrier_pre(GraphKit* kit,
+                                    bool do_load,
+                                    Node* obj,
+                                    Node* adr,
+                                    uint alias_idx,
+                                    Node* val,
+                                    const TypeOopPtr* val_type,
+                                    Node* pre_val,
+                                    BasicType bt) const;
+
+  Node* shenandoah_enqueue_barrier(GraphKit* kit, Node* val) const;
+  Node* shenandoah_read_barrier(GraphKit* kit, Node* obj) const;
+  Node* shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const;
+  Node* shenandoah_write_barrier(GraphKit* kit, Node* obj) const;
+  Node* shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const;
+  Node* shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const;
+  Node* shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const;
+
+  void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
+                          Node* pre_val, bool need_mem_bar) const;
+
+  static bool clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIterGVN& igvn);
+
+protected:
+  virtual void resolve_address(C2Access& access) const;
+  virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
+  virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
+  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
+                                               Node* new_val, const Type* val_type) const;
+  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
+                                                Node* new_val, const Type* value_type) const;
+  virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
+
+public:
+  static ShenandoahBarrierSetC2* bsc2();
+
+  static bool is_shenandoah_wb_pre_call(Node* call);
+  static bool is_shenandoah_wb_call(Node* call);
+  static bool is_shenandoah_marking_if(PhaseTransform *phase, Node* n);
+  static bool is_shenandoah_state_load(Node* n);
+  static bool has_only_shenandoah_wb_pre_uses(Node* n);
+
+  ShenandoahBarrierSetC2State* state() const;
+
+  static const TypeFunc* write_ref_field_pre_entry_Type();
+  static const TypeFunc* shenandoah_clone_barrier_Type();
+  static const TypeFunc* shenandoah_write_barrier_Type();
+
+  // This is the entry-point for the backend to perform accesses through the Access API.
+  virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;
+
+  virtual Node* resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const;
+
+  virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
+                             Node*& i_o, Node*& needgc_ctrl,
+                             Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
+                             intx prefetch_lines) const;
+
+  // These are general helper methods used by C2
+  virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const;
+  virtual void clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const;
+
+  // Support for GC barriers emitted during parsing
+  virtual bool is_gc_barrier_node(Node* node) const;
+  virtual Node* step_over_gc_barrier(Node* c) const;
+  virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const;
+  virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const;
+  virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return mode == LoopOptsShenandoahExpand || mode == LoopOptsShenandoahPostExpand; }
+  virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return mode == LoopOptsShenandoahExpand || mode == LoopOptsShenandoahPostExpand; }
+
+  // Support for macro expanded GC barriers
+  virtual void register_potential_barrier_node(Node* node) const;
+  virtual void unregister_potential_barrier_node(Node* node) const;
+  virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const;
+  virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const;
+  virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const;
+  virtual void add_users_to_worklist(Unique_Node_List* worklist) const;
+
+  // Allow barrier sets to have shared state that is preserved across a compilation unit.
+  // This could for example comprise macro nodes to be expanded during macro expansion.
+  virtual void* create_barrier_state(Arena* comp_arena) const;
+  // If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
+  // expanded later, then now is the time to do so.
+  virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const;
+
+#ifdef ASSERT
+  virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const;
+#endif
+
+  virtual bool flatten_gc_alias_type(const TypePtr*& adr_type) const;
+#ifdef ASSERT
+  virtual bool verify_gc_alias_type(const TypePtr* adr_type, int offset) const;
+#endif
+
+  virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const;
+  virtual Node* identity_node(PhaseGVN* phase, Node* n) const;
+  virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const;
+
+  virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const;
+  virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const;
+  virtual bool escape_has_out_with_unsafe_object(Node* n) const;
+  virtual bool escape_is_barrier_node(Node* n) const;
+
+  virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const;
+  virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const;
+  virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const;
+
+  virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const;
+  virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const;
+
+  virtual bool has_special_unique_user(const Node* node) const;
+  virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const;
+  virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const;
+  virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,4278 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/c2/shenandoahSupport.hpp"
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahRuntime.hpp"
+#include "opto/arraycopynode.hpp"
+#include "opto/block.hpp"
+#include "opto/callnode.hpp"
+#include "opto/castnode.hpp"
+#include "opto/movenode.hpp"
+#include "opto/phaseX.hpp"
+#include "opto/rootnode.hpp"
+#include "opto/runtime.hpp"
+#include "opto/subnode.hpp"
+
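+// Strip Shenandoah barriers off a value to recover the underlying oop: peel
+// off an enqueue barrier or a read/write barrier, and also match the
+// null-check diamond that barrier expansion leaves behind,
+//   Phi(WB(CastPP(res)), NULL) under If(CmpP(res, NULL)),
+// in which case res is returned.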
+Node* ShenandoahBarrierNode::skip_through_barrier(Node* n) {
+  if (n == NULL) {
+    return NULL;
+  }
+  if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
+    n = n->in(1);
+  }
+
+  if (n->is_ShenandoahBarrier()) {
+    return n->in(ValueIn);
+  } else if (n->is_Phi() &&
+             n->req() == 3 &&
+             n->in(1) != NULL &&
+             n->in(1)->is_ShenandoahBarrier() &&
+             n->in(2) != NULL &&
+             n->in(2)->bottom_type() == TypePtr::NULL_PTR &&
+             n->in(0) != NULL &&
+             n->in(0)->in(1) != NULL &&
+             n->in(0)->in(1)->is_IfProj() &&
+             n->in(0)->in(2) != NULL &&
+             n->in(0)->in(2)->is_IfProj() &&
+             n->in(0)->in(1)->in(0) != NULL &&
+             n->in(0)->in(1)->in(0) == n->in(0)->in(2)->in(0) &&
+             n->in(1)->in(ValueIn)->Opcode() == Op_CastPP) {
+    Node* iff = n->in(0)->in(1)->in(0);
+    Node* res = n->in(1)->in(ValueIn)->in(1);
+    if (iff->is_If() &&
+        iff->in(1) != NULL &&
+        iff->in(1)->is_Bool() &&
+        iff->in(1)->as_Bool()->_test._test == BoolTest::ne &&
+        iff->in(1)->in(1) != NULL &&
+        iff->in(1)->in(1)->Opcode() == Op_CmpP &&
+        iff->in(1)->in(1)->in(1) != NULL &&
+        iff->in(1)->in(1)->in(1) == res &&
+        iff->in(1)->in(1)->in(2) != NULL &&
+        iff->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
+      return res;
+    }
+  }
+  return n;
+}
+
+bool ShenandoahBarrierNode::needs_barrier(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace) {
+  Unique_Node_List visited;
+  return needs_barrier_impl(phase, orig, n, rb_mem, allow_fromspace, visited);
+}
+
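+// Recursively walk the def chain of n to decide whether it needs a barrier:
+// freshly allocated objects, NULLs, constants and values already behind a
+// barrier need none; parameters, values produced by calls and heap loads do.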
+bool ShenandoahBarrierNode::needs_barrier_impl(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace, Unique_Node_List &visited) {
+  if (visited.member(n)) {
+    return false; // Been there.
+  }
+  visited.push(n);
+
+  if (n->is_Allocate()) {
+    return false;
+  }
+
+  if (n->is_CallJava() || n->Opcode() == Op_CallLeafNoFP) {
+    return true;
+  }
+
+  const Type* type = phase->type(n);
+  if (type == Type::TOP) {
+    return false;
+  }
+  if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
+    return false;
+  }
+  if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
+    return false;
+  }
+
+  if (ShenandoahOptimizeStableFinals) {
+    const TypeAryPtr* ary = type->isa_aryptr();
+    if (ary && ary->is_stable() && allow_fromspace) {
+      return false;
+    }
+  }
+
+  if (n->is_CheckCastPP() || n->is_ConstraintCast() || n->Opcode() == Op_ShenandoahEnqueueBarrier) {
+    return needs_barrier_impl(phase, orig, n->in(1), rb_mem, allow_fromspace, visited);
+  }
+  if (n->is_Parm()) {
+    return true;
+  }
+  if (n->is_Proj()) {
+    return needs_barrier_impl(phase, orig, n->in(0), rb_mem, allow_fromspace, visited);
+  }
+
+  if (n->Opcode() == Op_ShenandoahWBMemProj) {
+    return needs_barrier_impl(phase, orig, n->in(ShenandoahWBMemProjNode::WriteBarrier), rb_mem, allow_fromspace, visited);
+  }
+  if (n->is_Phi()) {
+    bool need_barrier = false;
+    for (uint i = 1; i < n->req() && ! need_barrier; i++) {
+      Node* input = n->in(i);
+      if (input == NULL) {
+        need_barrier = true; // Phi not complete yet?
+      } else if (needs_barrier_impl(phase, orig, input, rb_mem, allow_fromspace, visited)) {
+        need_barrier = true;
+      }
+    }
+    return need_barrier;
+  }
+  if (n->is_CMove()) {
+    return needs_barrier_impl(phase, orig, n->in(CMoveNode::IfFalse), rb_mem, allow_fromspace, visited) ||
+           needs_barrier_impl(phase, orig, n->in(CMoveNode::IfTrue ), rb_mem, allow_fromspace, visited);
+  }
+  if (n->Opcode() == Op_CreateEx) {
+    return true;
+  }
+  if (n->Opcode() == Op_ShenandoahWriteBarrier) {
+    return false;
+  }
+  if (n->Opcode() == Op_ShenandoahReadBarrier) {
+    if (rb_mem == n->in(Memory)) {
+      return false;
+    } else {
+      return true;
+    }
+  }
+
+  if (n->Opcode() == Op_LoadP ||
+      n->Opcode() == Op_LoadN ||
+      n->Opcode() == Op_GetAndSetP ||
+      n->Opcode() == Op_CompareAndExchangeP ||
+      n->Opcode() == Op_ShenandoahCompareAndExchangeP ||
+      n->Opcode() == Op_GetAndSetN ||
+      n->Opcode() == Op_CompareAndExchangeN ||
+      n->Opcode() == Op_ShenandoahCompareAndExchangeN) {
+    return true;
+  }
+  if (n->Opcode() == Op_DecodeN ||
+      n->Opcode() == Op_EncodeP) {
+    return needs_barrier_impl(phase, orig, n->in(1), rb_mem, allow_fromspace, visited);
+  }
+
+#ifdef ASSERT
+  tty->print("need barrier on?: "); n->dump();
+  ShouldNotReachHere();
+#endif
+  return true;
+}
+
+bool ShenandoahReadBarrierNode::dominates_memory_rb_impl(PhaseGVN* phase,
+                                                         Node* b1,
+                                                         Node* b2,
+                                                         Node* current,
+                                                         bool linear) {
+  ResourceMark rm;
+  VectorSet visited(Thread::current()->resource_area());
+  Node_Stack phis(0);
+
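+  // Bounded walk up the memory graph; conservatively give up (return false)
+  // if the walk has not terminated after 10 steps.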
+  for (int i = 0; i < 10; i++) {
+    if (current == NULL) {
+      return false;
+    } else if (visited.test_set(current->_idx) || current->is_top() || current == b1) {
+      current = NULL;
+      while (phis.is_nonempty() && current == NULL) {
+        uint idx = phis.index();
+        Node* phi = phis.node();
+        if (idx >= phi->req()) {
+          phis.pop();
+        } else {
+          current = phi->in(idx);
+          phis.set_index(idx+1);
+        }
+      }
+      if (current == NULL) {
+        return true;
+      }
+    } else if (current == phase->C->immutable_memory()) {
+      return false;
+    } else if (current->isa_Phi()) {
+      if (!linear) {
+        return false;
+      }
+      phis.push(current, 2);
+      current = current->in(1);
+    } else if (current->Opcode() == Op_ShenandoahWriteBarrier) {
+      const Type* in_type = current->bottom_type();
+      const Type* this_type = b2->bottom_type();
+      if (is_independent(in_type, this_type)) {
+        current = current->in(Memory);
+      } else {
+        return false;
+      }
+    } else if (current->Opcode() == Op_ShenandoahWBMemProj) {
+      current = current->in(ShenandoahWBMemProjNode::WriteBarrier);
+    } else if (current->is_Proj()) {
+      current = current->in(0);
+    } else if (current->is_Call()) {
+      return false; // TODO: Maybe improve by looking at the call's memory effects?
+    } else if (current->is_MemBar()) {
+      return false; // TODO: Do we need to stop at *any* membar?
+    } else if (current->is_MergeMem()) {
+      const TypePtr* adr_type = brooks_pointer_type(phase->type(b2));
+      uint alias_idx = phase->C->get_alias_index(adr_type);
+      current = current->as_MergeMem()->memory_at(alias_idx);
+    } else {
+#ifdef ASSERT
+      current->dump();
+#endif
+      ShouldNotReachHere();
+      return false;
+    }
+  }
+  return false;
+}
+
+bool ShenandoahReadBarrierNode::is_independent(Node* mem) {
+  if (mem->is_Phi() || mem->is_Proj() || mem->is_MergeMem()) {
+    return true;
+  } else if (mem->Opcode() == Op_ShenandoahWBMemProj) {
+    return true;
+  } else if (mem->Opcode() == Op_ShenandoahWriteBarrier) {
+    const Type* mem_type = mem->bottom_type();
+    const Type* this_type = bottom_type();
+    if (is_independent(mem_type, this_type)) {
+      return true;
+    } else {
+      return false;
+    }
+  } else if (mem->is_Call() || mem->is_MemBar()) {
+    return false;
+  }
+#ifdef ASSERT
+  mem->dump();
+#endif
+  ShouldNotReachHere();
+  return true;
+}
+
+bool ShenandoahReadBarrierNode::dominates_memory_rb(PhaseGVN* phase, Node* b1, Node* b2, bool linear) {
+  return dominates_memory_rb_impl(phase, b1->in(Memory), b2, b2->in(Memory), linear);
+}
+
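+// The two barrier types are independent if both klasses are loaded and
+// neither is a subclass of the other, so the barriers can never refer to the
+// same object.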
+bool ShenandoahReadBarrierNode::is_independent(const Type* in_type, const Type* this_type) {
+  assert(in_type->isa_oopptr(), "expect oop ptr");
+  assert(this_type->isa_oopptr(), "expect oop ptr");
+
+  ciKlass* in_kls = in_type->is_oopptr()->klass();
+  ciKlass* this_kls = this_type->is_oopptr()->klass();
+  if (in_kls != NULL && this_kls != NULL &&
+      in_kls->is_loaded() && this_kls->is_loaded() &&
+      (!in_kls->is_subclass_of(this_kls)) &&
+      (!this_kls->is_subclass_of(in_kls))) {
+    return true;
+  }
+  return false;
+}
+
+Node* ShenandoahReadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if (! can_reshape) {
+    return NULL;
+  }
+
+  if (in(Memory) == phase->C->immutable_memory()) return NULL;
+
+  // If memory input is a MergeMem, take the appropriate slice out of it.
+  Node* mem_in = in(Memory);
+  if (mem_in->isa_MergeMem()) {
+    const TypePtr* adr_type = brooks_pointer_type(bottom_type());
+    uint alias_idx = phase->C->get_alias_index(adr_type);
+    mem_in = mem_in->as_MergeMem()->memory_at(alias_idx);
+    set_req(Memory, mem_in);
+    return this;
+  }
+
+  Node* input = in(Memory);
+  if (input->Opcode() == Op_ShenandoahWBMemProj) {
+    ResourceMark rm;
+    VectorSet seen(Thread::current()->resource_area());
+    Node* n = in(Memory);
+    while (n->Opcode() == Op_ShenandoahWBMemProj &&
+           n->in(ShenandoahWBMemProjNode::WriteBarrier) != NULL &&
+           n->in(ShenandoahWBMemProjNode::WriteBarrier)->Opcode() == Op_ShenandoahWriteBarrier &&
+           n->in(ShenandoahWBMemProjNode::WriteBarrier)->in(Memory) != NULL) {
+      if (seen.test_set(n->_idx)) {
+        return NULL; // loop
+      }
+      n = n->in(ShenandoahWBMemProjNode::WriteBarrier)->in(Memory);
+    }
+
+    Node* wb = input->in(ShenandoahWBMemProjNode::WriteBarrier);
+    const Type* in_type = phase->type(wb);
+    // is_top() test not sufficient here: we can come here after CCP
+    // in a dead branch of the graph that has not yet been removed.
+    if (in_type == Type::TOP) return NULL; // Dead path.
+    assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "expect write barrier");
+    if (is_independent(in_type, _type)) {
+      phase->igvn_rehash_node_delayed(wb);
+      set_req(Memory, wb->in(Memory));
+      if (can_reshape && input->outcnt() == 0) {
+        phase->is_IterGVN()->_worklist.push(input);
+      }
+      return this;
+    }
+  }
+  return NULL;
+}
+
+ShenandoahWriteBarrierNode::ShenandoahWriteBarrierNode(Compile* C, Node* ctrl, Node* mem, Node* obj)
+  : ShenandoahBarrierNode(ctrl, mem, obj, false) {
+  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
+  ShenandoahBarrierSetC2::bsc2()->state()->add_shenandoah_barrier(this);
+}
+
+Node* ShenandoahWriteBarrierNode::Identity(PhaseGVN* phase) {
+  assert(in(0) != NULL, "should have control");
+  PhaseIterGVN* igvn = phase->is_IterGVN();
+  Node* mem_in = in(Memory);
+  Node* mem_proj = NULL;
+
+  if (igvn != NULL) {
+    mem_proj = find_out_with(Op_ShenandoahWBMemProj);
+    if (mem_in == mem_proj) {
+      return this;
+    }
+  }
+
+  Node* replacement = Identity_impl(phase);
+  if (igvn != NULL) {
+    if (replacement != NULL && replacement != this && mem_proj != NULL) {
+      igvn->replace_node(mem_proj, mem_in);
+    }
+  }
+  return replacement;
+}
+
+Node* ShenandoahWriteBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  assert(in(0) != NULL, "should have control");
+  if (!can_reshape) {
+    return NULL;
+  }
+
+  Node* mem_in = in(Memory);
+
+  if (mem_in->isa_MergeMem()) {
+    const TypePtr* adr_type = brooks_pointer_type(bottom_type());
+    uint alias_idx = phase->C->get_alias_index(adr_type);
+    mem_in = mem_in->as_MergeMem()->memory_at(alias_idx);
+    set_req(Memory, mem_in);
+    return this;
+  }
+
+  Node* val = in(ValueIn);
+  if (val->is_ShenandoahBarrier()) {
+    set_req(ValueIn, val->in(ValueIn));
+    return this;
+  }
+
+  return NULL;
+}
+
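+// Late barrier expansion: run a dedicated ideal-loop pass
+// (LoopOptsShenandoahExpand) to expand the write barriers and, optionally,
+// one more round of loop opts (LoopOptsShenandoahPostExpand) on the result.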
+bool ShenandoahWriteBarrierNode::expand(Compile* C, PhaseIterGVN& igvn) {
+  if (UseShenandoahGC) {
+    if (ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() > 0 || (!ShenandoahWriteBarrier && ShenandoahStoreValEnqueueBarrier)) {
+      bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
+      C->clear_major_progress();
+      PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
+      if (C->failing()) return false;
+      PhaseIdealLoop::verify(igvn);
+      DEBUG_ONLY(ShenandoahBarrierNode::verify_raw_mem(C->root());)
+      if (attempt_more_loopopts) {
+        C->set_major_progress();
+        if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
+          return false;
+        }
+        C->clear_major_progress();
+      }
+    }
+  }
+  return true;
+}
+
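+// Match the heap-state test shape emitted for the barriers, i.e.
+//   if ((gc_state & mask) != 0)
+// in ideal form: If(Bool(ne, CmpI(AndI(<gc-state load>, mask), 0))).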
+bool ShenandoahWriteBarrierNode::is_heap_state_test(Node* iff, int mask) {
+  if (!UseShenandoahGC) {
+    return false;
+  }
+  assert(iff->is_If(), "bad input");
+  if (iff->Opcode() != Op_If) {
+    return false;
+  }
+  Node* bol = iff->in(1);
+  if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
+    return false;
+  }
+  Node* cmp = bol->in(1);
+  if (cmp->Opcode() != Op_CmpI) {
+    return false;
+  }
+  Node* in1 = cmp->in(1);
+  Node* in2 = cmp->in(2);
+  if (in2->find_int_con(-1) != 0) {
+    return false;
+  }
+  if (in1->Opcode() != Op_AndI) {
+    return false;
+  }
+  in2 = in1->in(2);
+  if (in2->find_int_con(-1) != mask) {
+    return false;
+  }
+  in1 = in1->in(1);
+
+  return is_gc_state_load(in1);
+}
+
+bool ShenandoahWriteBarrierNode::is_heap_stable_test(Node* iff) {
+  return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
+}
+
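+// Match a LoadB/LoadUB of the thread-local gc-state flag, i.e. a load from
+// AddP(ThreadLocal, gc_state_offset).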
+bool ShenandoahWriteBarrierNode::is_gc_state_load(Node *n) {
+  if (!UseShenandoahGC) {
+    return false;
+  }
+  if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
+    return false;
+  }
+  Node* addp = n->in(MemNode::Address);
+  if (!addp->is_AddP()) {
+    return false;
+  }
+  Node* base = addp->in(AddPNode::Address);
+  Node* off = addp->in(AddPNode::Offset);
+  if (base->Opcode() != Op_ThreadLocal) {
+    return false;
+  }
+  if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
+    return false;
+  }
+  return true;
+}
+
+bool ShenandoahWriteBarrierNode::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
+  assert(phase->is_dominator(stop, start), "bad inputs");
+  ResourceMark rm;
+  Unique_Node_List wq;
+  wq.push(start);
+  for (uint next = 0; next < wq.size(); next++) {
+    Node *m = wq.at(next);
+    if (m == stop) {
+      continue;
+    }
+    if (m->is_SafePoint() && !m->is_CallLeaf()) {
+      return true;
+    }
+    if (m->is_Region()) {
+      for (uint i = 1; i < m->req(); i++) {
+        wq.push(m->in(i));
+      }
+    } else {
+      wq.push(m->in(0));
+    }
+  }
+  return false;
+}
+
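+// Common up this gc-state load with an equivalent dominating load, but only
+// if no safepoint (at which the gc state could change) lies in between.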
+bool ShenandoahWriteBarrierNode::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) {
+  assert(is_gc_state_load(n), "inconsistent");
+  Node* addp = n->in(MemNode::Address);
+  Node* dominator = NULL;
+  for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
+    Node* u = addp->fast_out(i);
+    assert(is_gc_state_load(u), "inconsistent");
+    if (u != n && phase->is_dominator(u->in(0), n->in(0))) {
+      if (dominator == NULL) {
+        dominator = u;
+      } else {
+        if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) {
+          dominator = u;
+        }
+      }
+    }
+  }
+  if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) {
+    return false;
+  }
+  phase->igvn().replace_node(n, dominator);
+
+  return true;
+}
+
+bool ShenandoahBarrierNode::dominates_memory_impl(PhaseGVN* phase,
+                                                  Node* b1,
+                                                  Node* b2,
+                                                  Node* current,
+                                                  bool linear) {
+  ResourceMark rm;
+  VectorSet visited(Thread::current()->resource_area());
+  Node_Stack phis(0);
+
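+  // Bounded walk up the memory graph; conservatively give up (return false)
+  // if the walk has not terminated after 10 steps.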
+  for (int i = 0; i < 10; i++) {
+    if (current == NULL) {
+      return false;
+    } else if (visited.test_set(current->_idx) || current->is_top() || current == b1) {
+      current = NULL;
+      while (phis.is_nonempty() && current == NULL) {
+        uint idx = phis.index();
+        Node* phi = phis.node();
+        if (idx >= phi->req()) {
+          phis.pop();
+        } else {
+          current = phi->in(idx);
+          phis.set_index(idx+1);
+        }
+      }
+      if (current == NULL) {
+        return true;
+      }
+    } else if (current == b2) {
+      return false;
+    } else if (current == phase->C->immutable_memory()) {
+      return false;
+    } else if (current->isa_Phi()) {
+      if (!linear) {
+        return false;
+      }
+      phis.push(current, 2);
+      current = current->in(1);
+    } else if (current->Opcode() == Op_ShenandoahWriteBarrier) {
+      current = current->in(Memory);
+    } else if (current->Opcode() == Op_ShenandoahWBMemProj) {
+      current = current->in(ShenandoahWBMemProjNode::WriteBarrier);
+    } else if (current->is_Proj()) {
+      current = current->in(0);
+    } else if (current->is_Call()) {
+      current = current->in(TypeFunc::Memory);
+    } else if (current->is_MemBar()) {
+      current = current->in(TypeFunc::Memory);
+    } else if (current->is_MergeMem()) {
+      const TypePtr* adr_type = brooks_pointer_type(phase->type(b2));
+      uint alias_idx = phase->C->get_alias_index(adr_type);
+      current = current->as_MergeMem()->memory_at(alias_idx);
+    } else {
+#ifdef ASSERT
+      current->dump();
+#endif
+      ShouldNotReachHere();
+      return false;
+    }
+  }
+  return false;
+}
+
+/**
+ * Determines if b1 dominates b2 through memory inputs. It returns true if:
+ * - b1 can be reached by following each branch in b2's memory input (through phis, etc)
+ * - or we get back to b2 (i.e. through a loop) without seeing b1
+ * In all other cases (in particular, if we reach immutable_memory without
+ * having seen b1), we return false.
+ */
+bool ShenandoahBarrierNode::dominates_memory(PhaseGVN* phase, Node* b1, Node* b2, bool linear) {
+  return dominates_memory_impl(phase, b1, b2, b2->in(Memory), linear);
+}
+
+Node* ShenandoahBarrierNode::Identity_impl(PhaseGVN* phase) {
+  Node* n = in(ValueIn);
+
+  Node* rb_mem = Opcode() == Op_ShenandoahReadBarrier ? in(Memory) : NULL;
+  if (! needs_barrier(phase, this, n, rb_mem, _allow_fromspace)) {
+    return n;
+  }
+
+  // Try to find a write barrier sibling with identical inputs that we can fold into.
+  for (DUIterator i = n->outs(); n->has_out(i); i++) {
+    Node* sibling = n->out(i);
+    if (sibling == this) {
+      continue;
+    }
+    if (sibling->Opcode() != Op_ShenandoahWriteBarrier) {
+      continue;
+    }
+
+    assert(sibling->in(ValueIn) == in(ValueIn), "sanity");
+    assert(sibling->Opcode() == Op_ShenandoahWriteBarrier, "sanity");
+
+    if (dominates_memory(phase, sibling, this, phase->is_IterGVN() == NULL)) {
+      return sibling;
+    }
+  }
+  return this;
+}
+
+#ifndef PRODUCT
+void ShenandoahBarrierNode::dump_spec(outputStream *st) const {
+  const TypePtr* adr = adr_type();
+  if (adr == NULL) {
+    return;
+  }
+  st->print(" @");
+  adr->dump_on(st);
+  st->print(" (");
+  Compile::current()->alias_type(adr)->adr_type()->dump_on(st);
+  st->print(") ");
+}
+#endif
+
+Node* ShenandoahReadBarrierNode::Identity(PhaseGVN* phase) {
+  Node* id = Identity_impl(phase);
+
+  if (id == this && phase->is_IterGVN()) {
+    Node* n = in(ValueIn);
+    // Identity_impl() found no replacement. Try to combine identical read barriers.
+    for (DUIterator i = n->outs(); n->has_out(i); i++) {
+      Node* sibling = n->out(i);
+      if (sibling == this || sibling->Opcode() != Op_ShenandoahReadBarrier) {
+        continue;
+      }
+      assert(sibling->in(ValueIn)  == in(ValueIn), "sanity");
+      if (phase->is_IterGVN()->hash_find(sibling) &&
+          sibling->bottom_type() == bottom_type() &&
+          sibling->in(Control) == in(Control) &&
+          dominates_memory_rb(phase, sibling, this, phase->is_IterGVN() == NULL)) {
+        return sibling;
+      }
+    }
+  }
+  return id;
+}
+
+const Type* ShenandoahBarrierNode::Value(PhaseGVN* phase) const {
+  // Either input is TOP ==> the result is TOP
+  const Type *t1 = phase->type(in(Memory));
+  if (t1 == Type::TOP) return Type::TOP;
+  const Type *t2 = phase->type(in(ValueIn));
+  if (t2 == Type::TOP) return Type::TOP;
+
+  if (t2 == TypePtr::NULL_PTR) {
+    return _type;
+  }
+
+  const Type* type = t2->is_oopptr()->cast_to_nonconst();
+  return type;
+}
+
+uint ShenandoahBarrierNode::hash() const {
+  return TypeNode::hash() + _allow_fromspace;
+}
+
+uint ShenandoahBarrierNode::cmp(const Node& n) const {
+  return _allow_fromspace == ((ShenandoahBarrierNode&) n)._allow_fromspace
+    && TypeNode::cmp(n);
+}
+
+uint ShenandoahBarrierNode::size_of() const {
+  return sizeof(*this);
+}
+
+Node* ShenandoahWBMemProjNode::Identity(PhaseGVN* phase) {
+  Node* wb = in(WriteBarrier);
+  if (wb->is_top()) return phase->C->top(); // Dead path.
+
+  assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "expect write barrier");
+  PhaseIterGVN* igvn = phase->is_IterGVN();
+  // We can't do the below unless the graph is fully constructed.
+  if (igvn == NULL) {
+    return this;
+  }
+
+  // If the write barrier has no users other than this memory projection, its
+  // memory effect is not needed anymore: bypass it.
+  if (wb->outcnt() == 1) {
+    return wb->in(ShenandoahBarrierNode::Memory);
+  }
+
+  return this;
+}
+
+#ifdef ASSERT
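+// Check that the value flowing into `in` is covered by a barrier of kind t,
+// walking through casts, AddPs, Phis and CMoves. Barriers encountered along
+// the way are collected in barriers_used; returns false if coverage is missing.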
+bool ShenandoahBarrierNode::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
+  assert(phis.size() == 0, "");
+
+  while (true) {
+    if (in->bottom_type() == TypePtr::NULL_PTR) {
+      if (trace) {tty->print_cr("NULL");}
+    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
+      if (trace) {tty->print_cr("Non oop");}
+    } else if (t == ShenandoahLoad && ShenandoahOptimizeStableFinals &&
+               in->bottom_type()->make_ptr()->isa_aryptr() &&
+               in->bottom_type()->make_ptr()->is_aryptr()->is_stable()) {
+      if (trace) {tty->print_cr("Stable array load");}
+    } else {
+      if (in->is_ConstraintCast()) {
+        in = in->in(1);
+        continue;
+      } else if (in->is_AddP()) {
+        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
+        in = in->in(AddPNode::Address);
+        continue;
+      } else if (in->is_Con()) {
+        if (trace) {tty->print("Found constant"); in->dump();}
+      } else if (in->is_ShenandoahBarrier()) {
+        if (t == ShenandoahOopStore) {
+          if (in->Opcode() != Op_ShenandoahWriteBarrier) {
+            return false;
+          }
+          uint i = 0;
+          for (; i < phis.size(); i++) {
+            Node* n = phis.node_at(i);
+            if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
+              break;
+            }
+          }
+          if (i == phis.size()) {
+            return false;
+          }
+        } else if (t == ShenandoahStore && in->Opcode() != Op_ShenandoahWriteBarrier) {
+          return false;
+        }
+        barriers_used.push(in);
+        if (trace) {tty->print("Found barrier"); in->dump();}
+      } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
+        if (t != ShenandoahOopStore) {
+          in = in->in(1);
+          continue;
+        }
+        if (trace) {tty->print("Found enqueue barrier"); in->dump();}
+        phis.push(in, in->req());
+        in = in->in(1);
+        continue;
+      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
+        if (trace) {tty->print("Found alloc"); in->in(0)->dump();}
+      } else if (in->is_Phi()) {
+        if (!visited.test_set(in->_idx)) {
+          if (trace) {tty->print("Pushed phi:"); in->dump();}
+          phis.push(in, 2);
+          in = in->in(1);
+          continue;
+        }
+        if (trace) {tty->print("Already seen phi:"); in->dump();}
+      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
+        if (!visited.test_set(in->_idx)) {
+          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
+          phis.push(in, CMoveNode::IfTrue);
+          in = in->in(CMoveNode::IfFalse);
+          continue;
+        }
+        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
+      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
+        in = in->in(1);
+        continue;
+      } else {
+        return false;
+      }
+    }
+    bool cont = false;
+    while (phis.is_nonempty()) {
+      uint idx = phis.index();
+      Node* phi = phis.node();
+      if (idx >= phi->req()) {
+        if (trace) {tty->print("Popped phi:"); phi->dump();}
+        phis.pop();
+        continue;
+      }
+      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
+      in = phi->in(idx);
+      phis.set_index(idx+1);
+      cont = true;
+      break;
+    }
+    if (!cont) {
+      break;
+    }
+  }
+  return true;
+}
+
+void ShenandoahBarrierNode::report_verify_failure(const char *msg, Node *n1, Node *n2) {
+  if (n1 != NULL) {
+    n1->dump(+10);
+  }
+  if (n2 != NULL) {
+    n2->dump(+10);
+  }
+  fatal("%s", msg);
+}
+
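+// Graph-wide verification: walk everything reachable from the root and check,
+// largely table-driven, that the oop inputs of loads, stores, compares, leaf
+// calls and intrinsics are covered by the expected kind of barrier.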
+void ShenandoahBarrierNode::verify(RootNode* root) {
+  ResourceMark rm;
+  Unique_Node_List wq;
+  GrowableArray<Node*> barriers;
+  Unique_Node_List barriers_used;
+  Node_Stack phis(0);
+  VectorSet visited(Thread::current()->resource_area());
+  const bool trace = false;
+  const bool verify_no_useless_barrier = false;
+
+  wq.push(root);
+  for (uint next = 0; next < wq.size(); next++) {
+    Node *n = wq.at(next);
+    if (n->is_Load()) {
+      const bool trace = false;
+      if (trace) {tty->print("Verifying"); n->dump();}
+      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
+        if (trace) {tty->print_cr("Load range/klass");}
+      } else {
+        const TypePtr* adr_type = n->as_Load()->adr_type();
+
+        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
+          if (trace) {tty->print_cr("Mark load");}
+        } else if (adr_type->isa_instptr() &&
+                   adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
+                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
+          if (trace) {tty->print_cr("Reference.get()");}
+        } else {
+          bool verify = true;
+          if (adr_type->isa_instptr()) {
+            const TypeInstPtr* tinst = adr_type->is_instptr();
+            ciKlass* k = tinst->klass();
+            assert(k->is_instance_klass(), "");
+            ciInstanceKlass* ik = (ciInstanceKlass*)k;
+            int offset = adr_type->offset();
+
+            if ((ik->debug_final_field_at(offset) && ShenandoahOptimizeInstanceFinals) ||
+                (ik->debug_stable_field_at(offset) && ShenandoahOptimizeStableFinals)) {
+              if (trace) {tty->print_cr("Final/stable");}
+              verify = false;
+            } else if (k == ciEnv::current()->Class_klass() &&
+                       tinst->const_oop() != NULL &&
+                       tinst->offset() >= (ik->size_helper() * wordSize)) {
+              ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
+              ciField* field = k->get_field_by_offset(tinst->offset(), true);
+              if ((ShenandoahOptimizeStaticFinals && field->is_final()) ||
+                  (ShenandoahOptimizeStableFinals && field->is_stable())) {
+                verify = false;
+              }
+            }
+          }
+
+          if (verify && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
+            report_verify_failure("Shenandoah verification: Load should have barriers", n);
+          }
+        }
+      }
+    } else if (n->is_Store()) {
+      const bool trace = false;
+
+      if (trace) {tty->print("Verifying"); n->dump();}
+      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
+        Node* adr = n->in(MemNode::Address);
+        bool verify = true;
+
+        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
+          adr = adr->in(AddPNode::Address);
+          if (adr->is_AddP()) {
+            assert(adr->in(AddPNode::Base)->is_top(), "");
+            adr = adr->in(AddPNode::Address);
+            if (adr->Opcode() == Op_LoadP &&
+                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
+                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
+                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
+              if (trace) {tty->print_cr("SATB prebarrier");}
+              verify = false;
+            }
+          }
+        }
+
+        if (verify && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
+          report_verify_failure("Shenandoah verification: Store should have barriers", n);
+        }
+      }
+      if (!ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
+        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
+      }
+    } else if (n->Opcode() == Op_CmpP) {
+      const bool trace = false;
+
+      Node* in1 = n->in(1);
+      Node* in2 = n->in(2);
+      if (in1->bottom_type()->isa_oopptr()) {
+        if (trace) {tty->print("Verifying"); n->dump();}
+
+        bool mark_inputs = false;
+        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
+            (in1->is_Con() || in2->is_Con())) {
+          if (trace) {tty->print_cr("Comparison against a constant");}
+          mark_inputs = true;
+        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
+                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
+          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
+          mark_inputs = true;
+        } else {
+          assert(in2->bottom_type()->isa_oopptr(), "");
+
+          if (!ShenandoahBarrierNode::verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
+              !ShenandoahBarrierNode::verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
+            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
+          }
+        }
+        if (verify_no_useless_barrier &&
+            mark_inputs &&
+            (!ShenandoahBarrierNode::verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
+             !ShenandoahBarrierNode::verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
+          phis.clear();
+          visited.Reset();
+        }
+      }
+    } else if (n->is_LoadStore()) {
+      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
+          !ShenandoahBarrierNode::verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
+        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
+      }
+
+      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
+        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
+      }
+    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
+      CallNode* call = n->as_Call();
+
+      static struct {
+        const char* name;
+        struct {
+          int pos;
+          verify_type t;
+        } args[6];
+      } calls[] = {
+        "aescrypt_encryptBlock",
+        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "aescrypt_decryptBlock",
+        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "multiplyToLen",
+        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "squareToLen",
+        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "montgomery_multiply",
+        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
+          { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "montgomery_square",
+        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "mulAdd",
+        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "vectorizedMismatch",
+        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "updateBytesCRC32",
+        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "updateBytesAdler32",
+        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "updateBytesCRC32C",
+        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "counterMode_AESCrypt",
+        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
+          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
+        "cipherBlockChaining_encryptAESCrypt",
+        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
+          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "cipherBlockChaining_decryptAESCrypt",
+        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
+          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "shenandoah_clone_barrier",
+        { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "ghash_processBlocks",
+        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "sha1_implCompress",
+        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "sha256_implCompress",
+        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "sha512_implCompress",
+        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "sha1_implCompressMB",
+        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "sha256_implCompressMB",
+        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+        "sha512_implCompressMB",
+        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
+          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
+      };
+
+      if (call->is_call_to_arraycopystub()) {
+        Node* dest = NULL;
+        const TypeTuple* args = n->as_Call()->_tf->domain();
+        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
+          if (args->field_at(i)->isa_ptr()) {
+            j++;
+            if (j == 2) {
+              dest = n->in(i);
+              break;
+            }
+          }
+        }
+        if (!ShenandoahBarrierNode::verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
+            !ShenandoahBarrierNode::verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
+          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
+        }
+      } else if (strlen(call->_name) > 5 &&
+                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
+        if (!ShenandoahBarrierNode::verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
+          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
+        }
+      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
+        // skip
+      } else {
+        const int calls_len = sizeof(calls) / sizeof(calls[0]);
+        int i = 0;
+        for (; i < calls_len; i++) {
+          if (!strcmp(calls[i].name, call->_name)) {
+            break;
+          }
+        }
+        if (i != calls_len) {
+          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
+          for (uint j = 0; j < args_len; j++) {
+            int pos = calls[i].args[j].pos;
+            if (pos == -1) {
+              break;
+            }
+            if (!ShenandoahBarrierNode::verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
+              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
+            }
+          }
+          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
+            if (call->in(j)->bottom_type()->make_ptr() &&
+                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
+              uint k = 0;
+              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
+              if (k == args_len) {
+                fatal("arg %d for call %s not covered", j, call->_name);
+              }
+            }
+          }
+        } else {
+          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
+            if (call->in(j)->bottom_type()->make_ptr() &&
+                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
+              fatal("%s not covered", call->_name);
+            }
+          }
+        }
+      }
+    } else if (n->is_ShenandoahBarrier()) {
+      assert(!barriers.contains(n), "");
+      assert(n->Opcode() != Op_ShenandoahWriteBarrier || n->find_out_with(Op_ShenandoahWBMemProj) != NULL, "bad shenandoah write barrier");
+      assert(n->Opcode() != Op_ShenandoahWriteBarrier || n->outcnt() > 1, "bad shenandoah write barrier");
+      barriers.push(n);
+    } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
+      // skip
+    } else if (n->Opcode() == Op_ShenandoahWBMemProj) {
+      assert(n->in(0) == NULL && n->in(ShenandoahWBMemProjNode::WriteBarrier)->Opcode() == Op_ShenandoahWriteBarrier, "strange ShenandoahWBMemProj");
+    } else if (n->is_AddP()
+               || n->is_Phi()
+               || n->is_ConstraintCast()
+               || n->Opcode() == Op_Return
+               || n->Opcode() == Op_CMoveP
+               || n->Opcode() == Op_CMoveN
+               || n->Opcode() == Op_Rethrow
+               || n->is_MemBar()
+               || n->Opcode() == Op_Conv2B
+               || n->Opcode() == Op_SafePoint
+               || n->is_CallJava()
+               || n->Opcode() == Op_Unlock
+               || n->Opcode() == Op_EncodeP
+               || n->Opcode() == Op_DecodeN) {
+      // nothing to do
+    } else {
+      static struct {
+        int opcode;
+        struct {
+          int pos;
+          verify_type t;
+        } inputs[2];
+      } others[] = {
+        Op_FastLock,
+        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
+        Op_Lock,
+        { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
+        Op_ArrayCopy,
+        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
+        Op_StrCompressedCopy,
+        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
+        Op_StrInflatedCopy,
+        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
+        Op_AryEq,
+        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
+        Op_StrIndexOf,
+        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
+        Op_StrComp,
+        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
+        Op_StrEquals,
+        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
+        Op_EncodeISOArray,
+        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
+        Op_HasNegatives,
+        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
+        Op_CastP2X,
+        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
+        Op_StrIndexOfChar,
+        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
+      };
+
+      const int others_len = sizeof(others) / sizeof(others[0]);
+      int i = 0;
+      for (; i < others_len; i++) {
+        if (others[i].opcode == n->Opcode()) {
+          break;
+        }
+      }
+      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
+      if (i != others_len) {
+        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
+        for (uint j = 0; j < inputs_len; j++) {
+          int pos = others[i].inputs[j].pos;
+          if (pos == -1) {
+            break;
+          }
+          if (!ShenandoahBarrierNode::verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
+            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
+          }
+        }
+        for (uint j = 1; j < stop; j++) {
+          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
+              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
+            uint k = 0;
+            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
+            if (k == inputs_len) {
+              fatal("arg %d for node %s not covered", j, n->Name());
+            }
+          }
+        }
+      } else {
+        for (uint j = 1; j < stop; j++) {
+          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
+              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
+            fatal("%s not covered", n->Name());
+          }
+        }
+      }
+    }
+
+    if (n->is_SafePoint()) {
+      SafePointNode* sfpt = n->as_SafePoint();
+      if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
+        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
+          if (!ShenandoahBarrierNode::verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
+            phis.clear();
+            visited.Reset();
+          }
+        }
+      }
+    }
+    for (uint i = 0; i < n->len(); ++i) {
+      Node *m = n->in(i);
+      if (m == NULL) continue;
+
+      // In most cases, inputs should be known to be non-null. If that is not
+      // the case, it could be a missing cast_not_null() in an intrinsic, or
+      // support might be needed in AddPNode::Ideal() to avoid a NULL+offset
+      // input.
+      if (!(n->is_Phi() ||
+            (n->is_SafePoint() && (!n->is_CallRuntime() || !strcmp(n->as_Call()->_name, "shenandoah_wb_pre") || !strcmp(n->as_Call()->_name, "unsafe_arraycopy"))) ||
+            n->Opcode() == Op_CmpP ||
+            n->Opcode() == Op_CmpN ||
+            (n->Opcode() == Op_StoreP && i == StoreNode::ValueIn) ||
+            (n->Opcode() == Op_StoreN && i == StoreNode::ValueIn) ||
+            n->is_ConstraintCast() ||
+            n->Opcode() == Op_Return ||
+            n->Opcode() == Op_Conv2B ||
+            n->is_AddP() ||
+            n->Opcode() == Op_CMoveP ||
+            n->Opcode() == Op_CMoveN ||
+            n->Opcode() == Op_Rethrow ||
+            n->is_MemBar() ||
+            n->is_Mem() ||
+            n->Opcode() == Op_AryEq ||
+            n->Opcode() == Op_SCMemProj ||
+            n->Opcode() == Op_EncodeP ||
+            n->Opcode() == Op_DecodeN ||
+            n->Opcode() == Op_ShenandoahWriteBarrier ||
+            n->Opcode() == Op_ShenandoahWBMemProj ||
+            n->Opcode() == Op_ShenandoahEnqueueBarrier)) {
+        if (m->bottom_type()->make_oopptr() && m->bottom_type()->make_oopptr()->meet(TypePtr::NULL_PTR) == m->bottom_type()) {
+          report_verify_failure("Shenandoah verification: null input", n, m);
+        }
+      }
+
+      wq.push(m);
+    }
+  }
+
+  if (verify_no_useless_barrier) {
+    for (int i = 0; i < barriers.length(); i++) {
+      Node* n = barriers.at(i);
+      if (!barriers_used.member(n)) {
+        tty->print("XXX useless barrier"); n->dump(-2);
+        ShouldNotReachHere();
+      }
+    }
+  }
+}
+#endif
+
+bool ShenandoahBarrierNode::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
+  // That both nodes have the same control is not sufficient to prove
+  // domination; verify that there's no path from d to n.
+  ResourceMark rm;
+  Unique_Node_List wq;
+  wq.push(d);
+  for (uint next = 0; next < wq.size(); next++) {
+    Node *m = wq.at(next);
+    if (m == n) {
+      return false;
+    }
+    if (m->is_Phi() && m->in(0)->is_Loop()) {
+      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
+    } else {
+      for (uint i = 0; i < m->req(); i++) {
+        if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
+          wq.push(m->in(i));
+        }
+      }
+    }
+  }
+  return true;
+}
+
+bool ShenandoahBarrierNode::is_dominator(Node *d_c, Node *n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
+  if (d_c != n_c) {
+    return phase->is_dominator(d_c, n_c);
+  }
+  return is_dominator_same_ctrl(d_c, d, n, phase);
+}
+
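+// Step one node up the memory graph from mem, along the given alias slice.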
+Node* next_mem(Node* mem, int alias) {
+  Node* res = NULL;
+  if (mem->is_Proj()) {
+    res = mem->in(0);
+  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
+    res = mem->in(TypeFunc::Memory);
+  } else if (mem->is_Phi()) {
+    res = mem->in(1);
+  } else if (mem->is_ShenandoahBarrier()) {
+    res = mem->in(ShenandoahBarrierNode::Memory);
+  } else if (mem->is_MergeMem()) {
+    res = mem->as_MergeMem()->memory_at(alias);
+  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
+    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
+    res = mem->in(MemNode::Memory);
+  } else if (mem->Opcode() == Op_ShenandoahWBMemProj) {
+    res = mem->in(ShenandoahWBMemProjNode::WriteBarrier);
+  } else {
+#ifdef ASSERT
+    mem->dump();
+#endif
+    ShouldNotReachHere();
+  }
+  return res;
+}
+
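+// Walk the control chain from c up to dom. Returns NodeSentinel if an
+// unsupported control-flow shape is found (extra branches, Jump/Catch
+// projections, NeverBranch); otherwise returns the at most one allowed
+// If projection (or NULL if there is none).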
+Node* ShenandoahBarrierNode::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
+  Node* iffproj = NULL;
+  while (c != dom) {
+    Node* next = phase->idom(c);
+    assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
+    if (c->is_Region()) {
+      ResourceMark rm;
+      Unique_Node_List wq;
+      wq.push(c);
+      for (uint i = 0; i < wq.size(); i++) {
+        Node *n = wq.at(i);
+        if (n == next) {
+          continue;
+        }
+        if (n->is_Region()) {
+          for (uint j = 1; j < n->req(); j++) {
+            wq.push(n->in(j));
+          }
+        } else {
+          wq.push(n->in(0));
+        }
+      }
+      for (uint i = 0; i < wq.size(); i++) {
+        Node *n = wq.at(i);
+        assert(n->is_CFG(), "");
+        if (n->is_Multi()) {
+          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
+            Node* u = n->fast_out(j);
+            if (u->is_CFG()) {
+              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
+                return NodeSentinel;
+              }
+            }
+          }
+        }
+      }
+    } else if (c->is_Proj()) {
+      if (c->is_IfProj()) {
+        if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
+          // Projection to an uncommon trap: harmless, keep walking up.
+        } else {
+          if (!allow_one_proj) {
+            return NodeSentinel;
+          }
+          if (iffproj == NULL) {
+            iffproj = c;
+          } else {
+            return NodeSentinel;
+          }
+        }
+      } else if (c->Opcode() == Op_JumpProj) {
+        return NodeSentinel; // unsupported
+      } else if (c->Opcode() == Op_CatchProj) {
+        return NodeSentinel; // unsupported
+      } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
+        return NodeSentinel; // unsupported
+      } else {
+        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
+      }
+    }
+    c = next;
+  }
+  return iffproj;
+}
+
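+// Late control computation hook for the Shenandoah barrier nodes:
+// returns true if n was handled here rather than by the generic code.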
+bool ShenandoahBarrierNode::build_loop_late_post(PhaseIdealLoop* phase, Node* n) {
+  if (n->Opcode() == Op_ShenandoahReadBarrier ||
+      n->Opcode() == Op_ShenandoahWriteBarrier ||
+      n->Opcode() == Op_ShenandoahWBMemProj) {
+
+    phase->build_loop_late_post_work(n, false);
+
+    if (n->Opcode() == Op_ShenandoahWriteBarrier) {
+      // The write barrier and its memory proj must have the same
+      // control, otherwise some loop opts could put nodes (Phis)
+      // between them.
+      Node* proj = n->find_out_with(Op_ShenandoahWBMemProj);
+      if (proj != NULL) {
+        phase->set_ctrl_and_loop(proj, phase->get_ctrl(n));
+      }
+    }
+    return true;
+  }
+  return false;
+}
+
+bool ShenandoahBarrierNode::sink_node(PhaseIdealLoop* phase, Node* ctrl, Node* n_ctrl) {
+  ctrl = phase->find_non_split_ctrl(ctrl);
+  assert(phase->dom_depth(n_ctrl) <= phase->dom_depth(ctrl), "n is later than its clone");
+  set_req(0, ctrl);
+  phase->register_new_node(this, ctrl);
+  return true;
+}
+
+#ifdef ASSERT
+void ShenandoahWriteBarrierNode::memory_dominates_all_paths_helper(Node* c, Node* rep_ctrl, Unique_Node_List& controls, PhaseIdealLoop* phase) {
+  const bool trace = false;
+  if (trace) { tty->print("X control is"); c->dump(); }
+
+  uint start = controls.size();
+  controls.push(c);
+  for (uint i = start; i < controls.size(); i++) {
+    Node *n = controls.at(i);
+
+    if (trace) { tty->print("X from"); n->dump(); }
+
+    if (n == rep_ctrl) {
+      continue;
+    }
+
+    if (n->is_Proj()) {
+      Node* n_dom = n->in(0);
+      IdealLoopTree* n_dom_loop = phase->get_loop(n_dom);
+      if (n->is_IfProj() && n_dom->outcnt() == 2) {
+        n_dom_loop = phase->get_loop(n_dom->as_If()->proj_out(n->as_Proj()->_con == 0 ? 1 : 0));
+      }
+      if (n_dom_loop != phase->ltree_root()) {
+        Node* tail = n_dom_loop->tail();
+        if (tail->is_Region()) {
+          for (uint j = 1; j < tail->req(); j++) {
+            if (phase->is_dominator(n_dom, tail->in(j)) && !phase->is_dominator(n, tail->in(j))) {
+              assert(phase->is_dominator(rep_ctrl, tail->in(j)), "why are we here?");
+              // entering loop from below, mark backedge
+              if (trace) { tty->print("X pushing backedge"); tail->in(j)->dump(); }
+              controls.push(tail->in(j));
+              //assert(n->in(0) == n_dom, "strange flow control");
+            }
+          }
+        } else if (phase->get_loop(n) != n_dom_loop && phase->is_dominator(n_dom, tail)) {
+          // entering loop from below, mark backedge
+          if (trace) { tty->print("X pushing backedge"); tail->dump(); }
+          controls.push(tail);
+          //assert(n->in(0) == n_dom, "strange flow control");
+        }
+      }
+    }
+
+    if (n->is_Loop()) {
+      Node* c = n->in(LoopNode::EntryControl);
+      if (trace) { tty->print("X pushing"); c->dump(); }
+      controls.push(c);
+    } else if (n->is_Region()) {
+      for (uint i = 1; i < n->req(); i++) {
+        Node* c = n->in(i);
+        if (trace) { tty->print("X pushing"); c->dump(); }
+        controls.push(c);
+      }
+    } else {
+      Node* c = n->in(0);
+      if (trace) { tty->print("X pushing"); c->dump(); }
+      controls.push(c);
+    }
+  }
+}
+
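+// Debug-only check that the memory state mem covers all control flow
+// paths reaching rep_ctrl, i.e. that moving the barrier to rep_ctrl
+// with mem as its memory input keeps the memory graph sound.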
+bool ShenandoahWriteBarrierNode::memory_dominates_all_paths(Node* mem, Node* rep_ctrl, int alias, PhaseIdealLoop* phase) {
+  const bool trace = false;
+  if (trace) {
+    tty->print("XXX mem is"); mem->dump();
+    tty->print("XXX rep ctrl is"); rep_ctrl->dump();
+    tty->print_cr("XXX alias is %d", alias);
+  }
+  ResourceMark rm;
+  Unique_Node_List wq;
+  Unique_Node_List controls;
+  wq.push(mem);
+  for (uint next = 0; next < wq.size(); next++) {
+    Node *nn = wq.at(next);
+    if (trace) { tty->print("XX from mem"); nn->dump(); }
+    assert(nn->bottom_type() == Type::MEMORY, "memory only");
+
+    if (nn->is_Phi()) {
+      Node* r = nn->in(0);
+      for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) {
+        Node* u = r->fast_out(j);
+        if (u->is_Phi() && u->bottom_type() == Type::MEMORY && u != nn &&
+            (u->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(u->adr_type()) == alias)) {
+          if (trace) { tty->print("XX Next mem (other phi)"); u->dump(); }
+          wq.push(u);
+        }
+      }
+    }
+
+    for (DUIterator_Fast imax, i = nn->fast_outs(imax); i < imax; i++) {
+      Node* use = nn->fast_out(i);
+
+      if (trace) { tty->print("XX use %p", use->adr_type()); use->dump(); }
+      if (use->is_CFG() && use->in(TypeFunc::Memory) == nn) {
+        Node* c = use->in(0);
+        if (phase->is_dominator(rep_ctrl, c)) {
+          memory_dominates_all_paths_helper(c, rep_ctrl, controls, phase);
+        } else if (use->is_CallStaticJava() && use->as_CallStaticJava()->uncommon_trap_request() != 0 && c->is_Region()) {
+          Node* region = c;
+          if (trace) { tty->print("XX unc region"); region->dump(); }
+          for (uint j = 1; j < region->req(); j++) {
+            if (phase->is_dominator(rep_ctrl, region->in(j))) {
+              if (trace) { tty->print("XX unc follows"); region->in(j)->dump(); }
+              memory_dominates_all_paths_helper(region->in(j), rep_ctrl, controls, phase);
+            }
+          }
+        }
+      } else if (use->is_Phi()) {
+        assert(use->bottom_type() == Type::MEMORY, "bad phi");
+        if ((use->adr_type() == TypePtr::BOTTOM) ||
+            phase->C->get_alias_index(use->adr_type()) == alias) {
+          for (uint j = 1; j < use->req(); j++) {
+            if (use->in(j) == nn) {
+              Node* c = use->in(0)->in(j);
+              if (phase->is_dominator(rep_ctrl, c)) {
+                memory_dominates_all_paths_helper(c, rep_ctrl, controls, phase);
+              }
+            }
+          }
+        }
+      }
+
+      if (use->is_MergeMem()) {
+        if (use->as_MergeMem()->memory_at(alias) == nn) {
+          if (trace) { tty->print("XX Next mem"); use->dump(); }
+          // follow the memory edges
+          wq.push(use);
+        }
+      } else if (use->is_Phi()) {
+        assert(use->bottom_type() == Type::MEMORY, "bad phi");
+        if ((use->adr_type() == TypePtr::BOTTOM) ||
+            phase->C->get_alias_index(use->adr_type()) == alias) {
+          if (trace) { tty->print("XX Next mem"); use->dump(); }
+          // follow the memory edges
+          wq.push(use);
+        }
+      } else if (use->bottom_type() == Type::MEMORY &&
+                 (use->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(use->adr_type()) == alias)) {
+        if (trace) { tty->print("XX Next mem"); use->dump(); }
+        // follow the memory edges
+        wq.push(use);
+      } else if ((use->is_SafePoint() || use->is_MemBar()) &&
+                 (use->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(use->adr_type()) == alias)) {
+        for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
+          Node* u = use->fast_out(j);
+          if (u->bottom_type() == Type::MEMORY) {
+            if (trace) { tty->print("XX Next mem"); u->dump(); }
+            // follow the memory edges
+            wq.push(u);
+          }
+        }
+      } else if (use->Opcode() == Op_ShenandoahWriteBarrier && phase->C->get_alias_index(use->adr_type()) == alias) {
+        Node* m = use->find_out_with(Op_ShenandoahWBMemProj);
+        if (m != NULL) {
+          if (trace) { tty->print("XX Next mem"); m->dump(); }
+          // follow the memory edges
+          wq.push(m);
+        }
+      }
+    }
+  }
+
+  if (controls.size() == 0) {
+    return false;
+  }
+
+  for (uint i = 0; i < controls.size(); i++) {
+    Node *n = controls.at(i);
+
+    if (trace) { tty->print("X checking"); n->dump(); }
+
+    if (n->unique_ctrl_out() != NULL) {
+      continue;
+    }
+
+    if (n->Opcode() == Op_NeverBranch) {
+      Node* taken = n->as_Multi()->proj_out(0);
+      if (!controls.member(taken)) {
+        if (trace) { tty->print("X not seen"); taken->dump(); }
+        return false;
+      }
+      continue;
+    }
+
+    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
+      Node* u = n->fast_out(j);
+
+      if (u->is_CFG()) {
+        if (!controls.member(u)) {
+          if (u->is_Proj() && u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
+            if (trace) { tty->print("X not seen but unc"); u->dump(); }
+          } else {
+            Node* c = u;
+            do {
+              c = c->unique_ctrl_out();
+            } while (c != NULL && c->is_Region());
+            if (c != NULL && c->Opcode() == Op_Halt) {
+              if (trace) { tty->print("X not seen but halt"); c->dump(); }
+            } else {
+              if (trace) { tty->print("X not seen"); u->dump(); }
+              return false;
+            }
+          }
+        } else {
+          if (trace) { tty->print("X seen"); u->dump(); }
+        }
+      }
+    }
+  }
+  return true;
+}
+#endif
+
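+// Walks the memory graph up from mem (along alias) until it reaches a
+// memory state whose control dominates the barrier's target control;
+// returns NULL if the walk hits a cycle.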
+Node* ShenandoahBarrierNode::dom_mem(Node* mem, Node*& mem_ctrl, Node* n, Node* rep_ctrl, int alias, PhaseIdealLoop* phase) {
+  ResourceMark rm;
+  VectorSet wq(Thread::current()->resource_area());
+  wq.set(mem->_idx);
+  mem_ctrl = phase->get_ctrl(mem);
+  while (!is_dominator(mem_ctrl, rep_ctrl, mem, n, phase)) {
+    mem = next_mem(mem, alias);
+    if (wq.test_set(mem->_idx)) {
+      return NULL; // hit an unexpected loop
+    }
+    mem_ctrl = phase->ctrl_or_self(mem);
+  }
+  if (mem->is_MergeMem()) {
+    mem = mem->as_MergeMem()->memory_at(alias);
+    mem_ctrl = phase->ctrl_or_self(mem);
+  }
+  return mem;
+}
+
+Node* ShenandoahBarrierNode::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
+  ResourceMark rm;
+  VectorSet wq(Thread::current()->resource_area());
+  wq.set(mem->_idx);
+  mem_ctrl = phase->ctrl_or_self(mem);
+  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
+    mem = next_mem(mem, alias);
+    if (wq.test_set(mem->_idx)) {
+      return NULL;
+    }
+    mem_ctrl = phase->ctrl_or_self(mem);
+  }
+  if (mem->is_MergeMem()) {
+    mem = mem->as_MergeMem()->memory_at(alias);
+    mem_ctrl = phase->ctrl_or_self(mem);
+  }
+  return mem;
+}
+
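+// Bypasses the write barrier in the memory graph: all users of its
+// memory projection are rewired to the barrier's memory input.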
+static void disconnect_barrier_mem(Node* wb, PhaseIterGVN& igvn) {
+  Node* mem_in = wb->in(ShenandoahBarrierNode::Memory);
+  Node* proj = wb->find_out_with(Op_ShenandoahWBMemProj);
+
+  for (DUIterator_Last imin, i = proj->last_outs(imin); i >= imin; ) {
+    Node* u = proj->last_out(i);
+    igvn.rehash_node_delayed(u);
+    int nb = u->replace_edge(proj, mem_in);
+    assert(nb > 0, "no replacement?");
+    i -= nb;
+  }
+}
+
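+// Starting at the loop's entry control, steps above the loop
+// predicates as long as the value's control still dominates, and
+// returns the highest control the barrier may be moved to.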
+Node* ShenandoahWriteBarrierNode::move_above_predicates(LoopNode* cl, Node* val_ctrl, PhaseIdealLoop* phase) {
+  Node* entry = cl->skip_strip_mined(-1)->in(LoopNode::EntryControl);
+  Node* above_pred = phase->skip_all_loop_predicates(entry);
+  Node* ctrl = entry;
+  while (ctrl != above_pred) {
+    Node* next = ctrl->in(0);
+    if (!phase->is_dominator(val_ctrl, next)) {
+      break;
+    }
+    ctrl = next;
+  }
+  return ctrl;
+}
+
+static MemoryGraphFixer* find_fixer(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, int alias) {
+  for (int i = 0; i < memory_graph_fixers.length(); i++) {
+    if (memory_graph_fixers.at(i)->alias() == alias) {
+      return memory_graph_fixers.at(i);
+    }
+  }
+  return NULL;
+}
+
+static MemoryGraphFixer* create_fixer(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, int alias, PhaseIdealLoop* phase, bool include_lsm) {
+  assert(find_fixer(memory_graph_fixers, alias) == NULL, "none should exist yet");
+  MemoryGraphFixer* fixer = new MemoryGraphFixer(alias, include_lsm, phase);
+  memory_graph_fixers.push(fixer);
+  return fixer;
+}
+
+void ShenandoahWriteBarrierNode::try_move_before_loop_helper(LoopNode* cl, Node* val_ctrl, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses) {
+  assert(cl->is_Loop(), "bad control");
+  Node* ctrl = move_above_predicates(cl, val_ctrl, phase);
+  Node* mem_ctrl = NULL;
+  int alias = phase->C->get_alias_index(adr_type());
+
+  MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias);
+  if (fixer == NULL) {
+    fixer = create_fixer(memory_graph_fixers, alias, phase, include_lsm);
+  }
+
+  Node* proj = find_out_with(Op_ShenandoahWBMemProj);
+
+  fixer->remove(proj);
+  Node* mem = fixer->find_mem(ctrl, NULL);
+
+  assert(!ShenandoahVerifyOptoBarriers || memory_dominates_all_paths(mem, ctrl, alias, phase), "can't fix the memory graph");
+
+  phase->set_ctrl_and_loop(this, ctrl);
+  phase->igvn().replace_input_of(this, Control, ctrl);
+
+  disconnect_barrier_mem(this, phase->igvn());
+
+  phase->igvn().replace_input_of(this, Memory, mem);
+  phase->set_ctrl_and_loop(proj, ctrl);
+
+  fixer->fix_mem(ctrl, ctrl, mem, mem, proj, uses);
+  assert(proj->outcnt() > 0, "disconnected write barrier");
+}
+
+LoopNode* ShenandoahWriteBarrierNode::try_move_before_pre_loop(Node* c, Node* val_ctrl, PhaseIdealLoop* phase) {
+  // A write barrier between a pre and main loop can get in the way of
+  // vectorization. Move it above the pre loop if possible
+  CountedLoopNode* cl = NULL;
+  if (c->is_IfFalse() &&
+      c->in(0)->is_CountedLoopEnd()) {
+    cl = c->in(0)->as_CountedLoopEnd()->loopnode();
+  } else if (c->is_IfProj() &&
+             c->in(0)->is_If() &&
+             c->in(0)->in(0)->is_IfFalse() &&
+             c->in(0)->in(0)->in(0)->is_CountedLoopEnd()) {
+    cl = c->in(0)->in(0)->in(0)->as_CountedLoopEnd()->loopnode();
+  }
+  if (cl != NULL &&
+      cl->is_pre_loop() &&
+      val_ctrl != cl &&
+      phase->is_dominator(val_ctrl, cl)) {
+    return cl;
+  }
+  return NULL;
+}
+
+void ShenandoahWriteBarrierNode::try_move_before_loop(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses) {
+  Node *n_ctrl = phase->get_ctrl(this);
+  IdealLoopTree *n_loop = phase->get_loop(n_ctrl);
+  Node* val = in(ValueIn);
+  Node* val_ctrl = phase->get_ctrl(val);
+  if (n_loop != phase->ltree_root() && !n_loop->_irreducible) {
+    IdealLoopTree *val_loop = phase->get_loop(val_ctrl);
+    Node* mem = in(Memory);
+    IdealLoopTree *mem_loop = phase->get_loop(phase->get_ctrl(mem));
+    if (!n_loop->is_member(val_loop) &&
+        n_loop->is_member(mem_loop)) {
+      Node* n_loop_head = n_loop->_head;
+
+      if (n_loop_head->is_Loop()) {
+        LoopNode* loop = n_loop_head->as_Loop();
+        if (n_loop_head->is_CountedLoop() && n_loop_head->as_CountedLoop()->is_main_loop()) {
+          LoopNode* res = try_move_before_pre_loop(n_loop_head->in(LoopNode::EntryControl), val_ctrl, phase);
+          if (res != NULL) {
+            loop = res;
+          }
+        }
+
+        try_move_before_loop_helper(loop, val_ctrl, memory_graph_fixers, phase, include_lsm, uses);
+      }
+    }
+  }
+  LoopNode* ctrl = try_move_before_pre_loop(in(0), val_ctrl, phase);
+  if (ctrl != NULL) {
+    try_move_before_loop_helper(ctrl, val_ctrl, memory_graph_fixers, phase, include_lsm, uses);
+  }
+}
+
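+// Computes a control point at which this write barrier could replace
+// the other barrier on the same value, or NULL if no such point
+// exists (or if it would add a barrier to a path that had none).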
+Node* ShenandoahWriteBarrierNode::would_subsume(ShenandoahBarrierNode* other, PhaseIdealLoop* phase) {
+  Node* val = in(ValueIn);
+  Node* val_ctrl = phase->get_ctrl(val);
+  Node* other_mem = other->in(Memory);
+  Node* other_ctrl = phase->get_ctrl(other);
+  Node* this_ctrl = phase->get_ctrl(this);
+  IdealLoopTree* this_loop = phase->get_loop(this_ctrl);
+  IdealLoopTree* other_loop = phase->get_loop(other_ctrl);
+
+  Node* ctrl = phase->dom_lca(other_ctrl, this_ctrl);
+
+  if (ctrl->is_Proj() &&
+      ctrl->in(0)->is_Call() &&
+      ctrl->unique_ctrl_out() != NULL &&
+      ctrl->unique_ctrl_out()->Opcode() == Op_Catch &&
+      !phase->is_dominator(val_ctrl, ctrl->in(0)->in(0))) {
+    return NULL;
+  }
+
+  IdealLoopTree* loop = phase->get_loop(ctrl);
+
+  // We don't want to place the merged write barrier inside a loop.
+  // If the LCA is in an inner loop, try a control out of the loop if possible.
+  while (!loop->is_member(this_loop) && (other->Opcode() != Op_ShenandoahWriteBarrier || !loop->is_member(other_loop))) {
+    ctrl = phase->idom(ctrl);
+    if (ctrl->is_MultiBranch()) {
+      ctrl = ctrl->in(0);
+    }
+    if (ctrl != val_ctrl && phase->is_dominator(ctrl, val_ctrl)) {
+      return NULL;
+    }
+    loop = phase->get_loop(ctrl);
+  }
+
+  if (ShenandoahDontIncreaseWBFreq) {
+    Node* this_iffproj = no_branches(this_ctrl, ctrl, true, phase);
+    if (other->Opcode() == Op_ShenandoahWriteBarrier) {
+      Node* other_iffproj = no_branches(other_ctrl, ctrl, true, phase);
+      if (other_iffproj == NULL || this_iffproj == NULL) {
+        return ctrl;
+      } else if (other_iffproj != NodeSentinel && this_iffproj != NodeSentinel &&
+                 other_iffproj->in(0) == this_iffproj->in(0)) {
+        return ctrl;
+      }
+    } else if (this_iffproj == NULL) {
+      return ctrl;
+    }
+    return NULL;
+  }
+
+  return ctrl;
+}
+
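+// Barrier commoning pass: repeatedly tries to move write barriers out
+// of loops and to subsume other barriers on the same value, fixing up
+// the memory graph as barriers move, until no more progress is made.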
+void ShenandoahWriteBarrierNode::optimize_before_expansion(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*> memory_graph_fixers, bool include_lsm) {
+  bool progress = false;
+  Unique_Node_List uses;
+  do {
+    progress = false;
+    for (int i = 0; i < ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i++) {
+      ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i);
+
+      wb->try_move_before_loop(memory_graph_fixers, phase, include_lsm, uses);
+
+      Node* val = wb->in(ValueIn);
+
+      for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
+        Node* u = val->fast_out(j);
+        if (u != wb && u->is_ShenandoahBarrier()) {
+          Node* rep_ctrl = wb->would_subsume(u->as_ShenandoahBarrier(), phase);
+
+          if (rep_ctrl != NULL) {
+            Node* other = u;
+            Node* val_ctrl = phase->get_ctrl(val);
+            if (rep_ctrl->is_Proj() &&
+                rep_ctrl->in(0)->is_Call() &&
+                rep_ctrl->unique_ctrl_out() != NULL &&
+                rep_ctrl->unique_ctrl_out()->Opcode() == Op_Catch) {
+              rep_ctrl = rep_ctrl->in(0)->in(0);
+
+              assert(phase->is_dominator(val_ctrl, rep_ctrl), "bad control");
+            } else {
+              LoopNode* c = ShenandoahWriteBarrierNode::try_move_before_pre_loop(rep_ctrl, val_ctrl, phase);
+              if (c != NULL) {
+                rep_ctrl = ShenandoahWriteBarrierNode::move_above_predicates(c, val_ctrl, phase);
+              } else {
+                while (rep_ctrl->is_IfProj()) {
+                  CallStaticJavaNode* unc = rep_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
+                  if (unc != NULL) {
+                    int req = unc->uncommon_trap_request();
+                    Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
+                    if ((trap_reason == Deoptimization::Reason_loop_limit_check ||
+                         trap_reason == Deoptimization::Reason_predicate ||
+                         trap_reason == Deoptimization::Reason_profile_predicate) &&
+                        phase->is_dominator(val_ctrl, rep_ctrl->in(0)->in(0))) {
+                      rep_ctrl = rep_ctrl->in(0)->in(0);
+                      continue;
+                    }
+                  }
+                  break;
+                }
+              }
+            }
+
+            Node* wb_ctrl = phase->get_ctrl(wb);
+            Node* other_ctrl = phase->get_ctrl(other);
+            int alias = phase->C->get_alias_index(wb->adr_type());
+            MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias);
+            if (!is_dominator(wb_ctrl, other_ctrl, wb, other, phase)) {
+              if (fixer == NULL) {
+                fixer = create_fixer(memory_graph_fixers, alias, phase, include_lsm);
+              }
+              Node* mem = fixer->find_mem(rep_ctrl, phase->get_ctrl(other) == rep_ctrl ? other : NULL);
+
+              if (mem->has_out_with(Op_Lock) || mem->has_out_with(Op_Unlock)) {
+                continue;
+              }
+
+              Node* wb_proj = wb->find_out_with(Op_ShenandoahWBMemProj);
+              fixer->remove(wb_proj);
+              Node* mem_for_ctrl = fixer->find_mem(rep_ctrl, NULL);
+
+              if (wb->in(Memory) != mem) {
+                disconnect_barrier_mem(wb, phase->igvn());
+                phase->igvn().replace_input_of(wb, Memory, mem);
+              }
+              if (rep_ctrl != wb_ctrl) {
+                phase->set_ctrl_and_loop(wb, rep_ctrl);
+                phase->igvn().replace_input_of(wb, Control, rep_ctrl);
+                phase->set_ctrl_and_loop(wb_proj, rep_ctrl);
+                progress = true;
+              }
+
+              fixer->fix_mem(rep_ctrl, rep_ctrl, mem, mem_for_ctrl, wb_proj, uses);
+
+              assert(!ShenandoahVerifyOptoBarriers || ShenandoahWriteBarrierNode::memory_dominates_all_paths(mem, rep_ctrl, alias, phase), "can't fix the memory graph");
+            }
+
+            if (other->Opcode() == Op_ShenandoahWriteBarrier) {
+              Node* other_proj = other->find_out_with(Op_ShenandoahWBMemProj);
+              if (fixer != NULL) {
+                fixer->remove(other_proj);
+              }
+              phase->igvn().replace_node(other_proj, other->in(Memory));
+            }
+            phase->igvn().replace_node(other, wb);
+            --j; --jmax;
+          }
+        }
+      }
+    }
+  } while (progress);
+}
+
+// Some code duplication with PhaseIdealLoop::split_if_with_blocks_pre()
+Node* ShenandoahWriteBarrierNode::try_split_thru_phi(PhaseIdealLoop* phase) {
+  Node *ctrl = phase->get_ctrl(this);
+  if (ctrl == NULL) {
+    return this;
+  }
+  Node *blk = phase->has_local_phi_input(this);
+  if (blk == NULL) {
+    return this;
+  }
+
+  if (in(0) != blk) {
+    return this;
+  }
+
+  int policy = blk->req() >> 2;
+
+  if (blk->is_CountedLoop()) {
+    IdealLoopTree *lp = phase->get_loop(blk);
+    if (lp && lp->_rce_candidate) {
+      return this;
+    }
+  }
+
+  if (phase->C->live_nodes() > 35000) {
+    return this;
+  }
+
+  uint unique = phase->C->unique();
+  Node *phi = phase->split_thru_phi(this, blk, policy);
+  if (phi == NULL) {
+    return this;
+  }
+
+  Node* mem_phi = new PhiNode(blk, Type::MEMORY, phase->C->alias_type(adr_type())->adr_type());
+  for (uint i = 1; i < blk->req(); i++) {
+    Node* n = phi->in(i);
+    if (n->Opcode() == Op_ShenandoahWriteBarrier &&
+        n->_idx >= unique) {
+      Node* proj = new ShenandoahWBMemProjNode(n);
+      phase->register_new_node(proj, phase->get_ctrl(n));
+      mem_phi->init_req(i, proj);
+    } else {
+      Node* mem = in(ShenandoahBarrierNode::Memory);
+      if (mem->is_Phi() && mem->in(0) == blk) {
+        mem = mem->in(i);
+      }
+      mem_phi->init_req(i, mem);
+    }
+  }
+  phase->register_new_node(mem_phi, blk);
+
+  Node* proj = find_out_with(Op_ShenandoahWBMemProj);
+  phase->igvn().replace_node(proj, mem_phi);
+  phase->igvn().replace_node(this, phi);
+
+  return phi;
+}
+
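+// Tries to hoist the read barrier's memory edge to a dominating
+// memory state, skipping over memory operations the barrier is
+// independent of, so the barrier can be scheduled earlier.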
+void ShenandoahReadBarrierNode::try_move(PhaseIdealLoop* phase) {
+  Node *n_ctrl = phase->get_ctrl(this);
+  if (n_ctrl == NULL) {
+    return;
+  }
+  Node* mem = in(MemNode::Memory);
+  int alias = phase->C->get_alias_index(adr_type());
+  const bool trace = false;
+
+#ifdef ASSERT
+  if (trace) { tty->print("Trying to move mem of"); dump(); }
+#endif
+
+  Node* new_mem = mem;
+
+  ResourceMark rm;
+  VectorSet seen(Thread::current()->resource_area());
+  Node_List phis;
+
+  for (;;) {
+#ifdef ASSERT
+    if (trace) { tty->print("Looking for dominator from"); mem->dump(); }
+#endif
+    if (mem->is_Proj() && mem->in(0)->is_Start()) {
+      if (new_mem != in(MemNode::Memory)) {
+#ifdef ASSERT
+        if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); }
+#endif
+        phase->igvn().replace_input_of(this, MemNode::Memory, new_mem);
+      }
+      return;
+    }
+
+    Node* candidate = mem;
+    do {
+      if (!is_independent(mem)) {
+        if (trace) { tty->print_cr("Not independent"); }
+        if (new_mem != in(MemNode::Memory)) {
+#ifdef ASSERT
+          if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); }
+#endif
+          phase->igvn().replace_input_of(this, MemNode::Memory, new_mem);
+        }
+        return;
+      }
+      if (seen.test_set(mem->_idx)) {
+        if (trace) { tty->print_cr("Already seen"); }
+        ShouldNotReachHere();
+        // Strange graph
+        if (new_mem != in(MemNode::Memory)) {
+#ifdef ASSERT
+          if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); }
+#endif
+          phase->igvn().replace_input_of(this, MemNode::Memory, new_mem);
+        }
+        return;
+      }
+      if (mem->is_Phi()) {
+        phis.push(mem);
+      }
+      mem = next_mem(mem, alias);
+      if (mem->bottom_type() == Type::MEMORY) {
+        candidate = mem;
+      }
+      assert(is_dominator(phase->ctrl_or_self(mem), n_ctrl, mem, this, phase) == phase->is_dominator(phase->ctrl_or_self(mem), n_ctrl), "strange dominator");
+#ifdef ASSERT
+      if (trace) { tty->print("Next mem is"); mem->dump(); }
+#endif
+    } while (mem->bottom_type() != Type::MEMORY || !phase->is_dominator(phase->ctrl_or_self(mem), n_ctrl));
+
+    assert(mem->bottom_type() == Type::MEMORY, "bad mem");
+
+    bool not_dom = false;
+    for (uint i = 0; i < phis.size() && !not_dom; i++) {
+      Node* nn = phis.at(i);
+
+#ifdef ASSERT
+      if (trace) { tty->print("Looking from phi"); nn->dump(); }
+#endif
+      assert(nn->is_Phi(), "phis only");
+      for (uint j = 2; j < nn->req() && !not_dom; j++) {
+        Node* m = nn->in(j);
+#ifdef ASSERT
+        if (trace) { tty->print("Input %d is", j); m->dump(); }
+#endif
+        while (m != mem && !seen.test_set(m->_idx)) {
+          if (is_dominator(phase->ctrl_or_self(m), phase->ctrl_or_self(mem), m, mem, phase)) {
+            not_dom = true;
+            // Scheduling anomaly
+#ifdef ASSERT
+            if (trace) { tty->print("Giving up"); m->dump(); }
+#endif
+            break;
+          }
+          if (!is_independent(m)) {
+            if (trace) { tty->print_cr("Not independent"); }
+            if (new_mem != in(MemNode::Memory)) {
+#ifdef ASSERT
+              if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); }
+#endif
+              phase->igvn().replace_input_of(this, MemNode::Memory, new_mem);
+            }
+            return;
+          }
+          if (m->is_Phi()) {
+            phis.push(m);
+          }
+          m = next_mem(m, alias);
+#ifdef ASSERT
+          if (trace) { tty->print("Next mem is"); m->dump(); }
+#endif
+        }
+      }
+    }
+    if (!not_dom) {
+      new_mem = mem;
+      phis.clear();
+    } else {
+      seen.Clear();
+    }
+  }
+}
+
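+// Recognizes the shape left by an explicit null check that leads to
+// an uncommon trap (CastPP hanging off the non-null projection) and
+// returns the trap call, or NULL if the value is not of that shape.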
+CallStaticJavaNode* ShenandoahWriteBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
+  Node* val = in(ValueIn);
+
+  const Type* val_t = igvn.type(val);
+
+  if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
+      val->Opcode() == Op_CastPP &&
+      val->in(0) != NULL &&
+      val->in(0)->Opcode() == Op_IfTrue &&
+      val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
+      val->in(0)->in(0)->is_If() &&
+      val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
+      val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
+      val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
+      val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
+      val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
+    assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
+    CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
+    return unc;
+  }
+  return NULL;
+}
+
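+// If the barrier's value comes out of such a null check, clone the
+// check and move the barrier right above it (see the caller for why
+// this helps implicit null checks), fixing control and memory edges
+// accordingly.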
+void ShenandoahWriteBarrierNode::pin_and_expand_move_barrier(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, Unique_Node_List& uses) {
+  Node* unc = pin_and_expand_null_check(phase->igvn());
+  Node* val = in(ValueIn);
+
+  if (unc != NULL) {
+    Node* ctrl = phase->get_ctrl(this);
+    Node* unc_ctrl = val->in(0);
+
+    // Don't move write barrier in a loop
+    IdealLoopTree* loop = phase->get_loop(ctrl);
+    IdealLoopTree* unc_loop = phase->get_loop(unc_ctrl);
+
+    if (!unc_loop->is_member(loop)) {
+      return;
+    }
+
+    Node* branch = no_branches(ctrl, unc_ctrl, false, phase);
+    assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
+    if (branch == NodeSentinel) {
+      return;
+    }
+
+    RegionNode* r = new RegionNode(3);
+    IfNode* iff = unc_ctrl->in(0)->as_If();
+
+    Node* ctrl_use = unc_ctrl->unique_ctrl_out();
+    Node* unc_ctrl_clone = unc_ctrl->clone();
+    phase->register_control(unc_ctrl_clone, loop, iff);
+    Node* c = unc_ctrl_clone;
+    Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
+    r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
+
+    phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
+    phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
+    phase->lazy_replace(c, unc_ctrl);
+    c = NULL;
+    phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
+    phase->set_ctrl(val, unc_ctrl_clone);
+
+    IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
+    fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
+    Node* iff_proj = iff->proj_out(0);
+    r->init_req(2, iff_proj);
+    phase->register_control(r, phase->ltree_root(), iff);
+
+    Node* new_bol = new_iff->in(1)->clone();
+    Node* new_cmp = new_bol->in(1)->clone();
+    assert(new_cmp->Opcode() == Op_CmpP, "broken");
+    assert(new_cmp->in(1) == val->in(1), "broken");
+    new_bol->set_req(1, new_cmp);
+    new_cmp->set_req(1, this);
+    phase->register_new_node(new_bol, new_iff->in(0));
+    phase->register_new_node(new_cmp, new_iff->in(0));
+    phase->igvn().replace_input_of(new_iff, 1, new_bol);
+    phase->igvn().replace_input_of(new_cast, 1, this);
+
+    for (DUIterator_Fast imax, i = this->fast_outs(imax); i < imax; i++) {
+      Node* u = this->fast_out(i);
+      if (u == new_cast || u->Opcode() == Op_ShenandoahWBMemProj || u == new_cmp) {
+        continue;
+      }
+      phase->igvn().rehash_node_delayed(u);
+      int nb = u->replace_edge(this, new_cast);
+      assert(nb > 0, "no update?");
+      --i; imax -= nb;
+    }
+
+    for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+      Node* u = val->fast_out(i);
+      if (u == this) {
+        continue;
+      }
+      phase->igvn().rehash_node_delayed(u);
+      int nb = u->replace_edge(val, new_cast);
+      assert(nb > 0, "no update?");
+      --i; imax -= nb;
+    }
+
+    Node* new_ctrl = unc_ctrl_clone;
+
+    int alias = phase->C->get_alias_index(adr_type());
+    MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias);
+    if (fixer == NULL) {
+      fixer = create_fixer(memory_graph_fixers, alias, phase, true);
+    }
+
+    Node* proj = find_out_with(Op_ShenandoahWBMemProj);
+    fixer->remove(proj);
+    Node* mem = fixer->find_mem(new_ctrl, NULL);
+
+    if (in(Memory) != mem) {
+      disconnect_barrier_mem(this, phase->igvn());
+      phase->igvn().replace_input_of(this, Memory, mem);
+    }
+
+    phase->set_ctrl_and_loop(this, new_ctrl);
+    phase->igvn().replace_input_of(this, Control, new_ctrl);
+    phase->set_ctrl_and_loop(proj, new_ctrl);
+
+    fixer->fix_mem(new_ctrl, new_ctrl, mem, mem, proj, uses);
+  }
+}
+
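+// Picks the barrier (or the CastPP on the trap path of a matching
+// null check) as replacement and substitutes it for every use of the
+// barrier's input value that the barrier dominates.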
+void ShenandoahWriteBarrierNode::pin_and_expand_helper(PhaseIdealLoop* phase) {
+  Node* val = in(ValueIn);
+  CallStaticJavaNode* unc = pin_and_expand_null_check(phase->igvn());
+  Node* rep = this;
+  Node* ctrl = phase->get_ctrl(this);
+  if (unc != NULL && val->in(0) == ctrl) {
+    Node* unc_ctrl = val->in(0);
+    IfNode* other_iff = unc_ctrl->unique_ctrl_out()->as_If();
+    ProjNode* other_unc_ctrl = other_iff->proj_out(1);
+    Node* cast = NULL;
+    for (DUIterator_Fast imax, i = other_unc_ctrl->fast_outs(imax); i < imax && cast == NULL; i++) {
+      Node* u = other_unc_ctrl->fast_out(i);
+      if (u->Opcode() == Op_CastPP && u->in(1) == this) {
+        cast = u;
+      }
+    }
+    assert(other_unc_ctrl->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) == unc, "broken");
+    rep = cast;
+  }
+
+  // Replace all uses of the barrier's input that are dominated by
+  // ctrl with the value returned by the barrier: there is no need to
+  // keep both live.
+  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+    Node* u = val->fast_out(i);
+    if (u != this) {
+      if (u->is_Phi()) {
+        int nb = 0;
+        for (uint j = 1; j < u->req(); j++) {
+          if (u->in(j) == val) {
+            Node* c = u->in(0)->in(j);
+            if (phase->is_dominator(ctrl, c)) {
+              phase->igvn().replace_input_of(u, j, rep);
+              nb++;
+            }
+          }
+        }
+        if (nb > 0) {
+          imax -= nb;
+          --i;
+        }
+      } else {
+        Node* c = phase->ctrl_or_self(u);
+        if (is_dominator(ctrl, c, this, u, phase)) {
+          phase->igvn().rehash_node_delayed(u);
+          int nb = u->replace_edge(val, rep);
+          assert(nb > 0, "no update?");
+          --i, imax -= nb;
+        }
+      }
+    }
+  }
+}
+
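+// Walks up the dominator tree from ctrl to find the current bottom
+// (all-alias) memory state: a bottom memory Phi, a call's memory
+// projection, or the memory projection of a safepoint/membar/start.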
+Node* ShenandoahWriteBarrierNode::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
+  Node* mem = NULL;
+  Node* c = ctrl;
+  do {
+    if (c->is_Region()) {
+      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
+        Node* u = c->fast_out(i);
+        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
+          if (u->adr_type() == TypePtr::BOTTOM) {
+            mem = u;
+          }
+        }
+      }
+    } else {
+      if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
+        CallProjections projs;
+        c->as_Call()->extract_projections(&projs, true, false);
+        if (projs.fallthrough_memproj != NULL) {
+          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
+            if (projs.catchall_memproj == NULL) {
+              mem = projs.fallthrough_memproj;
+            } else {
+              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
+                mem = projs.fallthrough_memproj;
+              } else {
+                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
+                mem = projs.catchall_memproj;
+              }
+            }
+          }
+        } else {
+          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
+          if (proj != NULL &&
+              proj->adr_type() == TypePtr::BOTTOM) {
+            mem = proj;
+          }
+        }
+      } else {
+        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
+          Node* u = c->fast_out(i);
+          if (u->is_Proj() &&
+              u->bottom_type() == Type::MEMORY &&
+              u->adr_type() == TypePtr::BOTTOM) {
+            assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
+            assert(mem == NULL, "only one proj");
+            mem = u;
+          }
+        }
+        assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
+      }
+    }
+    c = phase->idom(c);
+  } while (mem == NULL);
+  return mem;
+}
+
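+// Collects the non-CFG uses of n that are at the same control, so
+// they can be re-anchored below the expanded barrier later.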
+void ShenandoahWriteBarrierNode::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node* u = n->fast_out(i);
+    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
+      uses.push(u);
+    }
+  }
+}
+
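+// Degrades an outer strip-mined loop into a plain LoopNode/IfNode
+// pair and clears the inner loop's strip-mined status.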
+static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
+  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
+  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
+  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
+  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
+  phase->register_control(new_le, phase->get_loop(le), le->in(0));
+  phase->lazy_replace(outer, new_outer);
+  phase->lazy_replace(le, new_le);
+  inner->clear_strip_mined();
+}
+
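+// Emits a test of the HAS_FORWARDED bit in the thread-local gc-state
+// byte. On return, ctrl continues on the unstable-heap path while
+// heap_stable_ctrl takes the (likely) stable-heap path.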
+void ShenandoahWriteBarrierNode::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
+                                                  PhaseIdealLoop* phase) {
+  IdealLoopTree* loop = phase->get_loop(ctrl);
+  Node* thread = new ThreadLocalNode();
+  phase->register_new_node(thread, ctrl);
+  Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+  phase->set_ctrl(offset, phase->C->root());
+  Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
+  phase->register_new_node(gc_state_addr, ctrl);
+  uint gc_state_idx = Compile::AliasIdxRaw;
+  const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
+  debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
+
+  Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
+  phase->register_new_node(gc_state, ctrl);
+  Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED));
+  phase->register_new_node(heap_stable_and, ctrl);
+  Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
+  phase->register_new_node(heap_stable_cmp, ctrl);
+  Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
+  phase->register_new_node(heap_stable_test, ctrl);
+  IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
+  phase->register_control(heap_stable_iff, loop, ctrl);
+
+  heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
+  phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
+  ctrl = new IfTrueNode(heap_stable_iff);
+  phase->register_control(ctrl, loop, heap_stable_iff);
+
+  assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
+}
+
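+// If val may be null, emits a null check: ctrl becomes the non-null
+// path and null_ctrl the null path. Leaves null_ctrl as NULL when the
+// type system already proves val non-null.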
+void ShenandoahWriteBarrierNode::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
+  const Type* val_t = phase->igvn().type(val);
+  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
+    IdealLoopTree* loop = phase->get_loop(ctrl);
+    Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
+    phase->register_new_node(null_cmp, ctrl);
+    Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
+    phase->register_new_node(null_test, ctrl);
+    IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
+    phase->register_control(null_iff, loop, ctrl);
+    ctrl = new IfTrueNode(null_iff);
+    phase->register_control(ctrl, loop, null_iff);
+    null_ctrl = new IfFalseNode(null_iff);
+    phase->register_control(null_ctrl, loop, null_iff);
+  }
+}
+
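+// Clones the null-check If at control c, moves c to the new non-null
+// projection and returns a fresh CastPP of the uncasted value pinned
+// on the cloned check.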
+Node* ShenandoahWriteBarrierNode::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
+  IdealLoopTree *loop = phase->get_loop(c);
+  Node* iff = unc_ctrl->in(0);
+  assert(iff->is_If(), "broken");
+  Node* new_iff = iff->clone();
+  new_iff->set_req(0, c);
+  phase->register_control(new_iff, loop, c);
+  Node* iffalse = new IfFalseNode(new_iff->as_If());
+  phase->register_control(iffalse, loop, new_iff);
+  Node* iftrue = new IfTrueNode(new_iff->as_If());
+  phase->register_control(iftrue, loop, new_iff);
+  c = iftrue;
+  const Type *t = phase->igvn().type(val);
+  assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
+  Node* uncasted_val = val->in(1);
+  val = new CastPPNode(uncasted_val, t);
+  val->init_req(0, c);
+  phase->register_new_node(val, c);
+  return val;
+}
+
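+// After the null check has been cloned, re-anchors the nodes hanging
+// off the old failing projection (feeding the uncommon trap or its
+// Region) to the new control.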
+void ShenandoahWriteBarrierNode::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
+                                                Unique_Node_List& uses, PhaseIdealLoop* phase) {
+  IfNode* iff = unc_ctrl->in(0)->as_If();
+  Node* proj = iff->proj_out(0);
+  assert(proj != unc_ctrl, "bad projection");
+  Node* use = proj->unique_ctrl_out();
+
+  assert(use == unc || use->is_Region(), "what else?");
+
+  uses.clear();
+  if (use == unc) {
+    phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
+    for (uint i = 1; i < unc->req(); i++) {
+      Node* n = unc->in(i);
+      if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
+        uses.push(n);
+      }
+    }
+  } else {
+    assert(use->is_Region(), "what else?");
+    uint idx = 1;
+    for (; use->in(idx) != proj; idx++);
+    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
+      Node* u = use->fast_out(i);
+      if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
+        uses.push(u->in(idx));
+      }
+    }
+  }
+  for (uint next = 0; next < uses.size(); next++) {
+    Node *n = uses.at(next);
+    assert(phase->get_ctrl(n) == proj, "bad control");
+    phase->set_ctrl_and_loop(n, new_unc_ctrl);
+    if (n->in(0) == proj) {
+      phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
+    }
+    for (uint i = 0; i < n->req(); i++) {
+      Node* m = n->in(i);
+      if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
+        uses.push(m);
+      }
+    }
+  }
+
+  phase->igvn().rehash_node_delayed(use);
+  int nb = use->replace_edge(proj, new_unc_ctrl);
+  assert(nb == 1, "only use expected");
+}
+
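+// Emits the collection set membership test: shift the oop's address
+// down to a region index, load the matching byte from the in-cset
+// table and branch. not_cset_ctrl exits on the not-in-cset path.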
+void ShenandoahWriteBarrierNode::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
+  IdealLoopTree *loop = phase->get_loop(ctrl);
+  Node* raw_rbtrue = new CastP2XNode(ctrl, val);
+  phase->register_new_node(raw_rbtrue, ctrl);
+  Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
+  phase->register_new_node(cset_offset, ctrl);
+  Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
+  phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
+  Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
+  phase->register_new_node(in_cset_fast_test_adr, ctrl);
+  uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
+  const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
+  debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
+  Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
+  phase->register_new_node(in_cset_fast_test_load, ctrl);
+  Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
+  phase->register_new_node(in_cset_fast_test_cmp, ctrl);
+  Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
+  phase->register_new_node(in_cset_fast_test_test, ctrl);
+  IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
+  phase->register_control(in_cset_fast_test_iff, loop, ctrl);
+
+  not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
+  phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);
+
+  ctrl = new IfFalseNode(in_cset_fast_test_iff);
+  phase->register_control(ctrl, loop, in_cset_fast_test_iff);
+}
+
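+// Emits the runtime call to the write barrier stub: merges the wb and
+// raw memory states into the current bottom memory, issues the leaf
+// call and returns the new control, memory and casted value through
+// the reference parameters.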
+void ShenandoahWriteBarrierNode::call_wb_stub(Node*& ctrl, Node*& val, Node*& result_mem,
+                                              Node* raw_mem, Node* wb_mem,
+                                              int alias,
+                                              PhaseIdealLoop* phase) {
+  IdealLoopTree* loop = phase->get_loop(ctrl);
+  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst();
+
+  // The slow path stub consumes and produces raw memory in addition
+  // to the existing memory edges
+  Node* base = find_bottom_mem(ctrl, phase);
+
+  MergeMemNode* mm = MergeMemNode::make(base);
+  mm->set_memory_at(alias, wb_mem);
+  mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
+  phase->register_new_node(mm, ctrl);
+
+  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_write_barrier_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT), "shenandoah_write_barrier", TypeRawPtr::BOTTOM);
+  call->init_req(TypeFunc::Control, ctrl);
+  call->init_req(TypeFunc::I_O, phase->C->top());
+  call->init_req(TypeFunc::Memory, mm);
+  call->init_req(TypeFunc::FramePtr, phase->C->top());
+  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
+  call->init_req(TypeFunc::Parms, val);
+  phase->register_control(call, loop, ctrl);
+  ctrl = new ProjNode(call, TypeFunc::Control);
+  phase->register_control(ctrl, loop, call);
+  result_mem = new ProjNode(call, TypeFunc::Memory);
+  phase->register_new_node(result_mem, call);
+  val = new ProjNode(call, TypeFunc::Parms);
+  phase->register_new_node(val, call);
+  val = new CheckCastPPNode(ctrl, val, obj_type);
+  phase->register_new_node(val, ctrl);
+}
+
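+// After the barrier at ctrl has been expanded into a diamond ending
+// at region, moves everything that was control dependent on ctrl
+// below the region, except the input raw memory which must stay
+// above the barrier.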
+void ShenandoahWriteBarrierNode::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
+  Node* ctrl = phase->get_ctrl(barrier);
+  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
+
+  // Update the control of all nodes that should be after the
+  // barrier control flow
+  uses.clear();
+  // Every node that is control dependent on the barrier's input
+  // control will be after the expanded barrier. The raw memory (if
+  // it is control dependent on the barrier's input control) must
+  // stay above the barrier.
+  uses_to_ignore.clear();
+  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
+    uses_to_ignore.push(init_raw_mem);
+  }
+  for (uint next = 0; next < uses_to_ignore.size(); next++) {
+    Node *n = uses_to_ignore.at(next);
+    for (uint i = 0; i < n->req(); i++) {
+      Node* in = n->in(i);
+      if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
+        uses_to_ignore.push(in);
+      }
+    }
+  }
+  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
+    Node* u = ctrl->fast_out(i);
+    if (u->_idx < last &&
+        u != barrier &&
+        !uses_to_ignore.member(u) &&
+        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
+        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
+      Node* old_c = phase->ctrl_or_self(u);
+      Node* c = old_c;
+      if (c != ctrl ||
+          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
+          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
+        phase->igvn().rehash_node_delayed(u);
+        int nb = u->replace_edge(ctrl, region);
+        if (u->is_CFG()) {
+          if (phase->idom(u) == ctrl) {
+            phase->set_idom(u, region, phase->dom_depth(region));
+          }
+        } else if (phase->get_ctrl(u) == ctrl) {
+          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
+          uses.push(u);
+        }
+        assert(nb == 1, "more than 1 ctrl input?");
+        --i, imax -= nb;
+      }
+    }
+  }
+}
+
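+// Expansion driver: collects enqueue barriers, commons write barriers
+// once more, pins every write barrier at its final control (moving it
+// above a dominating null check where possible) and then expands the
+// barriers into explicit control flow with runtime calls.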
+void ShenandoahWriteBarrierNode::pin_and_expand(PhaseIdealLoop* phase) {
+  Node_List enqueue_barriers;
+  if (ShenandoahStoreValEnqueueBarrier) {
+    Unique_Node_List wq;
+    wq.push(phase->C->root());
+    for (uint i = 0; i < wq.size(); i++) {
+      Node* n = wq.at(i);
+      if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
+        enqueue_barriers.push(n);
+      }
+      for (uint i = 0; i < n->req(); i++) {
+        Node* in = n->in(i);
+        if (in != NULL) {
+          wq.push(in);
+        }
+      }
+    }
+  }
+
+  const bool trace = false;
+
+  // Collect raw memory states at CFG points in the entire graph and
+  // record them in the memory graph fixers, optimizing and
+  // simplifying the raw memory graph in the process.
+  GrowableArray<MemoryGraphFixer*> memory_graph_fixers;
+
+  // Let's try to common write barriers again
+  optimize_before_expansion(phase, memory_graph_fixers, true);
+
+  Unique_Node_List uses;
+  for (int i = 0; i < ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i++) {
+    ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i);
+    Node* ctrl = phase->get_ctrl(wb);
+
+    Node* val = wb->in(ValueIn);
+    if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
+      assert(is_dominator(phase->get_ctrl(val), ctrl->in(0)->in(0), val, ctrl->in(0), phase), "can't move");
+      phase->set_ctrl(wb, ctrl->in(0)->in(0));
+    } else if (ctrl->is_CallRuntime()) {
+      assert(is_dominator(phase->get_ctrl(val), ctrl->in(0), val, ctrl, phase), "can't move");
+      phase->set_ctrl(wb, ctrl->in(0));
+    }
+
+    assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "only for write barriers");
+    // Look for a null check that dominates this barrier and move the
+    // barrier right after the null check to enable implicit null
+    // checks
+    wb->pin_and_expand_move_barrier(phase, memory_graph_fixers, uses);
+
+    wb->pin_and_expand_helper(phase);
+  }
+
+  MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
+  Unique_Node_List uses_to_ignore;
+  Unique_Node_List outer_lsms;
+  for (uint i = 0; i < enqueue_barriers.size(); i++) {
+    Node* barrier = enqueue_barriers.at(i);
+    Node* pre_val = barrier->in(1);
+
+    if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
+      ShouldNotReachHere();
+      continue;
+    }
+
+    Node* ctrl = phase->get_ctrl(barrier);
+
+    if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
+      assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
+      ctrl = ctrl->in(0)->in(0);
+      phase->set_ctrl(barrier, ctrl);
+    } else if (ctrl->is_CallRuntime()) {
+      assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
+      ctrl = ctrl->in(0);
+      phase->set_ctrl(barrier, ctrl);
+    }
+
+    Node* init_ctrl = ctrl;
+    IdealLoopTree* loop = phase->get_loop(ctrl);
+    if (loop->_head->is_OuterStripMinedLoop()) {
+      outer_lsms.push(loop->_head);
+    }
+    Node* raw_mem = fixer.find_mem(ctrl, barrier);
+    Node* init_raw_mem = raw_mem;
+    Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
+    Node* heap_stable_ctrl = NULL;
+    Node* null_ctrl = NULL;
+    uint last = phase->C->unique();
+
+    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
+    Node* region = new RegionNode(PATH_LIMIT);
+    Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
+
+    enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
+    Node* region2 = new RegionNode(PATH_LIMIT2);
+    Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
+
+    // Stable path.
+    test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
+    region->init_req(_heap_stable, heap_stable_ctrl);
+    phi->init_req(_heap_stable, raw_mem);
+
+    // Null path
+    Node* reg2_ctrl = NULL;
+    test_null(ctrl, pre_val, null_ctrl, phase);
+    if (null_ctrl != NULL) {
+      reg2_ctrl = null_ctrl->in(0);
+      region2->init_req(_null_path, null_ctrl);
+      phi2->init_req(_null_path, raw_mem);
+    } else {
+      region2->del_req(_null_path);
+      phi2->del_req(_null_path);
+    }
+
+    const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
+    const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
+    Node* thread = new ThreadLocalNode();
+    phase->register_new_node(thread, ctrl);
+    Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
+    phase->register_new_node(buffer_adr, ctrl);
+    Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
+    phase->register_new_node(index_adr, ctrl);
+
+    BasicType index_bt = TypeX_X->basic_type();
+    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading SATBMarkQueue::_index with wrong size.");
+    const TypePtr* adr_type = TypeRawPtr::BOTTOM;
+    Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
+    phase->register_new_node(index, ctrl);
+    Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
+    phase->register_new_node(index_cmp, ctrl);
+    Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
+    phase->register_new_node(index_test, ctrl);
+    IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
+    if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
+    phase->register_control(queue_full_iff, loop, ctrl);
+    Node* not_full = new IfTrueNode(queue_full_iff);
+    phase->register_control(not_full, loop, queue_full_iff);
+    Node* full = new IfFalseNode(queue_full_iff);
+    phase->register_control(full, loop, queue_full_iff);
+
+    ctrl = not_full;
+
+    Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
+    phase->register_new_node(next_index, ctrl);
+
+    Node* buffer = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
+    phase->register_new_node(buffer, ctrl);
+    Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
+    phase->register_new_node(log_addr, ctrl);
+    Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
+    phase->register_new_node(log_store, ctrl);
+    // update the index
+    Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
+    phase->register_new_node(index_update, ctrl);
+
+    // Fast-path case
+    region2->init_req(_fast_path, ctrl);
+    phi2->init_req(_fast_path, index_update);
+
+    ctrl = full;
+
+    Node* base = find_bottom_mem(ctrl, phase);
+
+    MergeMemNode* mm = MergeMemNode::make(base);
+    mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
+    phase->register_new_node(mm, ctrl);
+
+    Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
+    call->init_req(TypeFunc::Control, ctrl);
+    call->init_req(TypeFunc::I_O, phase->C->top());
+    call->init_req(TypeFunc::Memory, mm);
+    call->init_req(TypeFunc::FramePtr, phase->C->top());
+    call->init_req(TypeFunc::ReturnAdr, phase->C->top());
+    call->init_req(TypeFunc::Parms, pre_val);
+    call->init_req(TypeFunc::Parms+1, thread);
+    phase->register_control(call, loop, ctrl);
+
+    Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
+    phase->register_control(ctrl_proj, loop, call);
+    Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
+    phase->register_new_node(mem_proj, call);
+
+    // Slow-path case
+    region2->init_req(_slow_path, ctrl_proj);
+    phi2->init_req(_slow_path, mem_proj);
+
+    phase->register_control(region2, loop, reg2_ctrl);
+    phase->register_new_node(phi2, region2);
+
+    region->init_req(_heap_unstable, region2);
+    phi->init_req(_heap_unstable, phi2);
+
+    phase->register_control(region, loop, heap_stable_ctrl->in(0));
+    phase->register_new_node(phi, region);
+
+    fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
+    for (uint next = 0; next < uses.size(); next++) {
+      Node *n = uses.at(next);
+      assert(phase->get_ctrl(n) == init_ctrl, "bad control");
+      assert(n != init_raw_mem, "should leave input raw mem above the barrier");
+      phase->set_ctrl(n, region);
+      follow_barrier_uses(n, init_ctrl, uses, phase);
+    }
+    fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
+
+    phase->igvn().replace_node(barrier, pre_val);
+  }
+
+  for (int i = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i > 0; i--) {
+    int cnt = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count();
+    ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i-1);
+
+    uint last = phase->C->unique();
+    Node* ctrl = phase->get_ctrl(wb);
+    Node* orig_ctrl = ctrl;
+
+    Node* raw_mem = fixer.find_mem(ctrl, wb);
+    Node* init_raw_mem = raw_mem;
+    Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
+    int alias = phase->C->get_alias_index(wb->adr_type());
+    Node* wb_mem = wb->in(Memory);
+    Node* init_wb_mem = wb_mem;
+
+    Node* val = wb->in(ValueIn);
+    Node* wbproj = wb->find_out_with(Op_ShenandoahWBMemProj);
+    IdealLoopTree *loop = phase->get_loop(ctrl);
+    if (loop->_head->is_OuterStripMinedLoop()) {
+      outer_lsms.push(loop->_head);
+    }
+
+    assert(val->Opcode() != Op_ShenandoahWriteBarrier, "No chain of write barriers");
+
+    CallStaticJavaNode* unc = wb->pin_and_expand_null_check(phase->igvn());
+    Node* unc_ctrl = NULL;
+    if (unc != NULL) {
+      if (val->in(0) != ctrl) {
+        unc = NULL;
+      } else {
+        unc_ctrl = val->in(0);
+      }
+    }
+
+    Node* uncasted_val = val;
+    if (unc != NULL) {
+      uncasted_val = val->in(1);
+    }
+
+    Node* heap_stable_ctrl = NULL;
+    Node* null_ctrl = NULL;
+
+    assert(val->bottom_type()->make_oopptr(), "need oop");
+    assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
+
+    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
+    Node* region = new RegionNode(PATH_LIMIT);
+    Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
+    Node* mem_phi = PhiNode::make(region, wb_mem, Type::MEMORY, phase->C->alias_type(wb->adr_type())->adr_type());
+    Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
+
+    enum { _not_cset = 1, _not_equal, _evac_path, _null_path, PATH_LIMIT2 };
+    Node* region2 = new RegionNode(PATH_LIMIT2);
+    Node* val_phi2 = new PhiNode(region2, uncasted_val->bottom_type()->is_oopptr());
+    Node* mem_phi2 = PhiNode::make(region2, wb_mem, Type::MEMORY, phase->C->alias_type(wb->adr_type())->adr_type());
+    Node* raw_mem_phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
+
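+    // The expansion below has the following overall shape (pseudocode):
+    //
+    //   if (heap_stable)          result = obj;
+    //   else if (obj == NULL)     result = NULL;
+    //   else if (!in_cset(obj))   result = obj;
+    //   else if (fwd(obj) != obj) result = fwd(obj);       // already forwarded
+    //   else                      result = evacuate(obj);  // WB stub call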
+    // Stable path.
+    test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
+    IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
+
+    // Heap stable case
+    region->init_req(_heap_stable, heap_stable_ctrl);
+    val_phi->init_req(_heap_stable, uncasted_val);
+    mem_phi->init_req(_heap_stable, wb_mem);
+    raw_mem_phi->init_req(_heap_stable, raw_mem);
+
+    Node* reg2_ctrl = NULL;
+    // Null case
+    test_null(ctrl, val, null_ctrl, phase);
+    if (null_ctrl != NULL) {
+      reg2_ctrl = null_ctrl->in(0);
+      region2->init_req(_null_path, null_ctrl);
+      val_phi2->init_req(_null_path, uncasted_val);
+      mem_phi2->init_req(_null_path, wb_mem);
+      raw_mem_phi2->init_req(_null_path, raw_mem);
+    } else {
+      region2->del_req(_null_path);
+      val_phi2->del_req(_null_path);
+      mem_phi2->del_req(_null_path);
+      raw_mem_phi2->del_req(_null_path);
+    }
+
+    // Test for in-cset.
+    // Wires !in_cset(obj) into the _not_cset slot of region2 and its phis.
+    Node* not_cset_ctrl = NULL;
+    in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
+    if (not_cset_ctrl != NULL) {
+      if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
+      region2->init_req(_not_cset, not_cset_ctrl);
+      val_phi2->init_req(_not_cset, uncasted_val);
+      mem_phi2->init_req(_not_cset, wb_mem);
+      raw_mem_phi2->init_req(_not_cset, raw_mem);
+    }
+
+    // Resolve the object when the original value is in the cset:
+    // load the forwarding pointer unconditionally rather than going
+    // through a read barrier.
+    Node* new_val = uncasted_val;
+    if (unc_ctrl != NULL) {
+      // Clone the null check in this branch to allow implicit null check
+      new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
+      fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
+
+      IfNode* iff = unc_ctrl->in(0)->as_If();
+      phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
+    }
+    Node* addr = new AddPNode(new_val, uncasted_val, phase->igvn().MakeConX(ShenandoahBrooksPointer::byte_offset()));
+    phase->register_new_node(addr, ctrl);
+    assert(val->bottom_type()->isa_oopptr(), "what else?");
+    const TypePtr* obj_type = val->bottom_type()->is_oopptr();
+    const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
+    Node* fwd = new LoadPNode(ctrl, wb_mem, addr, adr_type, obj_type, MemNode::unordered);
+    phase->register_new_node(fwd, ctrl);
+
+    // Only call the WB stub if the object is not yet forwarded; otherwise use the fwd ptr
+    Node* cmp = new CmpPNode(fwd, new_val);
+    phase->register_new_node(cmp, ctrl);
+    Node* bol = new BoolNode(cmp, BoolTest::eq);
+    phase->register_new_node(bol, ctrl);
+
+    IfNode* iff = new IfNode(ctrl, bol, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
+    if (reg2_ctrl == NULL) reg2_ctrl = iff;
+    phase->register_control(iff, loop, ctrl);
+    Node* if_not_eq = new IfFalseNode(iff);
+    phase->register_control(if_not_eq, loop, iff);
+    Node* if_eq = new IfTrueNode(iff);
+    phase->register_control(if_eq, loop, iff);
+
+    // Wire up the not-equal path in the _not_equal slot.
+    region2->init_req(_not_equal, if_not_eq);
+    val_phi2->init_req(_not_equal, fwd);
+    mem_phi2->init_req(_not_equal, wb_mem);
+    raw_mem_phi2->init_req(_not_equal, raw_mem);
+
+    // Call the WB stub and wire up that path in the _evac_path slot
+    Node* result_mem = NULL;
+    ctrl = if_eq;
+    call_wb_stub(ctrl, new_val, result_mem,
+                 raw_mem, wb_mem,
+                 alias, phase);
+    region2->init_req(_evac_path, ctrl);
+    val_phi2->init_req(_evac_path, new_val);
+    mem_phi2->init_req(_evac_path, result_mem);
+    raw_mem_phi2->init_req(_evac_path, result_mem);
+
+    phase->register_control(region2, loop, reg2_ctrl);
+    phase->register_new_node(val_phi2, region2);
+    phase->register_new_node(mem_phi2, region2);
+    phase->register_new_node(raw_mem_phi2, region2);
+
+    region->init_req(_heap_unstable, region2);
+    val_phi->init_req(_heap_unstable, val_phi2);
+    mem_phi->init_req(_heap_unstable, mem_phi2);
+    raw_mem_phi->init_req(_heap_unstable, raw_mem_phi2);
+
+    phase->register_control(region, loop, heap_stable_iff);
+    Node* out_val = val_phi;
+    phase->register_new_node(val_phi, region);
+    phase->register_new_node(mem_phi, region);
+    phase->register_new_node(raw_mem_phi, region);
+
+    fix_ctrl(wb, region, fixer, uses, uses_to_ignore, last, phase);
+
+    ctrl = orig_ctrl;
+
+    phase->igvn().replace_input_of(wbproj, ShenandoahWBMemProjNode::WriteBarrier, phase->C->top());
+    phase->igvn().replace_node(wbproj, mem_phi);
+    if (unc != NULL) {
+      for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+        Node* u = val->fast_out(i);
+        Node* c = phase->ctrl_or_self(u);
+        if (u != wb && (c != ctrl || is_dominator_same_ctrl(c, wb, u, phase))) {
+          phase->igvn().rehash_node_delayed(u);
+          int nb = u->replace_edge(val, out_val);
+          --i, imax -= nb;
+        }
+      }
+      if (val->outcnt() == 0) {
+        phase->igvn()._worklist.push(val);
+      }
+    }
+    phase->igvn().replace_node(wb, out_val);
+
+    follow_barrier_uses(mem_phi, ctrl, uses, phase);
+    follow_barrier_uses(out_val, ctrl, uses, phase);
+
+    for (uint next = 0; next < uses.size(); next++) {
+      Node *n = uses.at(next);
+      assert(phase->get_ctrl(n) == ctrl, "bad control");
+      assert(n != init_raw_mem, "should leave input raw mem above the barrier");
+      phase->set_ctrl(n, region);
+      follow_barrier_uses(n, ctrl, uses, phase);
+    }
+
+    // The slow path call produces memory: hook the raw memory phi
+    // from the expanded write barrier with the rest of the graph
+    // which may require adding memory phis at every post-dominated
+    // region and at enclosing loop heads. Use the memory state
+    // collected in memory_nodes to fix the memory graph. Update that
+    // memory state as we go.
+    fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
+    assert(ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() == cnt - 1, "not replaced");
+  }
+
+  assert(ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() == 0, "all write barrier nodes should have been replaced");
+
+  for (uint i = 0; i < outer_lsms.size(); i++) {
+    // Expanding a barrier here will break loop strip mining
+    // verification. Transform the loop so the loop nest doesn't
+    // appear as strip mined.
+    OuterStripMinedLoopNode* outer = outer_lsms.at(i)->as_OuterStripMinedLoop();
+    hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
+  }
+}
+
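+// Hoist the gc-state load (and the And/Cmp/Bool chain built on it) of a
+// heap-stable test above the loop entry, so the test becomes loop
+// invariant and the loop can later be unswitched on it.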
+void ShenandoahWriteBarrierNode::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
+  IdealLoopTree *loop = phase->get_loop(iff);
+  Node* loop_head = loop->_head;
+  Node* entry_c = loop_head->in(LoopNode::EntryControl);
+
+  Node* bol = iff->in(1);
+  Node* cmp = bol->in(1);
+  Node* andi = cmp->in(1);
+  Node* load = andi->in(1);
+
+  assert(is_gc_state_load(load), "broken");
+  if (!phase->is_dominator(load->in(0), entry_c)) {
+    Node* mem_ctrl = NULL;
+    Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
+    load = load->clone();
+    load->set_req(MemNode::Memory, mem);
+    load->set_req(0, entry_c);
+    phase->register_new_node(load, entry_c);
+    andi = andi->clone();
+    andi->set_req(1, load);
+    phase->register_new_node(andi, entry_c);
+    cmp = cmp->clone();
+    cmp->set_req(1, andi);
+    phase->register_new_node(cmp, entry_c);
+    bol = bol->clone();
+    bol->set_req(1, cmp);
+    phase->register_new_node(bol, entry_c);
+
+    phase->igvn().replace_input_of(iff, 1, bol);
+  }
+}
+
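+// Returns true if n is a heap-stable test that is immediately dominated
+// by an identical heap-stable test, i.e. every input of n's region is
+// dominated by one of the projections of the dominating if.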
+bool ShenandoahWriteBarrierNode::identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase) {
+  if (!n->is_If() || n->is_CountedLoopEnd()) {
+    return false;
+  }
+  Node* region = n->in(0);
+
+  if (!region->is_Region()) {
+    return false;
+  }
+  Node* dom = phase->idom(region);
+  if (!dom->is_If()) {
+    return false;
+  }
+
+  if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
+    return false;
+  }
+
+  IfNode* dom_if = dom->as_If();
+  Node* proj_true = dom_if->proj_out(1);
+  Node* proj_false = dom_if->proj_out(0);
+
+  for (uint i = 1; i < region->req(); i++) {
+    if (phase->is_dominator(proj_true, region->in(i))) {
+      continue;
+    }
+    if (phase->is_dominator(proj_false, region->in(i))) {
+      continue;
+    }
+    return false;
+  }
+
+  return true;
+}
+
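+// Merge a heap-stable test with an identical dominating test: feed the
+// outcome known from the dominating if into n through a Bool phi, then
+// split the if so that it constant folds on each incoming path.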
+void ShenandoahWriteBarrierNode::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
+  assert(is_heap_stable_test(n), "no other tests");
+  if (identical_backtoback_ifs(n, phase)) {
+    Node* n_ctrl = n->in(0);
+    if (phase->can_split_if(n_ctrl)) {
+      IfNode* dom_if = phase->idom(n_ctrl)->as_If();
+      if (is_heap_stable_test(n)) {
+        Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
+        assert(is_gc_state_load(gc_state_load), "broken");
+        Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
+        assert(is_gc_state_load(dom_gc_state_load), "broken");
+        if (gc_state_load != dom_gc_state_load) {
+          phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
+        }
+      }
+      PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
+      Node* proj_true = dom_if->proj_out(1);
+      Node* proj_false = dom_if->proj_out(0);
+      Node* con_true = phase->igvn().makecon(TypeInt::ONE);
+      Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
+
+      for (uint i = 1; i < n_ctrl->req(); i++) {
+        if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
+          bolphi->init_req(i, con_true);
+        } else {
+          assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
+          bolphi->init_req(i, con_false);
+        }
+      }
+      phase->register_new_node(bolphi, n_ctrl);
+      phase->igvn().replace_input_of(n, 1, bolphi);
+      phase->do_split_if(n);
+    }
+  }
+}
+
+IfNode* ShenandoahWriteBarrierNode::find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase) {
+  // Find first invariant test that doesn't exit the loop
+  LoopNode *head = loop->_head->as_Loop();
+  IfNode* unswitch_iff = NULL;
+  Node* n = head->in(LoopNode::LoopBackControl);
+  int loop_has_sfpts = -1;
+  while (n != head) {
+    Node* n_dom = phase->idom(n);
+    if (n->is_Region()) {
+      if (n_dom->is_If()) {
+        IfNode* iff = n_dom->as_If();
+        if (iff->in(1)->is_Bool()) {
+          BoolNode* bol = iff->in(1)->as_Bool();
+          if (bol->in(1)->is_Cmp()) {
+            // If the condition is invariant and does not exit the loop,
+            // we have found a candidate for unswitching.
+            if (is_heap_stable_test(iff) &&
+                (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
+              assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
+              if (loop_has_sfpts == -1) {
+                for (uint i = 0; i < loop->_body.size(); i++) {
+                  Node *m = loop->_body[i];
+                  if (m->is_SafePoint() && !m->is_CallLeaf()) {
+                    loop_has_sfpts = 1;
+                    break;
+                  }
+                }
+                if (loop_has_sfpts == -1) {
+                  loop_has_sfpts = 0;
+                }
+              }
+              if (!loop_has_sfpts) {
+                unswitch_iff = iff;
+              }
+            }
+          }
+        }
+      }
+    }
+    n = n_dom;
+  }
+  return unswitch_iff;
+}
+
+
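+// Post-expansion cleanup: common up gc-state loads, merge back-to-back
+// heap-stable tests and, for innermost loops, hoist the heap-stable
+// test out of the loop and unswitch on it where profitable.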
+void ShenandoahWriteBarrierNode::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
+  Node_List heap_stable_tests;
+  Node_List gc_state_loads;
+
+  stack.push(phase->C->start(), 0);
+  do {
+    Node* n = stack.node();
+    uint i = stack.index();
+
+    if (i < n->outcnt()) {
+      Node* u = n->raw_out(i);
+      stack.set_index(i+1);
+      if (!visited.test_set(u->_idx)) {
+        stack.push(u, 0);
+      }
+    } else {
+      stack.pop();
+      if (ShenandoahCommonGCStateLoads && is_gc_state_load(n)) {
+        gc_state_loads.push(n);
+      }
+      if (n->is_If() && is_heap_stable_test(n)) {
+        heap_stable_tests.push(n);
+      }
+    }
+  } while (stack.size() > 0);
+
+  bool progress;
+  do {
+    progress = false;
+    for (uint i = 0; i < gc_state_loads.size(); i++) {
+      Node* n = gc_state_loads.at(i);
+      if (n->outcnt() != 0) {
+        progress |= try_common_gc_state_load(n, phase);
+      }
+    }
+  } while (progress);
+
+  for (uint i = 0; i < heap_stable_tests.size(); i++) {
+    Node* n = heap_stable_tests.at(i);
+    assert(is_heap_stable_test(n), "only evacuation test");
+    merge_back_to_back_tests(n, phase);
+  }
+
+  if (!phase->C->major_progress()) {
+    VectorSet seen(Thread::current()->resource_area());
+    for (uint i = 0; i < heap_stable_tests.size(); i++) {
+      Node* n = heap_stable_tests.at(i);
+      IdealLoopTree* loop = phase->get_loop(n);
+      if (loop != phase->ltree_root() &&
+          loop->_child == NULL &&
+          !loop->_irreducible) {
+        LoopNode* head = loop->_head->as_Loop();
+        if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
+            !seen.test_set(head->_idx)) {
+          IfNode* iff = find_unswitching_candidate(loop, phase);
+          if (iff != NULL) {
+            Node* bol = iff->in(1);
+            if (head->is_strip_mined()) {
+              head->verify_strip_mined(0);
+            }
+            move_heap_stable_test_out_of_loop(iff, phase);
+            if (loop->policy_unswitching(phase)) {
+              if (head->is_strip_mined()) {
+                OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
+                hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
+              }
+              phase->do_unswitching(loop, old_new);
+            } else {
+              // Not proceeding with unswitching. Move the load back
+              // into the loop.
+              phase->igvn().replace_input_of(iff, 1, bol);
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+#ifdef ASSERT
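+// Verify the raw memory graph around WB stub calls: every control
+// region reached from a call must either have all of its inputs on the
+// traversed control path, or merge the memory state through a memory
+// phi whose inputs match the traversed paths.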
+void ShenandoahBarrierNode::verify_raw_mem(RootNode* root) {
+  const bool trace = false;
+  ResourceMark rm;
+  Unique_Node_List nodes;
+  Unique_Node_List controls;
+  Unique_Node_List memories;
+
+  nodes.push(root);
+  for (uint next = 0; next < nodes.size(); next++) {
+    Node *n = nodes.at(next);
+    if (ShenandoahBarrierSetC2::is_shenandoah_wb_call(n)) {
+      controls.push(n);
+      if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
+      for (uint next2 = 0; next2 < controls.size(); next2++) {
+        Node *m = controls.at(next2);
+        for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
+          Node* u = m->fast_out(i);
+          if (u->is_CFG() && !u->is_Root() &&
+              !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
+              !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
+            if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
+            controls.push(u);
+          }
+        }
+      }
+      memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
+      for (uint next2 = 0; next2 < memories.size(); next2++) {
+        Node *m = memories.at(next2);
+        assert(m->bottom_type() == Type::MEMORY, "");
+        for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
+          Node* u = m->fast_out(i);
+          if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
+            if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
+            memories.push(u);
+          } else if (u->is_LoadStore()) {
+            if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
+            memories.push(u->find_out_with(Op_SCMemProj));
+          } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
+            if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
+            memories.push(u);
+          } else if (u->is_Phi()) {
+            assert(u->bottom_type() == Type::MEMORY, "");
+            if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
+              assert(controls.member(u->in(0)), "");
+              if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
+              memories.push(u);
+            }
+          } else if (u->is_SafePoint() || u->is_MemBar()) {
+            for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
+              Node* uu = u->fast_out(j);
+              if (uu->bottom_type() == Type::MEMORY) {
+                if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
+                memories.push(uu);
+              }
+            }
+          }
+        }
+      }
+      for (uint next2 = 0; next2 < controls.size(); next2++) {
+        Node *m = controls.at(next2);
+        if (m->is_Region()) {
+          bool all_in = true;
+          for (uint i = 1; i < m->req(); i++) {
+            if (!controls.member(m->in(i))) {
+              all_in = false;
+              break;
+            }
+          }
+          if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
+          bool found_phi = false;
+          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
+            Node* u = m->fast_out(j);
+            if (u->is_Phi() && memories.member(u)) {
+              found_phi = true;
+              for (uint i = 1; i < u->req() && found_phi; i++) {
+                Node* k = u->in(i);
+                if (memories.member(k) != controls.member(m->in(i))) {
+                  found_phi = false;
+                }
+              }
+            }
+          }
+          assert(found_phi || all_in, "");
+        }
+      }
+      controls.clear();
+      memories.clear();
+    }
+    for (uint i = 0; i < n->len(); ++i) {
+      Node *m = n->in(i);
+      if (m != NULL) {
+        nodes.push(m);
+      }
+    }
+  }
+}
+#endif
+
+const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
+  if (in(1) == NULL || in(1)->is_top()) {
+    return Type::TOP;
+  }
+  const Type* t = in(1)->bottom_type();
+  if (t == TypePtr::NULL_PTR) {
+    return t;
+  }
+  return t->is_oopptr()->cast_to_nonconst();
+}
+
+const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
+  if (in(1) == NULL) {
+    return Type::TOP;
+  }
+  const Type* t = phase->type(in(1));
+  if (t == Type::TOP) {
+    return Type::TOP;
+  }
+  if (t == TypePtr::NULL_PTR) {
+    return t;
+  }
+  return t->is_oopptr()->cast_to_nonconst();
+}
+
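+// Classify whether a value needs an enqueue barrier: allocations, NULL
+// and constant oops never do; phis and CMoves require looking through
+// their inputs.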
+int ShenandoahEnqueueBarrierNode::needed(Node* n) {
+  if (n == NULL ||
+      n->is_Allocate() ||
+      n->bottom_type() == TypePtr::NULL_PTR ||
+      (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
+    return NotNeeded;
+  }
+  if (n->is_Phi() ||
+      n->is_CMove()) {
+    return MaybeNeeded;
+  }
+  return Needed;
+}
+
+Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
+  for (;;) {
+    if (n == NULL) {
+      return n;
+    } else if (n->bottom_type() == TypePtr::NULL_PTR) {
+      return n;
+    } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
+      return n;
+    } else if (n->is_ConstraintCast() ||
+               n->Opcode() == Op_DecodeN ||
+               n->Opcode() == Op_EncodeP) {
+      n = n->in(1);
+    } else if (n->is_Proj()) {
+      n = n->in(0);
+    } else {
+      return n;
+    }
+  }
+  ShouldNotReachHere();
+  return NULL;
+}
+
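+// Eliminate redundant enqueue barriers: look through casts, projections,
+// phis and CMoves back to the defining nodes; if no reachable definition
+// needs a barrier, the barrier is the identity of its input.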
+Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
+  PhaseIterGVN* igvn = phase->is_IterGVN();
+
+  Node* n = next(in(1));
+
+  int cont = needed(n);
+
+  if (cont == NotNeeded) {
+    return in(1);
+  } else if (cont == MaybeNeeded) {
+    if (igvn == NULL) {
+      phase->record_for_igvn(this);
+      return this;
+    } else {
+      ResourceMark rm;
+      Unique_Node_List wq;
+      uint wq_i = 0;
+
+      for (;;) {
+        if (n->is_Phi()) {
+          for (uint i = 1; i < n->req(); i++) {
+            Node* m = n->in(i);
+            if (m != NULL) {
+              wq.push(m);
+            }
+          }
+        } else {
+          assert(n->is_CMove(), "nothing else here");
+          Node* m = n->in(CMoveNode::IfFalse);
+          wq.push(m);
+          m = n->in(CMoveNode::IfTrue);
+          wq.push(m);
+        }
+        Node* orig_n = NULL;
+        do {
+          if (wq_i >= wq.size()) {
+            return in(1);
+          }
+          n = wq.at(wq_i);
+          wq_i++;
+          orig_n = n;
+          n = next(n);
+          cont = needed(n);
+          if (cont == Needed) {
+            return this;
+          }
+        } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
+      }
+    }
+  }
+
+  return this;
+}
+
+#ifdef ASSERT
+static bool has_never_branch(Node* root) {
+  for (uint i = 1; i < root->req(); i++) {
+    Node* in = root->in(i);
+    if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
+      return true;
+    }
+  }
+  return false;
+}
+#endif
+
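+// Build a map from CFG nodes to the memory state of this fixer's alias
+// at that point: walk the memory graph from the root, then propagate
+// the states over the CFG in reverse postorder, creating memory phis at
+// merge points where the incoming states differ.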
+void MemoryGraphFixer::collect_memory_nodes() {
+  Node_Stack stack(0);
+  VectorSet visited(Thread::current()->resource_area());
+  Node_List regions;
+
+  // Walk the raw memory graph and create a mapping from CFG node to
+  // memory node. Exclude phis for now.
+  stack.push(_phase->C->root(), 1);
+  do {
+    Node* n = stack.node();
+    int opc = n->Opcode();
+    uint i = stack.index();
+    if (i < n->req()) {
+      Node* mem = NULL;
+      if (opc == Op_Root) {
+        Node* in = n->in(i);
+        int in_opc = in->Opcode();
+        if (in_opc == Op_Return || in_opc == Op_Rethrow) {
+          mem = in->in(TypeFunc::Memory);
+        } else if (in_opc == Op_Halt) {
+          if (!in->in(0)->is_Region()) {
+            Node* proj = in->in(0);
+            assert(proj->is_Proj(), "");
+            Node* in = proj->in(0);
+            assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
+            if (in->is_CallStaticJava()) {
+              mem = in->in(TypeFunc::Memory);
+            } else if (in->Opcode() == Op_Catch) {
+              Node* call = in->in(0)->in(0);
+              assert(call->is_Call(), "");
+              mem = call->in(TypeFunc::Memory);
+            }
+          }
+        } else {
+#ifdef ASSERT
+          n->dump();
+          in->dump();
+#endif
+          ShouldNotReachHere();
+        }
+      } else {
+        assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
+        assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
+        mem = n->in(i);
+      }
+      i++;
+      stack.set_index(i);
+      if (mem == NULL) {
+        continue;
+      }
+      for (;;) {
+        if (visited.test_set(mem->_idx) || mem->is_Start()) {
+          break;
+        }
+        if (mem->is_Phi()) {
+          stack.push(mem, 2);
+          mem = mem->in(1);
+        } else if (mem->is_Proj()) {
+          stack.push(mem, mem->req());
+          mem = mem->in(0);
+        } else if (mem->is_SafePoint() || mem->is_MemBar()) {
+          mem = mem->in(TypeFunc::Memory);
+        } else if (mem->is_MergeMem()) {
+          MergeMemNode* mm = mem->as_MergeMem();
+          mem = mm->memory_at(_alias);
+        } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
+          assert(_alias == Compile::AliasIdxRaw, "");
+          stack.push(mem, mem->req());
+          mem = mem->in(MemNode::Memory);
+        } else if (mem->Opcode() == Op_ShenandoahWriteBarrier) {
+          assert(_alias != Compile::AliasIdxRaw, "");
+          mem = mem->in(ShenandoahBarrierNode::Memory);
+        } else if (mem->Opcode() == Op_ShenandoahWBMemProj) {
+          stack.push(mem, mem->req());
+          mem = mem->in(ShenandoahWBMemProjNode::WriteBarrier);
+        } else {
+#ifdef ASSERT
+          mem->dump();
+#endif
+          ShouldNotReachHere();
+        }
+      }
+    } else {
+      if (n->is_Phi()) {
+        // Nothing
+      } else if (!n->is_Root()) {
+        Node* c = get_ctrl(n);
+        _memory_nodes.map(c->_idx, n);
+      }
+      stack.pop();
+    }
+  } while (stack.is_nonempty());
+
+  // Iterate over CFG nodes in rpo and propagate memory state to
+  // compute memory state at regions, creating new phis if needed.
+  Node_List rpo_list;
+  visited.Clear();
+  _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
+  Node* root = rpo_list.pop();
+  assert(root == _phase->C->root(), "");
+
+  const bool trace = false;
+#ifdef ASSERT
+  if (trace) {
+    for (int i = rpo_list.size() - 1; i >= 0; i--) {
+      Node* c = rpo_list.at(i);
+      if (_memory_nodes[c->_idx] != NULL) {
+        tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
+      }
+    }
+  }
+#endif
+  uint last = _phase->C->unique();
+
+#ifdef ASSERT
+  uint8_t max_depth = 0;
+  for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
+    IdealLoopTree* lpt = iter.current();
+    max_depth = MAX2(max_depth, lpt->_nest);
+  }
+#endif
+
+  bool progress = true;
+  int iteration = 0;
+  Node_List dead_phis;
+  while (progress) {
+    progress = false;
+    iteration++;
+    assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop(), "");
+    if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
+    IdealLoopTree* last_updated_ilt = NULL;
+    for (int i = rpo_list.size() - 1; i >= 0; i--) {
+      Node* c = rpo_list.at(i);
+
+      Node* prev_mem = _memory_nodes[c->_idx];
+      if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
+        Node* prev_region = regions[c->_idx];
+        Node* unique = NULL;
+        for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
+          Node* m = _memory_nodes[c->in(j)->_idx];
+          assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
+          if (m != NULL) {
+            if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
+              assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
+              // continue
+            } else if (unique == NULL) {
+              unique = m;
+            } else if (m == unique) {
+              // continue
+            } else {
+              unique = NodeSentinel;
+            }
+          }
+        }
+        assert(unique != NULL, "empty phi???");
+        if (unique != NodeSentinel) {
+          if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
+            dead_phis.push(prev_region);
+          }
+          regions.map(c->_idx, unique);
+        } else {
+          Node* phi = NULL;
+          if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
+            phi = prev_region;
+            for (uint k = 1; k < c->req(); k++) {
+              Node* m = _memory_nodes[c->in(k)->_idx];
+              assert(m != NULL, "expect memory state");
+              phi->set_req(k, m);
+            }
+          } else {
+            for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
+              Node* u = c->fast_out(j);
+              if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
+                  (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
+                phi = u;
+                for (uint k = 1; k < c->req() && phi != NULL; k++) {
+                  Node* m = _memory_nodes[c->in(k)->_idx];
+                  assert(m != NULL, "expect memory state");
+                  if (u->in(k) != m) {
+                    phi = NULL;
+                  }
+                }
+              }
+            }
+            if (phi == NULL) {
+              phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
+              for (uint k = 1; k < c->req(); k++) {
+                Node* m = _memory_nodes[c->in(k)->_idx];
+                assert(m != NULL, "expect memory state");
+                phi->init_req(k, m);
+              }
+            }
+          }
+          assert(phi != NULL, "");
+          regions.map(c->_idx, phi);
+        }
+        Node* current_region = regions[c->_idx];
+        if (current_region != prev_region) {
+          progress = true;
+          if (prev_region == prev_mem) {
+            _memory_nodes.map(c->_idx, current_region);
+          }
+        }
+      } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
+        Node* m = _memory_nodes[_phase->idom(c)->_idx];
+        assert(m != NULL, "expect memory state");
+        if (m != prev_mem) {
+          _memory_nodes.map(c->_idx, m);
+          progress = true;
+        }
+      }
+#ifdef ASSERT
+      if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
+#endif
+    }
+  }
+
+  // Replace existing phi with computed memory state for that region
+  // if different (could be a new phi or a dominating memory node if
+  // that phi was found to be useless).
+  while (dead_phis.size() > 0) {
+    Node* n = dead_phis.pop();
+    n->replace_by(_phase->C->top());
+    n->destruct();
+  }
+  for (int i = rpo_list.size() - 1; i >= 0; i--) {
+    Node* c = rpo_list.at(i);
+    if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
+      Node* n = regions[c->_idx];
+      if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
+        _phase->register_new_node(n, c);
+      }
+    }
+  }
+  for (int i = rpo_list.size() - 1; i >= 0; i--) {
+    Node* c = rpo_list.at(i);
+    if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
+      Node* n = regions[c->_idx];
+      for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax; j++) {
+        Node* u = c->fast_out(j);
+        if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
+            u != n) {
+          if (u->adr_type() == TypePtr::BOTTOM) {
+            fix_memory_uses(u, n, n, c);
+          } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
+            _phase->lazy_replace(u, n);
+            --j; --jmax;
+          }
+        }
+      }
+    }
+  }
+}
+
+Node* MemoryGraphFixer::get_ctrl(Node* n) const {
+  Node* c = _phase->get_ctrl(n);
+  if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
+    assert(c == n->in(0), "");
+    CallNode* call = c->as_Call();
+    CallProjections projs;
+    call->extract_projections(&projs, true, false);
+    if (projs.catchall_memproj != NULL) {
+      if (projs.fallthrough_memproj == n) {
+        c = projs.fallthrough_catchproj;
+      } else {
+        assert(projs.catchall_memproj == n, "");
+        c = projs.catchall_catchproj;
+      }
+    }
+  }
+  return c;
+}
+
+Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
+  if (_phase->has_ctrl(n)) {
+    return get_ctrl(n);
+  } else {
+    assert(n->is_CFG(), "must be a CFG node");
+    return n;
+  }
+}
+
+bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
+  return m != NULL && get_ctrl(m) == c;
+}
+
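+// Find the memory state of this fixer's alias that is valid at ctrl:
+// walk up the dominator tree to the closest recorded memory node, then
+// skip past memory nodes at the same control that n does not depend on.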
+Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
+  assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
+  Node* mem = _memory_nodes[ctrl->_idx];
+  Node* c = ctrl;
+  while (!mem_is_valid(mem, c) &&
+         (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
+    c = _phase->idom(c);
+    mem = _memory_nodes[c->_idx];
+  }
+  if (n != NULL && mem_is_valid(mem, c)) {
+    while (!ShenandoahWriteBarrierNode::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
+      mem = next_mem(mem, _alias);
+    }
+    if (mem->is_MergeMem()) {
+      mem = mem->as_MergeMem()->memory_at(_alias);
+    }
+    if (!mem_is_valid(mem, c)) {
+      do {
+        c = _phase->idom(c);
+        mem = _memory_nodes[c->_idx];
+      } while (!mem_is_valid(mem, c) &&
+               (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
+    }
+  }
+  assert(mem->bottom_type() == Type::MEMORY, "");
+  return mem;
+}
+
+bool MemoryGraphFixer::has_mem_phi(Node* region) const {
+  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
+    Node* use = region->fast_out(i);
+    if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
+        (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
+      return true;
+    }
+  }
+  return false;
+}
+
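+// Rewire the memory graph after new_mem became the memory state at
+// new_ctrl: either splice new_mem into the chain between mem and
+// mem_for_ctrl, or propagate it along the CFG, creating memory phis at
+// the regions it reaches, and redirect the memory uses of mem that the
+// new state now dominates.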
+void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
+  assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
+  const bool trace = false;
+  DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
+  DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
+  GrowableArray<Node*> phis;
+  if (mem_for_ctrl != mem) {
+    Node* old = mem_for_ctrl;
+    Node* prev = NULL;
+    while (old != mem) {
+      prev = old;
+      if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
+        assert(_alias == Compile::AliasIdxRaw, "");
+        old = old->in(MemNode::Memory);
+      } else if (old->Opcode() == Op_SCMemProj) {
+        assert(_alias == Compile::AliasIdxRaw, "");
+        old = old->in(0);
+      } else if (old->Opcode() == Op_ShenandoahWBMemProj) {
+        assert(_alias != Compile::AliasIdxRaw, "");
+        old = old->in(ShenandoahWBMemProjNode::WriteBarrier);
+      } else if (old->Opcode() == Op_ShenandoahWriteBarrier) {
+        assert(_alias != Compile::AliasIdxRaw, "");
+        old = old->in(ShenandoahBarrierNode::Memory);
+      } else {
+        ShouldNotReachHere();
+      }
+    }
+    assert(prev != NULL, "");
+    if (new_ctrl != ctrl) {
+      _memory_nodes.map(ctrl->_idx, mem);
+      _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
+    }
+    uint input = prev->Opcode() == Op_ShenandoahWriteBarrier ? (uint)ShenandoahBarrierNode::Memory : (uint)MemNode::Memory;
+    _phase->igvn().replace_input_of(prev, input, new_mem);
+  } else {
+    uses.clear();
+    _memory_nodes.map(new_ctrl->_idx, new_mem);
+    uses.push(new_ctrl);
+    for (uint next = 0; next < uses.size(); next++) {
+      Node *n = uses.at(next);
+      assert(n->is_CFG(), "");
+      DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
+      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+        Node* u = n->fast_out(i);
+        if (!u->is_Root() && u->is_CFG() && u != n) {
+          Node* m = _memory_nodes[u->_idx];
+          if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
+              !has_mem_phi(u) &&
+              u->unique_ctrl_out()->Opcode() != Op_Halt) {
+            DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
+            DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
+
+            if (!mem_is_valid(m, u) || !m->is_Phi()) {
+              bool push = true;
+              bool create_phi = true;
+              if (_phase->is_dominator(new_ctrl, u)) {
+                create_phi = false;
+              } else if (!_phase->C->has_irreducible_loop()) {
+                IdealLoopTree* loop = _phase->get_loop(ctrl);
+                bool do_check = true;
+                IdealLoopTree* l = loop;
+                create_phi = false;
+                while (l != _phase->ltree_root()) {
+                  if (_phase->is_dominator(l->_head, u) && _phase->is_dominator(_phase->idom(u), l->_head)) {
+                    create_phi = true;
+                    do_check = false;
+                    break;
+                  }
+                  l = l->_parent;
+                }
+
+                if (do_check) {
+                  assert(!create_phi, "");
+                  IdealLoopTree* u_loop = _phase->get_loop(u);
+                  if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
+                    Node* c = ctrl;
+                    while (!_phase->is_dominator(c, u_loop->tail())) {
+                      c = _phase->idom(c);
+                    }
+                    if (!_phase->is_dominator(c, u)) {
+                      do_check = false;
+                    }
+                  }
+                }
+
+                if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
+                  create_phi = true;
+                }
+              }
+              if (create_phi) {
+                Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
+                _phase->register_new_node(phi, u);
+                phis.push(phi);
+                DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
+                if (!mem_is_valid(m, u)) {
+                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
+                  _memory_nodes.map(u->_idx, phi);
+                } else {
+                  DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
+                  for (;;) {
+                    assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj() || m->Opcode() == Op_ShenandoahWriteBarrier || m->Opcode() == Op_ShenandoahWBMemProj, "");
+                    Node* next = NULL;
+                    if (m->is_Proj()) {
+                      next = m->in(0);
+                    } else if (m->Opcode() == Op_ShenandoahWBMemProj) {
+                      next = m->in(ShenandoahWBMemProjNode::WriteBarrier);
+                    } else if (m->is_Mem() || m->is_LoadStore()) {
+                      assert(_alias == Compile::AliasIdxRaw, "");
+                      next = m->in(MemNode::Memory);
+                    } else {
+                      assert(_alias != Compile::AliasIdxRaw, "");
+                      assert (m->Opcode() == Op_ShenandoahWriteBarrier, "");
+                      next = m->in(ShenandoahBarrierNode::Memory);
+                    }
+                    if (_phase->get_ctrl(next) != u) {
+                      break;
+                    }
+                    if (next->is_MergeMem()) {
+                      assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
+                      break;
+                    }
+                    if (next->is_Phi()) {
+                      assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
+                      break;
+                    }
+                    m = next;
+                  }
+
+                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
+                  assert(m->is_Mem() || m->is_LoadStore() || m->Opcode() == Op_ShenandoahWriteBarrier, "");
+                  uint input = (m->is_Mem() || m->is_LoadStore()) ? (uint)MemNode::Memory : (uint)ShenandoahBarrierNode::Memory;
+                  _phase->igvn().replace_input_of(m, input, phi);
+                  push = false;
+                }
+              } else {
+                DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
+              }
+              if (push) {
+                uses.push(u);
+              }
+            }
+          } else if (!mem_is_valid(m, u) &&
+                     !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
+            uses.push(u);
+          }
+        }
+      }
+    }
+    for (int i = 0; i < phis.length(); i++) {
+      Node* n = phis.at(i);
+      Node* r = n->in(0);
+      DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
+      for (uint j = 1; j < n->req(); j++) {
+        Node* m = find_mem(r->in(j), NULL);
+        _phase->igvn().replace_input_of(n, j, m);
+        DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
+      }
+    }
+  }
+  uint last = _phase->C->unique();
+  MergeMemNode* mm = NULL;
+  int alias = _alias;
+  DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
+  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
+    Node* u = mem->out(i);
+    if (u->_idx < last) {
+      if (u->is_Mem()) {
+        if (_phase->C->get_alias_index(u->adr_type()) == alias) {
+          Node* m = find_mem(_phase->get_ctrl(u), u);
+          if (m != mem) {
+            DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
+            _phase->igvn().replace_input_of(u, MemNode::Memory, m);
+            --i;
+          }
+        }
+      } else if (u->is_MergeMem()) {
+        MergeMemNode* u_mm = u->as_MergeMem();
+        if (u_mm->memory_at(alias) == mem) {
+          MergeMemNode* newmm = NULL;
+          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
+            Node* uu = u->fast_out(j);
+            assert(!uu->is_MergeMem(), "chain of MergeMems?");
+            if (uu->is_Phi()) {
+              assert(uu->adr_type() == TypePtr::BOTTOM, "");
+              Node* region = uu->in(0);
+              int nb = 0;
+              for (uint k = 1; k < uu->req(); k++) {
+                if (uu->in(k) == u) {
+                  Node* m = find_mem(region->in(k), NULL);
+                  if (m != mem) {
+                    DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
+                    newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
+                    if (newmm != u) {
+                      _phase->igvn().replace_input_of(uu, k, newmm);
+                      nb++;
+                      --jmax;
+                    }
+                  }
+                }
+              }
+              if (nb > 0) {
+                --j;
+              }
+            } else {
+              Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
+              if (m != mem) {
+                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
+                newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
+                if (newmm != u) {
+                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
+                  --j, --jmax;
+                }
+              }
+            }
+          }
+        }
+      } else if (u->is_Phi()) {
+        assert(u->bottom_type() == Type::MEMORY, "what else?");
+        if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
+          Node* region = u->in(0);
+          bool replaced = false;
+          for (uint j = 1; j < u->req(); j++) {
+            if (u->in(j) == mem) {
+              Node* m = find_mem(region->in(j), NULL);
+              Node* nnew = m;
+              if (m != mem) {
+                if (u->adr_type() == TypePtr::BOTTOM) {
+                  mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
+                  nnew = mm;
+                }
+                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
+                _phase->igvn().replace_input_of(u, j, nnew);
+                replaced = true;
+              }
+            }
+          }
+          if (replaced) {
+            --i;
+          }
+        }
+      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
+                 u->adr_type() == NULL) {
+        assert(u->adr_type() != NULL ||
+               u->Opcode() == Op_Rethrow ||
+               u->Opcode() == Op_Return ||
+               u->Opcode() == Op_SafePoint ||
+               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
+               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
+               u->Opcode() == Op_CallLeaf, "");
+        Node* m = find_mem(_phase->ctrl_or_self(u), u);
+        if (m != mem) {
+          mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
+          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
+          --i;
+        }
+      } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
+        Node* m = find_mem(_phase->ctrl_or_self(u), u);
+        if (m != mem) {
+          DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
+          _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
+          --i;
+        }
+      } else if (u->adr_type() != TypePtr::BOTTOM &&
+                 _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
+        Node* m = find_mem(_phase->ctrl_or_self(u), u);
+        assert(m != mem, "");
+        // u is on the wrong slice...
+        assert(u->is_ClearArray(), "");
+        DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
+        _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
+        --i;
+      }
+    }
+  }
+#ifdef ASSERT
+  assert(new_mem->outcnt() > 0, "");
+  for (int i = 0; i < phis.length(); i++) {
+    Node* n = phis.at(i);
+    assert(n->outcnt() > 0, "new phi must have uses now");
+  }
+#endif
+}
+
+MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
+  MergeMemNode* mm = MergeMemNode::make(mem);
+  mm->set_memory_at(_alias, rep_proj);
+  _phase->register_new_node(mm, rep_ctrl);
+  return mm;
+}
+
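+// Make a MergeMem that carries rep_proj on this fixer's alias available
+// to some of u's users: update u in place if it has a single user,
+// otherwise build a new copy of it.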
+MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
+  MergeMemNode* newmm = NULL;
+  MergeMemNode* u_mm = u->as_MergeMem();
+  Node* c = _phase->get_ctrl(u);
+  if (_phase->is_dominator(c, rep_ctrl)) {
+    c = rep_ctrl;
+  } else {
+    assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
+  }
+  if (u->outcnt() == 1) {
+    if (u->req() > (uint)_alias && u->in(_alias) == mem) {
+      _phase->igvn().replace_input_of(u, _alias, rep_proj);
+      --i;
+    } else {
+      _phase->igvn().rehash_node_delayed(u);
+      u_mm->set_memory_at(_alias, rep_proj);
+    }
+    newmm = u_mm;
+    _phase->set_ctrl_and_loop(u, c);
+  } else {
+    // We can't simply clone u and then change one of its inputs,
+    // because that adds and then removes an edge, which messes with
+    // the DUIterator.
+    newmm = MergeMemNode::make(u_mm->base_memory());
+    for (uint j = 0; j < u->req(); j++) {
+      if (j < newmm->req()) {
+        if (j == (uint)_alias) {
+          newmm->set_req(j, rep_proj);
+        } else if (newmm->in(j) != u->in(j)) {
+          newmm->set_req(j, u->in(j));
+        }
+      } else if (j == (uint)_alias) {
+        newmm->add_req(rep_proj);
+      } else {
+        newmm->add_req(u->in(j));
+      }
+    }
+    if ((uint)_alias >= u->req()) {
+      newmm->set_memory_at(_alias, rep_proj);
+    }
+    _phase->register_new_node(newmm, c);
+  }
+  return newmm;
+}
+
+bool MemoryGraphFixer::should_process_phi(Node* phi) const {
+  if (phi->adr_type() == TypePtr::BOTTOM) {
+    Node* region = phi->in(0);
+    for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
+      Node* uu = region->fast_out(j);
+      if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
+        return false;
+      }
+    }
+    return true;
+  }
+  return _phase->C->get_alias_index(phi->adr_type()) == _alias;
+}
+
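+// Redirect memory uses of mem to rep_proj (wrapping it in a MergeMem
+// for wide-memory users such as calls and bottom-memory phis) for every
+// use that rep_ctrl dominates.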
+void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
+  uint last = _phase->C->unique();
+  MergeMemNode* mm = NULL;
+  assert(mem->bottom_type() == Type::MEMORY, "");
+  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
+    Node* u = mem->out(i);
+    if (u != replacement && u->_idx < last) {
+      if (u->is_ShenandoahBarrier() && _alias != Compile::AliasIdxRaw) {
+        if (_phase->C->get_alias_index(u->adr_type()) == _alias && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
+          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
+          assert(u->find_edge(mem) == -1, "only one edge");
+          --i;
+        }
+      } else if (u->is_Mem()) {
+        if (_phase->C->get_alias_index(u->adr_type()) == _alias && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
+          assert(_alias == Compile::AliasIdxRaw, "only raw memory can lead to a memory operation");
+          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
+          assert(u->find_edge(mem) == -1, "only one edge");
+          --i;
+        }
+      } else if (u->is_MergeMem()) {
+        MergeMemNode* u_mm = u->as_MergeMem();
+        if (u_mm->memory_at(_alias) == mem) {
+          MergeMemNode* newmm = NULL;
+          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
+            Node* uu = u->fast_out(j);
+            assert(!uu->is_MergeMem(), "chain of MergeMems?");
+            if (uu->is_Phi()) {
+              if (should_process_phi(uu)) {
+                Node* region = uu->in(0);
+                int nb = 0;
+                for (uint k = 1; k < uu->req(); k++) {
+                  if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
+                    if (newmm == NULL) {
+                      newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
+                    }
+                    if (newmm != u) {
+                      _phase->igvn().replace_input_of(uu, k, newmm);
+                      nb++;
+                      --jmax;
+                    }
+                  }
+                }
+                if (nb > 0) {
+                  --j;
+                }
+              }
+            } else {
+              if (rep_ctrl != uu && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
+                if (newmm == NULL) {
+                  newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
+                }
+                if (newmm != u) {
+                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
+                  --j, --jmax;
+                }
+              }
+            }
+          }
+        }
+      } else if (u->is_Phi()) {
+        assert(u->bottom_type() == Type::MEMORY, "what else?");
+        Node* region = u->in(0);
+        if (should_process_phi(u)) {
+          bool replaced = false;
+          for (uint j = 1; j < u->req(); j++) {
+            if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
+              Node* nnew = rep_proj;
+              if (u->adr_type() == TypePtr::BOTTOM) {
+                if (mm == NULL) {
+                  mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
+                }
+                nnew = mm;
+              }
+              _phase->igvn().replace_input_of(u, j, nnew);
+              replaced = true;
+            }
+          }
+          if (replaced) {
+            --i;
+          }
+        }
+      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
+                 u->adr_type() == NULL) {
+        assert(u->adr_type() != NULL ||
+               u->Opcode() == Op_Rethrow ||
+               u->Opcode() == Op_Return ||
+               u->Opcode() == Op_SafePoint ||
+               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
+               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
+               u->Opcode() == Op_CallLeaf, "");
+        if (ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
+          if (mm == NULL) {
+            mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
+          }
+          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
+          --i;
+        }
+      } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
+        if (ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
+          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
+          --i;
+        }
+      }
+    }
+  }
+}
+
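+// A WB memory projection is about to be removed: if it is the memory
+// state recorded for its control, fall back to the write barrier's
+// input memory state.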
+void MemoryGraphFixer::remove(Node* n) {
+  assert(n->Opcode() == Op_ShenandoahWBMemProj, "");
+  Node* c = _phase->get_ctrl(n);
+  Node* mem = find_mem(c, NULL);
+  if (mem == n) {
+    _memory_nodes.map(c->_idx, mem->in(ShenandoahWBMemProjNode::WriteBarrier)->in(ShenandoahBarrierNode::Memory));
+  }
+}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_C2_SHENANDOAH_SUPPORT_HPP
+#define SHARE_VM_GC_SHENANDOAH_C2_SHENANDOAH_SUPPORT_HPP
+
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "memory/allocation.hpp"
+#include "opto/addnode.hpp"
+#include "opto/graphKit.hpp"
+#include "opto/machnode.hpp"
+#include "opto/memnode.hpp"
+#include "opto/multnode.hpp"
+#include "opto/node.hpp"
+
+class PhaseGVN;
+class MemoryGraphFixer;
+
+class ShenandoahBarrierNode : public TypeNode {
+private:
+  bool _allow_fromspace;
+
+#ifdef ASSERT
+  enum verify_type {
+    ShenandoahLoad,
+    ShenandoahStore,
+    ShenandoahValue,
+    ShenandoahOopStore,
+    ShenandoahNone,
+  };
+
+  static bool verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used);
+#endif
+
+public:
+  enum { Control,
+         Memory,
+         ValueIn
+  };
+
+  ShenandoahBarrierNode(Node* ctrl, Node* mem, Node* obj, bool allow_fromspace)
+    : TypeNode(obj->bottom_type()->isa_oopptr() ? obj->bottom_type()->is_oopptr()->cast_to_nonconst() : obj->bottom_type(), 3),
+      _allow_fromspace(allow_fromspace) {
+
+    init_req(Control, ctrl);
+    init_req(Memory, mem);
+    init_req(ValueIn, obj);
+
+    init_class_id(Class_ShenandoahBarrier);
+  }
+
+  static Node* skip_through_barrier(Node* n);
+
+  static const TypeOopPtr* brooks_pointer_type(const Type* t) {
+    return t->is_oopptr()->cast_to_nonconst()->add_offset(ShenandoahBrooksPointer::byte_offset())->is_oopptr();
+  }
+
+  virtual const TypePtr* adr_type() const {
+    if (bottom_type() == Type::TOP) {
+      return NULL;
+    }
+    const TypePtr* adr_type = brooks_pointer_type(bottom_type());
+    assert(adr_type->offset() == ShenandoahBrooksPointer::byte_offset(), "sane offset");
+    assert(Compile::current()->alias_type(adr_type)->is_rewritable(), "brooks ptr must be rewritable");
+    return adr_type;
+  }
+
+  virtual uint ideal_reg() const { return Op_RegP; }
+  virtual uint match_edge(uint idx) const {
+    return idx >= ValueIn;
+  }
+
+  Node* Identity_impl(PhaseGVN* phase);
+
+  virtual const Type* Value(PhaseGVN* phase) const;
+  virtual bool depends_only_on_test() const {
+    return true;
+  }
+
+  static bool needs_barrier(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace);
+
+#ifdef ASSERT
+  static void report_verify_failure(const char* msg, Node* n1 = NULL, Node* n2 = NULL);
+  static void verify(RootNode* root);
+  static void verify_raw_mem(RootNode* root);
+#endif
+#ifndef PRODUCT
+  virtual void dump_spec(outputStream *st) const;
+#endif
+
+  static Node* dom_mem(Node* mem, Node*& mem_ctrl, Node* n, Node* rep_ctrl, int alias, PhaseIdealLoop* phase);
+  static Node* dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase);
+  static bool is_dominator(Node *d_c, Node *n_c, Node* d, Node* n, PhaseIdealLoop* phase);
+  static bool is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase);
+  static Node* no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase);
+  static bool build_loop_late_post(PhaseIdealLoop* phase, Node* n);
+  bool sink_node(PhaseIdealLoop* phase, Node* ctrl, Node* n_ctrl);
+
+protected:
+  uint hash() const;
+  uint cmp(const Node& n) const;
+  uint size_of() const;
+
+private:
+  static bool needs_barrier_impl(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace, Unique_Node_List &visited);
+
+  static bool dominates_memory(PhaseGVN* phase, Node* b1, Node* b2, bool linear);
+  static bool dominates_memory_impl(PhaseGVN* phase, Node* b1, Node* b2, Node* current, bool linear);
+};
+
+class ShenandoahReadBarrierNode : public ShenandoahBarrierNode {
+public:
+  ShenandoahReadBarrierNode(Node* ctrl, Node* mem, Node* obj)
+    : ShenandoahBarrierNode(ctrl, mem, obj, true) {
+    assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier ||
+                               ShenandoahWriteBarrier || ShenandoahAcmpBarrier),
+           "should be enabled");
+  }
+  ShenandoahReadBarrierNode(Node* ctrl, Node* mem, Node* obj, bool allow_fromspace)
+    : ShenandoahBarrierNode(ctrl, mem, obj, allow_fromspace) {
+    assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier ||
+                               ShenandoahWriteBarrier || ShenandoahAcmpBarrier),
+           "should be enabled");
+  }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual Node* Identity(PhaseGVN* phase);
+  virtual int Opcode() const;
+
+  bool is_independent(Node* mem);
+
+  void try_move(PhaseIdealLoop* phase);
+
+private:
+  static bool is_independent(const Type* in_type, const Type* this_type);
+  static bool dominates_memory_rb(PhaseGVN* phase, Node* b1, Node* b2, bool linear);
+  static bool dominates_memory_rb_impl(PhaseGVN* phase, Node* b1, Node* b2, Node* current, bool linear);
+};
+
+class ShenandoahWriteBarrierNode : public ShenandoahBarrierNode {
+public:
+  ShenandoahWriteBarrierNode(Compile* C, Node* ctrl, Node* mem, Node* obj);
+
+  virtual int Opcode() const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual Node* Identity(PhaseGVN* phase);
+  virtual bool depends_only_on_test() const { return false; }
+
+  static bool expand(Compile* C, PhaseIterGVN& igvn);
+  static bool is_gc_state_load(Node *n);
+  static bool is_heap_state_test(Node* iff, int mask);
+  static bool is_heap_stable_test(Node* iff);
+  static bool try_common_gc_state_load(Node *n, PhaseIdealLoop *phase);
+  static bool has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase);
+
+  static LoopNode* try_move_before_pre_loop(Node* c, Node* val_ctrl, PhaseIdealLoop* phase);
+  static Node* move_above_predicates(LoopNode* cl, Node* val_ctrl, PhaseIdealLoop* phase);
+#ifdef ASSERT
+  static bool memory_dominates_all_paths(Node* mem, Node* rep_ctrl, int alias, PhaseIdealLoop* phase);
+  static void memory_dominates_all_paths_helper(Node* c, Node* rep_ctrl, Unique_Node_List& controls, PhaseIdealLoop* phase);
+#endif
+  void try_move_before_loop(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses);
+  void try_move_before_loop_helper(LoopNode* cl, Node* val_ctrl, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses);
+  static void pin_and_expand(PhaseIdealLoop* phase);
+  CallStaticJavaNode* pin_and_expand_null_check(PhaseIterGVN& igvn);
+  void pin_and_expand_move_barrier(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, Unique_Node_List& uses);
+  void pin_and_expand_helper(PhaseIdealLoop* phase);
+  static Node* find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase);
+  static void follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase);
+  static void test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase);
+
+  static void test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
+                               PhaseIdealLoop* phase);
+  static void call_wb_stub(Node*& ctrl, Node*& val, Node*& result_mem,
+                           Node* raw_mem, Node* wb_mem, int alias,
+                           PhaseIdealLoop* phase);
+  static Node* clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase);
+  static void fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, Unique_Node_List& uses,
+                             PhaseIdealLoop* phase);
+  static void in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
+  static void move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase);
+
+  static void optimize_after_expansion(VectorSet &visited, Node_Stack &nstack, Node_List &old_new, PhaseIdealLoop* phase);
+  static void merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase);
+  static bool identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase);
+  static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase);
+
+  static void optimize_before_expansion(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*> memory_graph_fixers, bool include_lsm);
+  Node* would_subsume(ShenandoahBarrierNode* other, PhaseIdealLoop* phase);
+  static IfNode* find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase);
+
+  Node* try_split_thru_phi(PhaseIdealLoop* phase);
+};
+
+class ShenandoahWBMemProjNode : public Node {
+public:
+  enum { Control,
+         WriteBarrier };
+
+  ShenandoahWBMemProjNode(Node *src) : Node(NULL, src) {
+    assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
+    assert(src->Opcode() == Op_ShenandoahWriteBarrier || src->is_Mach(), "expect wb");
+  }
+  virtual Node* Identity(PhaseGVN* phase);
+
+  virtual int Opcode() const;
+  virtual bool is_CFG() const { return false; }
+  virtual const Type *bottom_type() const { return Type::MEMORY; }
+  virtual const TypePtr *adr_type() const {
+    Node* wb = in(WriteBarrier);
+    if (wb == NULL || wb->is_top())  return NULL; // node is dead
+    assert(wb->Opcode() == Op_ShenandoahWriteBarrier || (wb->is_Mach() && wb->as_Mach()->ideal_Opcode() == Op_ShenandoahWriteBarrier) || wb->is_Phi(), "expect wb");
+    return ShenandoahBarrierNode::brooks_pointer_type(wb->bottom_type());
+  }
+
+  virtual uint ideal_reg() const { return 0; } // memory projections don't have a register
+  virtual const Type *Value(PhaseGVN* phase) const {
+    return bottom_type();
+  }
+#ifndef PRODUCT
+  virtual void dump_spec(outputStream *st) const {}
+#endif
+};
+
+class ShenandoahEnqueueBarrierNode : public Node {
+public:
+  ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
+  }
+
+  const Type *bottom_type() const;
+  const Type* Value(PhaseGVN* phase) const;
+  Node* Identity(PhaseGVN* phase);
+
+  int Opcode() const;
+
+private:
+  enum { Needed, NotNeeded, MaybeNeeded };
+
+  static int needed(Node* n);
+  static Node* next(Node* n);
+};
+
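+// Editorial note: MemoryGraphFixer repairs the C2 memory graph for a single
+// alias class after barriers are moved or expanded. It records the last memory
+// state per control node (collect_memory_nodes) and rewires the consumers
+// (MergeMems, memory Phis, raw-memory users) to the correct state through
+// fix_mem()/fix_memory_uses(). Summary inferred from the declarations below.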
+class MemoryGraphFixer : public ResourceObj {
+private:
+  Node_List _memory_nodes;
+  int _alias;
+  PhaseIdealLoop* _phase;
+  bool _include_lsm;
+
+  void collect_memory_nodes();
+  Node* get_ctrl(Node* n) const;
+  Node* ctrl_or_self(Node* n) const;
+  bool mem_is_valid(Node* m, Node* c) const;
+  MergeMemNode* allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const;
+  MergeMemNode* clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const;
+  void fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const;
+  bool should_process_phi(Node* phi) const;
+  bool has_mem_phi(Node* region) const;
+
+public:
+  MemoryGraphFixer(int alias, bool include_lsm, PhaseIdealLoop* phase) :
+    _alias(alias), _phase(phase), _include_lsm(include_lsm) {
+    assert(_alias != Compile::AliasIdxBot, "unsupported");
+    collect_memory_nodes();
+  }
+
+  Node* find_mem(Node* ctrl, Node* n) const;
+  void fix_mem(Node* ctrl, Node* region, Node* mem, Node* mem_for_ctrl, Node* mem_phi, Unique_Node_List& uses);
+  int alias() const { return _alias; }
+  void remove(Node* n);
+};
+
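+// Editorial note: the Shenandoah* flavors of the CAS/CompareAndExchange nodes
+// exist because, with concurrent evacuation, the expected value may reference a
+// from-space copy and needs barrier-aware matching. Each Ideal() below degrades
+// the node back to its plain variant once the expected value is statically NULL,
+// since NULL has no from-space copy. Inferred from the transforms that follow.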
+class ShenandoahCompareAndSwapPNode : public CompareAndSwapPNode {
+public:
+  ShenandoahCompareAndSwapPNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord)
+    : CompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
+      return new CompareAndSwapPNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
+    }
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+class ShenandoahCompareAndSwapNNode : public CompareAndSwapNNode {
+public:
+  ShenandoahCompareAndSwapNNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord)
+    : CompareAndSwapNNode(c, mem, adr, val, ex, mem_ord) { }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
+      return new CompareAndSwapNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
+    }
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+class ShenandoahWeakCompareAndSwapPNode : public WeakCompareAndSwapPNode {
+public:
+  ShenandoahWeakCompareAndSwapPNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord)
+    : WeakCompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
+      return new WeakCompareAndSwapPNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
+    }
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+class ShenandoahWeakCompareAndSwapNNode : public WeakCompareAndSwapNNode {
+public:
+  ShenandoahWeakCompareAndSwapNNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord)
+    : WeakCompareAndSwapNNode(c, mem, adr, val, ex, mem_ord) { }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
+      return new WeakCompareAndSwapNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
+    }
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+class ShenandoahCompareAndExchangePNode : public CompareAndExchangePNode {
+public:
+  ShenandoahCompareAndExchangePNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord)
+    : CompareAndExchangePNode(c, mem, adr, val, ex, at, t, mem_ord) { }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
+      return new CompareAndExchangePNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), adr_type(), bottom_type(), order());
+    }
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+class ShenandoahCompareAndExchangeNNode : public CompareAndExchangeNNode {
+public:
+  ShenandoahCompareAndExchangeNNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord)
+    : CompareAndExchangeNNode(c, mem, adr, val, ex, at, t, mem_ord) { }
+
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
+      return new CompareAndExchangeNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), adr_type(), bottom_type(), order());
+    }
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_C2_SHENANDOAH_SUPPORT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
+#include "utilities/quickSort.hpp"
+
+ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics() :
+  ShenandoahHeuristics(),
+  _cycle_gap_history(new TruncatedSeq(5)),
+  _conc_mark_duration_history(new TruncatedSeq(5)),
+  _conc_uprefs_duration_history(new TruncatedSeq(5)) {
+
+  SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent);
+  SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
+
+  // Final configuration checks
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
+}
+
+ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {}
+
+void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                                         RegionData* data, size_t size,
+                                                                         size_t actual_free) {
+  size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
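+  // Editorial example (hypothetical values): with 2M regions and
+  // ShenandoahGarbageThreshold = 60, garbage_threshold = 2M * 60 / 100 = ~1.2M,
+  // i.e. a region qualifies once more than ~1.2M of it is garbage.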
+
+  // The logic for cset selection in adaptive is as follows:
+  //
+  //   1. We cannot get a CSet larger than the available free space. Otherwise we guarantee OOME
+  //      during evacuation, and thus guarantee full GC. In practice, we also want to let the
+  //      application allocate something. This is why we limit the CSet to some fraction of the
+  //      available space. In a non-overloaded heap, max_cset would contain all plausible candidates
+  //      over the garbage threshold.
+  //
+  //   2. We should not get the CSet too small, or the free threshold would be crossed again right
+  //      after the cycle, and we would get back-to-back cycles for no reason if the heap is
+  //      too fragmented. In a non-overloaded, non-fragmented heap min_garbage would be around zero.
+  //
+  // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates
+  // until we meet min_garbage. Then we add all candidates that fit the garbage threshold, until
+  // we hit max_cset, at which point cset selection terminates. Note that in this scheme,
+  // ShenandoahGarbageThreshold is a soft threshold which is ignored until min_garbage is met.
+
+  size_t capacity    = ShenandoahHeap::heap()->capacity();
+  size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
+  size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
+  size_t max_cset    = (size_t)(1.0 * ShenandoahEvacReserve * capacity / 100 / ShenandoahEvacWaste);
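+  // Editorial example (hypothetical values): capacity = 1024M,
+  // ShenandoahMinFreeThreshold = 10, ShenandoahEvacReserve = 5, ShenandoahEvacWaste = 1.2:
+  //   free_target = 10 * 1024M / 100       = ~102M
+  //   min_garbage = 102M - actual_free     (0 if actual_free >= 102M)
+  //   max_cset    = 5 * 1024M / 100 / 1.2  = ~43M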
+
+  log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: "
+                     SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M, Min Garbage: " SIZE_FORMAT "M",
+                     free_target / M, actual_free / M, max_cset / M, min_garbage / M);
+
+  // Prefer to select garbage-first regions
+  QuickSort::sort<RegionData>(data, (int)size, compare_by_garbage, false);
+
+  size_t cur_cset = 0;
+  size_t cur_garbage = 0;
+  _bytes_in_cset = 0;
+
+  for (size_t idx = 0; idx < size; idx++) {
+    ShenandoahHeapRegion* r = data[idx]._region;
+
+    size_t new_cset    = cur_cset + r->get_live_data_bytes();
+    size_t new_garbage = cur_garbage + r->garbage();
+
+    if (new_cset > max_cset) {
+      break;
+    }
+
+    if ((new_garbage < min_garbage) || (r->garbage() > garbage_threshold)) {
+      cset->add_region(r);
+      _bytes_in_cset += r->used();
+      cur_cset = new_cset;
+      cur_garbage = new_garbage;
+    }
+  }
+}
+
+void ShenandoahAdaptiveHeuristics::record_cycle_start() {
+  ShenandoahHeuristics::record_cycle_start();
+  double last_cycle_gap = (_cycle_start - _last_cycle_end);
+  _cycle_gap_history->add(last_cycle_gap);
+}
+
+void ShenandoahAdaptiveHeuristics::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
+  if (phase == ShenandoahPhaseTimings::conc_mark) {
+    _conc_mark_duration_history->add(secs);
+  } else if (phase == ShenandoahPhaseTimings::conc_update_refs) {
+    _conc_uprefs_duration_history->add(secs);
+  } // Else ignore
+}
+
+bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  size_t capacity = heap->capacity();
+  size_t available = heap->free_set()->available();
+
+  // Check if we are falling below the worst-case limit; if so, trigger the GC regardless of
+  // anything else.
+  size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
+  if (available < min_threshold) {
+    log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
+                 available / M, min_threshold / M);
+    return true;
+  }
+
+  // Check if we still need to learn a bit about the application
+  const size_t max_learn = ShenandoahLearningSteps;
+  if (_gc_times_learned < max_learn) {
+    size_t init_threshold = ShenandoahInitFreeThreshold * heap->capacity() / 100;
+    if (available < init_threshold) {
+      log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
+                   _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
+      return true;
+    }
+  }
+
+  // Check if allocation headroom is still okay. This also factors in:
+  //   1. Some space to absorb allocation spikes
+  //   2. Accumulated penalties from Degenerated and Full GC
+
+  size_t allocation_headroom = available;
+
+  size_t spike_headroom = ShenandoahAllocSpikeFactor * capacity / 100;
+  size_t penalties      = _gc_time_penalties         * capacity / 100;
+
+  allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
+  allocation_headroom -= MIN2(allocation_headroom, penalties);
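+  // Editorial example (hypothetical values): available = 200M, capacity = 1024M,
+  // ShenandoahAllocSpikeFactor = 5 => spike_headroom = ~51M; with zero penalties,
+  // allocation_headroom = 200M - 51M = 149M. The trigger below fires once the
+  // average GC time exceeds the time needed to allocate those 149M.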
+
+  // TODO: Allocation rate is way too averaged to be useful during state changes
+
+  double average_gc = _gc_time_history->avg();
+  double time_since_last = time_since_last_gc();
+  double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last;
+
+  if (average_gc > allocation_headroom / allocation_rate) {
+    log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.2f MB/s) to deplete free headroom (" SIZE_FORMAT "M)",
+                 average_gc * 1000, allocation_rate / M, allocation_headroom / M);
+    log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "M (free) - " SIZE_FORMAT "M (spike) - " SIZE_FORMAT "M (penalties) = " SIZE_FORMAT "M",
+                       available / M, spike_headroom / M, penalties / M, allocation_headroom / M);
+    return true;
+  }
+
+  return ShenandoahHeuristics::should_start_normal_gc();
+}
+
+bool ShenandoahAdaptiveHeuristics::should_start_update_refs() {
+  if (!_update_refs_adaptive) {
+    return _update_refs_early;
+  }
+
+  double cycle_gap_avg = _cycle_gap_history->avg();
+  double conc_mark_avg = _conc_mark_duration_history->avg();
+  double conc_uprefs_avg = _conc_uprefs_duration_history->avg();
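+  // Editorial example (hypothetical averages): with cycle_gap_avg = 1.0s and
+  // ShenandoahMergeUpdateRefsMinGap = 25, the threshold is 0.25s; once
+  // conc_mark_avg + conc_uprefs_avg exceeds it, _update_refs_early flips to false.
+  // ShenandoahMergeUpdateRefsMaxGap governs the flip back, providing hysteresis.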
+
+  if (_update_refs_early) {
+    double threshold = ShenandoahMergeUpdateRefsMinGap / 100.0;
+    if (conc_mark_avg + conc_uprefs_avg > cycle_gap_avg * threshold) {
+      _update_refs_early = false;
+    }
+  } else {
+    double threshold = ShenandoahMergeUpdateRefsMaxGap / 100.0;
+    if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) {
+      _update_refs_early = true;
+    }
+  }
+  return _update_refs_early;
+}
+
+const char* ShenandoahAdaptiveHeuristics::name() {
+  return "adaptive";
+}
+
+bool ShenandoahAdaptiveHeuristics::is_diagnostic() {
+  return false;
+}
+
+bool ShenandoahAdaptiveHeuristics::is_experimental() {
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP
+#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP
+
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "utilities/numberSeq.hpp"
+
+class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
+private:
+  TruncatedSeq* _cycle_gap_history;
+  TruncatedSeq* _conc_mark_duration_history;
+  TruncatedSeq* _conc_uprefs_duration_history;
+
+public:
+  ShenandoahAdaptiveHeuristics();
+
+  virtual ~ShenandoahAdaptiveHeuristics();
+
+  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                     RegionData* data, size_t size,
+                                                     size_t actual_free);
+
+  void record_cycle_start();
+
+  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs);
+
+  virtual bool should_start_normal_gc() const;
+
+  virtual bool should_start_update_refs();
+
+  virtual const char* name();
+
+  virtual bool is_diagnostic();
+
+  virtual bool is_experimental();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
+#include "runtime/os.hpp"
+
+ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics() : ShenandoahHeuristics() {
+  // Do not shortcut evacuation
+  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold, 100);
+
+  // Aggressive runs with max speed for allocation, to capture races against the mutator
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahPacing);
+
+  // Aggressive evacuates everything, so it needs as much evac space as it can get
+  SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow);
+
+  // If class unloading is globally enabled, aggressive does unloading even with
+  // concurrent cycles.
+  if (ClassUnloading) {
+    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1);
+  }
+
+  // Final configuration checks
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
+}
+
+void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                                           RegionData* data, size_t size,
+                                                                           size_t free) {
+  for (size_t idx = 0; idx < size; idx++) {
+    ShenandoahHeapRegion* r = data[idx]._region;
+    if (r->garbage() > 0) {
+      cset->add_region(r);
+    }
+  }
+}
+
+bool ShenandoahAggressiveHeuristics::should_start_normal_gc() const {
+  log_info(gc)("Trigger: Start next cycle immediately");
+  return true;
+}
+
+bool ShenandoahAggressiveHeuristics::should_process_references() {
+  if (!can_process_references()) return false;
+  // Randomly process refs with 50% chance.
+  return (os::random() & 1) == 1;
+}
+
+bool ShenandoahAggressiveHeuristics::should_unload_classes() {
+  if (!can_unload_classes_normal()) return false;
+  if (has_metaspace_oom()) return true;
+  // Randomly unload classes with 50% chance.
+  return (os::random() & 1) == 1;
+}
+
+const char* ShenandoahAggressiveHeuristics::name() {
+  return "aggressive";
+}
+
+bool ShenandoahAggressiveHeuristics::is_diagnostic() {
+  return true;
+}
+
+bool ShenandoahAggressiveHeuristics::is_experimental() {
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP
+#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP
+
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+
+class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
+public:
+  ShenandoahAggressiveHeuristics();
+
+  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                     RegionData* data, size_t size,
+                                                     size_t free);
+
+  virtual bool should_start_normal_gc() const;
+
+  virtual bool should_process_references();
+
+  virtual bool should_unload_classes();
+
+  virtual const char* name();
+
+  virtual bool is_diagnostic();
+
+  virtual bool is_experimental();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
+
+ShenandoahCompactHeuristics::ShenandoahCompactHeuristics() : ShenandoahHeuristics() {
+  SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent);
+  SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
+  SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahUncommit);
+  SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahAlwaysClearSoftRefs);
+  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahAllocationThreshold,  10);
+  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold,   100);
+  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUncommitDelay,        1000);
+  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGuaranteedGCInterval, 30000);
+  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGarbageThreshold,     10);
+
+  // Final configuration checks
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
+}
+
+bool ShenandoahCompactHeuristics::should_start_normal_gc() const {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  size_t available = heap->free_set()->available();
+  size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
+  size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
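+  // Editorial example (hypothetical values): capacity = 1024M with
+  // ShenandoahAllocationThreshold = 10 and ShenandoahMinFreeThreshold = 10
+  // yields threshold_bytes_allocated = ~102M and min_threshold = ~102M.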
+
+  if (available < min_threshold) {
+    log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
+                 available / M, min_threshold / M);
+    return true;
+  }
+
+  if (available < threshold_bytes_allocated) {
+    log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is lower than allocated recently (" SIZE_FORMAT "M)",
+                 available / M, threshold_bytes_allocated / M);
+    return true;
+  }
+
+  size_t bytes_allocated = heap->bytes_allocated_since_gc_start();
+  if (bytes_allocated > threshold_bytes_allocated) {
+    log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "M) is larger than allocation threshold (" SIZE_FORMAT "M)",
+                 bytes_allocated / M, threshold_bytes_allocated / M);
+    return true;
+  }
+
+  return ShenandoahHeuristics::should_start_normal_gc();
+}
+
+void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                                        RegionData* data, size_t size,
+                                                                        size_t actual_free) {
+  // Do not select a CSet so large that it would overflow the available free space
+  size_t max_cset = actual_free * 3 / 4;
+
+  log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M",
+                     actual_free / M, max_cset / M);
+
+  size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
+
+  size_t live_cset = 0;
+  for (size_t idx = 0; idx < size; idx++) {
+    ShenandoahHeapRegion* r = data[idx]._region;
+    size_t new_cset = live_cset + r->get_live_data_bytes();
+    if (new_cset < max_cset && r->garbage() > threshold) {
+      live_cset = new_cset;
+      cset->add_region(r);
+    }
+  }
+}
+
+const char* ShenandoahCompactHeuristics::name() {
+  return "compact";
+}
+
+bool ShenandoahCompactHeuristics::is_diagnostic() {
+  return false;
+}
+
+bool ShenandoahCompactHeuristics::is_experimental() {
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP
+#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP
+
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+
+class ShenandoahCompactHeuristics : public ShenandoahHeuristics {
+public:
+  ShenandoahCompactHeuristics();
+
+  virtual bool should_start_normal_gc() const;
+
+  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                     RegionData* data, size_t size,
+                                                     size_t actual_free);
+
+  virtual const char* name();
+
+  virtual bool is_diagnostic();
+
+  virtual bool is_experimental();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
+
+ShenandoahPassiveHeuristics::ShenandoahPassiveHeuristics() : ShenandoahHeuristics() {
+  // Do not allow concurrent cycles.
+  FLAG_SET_DEFAULT(ExplicitGCInvokesConcurrent, false);
+  FLAG_SET_DEFAULT(ShenandoahImplicitGCInvokesConcurrent, false);
+
+  // Passive runs at max speed, and reacts to allocation failure.
+  FLAG_SET_DEFAULT(ShenandoahPacing, false);
+
+  // No need for evacuation reserve with Full GC, only for Degenerated GC.
+  if (!ShenandoahDegeneratedGC) {
+    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahEvacReserve, 0);
+  }
+
+  // Disable known barriers by default.
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahKeepAliveBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahWriteBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahReadBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValReadBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahAcmpBarrier);
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier);
+
+  // Final configuration checks
+  // No barriers are required to run.
+}
+
+bool ShenandoahPassiveHeuristics::should_start_normal_gc() const {
+  // Never do concurrent GCs.
+  return false;
+}
+
+bool ShenandoahPassiveHeuristics::should_process_references() {
+  // Always process references, if we can.
+  return can_process_references();
+}
+
+bool ShenandoahPassiveHeuristics::should_unload_classes() {
+  // Always unload classes, if we can.
+  return can_unload_classes();
+}
+
+bool ShenandoahPassiveHeuristics::should_degenerate_cycle() {
+  // Always fall back to Degenerated GC, if it is enabled
+  return ShenandoahDegeneratedGC;
+}
+
+void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                                        RegionData* data, size_t size,
+                                                                        size_t actual_free) {
+  assert(ShenandoahDegeneratedGC, "This path is only taken for Degenerated GC");
+
+  // Do not select a CSet so large that it would overflow the available free space.
+  // Take at least the entire evacuation reserve, and allow it to overflow into the free space.
+  size_t capacity  = ShenandoahHeap::heap()->capacity();
+  size_t available = MAX2(ShenandoahEvacReserve * capacity / 100, actual_free);
+  size_t max_cset  = (size_t)(available / ShenandoahEvacWaste);
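+  // Editorial example (hypothetical values): capacity = 1024M, ShenandoahEvacReserve = 5,
+  // actual_free = 30M, ShenandoahEvacWaste = 1.2:
+  //   available = MAX2(~51M, 30M) = ~51M, max_cset = ~51M / 1.2 = ~43M.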
+
+  log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M",
+                     actual_free / M, max_cset / M);
+
+  size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
+
+  size_t live_cset = 0;
+  for (size_t idx = 0; idx < size; idx++) {
+    ShenandoahHeapRegion* r = data[idx]._region;
+    size_t new_cset = live_cset + r->get_live_data_bytes();
+    if (new_cset < max_cset && r->garbage() > threshold) {
+      live_cset = new_cset;
+      cset->add_region(r);
+    }
+  }
+}
+
+const char* ShenandoahPassiveHeuristics::name() {
+  return "passive";
+}
+
+bool ShenandoahPassiveHeuristics::is_diagnostic() {
+  return true;
+}
+
+bool ShenandoahPassiveHeuristics::is_experimental() {
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP
+#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP
+
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+
+class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
+public:
+  ShenandoahPassiveHeuristics();
+
+  virtual bool should_start_normal_gc() const;
+
+  virtual bool should_process_references();
+
+  virtual bool should_unload_classes();
+
+  virtual bool should_degenerate_cycle();
+
+  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
+                                                     RegionData* data, size_t data_size,
+                                                     size_t free);
+
+  virtual const char* name();
+
+  virtual bool is_diagnostic();
+
+  virtual bool is_experimental();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
+
+ShenandoahStaticHeuristics::ShenandoahStaticHeuristics() : ShenandoahHeuristics() {
+  // Static heuristics may degrade into continuous back-to-back cycles if live data is larger
+  // than the free threshold. ShenandoahAllocationThreshold is supposed to break this cycle,
+  // but it only works if it is non-zero.
+  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahAllocationThreshold, 1);
+
+  SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent);
+  SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
+
+  // Final configuration checks
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
+}
+
+ShenandoahStaticHeuristics::~ShenandoahStaticHeuristics() {}
+
+bool ShenandoahStaticHeuristics::should_start_normal_gc() const {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  size_t capacity = heap->capacity();
+  size_t available = heap->free_set()->available();
+  size_t threshold_available = (capacity * ShenandoahFreeThreshold) / 100;
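+  // Editorial example (hypothetical values): capacity = 1024M and
+  // ShenandoahFreeThreshold = 10 give threshold_available = ~102M; a cycle
+  // triggers once free space drops below that mark.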
+
+  if (available < threshold_available) {
+    log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below free threshold (" SIZE_FORMAT "M)",
+                 available / M, threshold_available / M);
+    return true;
+  }
+  return ShenandoahHeuristics::should_start_normal_gc();
+}
+
+void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                                       RegionData* data, size_t size,
+                                                                       size_t free) {
+  size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
+
+  for (size_t idx = 0; idx < size; idx++) {
+    ShenandoahHeapRegion* r = data[idx]._region;
+    if (r->garbage() > threshold) {
+      cset->add_region(r);
+    }
+  }
+}
+
+const char* ShenandoahStaticHeuristics::name() {
+  return "static";
+}
+
+bool ShenandoahStaticHeuristics::is_diagnostic() {
+  return false;
+}
+
+bool ShenandoahStaticHeuristics::is_experimental() {
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP
+#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP
+
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+
+class ShenandoahStaticHeuristics : public ShenandoahHeuristics {
+public:
+  ShenandoahStaticHeuristics();
+
+  virtual ~ShenandoahStaticHeuristics();
+
+  virtual bool should_start_normal_gc() const;
+
+  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
+                                                     RegionData* data, size_t size,
+                                                     size_t free);
+
+  virtual const char* name();
+
+  virtual bool is_diagnostic();
+
+  virtual bool is_experimental();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
+#include "utilities/quickSort.hpp"
+
+ShenandoahTraversalHeuristics::ShenandoahTraversalHeuristics() : ShenandoahHeuristics(),
+  _last_cset_select(0) {
+  FLAG_SET_DEFAULT(ShenandoahSATBBarrier,            false);
+  FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier,    false);
+  FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true);
+  FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier,       false);
+  FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs,       false);
+
+  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahRefProcFrequency, 1);
+
+  // Adjust class unloading settings only if globally enabled.
+  if (ClassUnloadingWithConcurrentMark) {
+    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1);
+  }
+
+  SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent);
+  SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
+
+  // Final configuration checks
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValEnqueueBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
+}
+
+bool ShenandoahTraversalHeuristics::should_start_normal_gc() const {
+  return false;
+}
+
+bool ShenandoahTraversalHeuristics::is_experimental() {
+  return true;
+}
+
+bool ShenandoahTraversalHeuristics::is_diagnostic() {
+  return false;
+}
+
+bool ShenandoahTraversalHeuristics::can_do_traversal_gc() {
+  return true;
+}
+
+const char* ShenandoahTraversalHeuristics::name() {
+  return "traversal";
+}
+
+void ShenandoahTraversalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
+
+  ShenandoahHeapRegionSet* traversal_set = traversal_gc->traversal_set();
+  traversal_set->clear();
+
+  RegionData* data = get_region_data_cache(heap->num_regions());
+  size_t cnt = 0;
+
+  // Step 0. Prepare all regions
+
+  for (size_t i = 0; i < heap->num_regions(); i++) {
+    ShenandoahHeapRegion* r = heap->get_region(i);
+    if (r->used() > 0) {
+      if (r->is_regular()) {
+        data[cnt]._region = r;
+        data[cnt]._garbage = r->garbage();
+        data[cnt]._seqnum_last_alloc = r->seqnum_last_alloc_mutator();
+        cnt++;
+      }
+      traversal_set->add_region(r);
+    }
+  }
+
+  // The logic for cset selection is similar to that of the adaptive heuristics:
+  //
+  //   1. We cannot get a cset larger than the available free space. Otherwise we guarantee
+  //      OOME during evacuation, and thus guarantee full GC. In practice, we also want to
+  //      let the application allocate something. This is why we limit the cset to some
+  //      fraction of available space. In a non-overloaded heap, max_cset would contain all
+  //      plausible candidates over the garbage threshold.
+  //
+  //   2. We should not get a cset so small that the free threshold would not be met right
+  //      after the cycle. Otherwise we get back-to-back cycles for no reason if the heap
+  //      is too fragmented. In a non-overloaded, non-fragmented heap min_garbage would be
+  //      around zero.
+  //
+  // Therefore, we start by sorting the regions by garbage. Then we unconditionally add
+  // the best candidates until we meet min_garbage. Then we add all candidates over the
+  // garbage threshold until we hit max_cset, at which point cset selection terminates.
+  // Note that in this scheme, ShenandoahGarbageThreshold is the soft threshold which is
+  // ignored until min_garbage is met.
+  //
+  // The significant complication is that liveness data was collected during the previous
+  // cycle, and only for those regions that were allocated before that cycle started.
+
+  size_t capacity    = heap->capacity();
+  size_t actual_free = heap->free_set()->available();
+  size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
+  size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
+  size_t max_cset    = (size_t)(1.0 * ShenandoahEvacReserve * capacity / 100 / ShenandoahEvacWaste);
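+
+  // Worked example (hypothetical values, not necessarily the defaults): with capacity
+  // of 1024M, ShenandoahMinFreeThreshold = 10, ShenandoahEvacReserve = 5 and
+  // ShenandoahEvacWaste = 1.5, free_target is ~102M; if actual_free is 60M, then
+  // min_garbage is ~42M and max_cset is 1024 * 5 / 100 / 1.5 = ~34M.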
+
+  log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: "
+                     SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M, Min Garbage: " SIZE_FORMAT "M",
+                     free_target / M, actual_free / M, max_cset / M, min_garbage / M);
+
+  // Prefer garbage-first regions, and then older ones
+  QuickSort::sort<RegionData>(data, (int) cnt, compare_by_garbage_then_alloc_seq_ascending, false);
+
+  size_t cur_cset = 0;
+  size_t cur_garbage = 0;
+
+  size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() / 100 * ShenandoahGarbageThreshold;
+
+  // Step 1. Add trustworthy regions to the collection set.
+  //
+  // We can trust live/garbage data from regions that were fully traversed during the
+  // previous cycle. Even if actual liveness is different now, we can only have _fewer_
+  // live objects, because dead objects are not resurrected. This means we can undershoot
+  // the collection set, but not overshoot it.
+
+  for (size_t i = 0; i < cnt; i++) {
+    if (data[i]._seqnum_last_alloc > _last_cset_select) continue;
+
+    ShenandoahHeapRegion* r = data[i]._region;
+    assert (r->is_regular(), "should have been filtered before");
+
+    size_t new_garbage = cur_garbage + r->garbage();
+    size_t new_cset    = cur_cset    + r->get_live_data_bytes();
+
+    if (new_cset > max_cset) {
+      break;
+    }
+
+    if ((new_garbage < min_garbage) || (r->garbage() > garbage_threshold)) {
+      assert(!collection_set->is_in(r), "must not yet be in cset");
+      collection_set->add_region(r);
+      cur_cset = new_cset;
+      cur_garbage = new_garbage;
+    }
+  }
+
+  // Step 2. Try to catch some recently allocated regions for an evacuation ride.
+  //
+  // Pessimistically assume we are going to evacuate the entire region. While this is
+  // very pessimistic, and in most cases undershoots the collection set when regions are
+  // mostly dead, it also provides more safety against running into allocation failure
+  // when newly allocated regions are fully live.
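+  //
+  // For example (illustrative numbers): a region allocated after the last cset selection,
+  // with only 1M live out of 32M used, is still budgeted here at the full 32M.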
+
+  for (size_t i = 0; i < cnt; i++) {
+    if (data[i]._seqnum_last_alloc <= _last_cset_select) continue;
+
+    ShenandoahHeapRegion* r = data[i]._region;
+    assert (r->is_regular(), "should have been filtered before");
+
+    // size_t new_garbage = cur_garbage + 0; (implied)
+    size_t new_cset = cur_cset + r->used();
+
+    if (new_cset > max_cset) {
+      break;
+    }
+
+    assert(!collection_set->is_in(r), "must not yet be in cset");
+    collection_set->add_region(r);
+    cur_cset = new_cset;
+  }
+
+  // Step 3. Clear liveness data
+  // TODO: Merge it with step 0, but save live data in RegionData before.
+  for (size_t i = 0; i < heap->num_regions(); i++) {
+    ShenandoahHeapRegion* r = heap->get_region(i);
+    if (r->used() > 0) {
+      r->clear_live_data();
+    }
+  }
+
+  collection_set->update_region_status();
+
+  _last_cset_select = ShenandoahHeapRegion::seqnum_current_alloc();
+}
+
+bool ShenandoahTraversalHeuristics::should_start_traversal_gc() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  assert(!heap->has_forwarded_objects(), "no forwarded objects here");
+
+  size_t capacity = heap->capacity();
+  size_t available = heap->free_set()->available();
+
+  // Check if we are falling below the worst-case limit; if so, trigger the GC
+  // regardless of anything else.
+  size_t min_threshold = ShenandoahMinFreeThreshold * capacity / 100;
+  if (available < min_threshold) {
+    log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
+                 available / M, min_threshold / M);
+    return true;
+  }
+
+  // Check if we need to learn a bit about the application
+  const size_t max_learn = ShenandoahLearningSteps;
+  if (_gc_times_learned < max_learn) {
+    size_t init_threshold = ShenandoahInitFreeThreshold * capacity / 100;
+    if (available < init_threshold) {
+      log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
+                   _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
+      return true;
+    }
+  }
+
+  // Check if allocation headroom is still okay. This also factors in:
+  //   1. Some space to absorb allocation spikes
+  //   2. Accumulated penalties from Degenerated and Full GC
+
+  size_t allocation_headroom = available;
+
+  size_t spike_headroom = ShenandoahAllocSpikeFactor * capacity / 100;
+  size_t penalties      = _gc_time_penalties         * capacity / 100;
+
+  allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
+  allocation_headroom -= MIN2(allocation_headroom, penalties);
+
+  double average_gc = _gc_time_history->avg();
+  double time_since_last = time_since_last_gc();
+  double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last;
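+
+  // Worked example (hypothetical numbers): with 100M of allocation headroom and an
+  // allocation rate of 200M/s, free space is depleted in ~0.5s; if the average GC
+  // cycle takes longer than that, the concurrent cycle must start now to finish in time.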
+
+  if (average_gc > allocation_headroom / allocation_rate) {
+    log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.2f MB/s) to deplete free headroom (" SIZE_FORMAT "M)",
+                 average_gc * 1000, allocation_rate / M, allocation_headroom / M);
+    log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "M (free) - " SIZE_FORMAT "M (spike) - " SIZE_FORMAT "M (penalties) = " SIZE_FORMAT "M",
+                       available / M, spike_headroom / M, penalties / M, allocation_headroom / M);
+    return true;
+  } else if (ShenandoahHeuristics::should_start_normal_gc()) {
+    return true;
+  }
+
+  return false;
+}
+
+void ShenandoahTraversalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
+                                                                          RegionData* data, size_t data_size,
+                                                                          size_t free) {
+  ShouldNotReachHere();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHTRAVERSALHEURISTICS_HPP
+#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHTRAVERSALHEURISTICS_HPP
+
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+
+class ShenandoahTraversalHeuristics : public ShenandoahHeuristics {
+private:
+  uint64_t _last_cset_select;
+
+protected:
+  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
+                                                     RegionData* data, size_t data_size,
+                                                     size_t free);
+
+public:
+  ShenandoahTraversalHeuristics();
+
+  virtual bool should_start_normal_gc() const;
+
+  virtual bool is_experimental();
+
+  virtual bool is_diagnostic();
+
+  virtual bool can_do_traversal_gc();
+
+  virtual const char* name();
+
+  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
+
+  virtual bool should_start_traversal_gc();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHTRAVERSALHEURISTICS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP
+
+#include "memory/allocation.hpp"
+
+class ShenandoahAllocRequest : StackObj {
+public:
+  enum Type {
+    _alloc_shared,      // Allocate common, outside of TLAB
+    _alloc_shared_gc,   // Allocate common, outside of GCLAB
+    _alloc_tlab,        // Allocate TLAB
+    _alloc_gclab,       // Allocate GCLAB
+    _ALLOC_LIMIT,
+  };
+
+  static const char* alloc_type_to_string(Type type) {
+    switch (type) {
+      case _alloc_shared:
+        return "Shared";
+      case _alloc_shared_gc:
+        return "Shared GC";
+      case _alloc_tlab:
+        return "TLAB";
+      case _alloc_gclab:
+        return "GCLAB";
+      default:
+        ShouldNotReachHere();
+        return "";
+    }
+  }
+
+private:
+  size_t _min_size;
+  size_t _requested_size;
+  size_t _actual_size;
+  Type _alloc_type;
+#ifdef ASSERT
+  bool _actual_size_set;
+#endif
+
+  ShenandoahAllocRequest(size_t _min_size, size_t _requested_size, Type _alloc_type) :
+          _min_size(_min_size), _requested_size(_requested_size),
+          _actual_size(0), _alloc_type(_alloc_type)
+#ifdef ASSERT
+          , _actual_size_set(false)
+#endif
+  {}
+
+public:
+  static inline ShenandoahAllocRequest for_tlab(size_t min_size, size_t requested_size) {
+    return ShenandoahAllocRequest(min_size, requested_size, _alloc_tlab);
+  }
+
+  static inline ShenandoahAllocRequest for_gclab(size_t min_size, size_t requested_size) {
+    return ShenandoahAllocRequest(min_size, requested_size, _alloc_gclab);
+  }
+
+  static inline ShenandoahAllocRequest for_shared_gc(size_t requested_size) {
+    return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc);
+  }
+
+  static inline ShenandoahAllocRequest for_shared(size_t requested_size) {
+    return ShenandoahAllocRequest(0, requested_size, _alloc_shared);
+  }
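+
+  // Illustrative usage (a sketch; heap->allocate_memory() is an assumed entry point,
+  // not defined in this file):
+  //   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_words, req_words);
+  //   HeapWord* mem = heap->allocate_memory(req);
+  //   if (mem != NULL) req.actual_size(); // granted size may differ from requested for LABs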
+
+  inline size_t size() {
+    return _requested_size;
+  }
+
+  inline Type type() {
+    return _alloc_type;
+  }
+
+  inline size_t min_size() {
+    assert (is_lab_alloc(), "Only access for LAB allocs");
+    return _min_size;
+  }
+
+  inline size_t actual_size() {
+    assert (_actual_size_set, "Should be set");
+    return _actual_size;
+  }
+
+  inline void set_actual_size(size_t v) {
+#ifdef ASSERT
+    assert (!_actual_size_set, "Should not be set");
+    _actual_size_set = true;
+#endif
+    _actual_size = v;
+  }
+
+  inline bool is_mutator_alloc() {
+    switch (_alloc_type) {
+      case _alloc_tlab:
+      case _alloc_shared:
+        return true;
+      case _alloc_gclab:
+      case _alloc_shared_gc:
+        return false;
+      default:
+        ShouldNotReachHere();
+        return false;
+    }
+  }
+
+  inline bool is_gc_alloc() {
+    switch (_alloc_type) {
+      case _alloc_tlab:
+      case _alloc_shared:
+        return false;
+      case _alloc_gclab:
+      case _alloc_shared_gc:
+        return true;
+      default:
+        ShouldNotReachHere();
+        return false;
+    }
+  }
+
+  inline bool is_lab_alloc() {
+    switch (_alloc_type) {
+      case _alloc_tlab:
+      case _alloc_gclab:
+        return true;
+      case _alloc_shared:
+      case _alloc_shared_gc:
+        return false;
+      default:
+        ShouldNotReachHere();
+        return false;
+    }
+  }
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocTracker.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahAllocTracker.hpp"
+#include "utilities/ostream.hpp"
+
+void ShenandoahAllocTracker::print_on(outputStream* out) const {
+  out->print_cr("ALLOCATION TRACING");
+  out->print_cr("  These are the slow-path allocations, including TLAB/GCLAB refills and out-of-TLAB allocations.");
+  out->print_cr("  In-TLAB/GCLAB allocations happen orders of magnitude more frequently, and without delays.");
+  out->cr();
+
+  out->print("%22s", "");
+  for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) {
+    out->print("%12s", ShenandoahAllocRequest::alloc_type_to_string(ShenandoahAllocRequest::Type(t)));
+  }
+  out->cr();
+
+  out->print_cr("Counts:");
+  out->print("%22s", "#");
+  for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) {
+    out->print(SIZE_FORMAT_W(12), _alloc_size[t].num());
+  }
+  out->cr();
+  out->cr();
+
+  // Figure out max and min levels
+  int lat_min_level = +1000;
+  int lat_max_level = -1000;
+  int size_min_level = +1000;
+  int size_max_level = -1000;
+  for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) {
+    lat_min_level = MIN2(lat_min_level, _alloc_latency[t].min_level());
+    lat_max_level = MAX2(lat_max_level, _alloc_latency[t].max_level());
+    size_min_level = MIN2(size_min_level, _alloc_size[t].min_level());
+    size_max_level = MAX2(size_max_level, _alloc_size[t].max_level());
+  }
+
+  out->print_cr("Latency summary:");
+  out->print("%22s", "sum, ms:");
+  for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) {
+    out->print(SIZE_FORMAT_W(12), _alloc_latency[t].sum() / K);
+  }
+  out->cr();
+  out->cr();
+
+  out->print_cr("Sizes summary:");
+  out->print("%22s", "sum, M:");
+  for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) {
+    out->print(SIZE_FORMAT_W(12), _alloc_size[t].sum() * HeapWordSize / M);
+  }
+  out->cr();
+  out->cr();
+
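+  // As the printing below suggests, bucket c of BinaryMagnitudeSeq covers the
+  // power-of-two range [2^(c-1), 2^c), with bucket 0 holding everything below 1.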
+  out->print_cr("Latency histogram (time in microseconds):");
+  for (int c = lat_min_level; c <= lat_max_level; c++) {
+    out->print("%9d - %9d:", (c == 0) ? 0 : 1 << (c - 1), 1 << c);
+    for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) {
+      out->print(SIZE_FORMAT_W(12), _alloc_latency[t].level(c));
+    }
+    out->cr();
+  }
+  out->cr();
+
+  out->print_cr("Sizes histogram (size in bytes):");
+  for (int c = size_min_level; c <= size_max_level; c++) {
+    int l = (c == 0) ? 0 : 1 << (c - 1);
+    int r = 1 << c;
+    out->print("%9d - %9d:", l * HeapWordSize, r * HeapWordSize);
+    for (size_t t = 0; t < ShenandoahAllocRequest::_ALLOC_LIMIT; t++) {
+      out->print(SIZE_FORMAT_W(12), _alloc_size[t].level(c));
+    }
+    out->cr();
+  }
+  out->cr();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocTracker.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCTRACKER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCTRACKER_HPP
+
+#include "gc/shenandoah/shenandoahAllocRequest.hpp"
+#include "gc/shenandoah/shenandoahNumberSeq.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/ostream.hpp"
+
+class ShenandoahAllocTracker : public CHeapObj<mtGC> {
+private:
+  BinaryMagnitudeSeq _alloc_size[ShenandoahAllocRequest::_ALLOC_LIMIT];
+  BinaryMagnitudeSeq _alloc_latency[ShenandoahAllocRequest::_ALLOC_LIMIT];
+
+public:
+  void record_alloc_latency(size_t words_size,
+                            ShenandoahAllocRequest::Type _alloc_type,
+                            double latency_us) {
+    _alloc_size[_alloc_type].add(words_size);
+    _alloc_latency[_alloc_type].add((size_t)latency_us);
+  }
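+
+  // Illustrative call site (hypothetical; actual callers live in the allocation
+  // slow path): tracker->record_alloc_latency(req.actual_size(), req.type(), elapsed_us);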
+
+  void print_on(outputStream* out) const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCTRACKER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/gcArguments.inline.hpp"
+#include "gc/shared/workerPolicy.hpp"
+#include "gc/shenandoah/shenandoahArguments.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "utilities/defaultStream.hpp"
+
+void ShenandoahArguments::initialize() {
+#if !(defined AARCH64 || defined AMD64 || defined IA32)
+  vm_exit_during_initialization("Shenandoah GC is not supported on this platform.");
+#endif
+
+#ifdef IA32
+  log_warning(gc)("Shenandoah GC is not fully supported on this platform:");
+  log_warning(gc)("  concurrent modes are not supported, only STW cycles are enabled;");
+  log_warning(gc)("  arch-specific barrier code is not implemented, disabling barriers;");
+
+  FLAG_SET_DEFAULT(ShenandoahGCHeuristics,           "passive");
+
+  FLAG_SET_DEFAULT(ShenandoahSATBBarrier,            false);
+  FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier,       false);
+  FLAG_SET_DEFAULT(ShenandoahWriteBarrier,           false);
+  FLAG_SET_DEFAULT(ShenandoahReadBarrier,            false);
+  FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, false);
+  FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier,    false);
+  FLAG_SET_DEFAULT(ShenandoahCASBarrier,             false);
+  FLAG_SET_DEFAULT(ShenandoahAcmpBarrier,            false);
+  FLAG_SET_DEFAULT(ShenandoahCloneBarrier,           false);
+#endif
+
+#ifdef _LP64
+  // The optimized ObjArrayChunkedTask takes some bits away from the full 64 addressable
+  // bits, so fail if we ever attempt to address more than we can. Only valid on 64-bit.
+  if (MaxHeapSize >= ObjArrayChunkedTask::max_addressable()) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Shenandoah GC cannot address more than " SIZE_FORMAT " bytes, and " SIZE_FORMAT " bytes heap requested.",
+                ObjArrayChunkedTask::max_addressable(), MaxHeapSize);
+    vm_exit(1);
+  }
+#endif
+
+  if (UseLargePages && (MaxHeapSize / os::large_page_size()) < ShenandoahHeapRegion::MIN_NUM_REGIONS) {
+    warning("Large page size (" SIZE_FORMAT "K) is too large to afford page-sized regions, disabling uncommit",
+            os::large_page_size() / K);
+    FLAG_SET_DEFAULT(ShenandoahUncommit, false);
+  }
+
+  // Enable NUMA by default. While Shenandoah is not NUMA-aware, enabling NUMA makes the
+  // storage allocation code NUMA-aware, and NUMA interleaving makes the storage
+  // allocation consistent (interleaved) across runs, minimizing run-to-run variance.
+  if (FLAG_IS_DEFAULT(UseNUMA)) {
+    FLAG_SET_DEFAULT(UseNUMA, true);
+    FLAG_SET_DEFAULT(UseNUMAInterleaving, true);
+  }
+
+  FLAG_SET_DEFAULT(ParallelGCThreads,
+                   WorkerPolicy::parallel_worker_threads());
+
+  if (FLAG_IS_DEFAULT(ConcGCThreads)) {
+    uint conc_threads = MAX2((uint) 1, ParallelGCThreads);
+    FLAG_SET_DEFAULT(ConcGCThreads, conc_threads);
+  }
+
+  if (FLAG_IS_DEFAULT(ParallelRefProcEnabled)) {
+    FLAG_SET_DEFAULT(ParallelRefProcEnabled, true);
+  }
+
+  if (ShenandoahRegionSampling && FLAG_IS_DEFAULT(PerfDataMemorySize)) {
+    // When sampling is enabled, max out the PerfData memory to get more
+    // Shenandoah data in, including Matrix.
+    FLAG_SET_DEFAULT(PerfDataMemorySize, 2048*K);
+  }
+
+#ifdef COMPILER2
+  // Shenandoah cares more about pause times, rather than raw throughput.
+  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
+    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
+    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
+    }
+  }
+#ifdef ASSERT
+  // C2 barrier verification is only reliable when all default barriers are enabled
+  if (ShenandoahVerifyOptoBarriers &&
+          (!FLAG_IS_DEFAULT(ShenandoahSATBBarrier)            ||
+           !FLAG_IS_DEFAULT(ShenandoahKeepAliveBarrier)       ||
+           !FLAG_IS_DEFAULT(ShenandoahWriteBarrier)           ||
+           !FLAG_IS_DEFAULT(ShenandoahReadBarrier)            ||
+           !FLAG_IS_DEFAULT(ShenandoahStoreValEnqueueBarrier) ||
+           !FLAG_IS_DEFAULT(ShenandoahStoreValReadBarrier)    ||
+           !FLAG_IS_DEFAULT(ShenandoahCASBarrier)             ||
+           !FLAG_IS_DEFAULT(ShenandoahAcmpBarrier)            ||
+           !FLAG_IS_DEFAULT(ShenandoahCloneBarrier)
+          )) {
+    warning("Unusual barrier configuration, disabling C2 barrier verification");
+    FLAG_SET_DEFAULT(ShenandoahVerifyOptoBarriers, false);
+  }
+#else
+  guarantee(!ShenandoahVerifyOptoBarriers, "Should be disabled");
+#endif // ASSERT
+#endif // COMPILER2
+
+  if (AlwaysPreTouch) {
+    // Shenandoah handles pre-touch on its own. It does not let the
+    // generic storage code do the pre-touch before Shenandoah has
+    // a chance to do it on its own.
+    FLAG_SET_DEFAULT(AlwaysPreTouch, false);
+    FLAG_SET_DEFAULT(ShenandoahAlwaysPreTouch, true);
+  }
+
+  // Shenandoah C2 optimizations apparently dislike the shape of thread-local handshakes.
+  // Disable it by default, unless we enable it specifically for debugging.
+  if (FLAG_IS_DEFAULT(ThreadLocalHandshakes)) {
+    if (ThreadLocalHandshakes) {
+      FLAG_SET_DEFAULT(ThreadLocalHandshakes, false);
+    }
+  } else {
+    if (ThreadLocalHandshakes) {
+      warning("Thread-local handshakes are not working correctly with Shenandoah at the moment. Enable at your own risk.");
+    }
+  }
+
+  // Record more information about previous cycles for improved debugging pleasure
+  if (FLAG_IS_DEFAULT(LogEventsBufferEntries)) {
+    FLAG_SET_DEFAULT(LogEventsBufferEntries, 250);
+  }
+
+  if (ShenandoahAlwaysPreTouch) {
+    if (!FLAG_IS_DEFAULT(ShenandoahUncommit)) {
+      warning("AlwaysPreTouch is enabled, disabling ShenandoahUncommit");
+    }
+    FLAG_SET_DEFAULT(ShenandoahUncommit, false);
+  }
+
+  // If class unloading is disabled, there is no unloading for concurrent cycles either.
+  // If class unloading is enabled, users should opt in for unloading during
+  // concurrent cycles.
+  if (!ClassUnloading || !FLAG_IS_CMDLINE(ClassUnloadingWithConcurrentMark)) {
+    log_info(gc)("Consider -XX:+ClassUnloadingWithConcurrentMark if large pause times "
+                 "are observed on class-unloading sensitive workloads");
+    FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
+  }
+
+  // AOT is not supported yet
+  if (UseAOT) {
+    if (!FLAG_IS_DEFAULT(UseAOT)) {
+      warning("Shenandoah does not support AOT at this moment, disabling UseAOT");
+    }
+    FLAG_SET_DEFAULT(UseAOT, false);
+  }
+
+  // JNI fast get-field accessors are not currently supported by Shenandoah.
+  // It would introduce another heap memory access for reading the forwarding
+  // pointer, which would have to be guarded by the signal handler machinery.
+  // See:
+  // http://mail.openjdk.java.net/pipermail/hotspot-dev/2018-June/032763.html
+  FLAG_SET_DEFAULT(UseFastJNIAccessors, false);
+
+  // TLAB sizing policy makes resizing decisions before each GC cycle. It averages
+  // historical data, assigning more recent data the weight according to TLABAllocationWeight.
+  // The current default is good for generational collectors that run frequent young GCs.
+  // With Shenandoah, GC cycles are much less frequent, so we need the sizing policy
+  // to converge faster over a smaller number of resizing decisions.
+  if (FLAG_IS_DEFAULT(TLABAllocationWeight)) {
+    FLAG_SET_DEFAULT(TLABAllocationWeight, 90);
+  }
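+
+  // Illustrative effect (assuming the exponential averaging described above): with
+  // weight 90, a new sample contributes 90% to the running average, so the estimate
+  // converges within a handful of cycles rather than tens of samples.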
+
+  // Shenandoah needs more C2 nodes to compile some methods with lots of barriers.
+  // NodeLimitFudgeFactor needs to stay the same relative to MaxNodeLimit.
+#ifdef COMPILER2
+  if (FLAG_IS_DEFAULT(MaxNodeLimit)) {
+    FLAG_SET_DEFAULT(MaxNodeLimit, MaxNodeLimit * 3);
+    FLAG_SET_DEFAULT(NodeLimitFudgeFactor, NodeLimitFudgeFactor * 3);
+  }
+#endif
+
+  // Make sure safepoint deadlocks fail predictably. This sets up the VM to report a
+  // fatal error after 10 seconds of waiting for safepoint synchronization (not for the
+  // VM operation itself). There is no good reason why Shenandoah would spend that
+  // much time synchronizing.
+#ifdef ASSERT
+  FLAG_SET_DEFAULT(SafepointTimeout, true);
+  FLAG_SET_DEFAULT(SafepointTimeoutDelay, 10000);
+  FLAG_SET_DEFAULT(AbortVMOnSafepointTimeout, true);
+#endif
+}
+
+size_t ShenandoahArguments::conservative_max_heap_alignment() {
+  size_t align = ShenandoahMaxRegionSize;
+  if (UseLargePages) {
+    align = MAX2(align, os::large_page_size());
+  }
+  return align;
+}
+
+CollectedHeap* ShenandoahArguments::create_heap() {
+  return create_heap_with_policy<ShenandoahHeap, ShenandoahCollectorPolicy>();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHARGUMENTS_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class CollectedHeap;
+
+class ShenandoahArguments : public GCArguments {
+public:
+  virtual void initialize();
+
+  virtual size_t conservative_max_heap_alignment();
+
+  virtual CollectedHeap* create_heap();
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHARGUMENTS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "memory/resourceArea.hpp"
+
+void print_raw_memory(ShenandoahMessageBuffer &msg, void* loc) {
+  // Be extra safe. Only access data that is guaranteed to be safe: the location should
+  // be in the heap, in a known committed region, and within that region's bounds.
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  if (!heap->is_in(loc)) return;
+
+  ShenandoahHeapRegion* r = heap->heap_region_containing(loc);
+  if (r != NULL && r->is_committed()) {
+    address start = MAX2((address) r->bottom(), (address) loc - 32);
+    address end   = MIN2((address) r->end(),    (address) loc + 128);
+    if (start >= end) return;
+
+    stringStream ss;
+    os::print_hex_dump(&ss, start, end, 4);
+    msg.append("\n");
+    msg.append("Raw heap memory:\n%s", ss.as_string());
+  }
+}
+
+void ShenandoahAsserts::print_obj(ShenandoahMessageBuffer& msg, oop obj) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ShenandoahHeapRegion *r = heap->heap_region_containing(obj);
+
+  ResourceMark rm;
+  stringStream ss;
+  r->print_on(&ss);
+
+  ShenandoahMarkingContext* const ctx = heap->marking_context();
+
+  msg.append("  " PTR_FORMAT " - klass " PTR_FORMAT " %s\n", p2i(obj), p2i(obj->klass()), obj->klass()->external_name());
+  msg.append("    %3s allocated after mark start\n", ctx->allocated_after_mark_start((HeapWord *) obj) ? "" : "not");
+  msg.append("    %3s marked \n",                    ctx->is_marked(obj) ? "" : "not");
+  msg.append("    %3s in collection set\n",          heap->in_collection_set(obj) ? "" : "not");
+  if (heap->traversal_gc() != NULL) {
+    msg.append("    %3s in traversal set\n",         heap->traversal_gc()->traversal_set()->is_in((HeapWord*) obj) ? "" : "not");
+  }
+  msg.append("  region: %s", ss.as_string());
+}
+
+void ShenandoahAsserts::print_non_obj(ShenandoahMessageBuffer& msg, void* loc) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  if (heap->is_in(loc)) {
+    msg.append("  inside Java heap\n");
+    ShenandoahHeapRegion *r = heap->heap_region_containing(loc);
+    stringStream ss;
+    r->print_on(&ss);
+
+    msg.append("    %3s in collection set\n",    heap->in_collection_set(loc) ? "" : "not");
+    msg.append("  region: %s", ss.as_string());
+  } else {
+    msg.append("  outside of Java heap\n");
+    stringStream ss;
+    os::print_location(&ss, (intptr_t) loc, false);
+    msg.append("  %s", ss.as_string());
+  }
+}
+
+void ShenandoahAsserts::print_obj_safe(ShenandoahMessageBuffer& msg, void* loc) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  msg.append("  " PTR_FORMAT " - safe print, no details\n", p2i(loc));
+  if (heap->is_in(loc)) {
+    ShenandoahHeapRegion* r = heap->heap_region_containing(loc);
+    if (r != NULL) {
+      stringStream ss;
+      r->print_on(&ss);
+      msg.append("  region: %s", ss.as_string());
+      print_raw_memory(msg, loc);
+    }
+  }
+}
+
+void ShenandoahAsserts::print_failure(SafeLevel level, oop obj, void* interior_loc, oop loc,
+                                       const char* phase, const char* label,
+                                       const char* file, int line) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ResourceMark rm;
+
+  bool loc_in_heap = (loc != NULL && heap->is_in(loc));
+
+  ShenandoahMessageBuffer msg("%s; %s\n\n", phase, label);
+
+  msg.append("Referenced from:\n");
+  if (interior_loc != NULL) {
+    msg.append("  interior location: " PTR_FORMAT "\n", p2i(interior_loc));
+    if (loc_in_heap) {
+      print_obj(msg, loc);
+    } else {
+      print_non_obj(msg, interior_loc);
+    }
+  } else {
+    msg.append("  no interior location recorded (probably a plain heap scan, or detached oop)\n");
+  }
+  msg.append("\n");
+
+  msg.append("Object:\n");
+  if (level >= _safe_oop) {
+    print_obj(msg, obj);
+  } else {
+    print_obj_safe(msg, obj);
+  }
+  msg.append("\n");
+
+  if (level >= _safe_oop) {
+    oop fwd = (oop) ShenandoahBrooksPointer::get_raw_unchecked(obj);
+    msg.append("Forwardee:\n");
+    if (!oopDesc::equals_raw(obj, fwd)) {
+      if (level >= _safe_oop_fwd) {
+        print_obj(msg, fwd);
+      } else {
+        print_obj_safe(msg, fwd);
+      }
+    } else {
+      msg.append("  (the object itself)");
+    }
+    msg.append("\n");
+  }
+
+  if (level >= _safe_oop_fwd) {
+    oop fwd = (oop) ShenandoahBrooksPointer::get_raw_unchecked(obj);
+    oop fwd2 = (oop) ShenandoahBrooksPointer::get_raw_unchecked(fwd);
+    if (!oopDesc::equals_raw(fwd, fwd2)) {
+      msg.append("Second forwardee:\n");
+      print_obj_safe(msg, fwd2);
+      msg.append("\n");
+    }
+  }
+
+  report_vm_error(file, line, msg.buffer());
+}
+
+void ShenandoahAsserts::assert_in_heap(void* interior_loc, oop obj, const char *file, int line) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+
+  if (!heap->is_in(obj)) {
+    print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_heap failed",
+                  "oop must point to a heap address",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* file, int line) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+
+  // Step 1. Check that obj is correct.
+  // After this step, it is safe to call heap_region_containing().
+  if (!heap->is_in(obj)) {
+    print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                  "oop must point to a heap address",
+                  file, line);
+  }
+
+  Klass* obj_klass = obj->klass_or_null();
+  if (obj_klass == NULL) {
+    print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                  "Object klass pointer should not be NULL",
+                  file, line);
+  }
+
+  if (!Metaspace::contains(obj_klass)) {
+    print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                  "Object klass pointer must go to metaspace",
+                  file, line);
+  }
+
+  oop fwd = oop(ShenandoahBrooksPointer::get_raw_unchecked(obj));
+
+  if (!oopDesc::equals_raw(obj, fwd)) {
+    // When Full GC moves the objects, we cannot trust fwdptrs. If we got here, it means
+    // something attempted fwdptr manipulation while Full GC was running. The only
+    // exception is a fwdptr that still points to the object itself.
+    if (heap->is_full_gc_move_in_progress()) {
+      print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                    "Non-trivial forwarding pointer during Full GC moves, probable bug.",
+                    file, line);
+    }
+
+    // Step 2. Check that forwardee is correct
+    if (!heap->is_in(fwd)) {
+      print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                    "Forwardee must point to a heap address",
+                    file, line);
+    }
+
+    if (obj_klass != fwd->klass()) {
+      print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                    "Forwardee klass disagrees with object class",
+                    file, line);
+    }
+
+    // Step 3. Check that forwardee points to correct region
+    if (heap->heap_region_index_containing(fwd) == heap->heap_region_index_containing(obj)) {
+      print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                    "Non-trivial forwardee should be in another region",
+                    file, line);
+    }
+
+    // Step 4. Check for multiple forwardings
+    oop fwd2 = oop(ShenandoahBrooksPointer::get_raw_unchecked(fwd));
+    if (!oopDesc::equals_raw(fwd, fwd2)) {
+      print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
+                    "Multiple forwardings",
+                    file, line);
+    }
+  }
+}
+
+void ShenandoahAsserts::assert_in_correct_region(void* interior_loc, oop obj, const char* file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+  ShenandoahHeapRegion* r = heap->heap_region_containing(obj);
+  if (!r->is_active()) {
+    print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
+                  "Object must reside in active region",
+                  file, line);
+  }
+
+  size_t alloc_size = obj->size() + ShenandoahBrooksPointer::word_size();
+  if (alloc_size > ShenandoahHeapRegion::humongous_threshold_words()) {
+    size_t idx = r->region_number();
+    size_t num_regions = ShenandoahHeapRegion::required_regions(alloc_size * HeapWordSize);
+    for (size_t i = idx; i < idx + num_regions; i++) {
+      ShenandoahHeapRegion* chain_reg = heap->get_region(i);
+      if (i == idx && !chain_reg->is_humongous_start()) {
+        print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
+                      "Object must reside in humongous start",
+                      file, line);
+      }
+      if (i != idx && !chain_reg->is_humongous_continuation()) {
+        print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
+                      "Humongous continuation should be of proper size",
+                      file, line);
+      }
+    }
+  }
+}
+
+void ShenandoahAsserts::assert_forwarded(void* interior_loc, oop obj, const char* file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+  oop fwd = oop(ShenandoahBrooksPointer::get_raw_unchecked(obj));
+
+  if (oopDesc::equals_raw(obj, fwd)) {
+    print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_forwarded failed",
+                  "Object should be forwarded",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_not_forwarded(void* interior_loc, oop obj, const char* file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+  oop fwd = oop(ShenandoahBrooksPointer::get_raw_unchecked(obj));
+
+  if (!oopDesc::equals_raw(obj, fwd)) {
+    print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_forwarded failed",
+                  "Object should not be forwarded",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_marked(void *interior_loc, oop obj, const char *file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+  if (!heap->marking_context()->is_marked(obj)) {
+    print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_marked failed",
+                  "Object should be marked",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_in_cset(void* interior_loc, oop obj, const char* file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+  if (!heap->in_collection_set(obj)) {
+    print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_in_cset failed",
+                  "Object should be in collection set",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_not_in_cset(void* interior_loc, oop obj, const char* file, int line) {
+  assert_correct(interior_loc, obj, file, line);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+  if (heap->in_collection_set(obj)) {
+    print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_in_cset failed",
+                  "Object should not be in collection set",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_not_in_cset_loc(void* interior_loc, const char* file, int line) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
+  if (heap->in_collection_set(interior_loc)) {
+    print_failure(_safe_unknown, NULL, interior_loc, NULL, "Shenandoah assert_not_in_cset_loc failed",
+                  "Interior location should not be in collection set",
+                  file, line);
+  }
+}
+
+void ShenandoahAsserts::print_rp_failure(const char *label, BoolObjectClosure* actual,
+                                         const char *file, int line) {
+  ShenandoahMessageBuffer msg("%s\n", label);
+  msg.append(" Actual:                  " PTR_FORMAT "\n", p2i(actual));
+  report_vm_error(file, line, msg.buffer());
+}
+
+void ShenandoahAsserts::assert_rp_isalive_not_installed(const char *file, int line) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ReferenceProcessor* rp = heap->ref_processor();
+  if (rp->is_alive_non_header() != NULL) {
+    print_rp_failure("Shenandoah assert_rp_isalive_not_installed failed", rp->is_alive_non_header(),
+                     file, line);
+  }
+}
+
+void ShenandoahAsserts::assert_rp_isalive_installed(const char *file, int line) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ReferenceProcessor* rp = heap->ref_processor();
+  if (rp->is_alive_non_header() == NULL) {
+    print_rp_failure("Shenandoah assert_rp_isalive_installed failed", rp->is_alive_non_header(),
+                     file, line);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHASSERTS_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHASSERTS_HPP
+
+#include "memory/iterator.hpp"
+#include "utilities/formatBuffer.hpp"
+
+typedef FormatBuffer<8192> ShenandoahMessageBuffer;
+
+class ShenandoahAsserts {
+public:
+  enum SafeLevel {
+    _safe_unknown,
+    _safe_oop,
+    _safe_oop_fwd,
+    _safe_all,
+  };
+
+  static void print_obj(ShenandoahMessageBuffer &msg, oop obj);
+
+  static void print_non_obj(ShenandoahMessageBuffer &msg, void *loc);
+
+  static void print_obj_safe(ShenandoahMessageBuffer &msg, void *loc);
+
+  static void print_failure(SafeLevel level, oop obj, void *interior_loc, oop loc,
+                            const char *phase, const char *label,
+                            const char *file, int line);
+
+  static void print_rp_failure(const char *label, BoolObjectClosure* actual,
+                               const char *file, int line);
+
+  static void assert_in_heap(void* interior_loc, oop obj, const char* file, int line);
+  static void assert_in_correct_region(void* interior_loc, oop obj, const char* file, int line);
+
+  static void assert_correct(void* interior_loc, oop obj, const char* file, int line);
+  static void assert_forwarded(void* interior_loc, oop obj, const char* file, int line);
+  static void assert_not_forwarded(void* interior_loc, oop obj, const char* file, int line);
+  static void assert_marked(void* interior_loc, oop obj, const char* file, int line);
+  static void assert_in_cset(void* interior_loc, oop obj, const char* file, int line);
+  static void assert_not_in_cset(void* interior_loc, oop obj, const char* file, int line);
+  static void assert_not_in_cset_loc(void* interior_loc, const char* file, int line);
+
+  static void assert_rp_isalive_not_installed(const char *file, int line);
+  static void assert_rp_isalive_installed(const char *file, int line);
+
+#ifdef ASSERT
+#define shenandoah_assert_in_heap(interior_loc, obj) \
+                    ShenandoahAsserts::assert_in_heap(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_in_correct_region(interior_loc, obj) \
+                    ShenandoahAsserts::assert_in_correct_region(interior_loc, obj, __FILE__, __LINE__);
+
+#define shenandoah_assert_correct_if(interior_loc, obj, condition) \
+  if (condition)    ShenandoahAsserts::assert_correct(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_correct_except(interior_loc, obj, exception) \
+  if (!(exception)) ShenandoahAsserts::assert_correct(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_correct(interior_loc, obj) \
+                    ShenandoahAsserts::assert_correct(interior_loc, obj, __FILE__, __LINE__);
+
+#define shenandoah_assert_forwarded_if(interior_loc, obj, condition) \
+  if (condition)    ShenandoahAsserts::assert_forwarded(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_forwarded_except(interior_loc, obj, exception) \
+  if (!(exception)) ShenandoahAsserts::assert_forwarded(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_forwarded(interior_loc, obj) \
+                    ShenandoahAsserts::assert_forwarded(interior_loc, obj, __FILE__, __LINE__);
+
+#define shenandoah_assert_not_forwarded_if(interior_loc, obj, condition) \
+  if (condition)    ShenandoahAsserts::assert_not_forwarded(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_not_forwarded_except(interior_loc, obj, exception) \
+  if (!(exception)) ShenandoahAsserts::assert_not_forwarded(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_not_forwarded(interior_loc, obj) \
+                    ShenandoahAsserts::assert_not_forwarded(interior_loc, obj, __FILE__, __LINE__);
+
+#define shenandoah_assert_marked_if(interior_loc, obj, condition) \
+  if (condition)    ShenandoahAsserts::assert_marked(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_marked_except(interior_loc, obj, exception) \
+  if (!(exception)) ShenandoahAsserts::assert_marked(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_marked(interior_loc, obj) \
+                    ShenandoahAsserts::assert_marked(interior_loc, obj, __FILE__, __LINE__);
+
+#define shenandoah_assert_in_cset_if(interior_loc, obj, condition) \
+  if (condition)    ShenandoahAsserts::assert_in_cset(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_in_cset_except(interior_loc, obj, exception) \
+  if (!(exception)) ShenandoahAsserts::assert_in_cset(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_in_cset(interior_loc, obj) \
+                    ShenandoahAsserts::assert_in_cset(interior_loc, obj, __FILE__, __LINE__);
+
+#define shenandoah_assert_not_in_cset_if(interior_loc, obj, condition) \
+  if (condition)    ShenandoahAsserts::assert_not_in_cset(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_not_in_cset_except(interior_loc, obj, exception) \
+  if (!(exception)) ShenandoahAsserts::assert_not_in_cset(interior_loc, obj, __FILE__, __LINE__);
+#define shenandoah_assert_not_in_cset(interior_loc, obj) \
+                    ShenandoahAsserts::assert_not_in_cset(interior_loc, obj, __FILE__, __LINE__);
+
+#define shenandoah_assert_not_in_cset_loc_if(interior_loc, condition) \
+  if (condition)    ShenandoahAsserts::assert_not_in_cset_loc(interior_loc, __FILE__, __LINE__);
+#define shenandoah_assert_not_in_cset_loc_except(interior_loc, exception) \
+  if (!(exception)) ShenandoahAsserts::assert_not_in_cset_loc(interior_loc, __FILE__, __LINE__);
+#define shenandoah_assert_not_in_cset_loc(interior_loc) \
+                    ShenandoahAsserts::assert_not_in_cset_loc(interior_loc, __FILE__, __LINE__);
+
+#define shenandoah_assert_rp_isalive_installed() \
+                    ShenandoahAsserts::assert_rp_isalive_installed(__FILE__, __LINE__);
+#define shenandoah_assert_rp_isalive_not_installed() \
+                    ShenandoahAsserts::assert_rp_isalive_not_installed(__FILE__, __LINE__);
+#else
+#define shenandoah_assert_in_heap(interior_loc, obj)
+#define shenandoah_assert_in_correct_region(interior_loc, obj)
+
+#define shenandoah_assert_correct_if(interior_loc, obj, condition)
+#define shenandoah_assert_correct_except(interior_loc, obj, exception)
+#define shenandoah_assert_correct(interior_loc, obj)
+
+#define shenandoah_assert_forwarded_if(interior_loc, obj, condition)
+#define shenandoah_assert_forwarded_except(interior_loc, obj, exception)
+#define shenandoah_assert_forwarded(interior_loc, obj)
+
+#define shenandoah_assert_not_forwarded_if(interior_loc, obj, condition)
+#define shenandoah_assert_not_forwarded_except(interior_loc, obj, exception)
+#define shenandoah_assert_not_forwarded(interior_loc, obj)
+
+#define shenandoah_assert_marked_if(interior_loc, obj, condition)
+#define shenandoah_assert_marked_except(interior_loc, obj, exception)
+#define shenandoah_assert_marked(interior_loc, obj)
+
+#define shenandoah_assert_in_cset_if(interior_loc, obj, condition)
+#define shenandoah_assert_in_cset_except(interior_loc, obj, exception)
+#define shenandoah_assert_in_cset(interior_loc, obj)
+
+#define shenandoah_assert_not_in_cset_if(interior_loc, obj, condition)
+#define shenandoah_assert_not_in_cset_except(interior_loc, obj, exception)
+#define shenandoah_assert_not_in_cset(interior_loc, obj)
+
+#define shenandoah_assert_not_in_cset_loc_if(interior_loc, condition)
+#define shenandoah_assert_not_in_cset_loc_except(interior_loc, exception)
+#define shenandoah_assert_not_in_cset_loc(interior_loc)
+
+#define shenandoah_assert_rp_isalive_installed()
+#define shenandoah_assert_rp_isalive_not_installed()
+#endif
+
+#define shenandoah_not_implemented \
+                    { fatal("Deliberately not implemented."); }
+#define shenandoah_not_implemented_return(v) \
+                    { fatal("Deliberately not implemented."); return v; }
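+
+// Usage sketch: in debug builds, shenandoah_assert_marked(interior_loc, obj) expands to
+// ShenandoahAsserts::assert_marked(interior_loc, obj, __FILE__, __LINE__), while in
+// product builds it expands to nothing, so these checks carry no release-time cost.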
+
+};
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHASSERTS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "memory/iterator.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#ifdef COMPILER1
+#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
+#endif
+#ifdef COMPILER2
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
+
+class ShenandoahBarrierSetC1;
+class ShenandoahBarrierSetC2;
+
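+// Visits each reference in an object and updates it to point at the forwardee.
+// With STOREVAL_WRITE_BARRIER, it also evacuates the target if needed and SATB-enqueues
+// the result (used while concurrent traversal is in progress); without it, it only
+// fixes up references to already-forwarded objects.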
+template <bool STOREVAL_WRITE_BARRIER>
+class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure {
+private:
+  ShenandoahHeap* _heap;
+  ShenandoahBarrierSet* _bs;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    oop o;
+    if (STOREVAL_WRITE_BARRIER) {
+      o = _heap->evac_update_with_forwarded(p);
+      if (!CompressedOops::is_null(o)) {
+        _bs->enqueue(o);
+      }
+    } else {
+      _heap->maybe_update_with_forwarded(p);
+    }
+  }
+public:
+  ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()), _bs(ShenandoahBarrierSet::barrier_set()) {
+    assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
+  }
+
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
+  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
+             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
+             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
+             NULL /* barrier_set_nmethod */,
+             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
+  _heap(heap),
+  _satb_mark_queue_set()
+{
+}
+
+ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
+  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
+  return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
+}
+
+void ShenandoahBarrierSet::print_on(outputStream* st) const {
+  st->print("ShenandoahBarrierSet");
+}
+
+bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
+  return bsn == BarrierSet::ShenandoahBarrierSet;
+}
+
+bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
+  return true;
+}
+
+template <class T, bool STOREVAL_WRITE_BARRIER>
+void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
+  assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
+  ShenandoahUpdateRefsForOopClosure<STOREVAL_WRITE_BARRIER> cl;
+  T* dst = (T*) start;
+  for (size_t i = 0; i < count; i++) {
+    cl.do_oop(dst++);
+  }
+}
+
+void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
+  assert(UseShenandoahGC, "should be enabled");
+  if (count == 0) return;
+  if (!ShenandoahCloneBarrier) return;
+
+  if (!need_update_refs_barrier()) return;
+
+  if (_heap->is_concurrent_traversal_in_progress()) {
+    ShenandoahEvacOOMScope oom_evac_scope;
+    if (UseCompressedOops) {
+      write_ref_array_loop<narrowOop, /* wb = */ true>(start, count);
+    } else {
+      write_ref_array_loop<oop,       /* wb = */ true>(start, count);
+    }
+  } else {
+    if (UseCompressedOops) {
+      write_ref_array_loop<narrowOop, /* wb = */ false>(start, count);
+    } else {
+      write_ref_array_loop<oop,       /* wb = */ false>(start, count);
+    }
+  }
+}
+
+template <class T>
+void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
+  shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc());
+  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
+    T* elem_ptr = dst;
+    for (size_t i = 0; i < count; i++, elem_ptr++) {
+      T heap_oop = RawAccess<>::oop_load(elem_ptr);
+      if (!CompressedOops::is_null(heap_oop)) {
+        enqueue(CompressedOops::decode_not_null(heap_oop));
+      }
+    }
+  }
+}
+
+void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized) {
+  if (! dest_uninitialized) {
+    write_ref_array_pre_work(dst, count);
+  }
+}
+
+void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized) {
+  if (! dest_uninitialized) {
+    write_ref_array_pre_work(dst, count);
+  }
+}
+
+template <class T>
+inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
+  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
+  if (_heap->is_concurrent_mark_in_progress()) {
+    T heap_oop = RawAccess<>::oop_load(field);
+    if (!CompressedOops::is_null(heap_oop)) {
+      enqueue(CompressedOops::decode(heap_oop));
+    }
+  }
+}
+
+// These are the more general virtual versions.
+void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
+  inline_write_ref_field_pre(field, new_val);
+}
+
+void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
+  inline_write_ref_field_pre(field, new_val);
+}
+
+void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
+  guarantee(false, "Not needed");
+}
+
+void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
+  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
+  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
+  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
+}
+
+void ShenandoahBarrierSet::write_region(MemRegion mr) {
+  assert(UseShenandoahGC, "should be enabled");
+  if (!ShenandoahCloneBarrier) return;
+  if (! need_update_refs_barrier()) return;
+
+  // This is called when cloning an object (see jvm.cpp) after the clone
+  // has been made. We are not interested in any 'previous value', because
+  // it would be NULL in any case. But we *are* interested in any oop*
+  // that potentially needs to be updated.
+
+  oop obj = oop(mr.start());
+  shenandoah_assert_correct(NULL, obj);
+  if (_heap->is_concurrent_traversal_in_progress()) {
+    ShenandoahEvacOOMScope oom_evac_scope;
+    ShenandoahUpdateRefsForOopClosure</* wb = */ true> cl;
+    obj->oop_iterate(&cl);
+  } else {
+    ShenandoahUpdateRefsForOopClosure</* wb = */ false> cl;
+    obj->oop_iterate(&cl);
+  }
+}
+
+oop ShenandoahBarrierSet::read_barrier(oop src) {
+  // Check for forwarded objects, because on the Full GC path we might deal with
+  // non-trivial fwdptrs that contain Full-GC-specific metadata. We could check for
+  // is_full_gc_in_progress() instead, but checking has_forwarded_objects() also
+  // covers the stable-heap case, which provides a bit of a performance improvement.
+  if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
+    return ShenandoahBarrierSet::resolve_forwarded(src);
+  } else {
+    return src;
+  }
+}
+
+bool ShenandoahBarrierSet::obj_equals(oop obj1, oop obj2) {
+  bool eq = oopDesc::equals_raw(obj1, obj2);
+  if (! eq && ShenandoahAcmpBarrier) {
+    OrderAccess::loadload();
+    obj1 = resolve_forwarded(obj1);
+    obj2 = resolve_forwarded(obj2);
+    eq = oopDesc::equals_raw(obj1, obj2);
+  }
+  return eq;
+}
+
+oop ShenandoahBarrierSet::write_barrier_mutator(oop obj) {
+  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
+  assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
+  shenandoah_assert_in_cset(NULL, obj);
+
+  oop fwd = resolve_forwarded_not_null(obj);
+  if (oopDesc::equals_raw(obj, fwd)) {
+    ShenandoahEvacOOMScope oom_evac_scope;
+
+    Thread* thread = Thread::current();
+    oop res_oop = _heap->evacuate_object(obj, thread);
+
+    // Since we are already here and have paid the price of getting through the runtime
+    // call adapters and acquiring the oom-scope, it makes sense to try to evacuate more
+    // adjacent objects, thus amortizing the overhead. For sparsely live heaps, scan costs
+    // easily dominate total assist costs, and can introduce a lot of evacuation latency.
+    // This is why we only scan the _nearest_ N objects, regardless of whether they are
+    // eligible for evac or not. The scan itself should also avoid touching non-marked
+    // objects below TAMS, because their metadata (notably, klasses) may already be incorrect.
+
+    size_t max = ShenandoahEvacAssist;
+    if (max > 0) {
+      // Traversal is special: it uses incomplete marking context, because it coalesces evac with mark.
+      // Other code uses complete marking context, because evac happens after the mark.
+      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
+                                      _heap->marking_context() : _heap->complete_marking_context();
+
+      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
+      assert(r->is_cset(), "sanity");
+
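+      // Objects are laid out with the Brooks pointer slot immediately before them:
+      // step over this object's body plus the next slot to land on the next object start.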
+      HeapWord* cur = (HeapWord*)obj + obj->size() + ShenandoahBrooksPointer::word_size();
+
+      size_t count = 0;
+      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
+        oop cur_oop = oop(cur);
+        if (oopDesc::equals_raw(cur_oop, resolve_forwarded_not_null(cur_oop))) {
+          _heap->evacuate_object(cur_oop, thread);
+        }
+        cur = cur + cur_oop->size() + ShenandoahBrooksPointer::word_size();
+      }
+    }
+
+    return res_oop;
+  }
+  return fwd;
+}
+
+oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
+  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
+  if (!CompressedOops::is_null(obj)) {
+    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
+    oop fwd = resolve_forwarded_not_null(obj);
+    if (evac_in_progress &&
+        _heap->in_collection_set(obj) &&
+        oopDesc::equals_raw(obj, fwd)) {
+      Thread *t = Thread::current();
+      if (t->is_GC_task_thread()) {
+        return _heap->evacuate_object(obj, t);
+      } else {
+        ShenandoahEvacOOMScope oom_evac_scope;
+        return _heap->evacuate_object(obj, t);
+      }
+    } else {
+      return fwd;
+    }
+  } else {
+    return obj;
+  }
+}
+
+oop ShenandoahBarrierSet::write_barrier(oop obj) {
+  if (ShenandoahWriteBarrier && _heap->has_forwarded_objects()) {
+    return write_barrier_impl(obj);
+  } else {
+    return obj;
+  }
+}
+
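+// The storeval barrier comes in two flavors: the enqueue flavor SATB-enqueues the
+// (write-barriered) value being stored, which traversal mode presumably relies on to
+// keep newly stored references marked; the read-barrier flavor merely resolves the
+// forwardee of the stored value.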
+oop ShenandoahBarrierSet::storeval_barrier(oop obj) {
+  if (ShenandoahStoreValEnqueueBarrier) {
+    if (!CompressedOops::is_null(obj)) {
+      obj = write_barrier(obj);
+      enqueue(obj);
+    }
+  }
+  if (ShenandoahStoreValReadBarrier) {
+    obj = resolve_forwarded(obj);
+  }
+  return obj;
+}
+
+void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
+  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
+    enqueue(obj);
+  }
+}
+
+void ShenandoahBarrierSet::enqueue(oop obj) {
+  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
+  if (!_satb_mark_queue_set.is_active()) return;
+
+  // Filter marked objects before hitting the SATB queues. The same predicate would
+  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
+  // filtering here helps to avoid wasteful SATB queueing work to begin with.
+  if (!_heap->requires_marking(obj)) return;
+
+  Thread* thr = Thread::current();
+  if (thr->is_Java_thread()) {
+    ShenandoahThreadLocalData::satb_mark_queue(thr).enqueue(obj);
+  } else {
+    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
+    _satb_mark_queue_set.shared_satb_queue()->enqueue(obj);
+  }
+}
+
+void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
+  // Create thread local data
+  ShenandoahThreadLocalData::create(thread);
+}
+
+void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
+  // Destroy thread local data
+  ShenandoahThreadLocalData::destroy(thread);
+}
+
+void ShenandoahBarrierSet::on_thread_attach(JavaThread* thread) {
+  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
+  assert(!ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "SATB queue should not be active");
+  assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_empty(), "SATB queue should be empty");
+  if (ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
+    ShenandoahThreadLocalData::satb_mark_queue(thread).set_active(true);
+  }
+  ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
+  ShenandoahThreadLocalData::initialize_gclab(thread);
+}
+
+void ShenandoahBarrierSet::on_thread_detach(JavaThread* thread) {
+  ShenandoahThreadLocalData::satb_mark_queue(thread).flush();
+  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
+  if (gclab != NULL) {
+    gclab->retire();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
+
+#include "gc/shared/accessBarrierSupport.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp"
+
+class ShenandoahBarrierSetAssembler;
+
+class ShenandoahBarrierSet: public BarrierSet {
+private:
+  enum ArrayCopyStoreValMode {
+    NONE,
+    READ_BARRIER,
+    WRITE_BARRIER
+  };
+
+  ShenandoahHeap* _heap;
+  ShenandoahSATBMarkQueueSet _satb_mark_queue_set;
+
+public:
+  ShenandoahBarrierSet(ShenandoahHeap* heap);
+
+  static ShenandoahBarrierSetAssembler* assembler();
+
+  inline static ShenandoahBarrierSet* barrier_set() {
+    return barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
+  }
+
+  static ShenandoahSATBMarkQueueSet& satb_mark_queue_set() {
+    return barrier_set()->_satb_mark_queue_set;
+  }
+
+  void print_on(outputStream* st) const;
+
+  bool is_a(BarrierSet::Name bsn);
+
+  bool is_aligned(HeapWord* hw);
+
+  void write_ref_array(HeapWord* start, size_t count);
+
+  template <class T> void
+  write_ref_array_pre_work(T* dst, size_t count);
+
+  void write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized);
+
+  void write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized);
+
+  // We export this to make it available in cases where the static
+  // type of the barrier set is known.  Note that it is non-virtual.
+  template <class T> inline void inline_write_ref_field_pre(T* field, oop new_val);
+
+  // These are the more general virtual versions.
+  void write_ref_field_pre_work(oop* field, oop new_val);
+  void write_ref_field_pre_work(narrowOop* field, oop new_val);
+  void write_ref_field_pre_work(void* field, oop new_val);
+
+  void write_ref_field_work(void* v, oop o, bool release = false);
+  void write_region(MemRegion mr);
+
+  virtual void on_thread_create(Thread* thread);
+  virtual void on_thread_destroy(Thread* thread);
+  virtual void on_thread_attach(JavaThread* thread);
+  virtual void on_thread_detach(JavaThread* thread);
+
+  virtual oop read_barrier(oop src);
+
+  static inline oop resolve_forwarded_not_null(oop p);
+  static inline oop resolve_forwarded(oop p);
+
+  virtual oop write_barrier(oop obj);
+
+  oop write_barrier_mutator(oop obj);
+
+  virtual oop storeval_barrier(oop obj);
+
+  virtual void keep_alive_barrier(oop obj);
+
+  bool obj_equals(oop obj1, oop obj2);
+
+#ifdef CHECK_UNHANDLED_OOPS
+  bool oop_equals_operator_allowed() { return !ShenandoahVerifyObjectEquals; }
+#endif
+
+  void enqueue(oop obj);
+
+private:
+  inline bool need_update_refs_barrier();
+
+  template <class T, bool STOREVAL_WRITE_BARRIER>
+  void write_ref_array_loop(HeapWord* start, size_t count);
+
+  oop write_barrier_impl(oop obj);
+
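+  // Loads of weak/phantom references must keep the referent alive for the concurrent
+  // marker, unless the access explicitly opts out with AS_NO_KEEPALIVE.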
+  static void keep_alive_if_weak(DecoratorSet decorators, oop value) {
+    assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+    const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
+    const bool peek              = (decorators & AS_NO_KEEPALIVE) != 0;
+    if (!peek && !on_strong_oop_ref && value != NULL) {
+      ShenandoahBarrierSet::barrier_set()->keep_alive_barrier(value);
+    }
+  }
+
+  template <typename T>
+  bool arraycopy_loop_1(T* src, T* dst, size_t length, Klass* bound,
+                        bool checkcast, bool satb, bool disjoint, ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode);
+
+  template <typename T, bool CHECKCAST>
+  bool arraycopy_loop_2(T* src, T* dst, size_t length, Klass* bound,
+                        bool satb, bool disjoint, ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode);
+
+  template <typename T, bool CHECKCAST, bool SATB>
+  bool arraycopy_loop_3(T* src, T* dst, size_t length, Klass* bound,
+                        bool disjoint, ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode);
+
+  template <typename T, bool CHECKCAST, bool SATB, ShenandoahBarrierSet::ArrayCopyStoreValMode STOREVAL_MODE>
+  bool arraycopy_loop(T* src, T* dst, size_t length, Klass* bound, bool disjoint);
+
+  template <typename T, bool CHECKCAST, bool SATB, ShenandoahBarrierSet::ArrayCopyStoreValMode STOREVAL_MODE>
+  bool arraycopy_element(T* cur_src, T* cur_dst, Klass* bound, Thread* thread);
+
+public:
+  // Callbacks for runtime accesses.
+  template <DecoratorSet decorators, typename BarrierSetT = ShenandoahBarrierSet>
+  class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
+    typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
+
+  public:
+    // Primitive heap accesses. These accessors get resolved when
+    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
+    // not an oop_* overload, and the barrier strength is AS_NORMAL.
+    template <typename T>
+    static T load_in_heap(T* addr) {
+      ShouldNotReachHere();
+      return Raw::template load<T>(addr);
+    }
+
+    template <typename T>
+    static T load_in_heap_at(oop base, ptrdiff_t offset) {
+      base = ShenandoahBarrierSet::resolve_forwarded(base);
+      return Raw::template load_at<T>(base, offset);
+    }
+
+    template <typename T>
+    static void store_in_heap(T* addr, T value) {
+      ShouldNotReachHere();
+      Raw::store(addr, value);
+    }
+
+    template <typename T>
+    static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
+      base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
+      Raw::store_at(base, offset, value);
+    }
+
+    template <typename T>
+    static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
+      ShouldNotReachHere();
+      return Raw::atomic_cmpxchg(new_value, addr, compare_value);
+    }
+
+    template <typename T>
+    static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
+      return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
+    }
+
+    template <typename T>
+    static T atomic_xchg_in_heap(T new_value, T* addr) {
+      ShouldNotReachHere();
+      return Raw::atomic_xchg(new_value, addr);
+    }
+
+    template <typename T>
+    static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
+      base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
+      return Raw::atomic_xchg_at(new_value, base, offset);
+    }
+
+    template <typename T>
+    static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                  size_t length);
+
+    // Heap oop accesses. These accessors get resolved when
+    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
+    // an oop_* overload, and the barrier strength is AS_NORMAL.
+    template <typename T>
+    static oop oop_load_in_heap(T* addr) {
+      oop value = Raw::template oop_load<oop>(addr);
+      keep_alive_if_weak(decorators, value);
+      return value;
+    }
+
+    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
+      base = ShenandoahBarrierSet::resolve_forwarded(base);
+      oop value = Raw::template oop_load_at<oop>(base, offset);
+      keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
+      return value;
+    }
+
+    template <typename T>
+    static void oop_store_in_heap(T* addr, oop value) {
+      ShenandoahBarrierSet::barrier_set()->write_ref_field_pre_work(addr, value);
+      Raw::oop_store(addr, value);
+    }
+
+    static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
+      base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
+      value = ShenandoahBarrierSet::barrier_set()->storeval_barrier(value);
+
+      oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
+    }
+
+    template <typename T>
+    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
+
+    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
+      base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
+      new_value = ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
+      return oop_atomic_cmpxchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset), compare_value);
+    }
+
+    template <typename T>
+    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
+
+    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
+      base = ShenandoahBarrierSet::barrier_set()->write_barrier(base);
+      new_value = ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
+      return oop_atomic_xchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
+    }
+
+    template <typename T>
+    static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                      size_t length);
+
+    // Clone barrier support
+    static void clone_in_heap(oop src, oop dst, size_t size);
+
+    // Needed for loads on non-heap weak references
+    template <typename T>
+    static oop oop_load_not_in_heap(T* addr) {
+      oop value = Raw::oop_load_not_in_heap(addr);
+      keep_alive_if_weak(decorators, value);
+      return value;
+    }
+
+    static oop resolve(oop obj) {
+      return ShenandoahBarrierSet::barrier_set()->write_barrier(obj);
+    }
+
+    static bool equals(oop o1, oop o2) {
+      return ShenandoahBarrierSet::barrier_set()->obj_equals(o1, o2);
+    }
+
+  };
+
+};
+
+template<>
+struct BarrierSet::GetName<ShenandoahBarrierSet> {
+  static const BarrierSet::Name value = BarrierSet::ShenandoahBarrierSet;
+};
+
+template<>
+struct BarrierSet::GetType<BarrierSet::ShenandoahBarrierSet> {
+  typedef ::ShenandoahBarrierSet type;
+};
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP
+
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+
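+// The clone and array-copy barriers only need to fix up references while the heap can
+// contain forwarded objects: during update-refs, during concurrent traversal, or when
+// concurrent mark runs while forwarded objects are still around.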
+bool ShenandoahBarrierSet::need_update_refs_barrier() {
+  return _heap->is_update_refs_in_progress() ||
+         _heap->is_concurrent_traversal_in_progress() ||
+         (_heap->is_concurrent_mark_in_progress() && _heap->has_forwarded_objects());
+}
+
+inline oop ShenandoahBarrierSet::resolve_forwarded_not_null(oop p) {
+  return ShenandoahBrooksPointer::forwardee(p);
+}
+
+inline oop ShenandoahBarrierSet::resolve_forwarded(oop p) {
+  if (((HeapWord*) p) != NULL) {
+    return resolve_forwarded_not_null(p);
+  } else {
+    return p;
+  }
+}
+
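+// CAS that tolerates from-space/to-space aliases: if the CAS fails, but the witnessed
+// value and the compare value resolve to the same forwardee, retry with the witnessed
+// value; the failure is only genuine once the forwardees actually differ. On success,
+// the previous value is SATB-enqueued.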
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
+  oop res;
+  oop expected = compare_value;
+  do {
+    compare_value = expected;
+    res = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+    expected = res;
+  } while ((! oopDesc::equals_raw(compare_value, expected)) && oopDesc::equals_raw(resolve_forwarded(compare_value), resolve_forwarded(expected)));
+  if (oopDesc::equals_raw(expected, compare_value)) {
+    if (ShenandoahSATBBarrier && !CompressedOops::is_null(compare_value)) {
+      ShenandoahBarrierSet::barrier_set()->enqueue(compare_value);
+    }
+  }
+  return res;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+  oop previous = Raw::oop_atomic_xchg(new_value, addr);
+  if (ShenandoahSATBBarrier) {
+    if (!CompressedOops::is_null(previous)) {
+      ShenandoahBarrierSet::barrier_set()->enqueue(previous);
+    }
+  }
+  return previous;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                                                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                                                                     size_t length) {
+  if (!CompressedOops::is_null(src_obj)) {
+    src_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->read_barrier(src_obj));
+  }
+  if (!CompressedOops::is_null(dst_obj)) {
+    dst_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->write_barrier(dst_obj));
+  }
+  Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length);
+}
+
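+// The arraycopy_loop_* chain lifts the runtime flags (checkcast, SATB, storeval mode)
+// into template parameters one step at a time, so the innermost per-element loop is
+// compiled as a fully specialized variant with no per-element flag checks.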
+template <typename T>
+bool ShenandoahBarrierSet::arraycopy_loop_1(T* src, T* dst, size_t length, Klass* bound,
+                                            bool checkcast, bool satb, bool disjoint,
+                                            ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode) {
+  if (checkcast) {
+    return arraycopy_loop_2<T, true>(src, dst, length, bound, satb, disjoint, storeval_mode);
+  } else {
+    return arraycopy_loop_2<T, false>(src, dst, length, bound, satb, disjoint, storeval_mode);
+  }
+}
+
+template <typename T, bool CHECKCAST>
+bool ShenandoahBarrierSet::arraycopy_loop_2(T* src, T* dst, size_t length, Klass* bound,
+                                            bool satb, bool disjoint,
+                                            ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode) {
+  if (satb) {
+    return arraycopy_loop_3<T, CHECKCAST, true>(src, dst, length, bound, disjoint, storeval_mode);
+  } else {
+    return arraycopy_loop_3<T, CHECKCAST, false>(src, dst, length, bound, disjoint, storeval_mode);
+  }
+}
+
+template <typename T, bool CHECKCAST, bool SATB>
+bool ShenandoahBarrierSet::arraycopy_loop_3(T* src, T* dst, size_t length, Klass* bound, bool disjoint,
+                                            ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode) {
+  switch (storeval_mode) {
+    case NONE:
+      return arraycopy_loop<T, CHECKCAST, SATB, NONE>(src, dst, length, bound, disjoint);
+    case READ_BARRIER:
+      return arraycopy_loop<T, CHECKCAST, SATB, READ_BARRIER>(src, dst, length, bound, disjoint);
+    case WRITE_BARRIER:
+      return arraycopy_loop<T, CHECKCAST, SATB, WRITE_BARRIER>(src, dst, length, bound, disjoint);
+    default:
+      ShouldNotReachHere();
+      return true; // happy compiler
+  }
+}
+
+template <typename T, bool CHECKCAST, bool SATB, ShenandoahBarrierSet::ArrayCopyStoreValMode STOREVAL_MODE>
+bool ShenandoahBarrierSet::arraycopy_loop(T* src, T* dst, size_t length, Klass* bound, bool disjoint) {
+  Thread* thread = Thread::current();
+
+  ShenandoahEvacOOMScope oom_evac_scope;
+
+  // We need to handle four cases:
+  //
+  // a) src < dst, conjoint, can only copy backward
+  //   [...src...]
+  //         [...dst...]
+  //
+  // b) src < dst, disjoint, can only copy forward, because types may mismatch
+  //   [...src...]
+  //              [...dst...]
+  //
+  // c) src > dst, conjoint, can copy forward only
+  //         [...src...]
+  //   [...dst...]
+  //
+  // d) src > dst, disjoint, can only copy forward, because types may mismatch
+  //              [...src...]
+  //   [...dst...]
+  //
+  if (src > dst || disjoint) {
+    // copy forward:
+    T* cur_src = src;
+    T* cur_dst = dst;
+    T* src_end = src + length;
+    for (; cur_src < src_end; cur_src++, cur_dst++) {
+      if (!arraycopy_element<T, CHECKCAST, SATB, STOREVAL_MODE>(cur_src, cur_dst, bound, thread)) {
+        return false;
+      }
+    }
+  } else {
+    // copy backward:
+    T* cur_src = src + length - 1;
+    T* cur_dst = dst + length - 1;
+    for (; cur_src >= src; cur_src--, cur_dst--) {
+      if (!arraycopy_element<T, CHECKCAST, SATB, STOREVAL_MODE>(cur_src, cur_dst, bound, thread)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+template <typename T, bool CHECKCAST, bool SATB, ShenandoahBarrierSet::ArrayCopyStoreValMode STOREVAL_MODE>
+bool ShenandoahBarrierSet::arraycopy_element(T* cur_src, T* cur_dst, Klass* bound, Thread* thread) {
+  T o = RawAccess<>::oop_load(cur_src);
+
+  if (SATB) {
+    T prev = RawAccess<>::oop_load(cur_dst);
+    if (!CompressedOops::is_null(prev)) {
+      oop prev_obj = CompressedOops::decode_not_null(prev);
+      enqueue(prev_obj);
+    }
+  }
+
+  if (!CompressedOops::is_null(o)) {
+    oop obj = CompressedOops::decode_not_null(o);
+
+    if (CHECKCAST) {
+      assert(bound != NULL, "need element klass for checkcast");
+      if (!oopDesc::is_instanceof_or_null(obj, bound)) {
+        return false;
+      }
+    }
+
+    switch (STOREVAL_MODE) {
+    case NONE:
+      break;
+    case READ_BARRIER:
+      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      break;
+    case WRITE_BARRIER:
+      if (_heap->in_collection_set(obj)) {
+        oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+        if (oopDesc::equals_raw(forw, obj)) {
+          forw = _heap->evacuate_object(forw, thread);
+        }
+        obj = forw;
+      }
+      enqueue(obj);
+      break;
+    default:
+      ShouldNotReachHere();
+    }
+
+    RawAccess<IS_NOT_NULL>::oop_store(cur_dst, obj);
+  } else {
+    // Store null.
+    RawAccess<>::oop_store(cur_dst, o);
+  }
+  return true;
+}
+
+// Clone barrier support
+template <DecoratorSet decorators, typename BarrierSetT>
+void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
+  src = arrayOop(ShenandoahBarrierSet::barrier_set()->read_barrier(src));
+  dst = arrayOop(ShenandoahBarrierSet::barrier_set()->write_barrier(dst));
+  Raw::clone(src, dst, size);
+  ShenandoahBarrierSet::barrier_set()->write_region(MemRegion((HeapWord*) dst, size));
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+bool ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                                                                         arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                                                                         size_t length) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  if (!CompressedOops::is_null(src_obj)) {
+    src_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->read_barrier(src_obj));
+  }
+  if (!CompressedOops::is_null(dst_obj)) {
+    dst_obj = arrayOop(ShenandoahBarrierSet::barrier_set()->write_barrier(dst_obj));
+  }
+
+  bool satb = ShenandoahSATBBarrier && heap->is_concurrent_mark_in_progress();
+  bool checkcast = HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value;
+  bool disjoint = HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value;
+  ArrayCopyStoreValMode storeval_mode;
+  if (heap->has_forwarded_objects()) {
+    if (heap->is_concurrent_traversal_in_progress()) {
+      storeval_mode = WRITE_BARRIER;
+    } else if (heap->is_concurrent_mark_in_progress() || heap->is_update_refs_in_progress()) {
+      storeval_mode = READ_BARRIER;
+    } else {
+      assert(heap->is_idle() || heap->is_evacuation_in_progress(), "must not have anything in progress");
+      storeval_mode = NONE; // E.g. during evac or outside cycle
+    }
+  } else {
+    assert(heap->is_stable() || heap->is_concurrent_mark_in_progress(), "must not have anything in progress");
+    storeval_mode = NONE;
+  }
+
+  if (!satb && !checkcast && storeval_mode == NONE) {
+    // Short-circuit to bulk copy.
+    return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length);
+  }
+
+  src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
+  dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
+
+  Klass* bound = objArrayOop(dst_obj)->element_klass();
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  return bs->arraycopy_loop_1(src_raw, dst_raw, length, bound, checkcast, satb, disjoint, storeval_mode);
+}
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetAssembler.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_HPP
+
+#include "utilities/macros.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+
+#include CPU_HEADER(gc/shenandoah/shenandoahBarrierSetAssembler)
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBrooksPointer.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHBROOKSPOINTER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHBROOKSPOINTER_HPP
+
+#include "oops/oop.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class ShenandoahBrooksPointer {
+  /*
+   * Notes:
+   *
+   *  a. It is important to have byte_offset and word_offset return constant
+   *     expressions, because that allows the compiler to constant-fold forwarding
+   *     ptr accesses. This is not a problem for JIT compilers, which generate the
+   *     code once, but it is problematic in GC hotpath code.
+   *
+   *  b. With filler object mechanics, we may need to allocate more space for
+   *     the forwarding ptr to meet alignment requirements for objects. This
+   *     means *_offset and *_size calls are NOT interchangeable. The accesses
+   *     to forwarding ptrs should always be via *_offset. Storage size
+   *     calculations should always be via *_size.
+   */
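+
+  /*
+   * Layout sketch: each object is preceded by a forwarding pointer slot, one word at
+   * offset -1 from the object start (possibly with extra padding for alignment, see
+   * note b): [fwdptr][object header][fields...]. The fwdptr points back at the object
+   * itself until the object is evacuated.
+   */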
+
+public:
+  /* Offset from the object start, in HeapWords. */
+  static inline int word_offset() {
+    return -1; // exactly one HeapWord
+  }
+
+  /* Offset from the object start, in bytes. */
+  static inline int byte_offset() {
+    return -HeapWordSize; // exactly one HeapWord
+  }
+
+  /* Allocated size, in HeapWords. */
+  static inline uint word_size() {
+    return (uint) MinObjAlignment;
+  }
+
+  /* Allocated size, in bytes */
+  static inline uint byte_size() {
+    return (uint) MinObjAlignmentInBytes;
+  }
+
+  /* Assert basic stuff once at startup. */
+  static void initial_checks() {
+    guarantee (MinObjAlignment > 0, "sanity, word_size is correct");
+    guarantee (MinObjAlignmentInBytes > 0, "sanity, byte_size is correct");
+  }
+
+  /* Initializes Brooks pointer (to self).
+   */
+  static inline void initialize(oop obj);
+
+  /* Gets forwardee from the given object.
+   */
+  static inline oop forwardee(oop obj);
+
+  /* Tries to atomically update forwardee in $holder object to $update.
+   * Assumes $holder points at itself.
+   * Asserts $holder is in from-space.
+   * Asserts $update is in to-space.
+   */
+  static inline oop try_update_forwardee(oop obj, oop update);
+
+  /* Sets raw value for forwardee slot.
+   * THIS IS DANGEROUS: USERS HAVE TO INITIALIZE/SET FORWARDEE BACK AFTER THEY ARE DONE.
+   */
+  static inline void set_raw(oop obj, HeapWord* update);
+
+  /* Returns the raw value from forwardee slot.
+   */
+  static inline HeapWord* get_raw(oop obj);
+
+  /* Returns the raw value from forwardee slot without any checks.
+   * Used for quick verification.
+   */
+  static inline HeapWord* get_raw_unchecked(oop obj);
+
+private:
+  static inline HeapWord** brooks_ptr_addr(oop obj);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHBROOKSPOINTER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBrooksPointer.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHBROOKSPOINTER_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHBROOKSPOINTER_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "runtime/atomic.hpp"
+
+inline HeapWord** ShenandoahBrooksPointer::brooks_ptr_addr(oop obj) {
+  return (HeapWord**)((HeapWord*) obj + word_offset());
+}
+
+inline void ShenandoahBrooksPointer::initialize(oop obj) {
+  shenandoah_assert_in_heap(NULL, obj);
+  *brooks_ptr_addr(obj) = (HeapWord*) obj;
+}
+
+inline void ShenandoahBrooksPointer::set_raw(oop obj, HeapWord* update) {
+  shenandoah_assert_in_heap(NULL, obj);
+  *brooks_ptr_addr(obj) = update;
+}
+
+inline HeapWord* ShenandoahBrooksPointer::get_raw(oop obj) {
+  shenandoah_assert_in_heap(NULL, obj);
+  return *brooks_ptr_addr(obj);
+}
+
+inline HeapWord* ShenandoahBrooksPointer::get_raw_unchecked(oop obj) {
+  return *brooks_ptr_addr(obj);
+}
+
+inline oop ShenandoahBrooksPointer::forwardee(oop obj) {
+  shenandoah_assert_correct(NULL, obj);
+  return oop(*brooks_ptr_addr(obj));
+}
+
+inline oop ShenandoahBrooksPointer::try_update_forwardee(oop obj, oop update) {
+  oop result = (oop) Atomic::cmpxchg(update, (oop*)brooks_ptr_addr(obj), obj);
+  shenandoah_assert_correct_except(NULL, obj, !oopDesc::equals_raw(result, obj));
+  return result;
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHBROOKSPOINTER_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeCache.hpp"
+#include "code/nmethod.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahCodeRoots.hpp"
+#include "memory/resourceArea.hpp"
+
+ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
+  _length = heaps->length();
+  _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
+  for (int h = 0; h < _length; h++) {
+    _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
+  }
+}
+
+ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
+  FREE_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _iters);
+}
+
+void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
+  for (int c = 0; c < _length; c++) {
+    _iters[c].parallel_blobs_do(f);
+  }
+}
+
+ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
+        _heap(heap), _claimed_idx(0), _finished(false) {
+}
+
+void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
+
+  /*
+   * Parallel code heap walk.
+   *
+   * This code makes all threads scan all code heaps, but only one thread executes the
+   * closure on a given blob. This is achieved by recording "claimed" blocks: the thread
+   * that claims a block processes all blobs in it, while other threads fast-forward to
+   * the next block without processing.
+   *
+   * Threads that arrive late return immediately if the iterator is already finished.
+   */
+
+  if (_finished) {
+    return;
+  }
+
+  int stride = 256; // educated guess
+  int stride_mask = stride - 1;
+  assert (is_power_of_2(stride), "sanity");
+
+  int count = 0;
+  bool process_block = true;
+
+  for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
+    int current = count++;
+    if ((current & stride_mask) == 0) {
+      process_block = (current >= _claimed_idx) &&
+                      (Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current);
+    }
+    if (process_block) {
+      if (cb->is_alive()) {
+        f->do_code_blob(cb);
+#ifdef ASSERT
+        if (cb->is_nmethod())
+          Universe::heap()->verify_nmethod((nmethod*)cb);
+#endif
+      }
+    }
+  }
+
+  _finished = true;
+}
+
+class ShenandoahNMethodOopDetector : public OopClosure {
+private:
+  ResourceMark rm; // For growable array allocation below.
+  GrowableArray<oop*> _oops;
+
+public:
+  ShenandoahNMethodOopDetector() : _oops(10) {}
+
+  void do_oop(oop* o) {
+    _oops.append(o);
+  }
+  void do_oop(narrowOop* o) {
+    fatal("NMethods should not have compressed oops embedded.");
+  }
+
+  GrowableArray<oop*>* oops() {
+    return &_oops;
+  }
+
+  bool has_oops() {
+    return !_oops.is_empty();
+  }
+};
+
+class ShenandoahNMethodOopInitializer : public OopClosure {
+private:
+  ShenandoahHeap* const _heap;
+
+public:
+  ShenandoahNMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {}
+
+private:
+  template <class T>
+  inline void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (! CompressedOops::is_null(o)) {
+      oop obj1 = CompressedOops::decode_not_null(o);
+      oop obj2 = ShenandoahBarrierSet::barrier_set()->write_barrier(obj1);
+      if (! oopDesc::equals_raw(obj1, obj2)) {
+        shenandoah_assert_not_in_cset(NULL, obj2);
+        RawAccess<IS_NOT_NULL>::oop_store(p, obj2);
+        if (_heap->is_concurrent_traversal_in_progress()) {
+          ShenandoahBarrierSet::barrier_set()->enqueue(obj2);
+        }
+      }
+    }
+  }
+
+public:
+  void do_oop(oop* o) {
+    do_oop_work(o);
+  }
+  void do_oop(narrowOop* o) {
+    do_oop_work(o);
+  }
+};
+
+ShenandoahCodeRoots::PaddedLock ShenandoahCodeRoots::_recorded_nms_lock;
+GrowableArray<ShenandoahNMethod*>* ShenandoahCodeRoots::_recorded_nms;
+
+void ShenandoahCodeRoots::initialize() {
+  _recorded_nms_lock._lock = 0;
+  _recorded_nms = new (ResourceObj::C_HEAP, mtGC) GrowableArray<ShenandoahNMethod*>(100, true, mtGC);
+}
+
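+// ShenandoahCodeRootsStyle selects the scanning strategy: styles 0 and 1 walk the whole
+// code cache (serially or with the parallel iterator) and need no bookkeeping here;
+// style 2 additionally maintains the list of nmethods that embed oops, so scans can
+// visit only those.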
+void ShenandoahCodeRoots::add_nmethod(nmethod* nm) {
+  switch (ShenandoahCodeRootsStyle) {
+    case 0:
+    case 1: {
+      ShenandoahNMethodOopInitializer init;
+      nm->oops_do(&init);
+      nm->fix_oop_relocations();
+      break;
+    }
+    case 2: {
+      ShenandoahNMethodOopDetector detector;
+      nm->oops_do(&detector);
+
+      if (detector.has_oops()) {
+        ShenandoahNMethodOopInitializer init;
+        nm->oops_do(&init);
+        nm->fix_oop_relocations();
+
+        ShenandoahNMethod* nmr = new ShenandoahNMethod(nm, detector.oops());
+        nmr->assert_alive_and_correct();
+
+        ShenandoahCodeRootsLock lock(true);
+
+        int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod);
+        if (idx != -1) {
+          ShenandoahNMethod* old = _recorded_nms->at(idx);
+          _recorded_nms->at_put(idx, nmr);
+          delete old;
+        } else {
+          _recorded_nms->append(nmr);
+        }
+      }
+      break;
+    }
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+void ShenandoahCodeRoots::remove_nmethod(nmethod* nm) {
+  switch (ShenandoahCodeRootsStyle) {
+    case 0:
+    case 1: {
+      break;
+    }
+    case 2: {
+      ShenandoahNMethodOopDetector detector;
+      nm->oops_do(&detector, /* allow_zombie = */ true);
+
+      if (detector.has_oops()) {
+        ShenandoahCodeRootsLock lock(true);
+
+        int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod);
+        assert(idx != -1, "nmethod " PTR_FORMAT " should be registered", p2i(nm));
+        ShenandoahNMethod* old = _recorded_nms->at(idx);
+        old->assert_same_oops(detector.oops());
+        _recorded_nms->delete_at(idx);
+        delete old;
+      }
+      break;
+    }
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
+        _heap(ShenandoahHeap::heap()),
+        _par_iterator(CodeCache::heaps()),
+        _claimed(0) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
+  assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
+  switch (ShenandoahCodeRootsStyle) {
+    case 0:
+    case 1: {
+      // No need to do anything here
+      break;
+    }
+    case 2: {
+      ShenandoahCodeRoots::acquire_lock(false);
+      break;
+    }
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
+  switch (ShenandoahCodeRootsStyle) {
+    case 0:
+    case 1: {
+      // No need to do anything here
+      break;
+    }
+    case 2: {
+      ShenandoahCodeRoots::release_lock(false);
+      break;
+    }
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+template<bool CSET_FILTER>
+void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) {
+  switch (ShenandoahCodeRootsStyle) {
+    case 0: {
+      if (_seq_claimed.try_set()) {
+        CodeCache::blobs_do(f);
+      }
+      break;
+    }
+    case 1: {
+      _par_iterator.parallel_blobs_do(f);
+      break;
+    }
+    case 2: {
+      ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f);
+      break;
+    }
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+ShenandoahAllCodeRootsIterator ShenandoahCodeRoots::iterator() {
+  return ShenandoahAllCodeRootsIterator();
+}
+
+ShenandoahCsetCodeRootsIterator ShenandoahCodeRoots::cset_iterator() {
+  return ShenandoahCsetCodeRootsIterator();
+}
+
+void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
+  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f);
+}
+
+void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
+  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f);
+}
+
+template <bool CSET_FILTER>
+void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
+
+  size_t stride = 256; // educated guess
+
+  GrowableArray<ShenandoahNMethod*>* list = ShenandoahCodeRoots::_recorded_nms;
+
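+  // Workers claim disjoint [cur, cur + stride) windows of the list via atomic add,
+  // so each recorded nmethod is visited by exactly one worker.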
+  size_t max = (size_t)list->length();
+  while (_claimed < max) {
+    size_t cur = Atomic::add(stride, &_claimed) - stride;
+    size_t start = cur;
+    size_t end = MIN2(cur + stride, max);
+    if (start >= max) break;
+
+    for (size_t idx = start; idx < end; idx++) {
+      ShenandoahNMethod* nmr = list->at((int) idx);
+      nmr->assert_alive_and_correct();
+
+      if (CSET_FILTER && !nmr->has_cset_oops(_heap)) {
+        continue;
+      }
+
+      f->do_code_blob(nmr->nm());
+    }
+  }
+}
+
+ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>* oops) {
+  _nm = nm;
+  _oops = NEW_C_HEAP_ARRAY(oop*, oops->length(), mtGC);
+  _oops_count = oops->length();
+  for (int c = 0; c < _oops_count; c++) {
+    _oops[c] = oops->at(c);
+  }
+}
+
+ShenandoahNMethod::~ShenandoahNMethod() {
+  if (_oops != NULL) {
+    FREE_C_HEAP_ARRAY(oop*, _oops);
+  }
+}
+
+bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) {
+  for (int c = 0; c < _oops_count; c++) {
+    oop o = RawAccess<>::oop_load(_oops[c]);
+    if (heap->in_collection_set(o)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+#ifdef ASSERT
+void ShenandoahNMethod::assert_alive_and_correct() {
+  assert(_nm->is_alive(), "only alive nmethods here");
+  assert(_oops_count > 0, "should have filtered nmethods without oops before");
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  for (int c = 0; c < _oops_count; c++) {
+    oop *loc = _oops[c];
+    assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
+    oop o = RawAccess<>::oop_load(loc);
+    shenandoah_assert_correct_except(loc, o, o == NULL || heap->is_full_gc_move_in_progress());
+  }
+}
+
+void ShenandoahNMethod::assert_same_oops(GrowableArray<oop*>* oops) {
+  assert(_oops_count == oops->length(), "should have the same number of oop*");
+  for (int c = 0; c < _oops_count; c++) {
+    assert(_oops[c] == oops->at(c), "should be the same oop*");
+  }
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCODEROOTS_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCODEROOTS_HPP
+
+#include "code/codeCache.hpp"
+#include "gc/shenandoah/shenandoahSharedVariables.hpp"
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+
+class ShenandoahHeap;
+class ShenandoahHeapRegion;
+class ShenandoahCodeRootsLock;
+
+class ShenandoahParallelCodeHeapIterator {
+  friend class CodeCache;
+private:
+  CodeHeap*     _heap;
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
+  volatile int  _claimed_idx;
+  volatile bool _finished;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+public:
+  ShenandoahParallelCodeHeapIterator(CodeHeap* heap);
+  void parallel_blobs_do(CodeBlobClosure* f);
+};
+
+class ShenandoahParallelCodeCacheIterator {
+  friend class CodeCache;
+private:
+  ShenandoahParallelCodeHeapIterator* _iters;
+  int                       _length;
+public:
+  ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps);
+  ~ShenandoahParallelCodeCacheIterator();
+  void parallel_blobs_do(CodeBlobClosure* f);
+};
+
+// ShenandoahNMethod tuple records the internal locations of oop slots within an nmethod.
+// This allows us to quickly scan the oops without doing the nmethod-internal scans, which
+// sometimes involve parsing the machine code. Note that it does not record the oops
+// themselves, because that would require handling these tuples as a new class of roots.
+class ShenandoahNMethod : public CHeapObj<mtGC> {
+private:
+  nmethod* _nm;
+  oop**    _oops;
+  int      _oops_count;
+
+public:
+  ShenandoahNMethod(nmethod *nm, GrowableArray<oop*>* oops);
+  ~ShenandoahNMethod();
+
+  nmethod* nm() {
+    return _nm;
+  }
+
+  bool has_cset_oops(ShenandoahHeap* heap);
+
+  void assert_alive_and_correct() PRODUCT_RETURN;
+  void assert_same_oops(GrowableArray<oop*>* oops) PRODUCT_RETURN;
+
+  static bool find_with_nmethod(void* nm, ShenandoahNMethod* other) {
+    return other->_nm == nm;
+  }
+};
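+
+// find_with_nmethod is shaped for GrowableArray::find(void* token, bool f(void*, E)).
+// A sketch of how a lookup would go, assuming the recorded list is guarded by the
+// write lock (see ShenandoahCodeRootsLock below); names mirror the fields above:
+//
+//   ShenandoahCodeRootsLock lock(/* write = */ true);
+//   int idx = list->find((void*) nm, ShenandoahNMethod::find_with_nmethod);
+//   if (idx != -1) {
+//     ShenandoahNMethod* r = list->at(idx);
+//     list->delete_at(idx);
+//     delete r;
+//   }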
+
+class ShenandoahCodeRootsIterator {
+  friend class ShenandoahCodeRoots;
+protected:
+  ShenandoahHeap* _heap;
+  ShenandoahParallelCodeCacheIterator _par_iterator;
+  ShenandoahSharedFlag _seq_claimed;
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
+  volatile size_t _claimed;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+protected:
+  ShenandoahCodeRootsIterator();
+  ~ShenandoahCodeRootsIterator();
+
+  template<bool CSET_FILTER>
+  void dispatch_parallel_blobs_do(CodeBlobClosure *f);
+
+  template<bool CSET_FILTER>
+  void fast_parallel_blobs_do(CodeBlobClosure *f);
+};
+
+class ShenandoahAllCodeRootsIterator : public ShenandoahCodeRootsIterator {
+public:
+  ShenandoahAllCodeRootsIterator() : ShenandoahCodeRootsIterator() {}
+  void possibly_parallel_blobs_do(CodeBlobClosure *f);
+};
+
+class ShenandoahCsetCodeRootsIterator : public ShenandoahCodeRootsIterator {
+public:
+  ShenandoahCsetCodeRootsIterator() : ShenandoahCodeRootsIterator() {}
+  void possibly_parallel_blobs_do(CodeBlobClosure* f);
+};
+
+class ShenandoahCodeRoots : public CHeapObj<mtGC> {
+  friend class ShenandoahHeap;
+  friend class ShenandoahCodeRootsLock;
+  friend class ShenandoahCodeRootsIterator;
+
+public:
+  static void initialize();
+  static void add_nmethod(nmethod* nm);
+  static void remove_nmethod(nmethod* nm);
+
+  // Provides the iterator over all nmethods in the code cache that have oops.
+  static ShenandoahAllCodeRootsIterator iterator();
+
+  // Provides the iterator over nmethods that have at least one oop in the collection set.
+  static ShenandoahCsetCodeRootsIterator cset_iterator();
+
+private:
+  struct PaddedLock {
+    DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
+    volatile int _lock;
+    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+  };
+
+  static PaddedLock _recorded_nms_lock;
+  static GrowableArray<ShenandoahNMethod*>* _recorded_nms;
+
+  static void acquire_lock(bool write) {
+    volatile int* loc = &_recorded_nms_lock._lock;
+    if (write) {
+      while ((OrderAccess::load_acquire(loc) != 0) ||
+             Atomic::cmpxchg(-1, loc, 0) != 0) {
+        SpinPause();
+      }
+      assert (*loc == -1, "acquired for write");
+    } else {
+      while (true) {
+        int cur = OrderAccess::load_acquire(loc);
+        if (cur >= 0) {
+          if (Atomic::cmpxchg(cur + 1, loc, cur) == cur) {
+            // Success!
+            assert (*loc > 0, "acquired for read");
+            return;
+          }
+        }
+        SpinPause();
+      }
+    }
+  }
+
+  static void release_lock(bool write) {
+    volatile int* loc = &ShenandoahCodeRoots::_recorded_nms_lock._lock;
+    if (write) {
+      OrderAccess::release_store_fence(loc, 0);
+    } else {
+      Atomic::dec(loc);
+    }
+  }
+};
+
+// Very simple unranked read-write lock
+class ShenandoahCodeRootsLock : public StackObj {
+  friend class ShenandoahCodeRoots;
+private:
+  const bool _write;
+public:
+  ShenandoahCodeRootsLock(bool write) : _write(write) {
+    ShenandoahCodeRoots::acquire_lock(write);
+  }
+
+  ~ShenandoahCodeRootsLock() {
+    ShenandoahCodeRoots::release_lock(_write);
+  }
+};
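+
+// Typical usage is RAII-scoped, a minimal sketch:
+//
+//   {
+//     ShenandoahCodeRootsLock lock(/* write = */ false); // shared reader
+//     // safe to iterate ShenandoahCodeRoots::_recorded_nms here
+//   } // released by the destructor
+//
+//   {
+//     ShenandoahCodeRootsLock lock(/* write = */ true);  // exclusive writer
+//     // safe to add/remove recorded nmethods here
+//   }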
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCODEROOTS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/copy.hpp"
+
+ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, HeapWord* heap_base) :
+  _map_size(heap->num_regions()),
+  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
+  _cset_map(NEW_C_HEAP_ARRAY(jbyte, _map_size, mtGC)),
+  _biased_cset_map(_cset_map - ((uintx)heap_base >> _region_size_bytes_shift)),
+  _heap(heap),
+  _garbage(0),
+  _live_data(0),
+  _used(0),
+  _region_count(0),
+  _current_index(0) {
+  // Use 1-byte data type
+  STATIC_ASSERT(sizeof(jbyte) == 1);
+
+  // Initialize cset map
+  Copy::zero_to_bytes(_cset_map, _map_size);
+}
+
+void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
+  assert(!is_in(r), "Already in collection set");
+  _cset_map[r->region_number()] = 1;
+  _region_count ++;
+  _garbage += r->garbage();
+  _live_data += r->get_live_data_bytes();
+  _used += r->used();
+}
+
+bool ShenandoahCollectionSet::add_region_check_for_duplicates(ShenandoahHeapRegion* r) {
+  if (!is_in(r)) {
+    add_region(r);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+void ShenandoahCollectionSet::remove_region(ShenandoahHeapRegion* r) {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
+  assert(is_in(r), "Not in collection set");
+  _cset_map[r->region_number()] = 0;
+  _region_count --;
+}
+
+void ShenandoahCollectionSet::update_region_status() {
+  for (size_t index = 0; index < _heap->num_regions(); index ++) {
+    ShenandoahHeapRegion* r = _heap->get_region(index);
+    if (is_in(r)) {
+      r->make_cset();
+    } else {
+      assert (!r->is_cset(), "should not be cset");
+    }
+  }
+}
+
+void ShenandoahCollectionSet::clear() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+  Copy::zero_to_bytes(_cset_map, _map_size);
+
+#ifdef ASSERT
+  for (size_t index = 0; index < _heap->num_regions(); index ++) {
+    assert (!_heap->get_region(index)->is_cset(), "should have been cleared before");
+  }
+#endif
+
+  _garbage = 0;
+  _live_data = 0;
+  _used = 0;
+
+  _region_count = 0;
+  _current_index = 0;
+}
+
+ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
+  size_t num_regions = _heap->num_regions();
+  if (_current_index >= (jint)num_regions) {
+    return NULL;
+  }
+
+  jint saved_current = _current_index;
+  size_t index = (size_t)saved_current;
+
+  while (index < num_regions) {
+    if (is_in(index)) {
+      jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current);
+      assert(cur >= (jint)saved_current, "Must move forward");
+      if (cur == saved_current) {
+        assert(is_in(index), "Invariant");
+        return _heap->get_region(index);
+      } else {
+        index = (size_t)cur;
+        saved_current = cur;
+      }
+    } else {
+      index ++;
+    }
+  }
+  return NULL;
+}
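+
+// Note on claim_next(): workers race on the shared _current_index cursor. Each
+// worker scans forward to the next in-cset region and tries to CAS the cursor
+// past it; the winner returns that region, and losers resume scanning from the
+// value the winner published. Every cset region is thus handed out exactly
+// once, without taking a lock.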
+
+ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
+  size_t num_regions = _heap->num_regions();
+  for (size_t index = (size_t)_current_index; index < num_regions; index ++) {
+    if (is_in(index)) {
+      _current_index = (jint)(index + 1);
+      return _heap->get_region(index);
+    }
+  }
+
+  return NULL;
+}
+
+void ShenandoahCollectionSet::print_on(outputStream* out) const {
+  out->print_cr("Collection Set : " SIZE_FORMAT "", count());
+
+  debug_only(size_t regions = 0;)
+  for (size_t index = 0; index < _heap->num_regions(); index ++) {
+    if (is_in(index)) {
+      _heap->get_region(index)->print_on(out);
+      debug_only(regions ++;)
+    }
+  }
+  assert(regions == count(), "Must match");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP
+
+#include "memory/allocation.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+
+class ShenandoahCollectionSet : public CHeapObj<mtGC> {
+  friend class ShenandoahHeap;
+private:
+  size_t const          _map_size;
+  size_t const          _region_size_bytes_shift;
+  jbyte* const          _cset_map;
+  // Bias cset map's base address for fast test if an oop is in cset
+  jbyte* const          _biased_cset_map;
+
+  ShenandoahHeap* const _heap;
+
+  size_t                _garbage;
+  size_t                _live_data;
+  size_t                _used;
+  size_t                _region_count;
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
+  volatile jint         _current_index;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+public:
+  ShenandoahCollectionSet(ShenandoahHeap* heap, HeapWord* heap_base);
+
+  // Add region to collection set
+  void add_region(ShenandoahHeapRegion* r);
+  bool add_region_check_for_duplicates(ShenandoahHeapRegion* r);
+
+  // Bring per-region statuses to consistency with this collection.
+  // TODO: This is a transitional interface that bridges the gap between
+  // region statuses and this collection. Should go away after we merge them.
+  void update_region_status();
+
+  // Remove region from collection set
+  void remove_region(ShenandoahHeapRegion* r);
+
+  // MT version
+  ShenandoahHeapRegion* claim_next();
+
+  // Single-thread version
+  ShenandoahHeapRegion* next();
+
+  size_t count()  const { return _region_count; }
+  bool is_empty() const { return _region_count == 0; }
+
+  void clear_current_index() {
+    _current_index = 0;
+  }
+
+  inline bool is_in(ShenandoahHeapRegion* r) const;
+  inline bool is_in(size_t region_number)    const;
+  inline bool is_in(HeapWord* p)             const;
+
+  void print_on(outputStream* out) const;
+
+  size_t used()      const { return _used; }
+  size_t live_data() const { return _live_data; }
+  size_t garbage()   const { return _garbage;   }
+  void clear();
+
+private:
+  jbyte* biased_map_address() const {
+    return _biased_cset_map;
+  }
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+
+bool ShenandoahCollectionSet::is_in(size_t region_number) const {
+  assert(region_number < _heap->num_regions(), "Sanity");
+  return _cset_map[region_number] == 1;
+}
+
+bool ShenandoahCollectionSet::is_in(ShenandoahHeapRegion* r) const {
+  return is_in(r->region_number());
+}
+
+bool ShenandoahCollectionSet::is_in(HeapWord* p) const {
+  assert(_heap->is_in(p), "Must be in the heap");
+  uintx index = ((uintx) p) >> _region_size_bytes_shift;
+  // No need to subtract the heap base from p: _biased_cset_map is already biased.
+  return _biased_cset_map[index] == 1;
+}
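+
+// Worked example of the bias (hypothetical numbers): with 1 MB regions
+// (_region_size_bytes_shift == 20) and heap_base == 0x80000000, the first
+// region has global index 0x80000000 >> 20 == 0x800, so the constructor sets
+//   _biased_cset_map = _cset_map - 0x800.
+// For any heap address p, _biased_cset_map[(uintx)p >> 20] then lands exactly
+// on _cset_map[region_number(p)], with no heap-base subtraction on the hot path.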
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "runtime/os.hpp"
+
+ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
+  _success_concurrent_gcs(0),
+  _success_degenerated_gcs(0),
+  _success_full_gcs(0),
+  _alloc_failure_degenerated(0),
+  _alloc_failure_degenerated_upgrade_to_full(0),
+  _alloc_failure_full(0),
+  _explicit_concurrent(0),
+  _explicit_full(0),
+  _implicit_concurrent(0),
+  _implicit_full(0),
+  _cycle_counter(0) {
+
+  Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahHeap::_DEGENERATED_LIMIT);
+
+  ShenandoahHeapRegion::setup_sizes(initial_heap_byte_size(), max_heap_byte_size());
+
+  initialize_all();
+
+  _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
+}
+
+void ShenandoahCollectorPolicy::initialize_alignments() {
+  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
+  size_t align = ShenandoahHeapRegion::region_size_bytes();
+  if (UseLargePages) {
+    align = MAX2(align, os::large_page_size());
+  }
+  _space_alignment = align;
+  _heap_alignment = align;
+}
+
+void ShenandoahCollectorPolicy::record_explicit_to_concurrent() {
+  _explicit_concurrent++;
+}
+
+void ShenandoahCollectorPolicy::record_explicit_to_full() {
+  _explicit_full++;
+}
+
+void ShenandoahCollectorPolicy::record_implicit_to_concurrent() {
+  _implicit_concurrent++;
+}
+
+void ShenandoahCollectorPolicy::record_implicit_to_full() {
+  _implicit_full++;
+}
+
+void ShenandoahCollectorPolicy::record_alloc_failure_to_full() {
+  _alloc_failure_full++;
+}
+
+void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point) {
+  assert(point < ShenandoahHeap::_DEGENERATED_LIMIT, "sanity");
+  _alloc_failure_degenerated++;
+  _degen_points[point]++;
+}
+
+void ShenandoahCollectorPolicy::record_degenerated_upgrade_to_full() {
+  _alloc_failure_degenerated_upgrade_to_full++;
+}
+
+void ShenandoahCollectorPolicy::record_success_concurrent() {
+  _success_concurrent_gcs++;
+}
+
+void ShenandoahCollectorPolicy::record_success_degenerated() {
+  _success_degenerated_gcs++;
+}
+
+void ShenandoahCollectorPolicy::record_success_full() {
+  _success_full_gcs++;
+}
+
+size_t ShenandoahCollectorPolicy::cycle_counter() const {
+  return _cycle_counter;
+}
+
+void ShenandoahCollectorPolicy::record_cycle_start() {
+  _cycle_counter++;
+}
+
+void ShenandoahCollectorPolicy::record_shutdown() {
+  _in_shutdown.set();
+}
+
+bool ShenandoahCollectorPolicy::is_at_shutdown() {
+  return _in_shutdown.is_set();
+}
+
+void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const {
+  out->print_cr("Under allocation pressure, concurrent cycles may cancel, and either continue cycle");
+  out->print_cr("under stop-the-world pause or result in stop-the-world Full GC. Increase heap size,");
+  out->print_cr("tune GC heuristics, set more aggressive pacing delay, or lower allocation rate");
+  out->print_cr("to avoid Degenerated and Full GC cycles.");
+  out->cr();
+
+  out->print_cr(SIZE_FORMAT_W(5) " successful concurrent GCs",         _success_concurrent_gcs);
+  out->print_cr("  " SIZE_FORMAT_W(5) " invoked explicitly",           _explicit_concurrent);
+  out->print_cr("  " SIZE_FORMAT_W(5) " invoked implicitly",           _implicit_concurrent);
+  out->cr();
+
+  out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs",                   _success_degenerated_gcs);
+  out->print_cr("  " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_degenerated);
+  for (int c = 0; c < ShenandoahHeap::_DEGENERATED_LIMIT; c++) {
+    if (_degen_points[c] > 0) {
+      const char* desc = ShenandoahHeap::degen_point_to_string((ShenandoahHeap::ShenandoahDegenPoint)c);
+      out->print_cr("    " SIZE_FORMAT_W(5) " happened at %s",         _degen_points[c], desc);
+    }
+  }
+  out->print_cr("  " SIZE_FORMAT_W(5) " upgraded to Full GC",          _alloc_failure_degenerated_upgrade_to_full);
+  out->cr();
+
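+  // Degenerated GCs that were upgraded to Full GC are accounted as Full GCs
+  // as well, hence the sum below.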
+  out->print_cr(SIZE_FORMAT_W(5) " Full GCs",                          _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full);
+  out->print_cr("  " SIZE_FORMAT_W(5) " invoked explicitly",           _explicit_full);
+  out->print_cr("  " SIZE_FORMAT_W(5) " invoked implicitly",           _implicit_full);
+  out->print_cr("  " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_full);
+  out->print_cr("  " SIZE_FORMAT_W(5) " upgraded from Degenerated GC", _alloc_failure_degenerated_upgrade_to_full);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP
+
+#include "gc/shared/collectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahTracer.hpp"
+#include "utilities/ostream.hpp"
+
+class ShenandoahCollectorPolicy: public CollectorPolicy {
+private:
+  size_t _success_concurrent_gcs;
+  size_t _success_degenerated_gcs;
+  size_t _success_full_gcs;
+  size_t _alloc_failure_degenerated;
+  size_t _alloc_failure_degenerated_upgrade_to_full;
+  size_t _alloc_failure_full;
+  size_t _explicit_concurrent;
+  size_t _explicit_full;
+  size_t _implicit_concurrent;
+  size_t _implicit_full;
+  size_t _degen_points[ShenandoahHeap::_DEGENERATED_LIMIT];
+
+  ShenandoahSharedFlag _in_shutdown;
+
+  ShenandoahTracer* _tracer;
+
+  size_t _cycle_counter;
+
+public:
+  ShenandoahCollectorPolicy();
+
+  void initialize_alignments();
+
+  // TODO: This is different from gc_end: that one encompasses one VM operation.
+  // These two encompass the entire cycle.
+  void record_cycle_start();
+
+  void record_success_concurrent();
+  void record_success_degenerated();
+  void record_success_full();
+  void record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point);
+  void record_alloc_failure_to_full();
+  void record_degenerated_upgrade_to_full();
+  void record_explicit_to_concurrent();
+  void record_explicit_to_full();
+  void record_implicit_to_concurrent();
+  void record_implicit_to_full();
+
+  void record_shutdown();
+  bool is_at_shutdown();
+
+  ShenandoahTracer* tracer() {return _tracer;}
+
+  size_t cycle_counter() const;
+
+  void print_gc_stats(outputStream* out) const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,1007 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
+
+#include "gc/shared/weakProcessor.hpp"
+#include "gc/shared/weakProcessor.inline.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/referenceProcessorPhaseTimes.hpp"
+
+#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/shenandoahOopClosures.hpp"
+#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
+#include "gc/shenandoah/shenandoahTimingTracker.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+
+#include "memory/iterator.inline.hpp"
+#include "memory/metaspace.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/oop.inline.hpp"
+
+template<UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
+class ShenandoahInitMarkRootsClosure : public OopClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  ShenandoahHeap* _heap;
+  ShenandoahMarkingContext* const _mark_context;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context);
+  }
+
+public:
+  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
+    _queue(q),
+    _heap(ShenandoahHeap::heap()),
+    _mark_context(_heap->marking_context()) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+  MetadataVisitingOopIterateClosure(rp),
+  _queue(q),
+  _heap(ShenandoahHeap::heap()),
+  _mark_context(_heap->marking_context())
+{ }
+
+template<UpdateRefsMode UPDATE_REFS>
+class ShenandoahInitMarkRootsTask : public AbstractGangTask {
+private:
+  ShenandoahRootProcessor* _rp;
+  bool _process_refs;
+public:
+  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
+    AbstractGangTask("Shenandoah init mark roots task"),
+    _rp(rp),
+    _process_refs(process_refs) {
+  }
+
+  void work(uint worker_id) {
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
+    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);
+
+    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
+
+    if (ShenandoahStringDedup::is_enabled()) {
+      ShenandoahInitMarkRootsClosure<UPDATE_REFS, ENQUEUE_DEDUP> mark_cl(q);
+      do_work(heap, &mark_cl, worker_id);
+    } else {
+      ShenandoahInitMarkRootsClosure<UPDATE_REFS, NO_DEDUP> mark_cl(q);
+      do_work(heap, &mark_cl, worker_id);
+    }
+  }
+
+private:
+  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
+    // The rationale for selecting the roots to scan is as follows:
+    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
+    //      code cache. This allows us to identify the dead classes, unload them, *and*
+    //      invalidate the relevant code cache blobs. This can only be done together with
+    //      class unloading.
+    //   b. With unload_classes = false, we have to nominally retain all the references from the
+    //      code cache, because there may be embedded class/oop references in the generated code,
+    //      which we would never visit during mark. Without code cache invalidation, as in (a),
+    //      we risk executing such a code cache blob, and crashing.
+    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
+    //      and instead do that in the concurrent phase under the relevant lock. This saves init
+    //      mark pause time.
+
+    CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
+    MarkingCodeBlobClosure blobs_cl(oops, ! CodeBlobToOopClosure::FixRelocations);
+    OopClosure* weak_oops = _process_refs ? NULL : oops;
+
+    ResourceMark m;
+    if (heap->unload_classes()) {
+      _rp->process_strong_roots(oops, weak_oops, &clds_cl, NULL, &blobs_cl, NULL, worker_id);
+    } else {
+      if (ShenandoahConcurrentScanCodeRoots) {
+        CodeBlobClosure* code_blobs = NULL;
+#ifdef ASSERT
+        ShenandoahAssertToSpaceClosure assert_to_space_oops;
+        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
+        // If conc code cache evac is disabled, code cache should have only to-space ptrs.
+        // Otherwise, it should have to-space ptrs only if mark does not update refs.
+        if (!heap->has_forwarded_objects()) {
+          code_blobs = &assert_to_space;
+        }
+#endif
+        _rp->process_all_roots(oops, weak_oops, &clds_cl, code_blobs, NULL, worker_id);
+      } else {
+        _rp->process_all_roots(oops, weak_oops, &clds_cl, &blobs_cl, NULL, worker_id);
+      }
+    }
+  }
+};
+
+class ShenandoahUpdateRootsTask : public AbstractGangTask {
+private:
+  ShenandoahRootProcessor* _rp;
+  const bool _update_code_cache;
+public:
+  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
+    AbstractGangTask("Shenandoah update roots task"),
+    _rp(rp),
+    _update_code_cache(update_code_cache) {
+  }
+
+  void work(uint worker_id) {
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahUpdateRefsClosure cl;
+    CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
+
+    CodeBlobClosure* code_blobs;
+    CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
+#ifdef ASSERT
+    ShenandoahAssertToSpaceClosure assert_to_space_oops;
+    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
+#endif
+    if (_update_code_cache) {
+      code_blobs = &update_blobs;
+    } else {
+      code_blobs =
+        DEBUG_ONLY(&assert_to_space)
+        NOT_DEBUG(NULL);
+    }
+    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, NULL, worker_id);
+  }
+};
+
+class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
+private:
+  ShenandoahConcurrentMark* _cm;
+  ShenandoahTaskTerminator* _terminator;
+
+public:
+  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
+    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahConcurrentWorkerSession worker_session(worker_id);
+    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
+    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
+    ReferenceProcessor* rp;
+    if (heap->process_references()) {
+      rp = heap->ref_processor();
+      shenandoah_assert_rp_isalive_installed();
+    } else {
+      rp = NULL;
+    }
+
+    _cm->concurrent_scan_code_roots(worker_id, rp);
+    _cm->mark_loop(worker_id, _terminator, rp,
+                   true, // cancellable
+                   ShenandoahStringDedup::is_enabled()); // perform string dedup
+  }
+};
+
+class ShenandoahSATBThreadsClosure : public ThreadClosure {
+private:
+  ShenandoahSATBBufferClosure* _satb_cl;
+  int _thread_parity;
+
+public:
+  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
+    _satb_cl(satb_cl),
+    _thread_parity(Threads::thread_claim_parity()) {}
+
+  void do_thread(Thread* thread) {
+    if (thread->is_Java_thread()) {
+      if (thread->claim_oops_do(true, _thread_parity)) {
+        JavaThread* jt = (JavaThread*)thread;
+        ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
+      }
+    } else if (thread->is_VM_thread()) {
+      if (thread->claim_oops_do(true, _thread_parity)) {
+        ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
+      }
+    }
+  }
+};
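+
+// Note: claim_oops_do(true, _thread_parity) claims each thread at most once per
+// claim-parity epoch, so parallel workers running this closure never drain the
+// same thread's SATB buffer twice.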
+
+class ShenandoahFinalMarkingTask : public AbstractGangTask {
+private:
+  ShenandoahConcurrentMark* _cm;
+  ShenandoahTaskTerminator* _terminator;
+  bool _dedup_string;
+
+public:
+  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
+    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+    // First drain remaining SATB buffers.
+    // Notice that this is not strictly necessary for mark-compact. But since
+    // it requires a StrongRootsScope around the task, we need to claim the
+    // threads, and performance-wise it doesn't really matter. Adds about 1ms to
+    // full-gc.
+    {
+      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
+      ShenandoahSATBBufferClosure cl(q);
+      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
+      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
+      ShenandoahSATBThreadsClosure tc(&cl);
+      Threads::threads_do(&tc);
+    }
+
+    ReferenceProcessor* rp;
+    if (heap->process_references()) {
+      rp = heap->ref_processor();
+      shenandoah_assert_rp_isalive_installed();
+    } else {
+      rp = NULL;
+    }
+
+    // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
+    // let's check here.
+    _cm->concurrent_scan_code_roots(worker_id, rp);
+    _cm->mark_loop(worker_id, _terminator, rp,
+                   false, // not cancellable
+                   _dedup_string);
+
+    assert(_cm->task_queues()->is_empty(), "Should be empty");
+  }
+};
+
+void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
+  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  ShenandoahGCPhase phase(root_phase);
+
+  WorkGang* workers = heap->workers();
+  uint nworkers = workers->active_workers();
+
+  assert(nworkers <= task_queues()->size(), "Just check");
+
+  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
+  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
+  task_queues()->reserve(nworkers);
+
+  if (heap->has_forwarded_objects()) {
+    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
+    workers->run_task(&mark_roots);
+  } else {
+    // The heap is stable (no forwarded objects), so there is no need to update
+    // references. This saves time by not walking through forwarding pointers.
+    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, _heap->process_references());
+    workers->run_task(&mark_roots);
+  }
+
+  if (ShenandoahConcurrentScanCodeRoots) {
+    clear_claim_codecache();
+  }
+}
+
+void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+
+  bool update_code_cache = true; // initialize to safer value
+  switch (root_phase) {
+    case ShenandoahPhaseTimings::update_roots:
+    case ShenandoahPhaseTimings::final_update_refs_roots:
+      update_code_cache = false;
+      break;
+    case ShenandoahPhaseTimings::full_gc_roots:
+    case ShenandoahPhaseTimings::degen_gc_update_roots:
+      update_code_cache = true;
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+
+  ShenandoahGCPhase phase(root_phase);
+
+#if defined(COMPILER2) || INCLUDE_JVMCI
+  DerivedPointerTable::clear();
+#endif
+
+  uint nworkers = _heap->workers()->active_workers();
+
+  ShenandoahRootProcessor root_proc(_heap, nworkers, root_phase);
+  ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
+  _heap->workers()->run_task(&update_roots);
+
+#if defined(COMPILER2) || INCLUDE_JVMCI
+  DerivedPointerTable::update_pointers();
+#endif
+}
+
+void ShenandoahConcurrentMark::initialize(uint workers) {
+  _heap = ShenandoahHeap::heap();
+
+  uint num_queues = MAX2(workers, 1U);
+
+  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);
+
+  for (uint i = 0; i < num_queues; ++i) {
+    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
+    task_queue->initialize();
+    _task_queues->register_queue(i, task_queue);
+  }
+}
+
+void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
+  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
+    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
+    if (!_heap->unload_classes()) {
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      // TODO: We cannot honor StringDeduplication here, due to lock ranking
+      // inversion, so we may miss some deduplication candidates.
+      if (_heap->has_forwarded_objects()) {
+        ShenandoahMarkResolveRefsClosure cl(q, rp);
+        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
+        CodeCache::blobs_do(&blobs);
+      } else {
+        ShenandoahMarkRefsClosure cl(q, rp);
+        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
+        CodeCache::blobs_do(&blobs);
+      }
+    }
+  }
+}
+
+void ShenandoahConcurrentMark::mark_from_roots() {
+  WorkGang* workers = _heap->workers();
+  uint nworkers = workers->active_workers();
+
+  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);
+
+  if (_heap->process_references()) {
+    ReferenceProcessor* rp = _heap->ref_processor();
+    rp->set_active_mt_degree(nworkers);
+
+    // enable ("weak") refs discovery
+    rp->enable_discovery(true /*verify_no_refs*/);
+    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
+  }
+
+  shenandoah_assert_rp_isalive_not_installed();
+  ShenandoahIsAliveSelector is_alive;
+  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
+
+  task_queues()->reserve(nworkers);
+
+  {
+    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
+    ShenandoahTaskTerminator terminator(nworkers, task_queues());
+    ShenandoahConcurrentMarkingTask task(this, &terminator);
+    workers->run_task(&task);
+  }
+
+  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
+}
+
+void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+
+  uint nworkers = _heap->workers()->active_workers();
+
+  // Finally mark everything else we've got in our queues during the previous steps.
+  // It does two different things for concurrent vs. mark-compact GC:
+  // - For concurrent GC, it starts with empty task queues, drains the remaining
+  //   SATB buffers, and then completes the marking closure.
+  // - For mark-compact GC, it starts out with the task queues seeded by the
+  //   initial root scan, and completes the closure, thus marking through all
+  //   live objects.
+  // The implementation is the same, so it's shared here.
+  {
+    ShenandoahGCPhase phase(full_gc ?
+                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
+                            ShenandoahPhaseTimings::finish_queues);
+    task_queues()->reserve(nworkers);
+
+    shenandoah_assert_rp_isalive_not_installed();
+    ShenandoahIsAliveSelector is_alive;
+    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
+
+    ShenandoahTerminationTracker termination_tracker(full_gc ?
+                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
+                                                     ShenandoahPhaseTimings::termination);
+
+    StrongRootsScope scope(nworkers);
+    ShenandoahTaskTerminator terminator(nworkers, task_queues());
+    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
+    _heap->workers()->run_task(&task);
+  }
+
+  assert(task_queues()->is_empty(), "Should be empty");
+
+  // When we're done marking everything, we process weak references.
+  if (_heap->process_references()) {
+    weak_refs_work(full_gc);
+  }
+
+  // And finally finish class unloading
+  if (_heap->unload_classes()) {
+    _heap->unload_classes_and_cleanup_tables(full_gc);
+  }
+
+  assert(task_queues()->is_empty(), "Should be empty");
+  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
+  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
+
+  // Resize Metaspace
+  MetaspaceGC::compute_new_size();
+}
+
+// Weak Reference Closures
+class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
+  uint _worker_id;
+  ShenandoahTaskTerminator* _terminator;
+  bool _reset_terminator;
+
+public:
+  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
+    _worker_id(worker_id),
+    _terminator(t),
+    _reset_terminator(reset_terminator) {
+  }
+
+  void do_void() {
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+
+    ShenandoahHeap* sh = ShenandoahHeap::heap();
+    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
+    assert(sh->process_references(), "why else would we be here?");
+    ReferenceProcessor* rp = sh->ref_processor();
+
+    shenandoah_assert_rp_isalive_installed();
+
+    scm->mark_loop(_worker_id, _terminator, rp,
+                   false,   // not cancellable
+                   false);  // do not do strdedup
+
+    if (_reset_terminator) {
+      _terminator->reset_for_reuse();
+    }
+  }
+};
+
+class ShenandoahCMKeepAliveClosure : public OopClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  ShenandoahHeap* _heap;
+  ShenandoahMarkingContext* const _mark_context;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
+  }
+
+public:
+  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
+    _queue(q),
+    _heap(ShenandoahHeap::heap()),
+    _mark_context(_heap->marking_context()) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  ShenandoahHeap* _heap;
+  ShenandoahMarkingContext* const _mark_context;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
+  }
+
+public:
+  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
+    _queue(q),
+    _heap(ShenandoahHeap::heap()),
+    _mark_context(_heap->marking_context()) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahWeakUpdateClosure : public OopClosure {
+private:
+  ShenandoahHeap* const _heap;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    oop o = _heap->maybe_update_with_forwarded(p);
+    shenandoah_assert_marked_except(p, o, o == NULL);
+  }
+
+public:
+  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahWeakAssertNotForwardedClosure : public OopClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      shenandoah_assert_not_forwarded(p, obj);
+    }
+  }
+
+public:
+  ShenandoahWeakAssertNotForwardedClosure() {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahRefProcTaskProxy : public AbstractGangTask {
+private:
+  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
+  ShenandoahTaskTerminator* _terminator;
+
+public:
+  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
+                             ShenandoahTaskTerminator* t) :
+    AbstractGangTask("Process reference objects in parallel"),
+    _proc_task(proc_task),
+    _terminator(t) {
+  }
+
+  void work(uint worker_id) {
+    ResourceMark rm;
+    HandleMark hm;
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
+    if (heap->has_forwarded_objects()) {
+      ShenandoahForwardedIsAliveClosure is_alive;
+      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
+      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
+    } else {
+      ShenandoahIsAliveClosure is_alive;
+      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
+      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
+    }
+  }
+};
+
+class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
+private:
+  WorkGang* _workers;
+
+public:
+  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
+    _workers(workers) {
+  }
+
+  // Executes a task using worker threads.
+  void execute(ProcessTask& task, uint ergo_workers) {
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
+    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
+                                          ergo_workers,
+                                          /* do_check = */ false);
+    uint nworkers = _workers->active_workers();
+    cm->task_queues()->reserve(nworkers);
+    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
+    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
+    _workers->run_task(&proc_task_proxy);
+  }
+};
+
+void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
+  assert(_heap->process_references(), "sanity");
+
+  ShenandoahPhaseTimings::Phase phase_root =
+          full_gc ?
+          ShenandoahPhaseTimings::full_gc_weakrefs :
+          ShenandoahPhaseTimings::weakrefs;
+
+  ShenandoahGCPhase phase(phase_root);
+
+  ReferenceProcessor* rp = _heap->ref_processor();
+
+  // NOTE: We cannot shortcut on has_discovered_references() here, because
+  // we would then miss marking JNI Weak refs; see the implementation in
+  // ReferenceProcessor::process_discovered_references.
+  weak_refs_work_doit(full_gc);
+
+  rp->verify_no_references_recorded();
+  assert(!rp->discovery_enabled(), "Post condition");
+}
+
+void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
+  ReferenceProcessor* rp = _heap->ref_processor();
+
+  ShenandoahPhaseTimings::Phase phase_process =
+          full_gc ?
+          ShenandoahPhaseTimings::full_gc_weakrefs_process :
+          ShenandoahPhaseTimings::weakrefs_process;
+
+  ShenandoahPhaseTimings::Phase phase_process_termination =
+          full_gc ?
+          ShenandoahPhaseTimings::full_gc_weakrefs_termination :
+          ShenandoahPhaseTimings::weakrefs_termination;
+
+  shenandoah_assert_rp_isalive_not_installed();
+  ShenandoahIsAliveSelector is_alive;
+  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
+
+  WorkGang* workers = _heap->workers();
+  uint nworkers = workers->active_workers();
+
+  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
+  rp->set_active_mt_degree(nworkers);
+
+  assert(task_queues()->is_empty(), "Should be empty");
+
+  // The complete_gc and keep_alive closures instantiated here are only needed for
+  // the single-threaded path in RP. They share queue 0 for tracking work, which
+  // simplifies the implementation. Since RP may decide to call complete_gc several
+  // times, we need to be able to reuse the terminator.
+  uint serial_worker_id = 0;
+  ShenandoahTaskTerminator terminator(1, task_queues());
+  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
+
+  ShenandoahRefProcTaskExecutor executor(workers);
+
+  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
+
+  {
+    ShenandoahGCPhase phase(phase_process);
+    ShenandoahTerminationTracker phase_term(phase_process_termination);
+
+    // Process leftover weak oops: update them, if needed (using the parallel version),
+    // or otherwise assert that they do not need updating (using the serial version).
+    // The weak processor API requires us to visit the oops, even if we are not doing
+    // anything to them.
+    if (_heap->has_forwarded_objects()) {
+      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
+      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
+                                        &complete_gc, &executor,
+                                        &pt);
+
+      ShenandoahWeakUpdateClosure cl;
+      WeakProcessor::weak_oops_do(workers, is_alive.is_alive_closure(), &cl, 1);
+    } else {
+      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
+      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
+                                        &complete_gc, &executor,
+                                        &pt);
+
+      ShenandoahWeakAssertNotForwardedClosure cl;
+      WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
+    }
+
+    pt.print_all_references();
+
+    assert(task_queues()->is_empty(), "Should be empty");
+  }
+}
+
+class ShenandoahCancelledGCYieldClosure : public YieldClosure {
+private:
+  ShenandoahHeap* const _heap;
+public:
+  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
+  virtual bool should_return() { return _heap->cancelled_gc(); }
+};
+
+class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
+public:
+  void do_void() {
+    ShenandoahHeap* sh = ShenandoahHeap::heap();
+    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
+    assert(sh->process_references(), "why else would we be here?");
+    ShenandoahTaskTerminator terminator(1, scm->task_queues());
+
+    ReferenceProcessor* rp = sh->ref_processor();
+    shenandoah_assert_rp_isalive_installed();
+
+    scm->mark_loop(0, &terminator, rp,
+                   false, // not cancellable
+                   false); // do not do strdedup
+  }
+};
+
+class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  ShenandoahHeap* _heap;
+  ShenandoahMarkingContext* const _mark_context;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
+  }
+
+public:
+  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
+    _queue(q),
+    _heap(ShenandoahHeap::heap()),
+    _mark_context(_heap->marking_context()) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahPrecleanTask : public AbstractGangTask {
+private:
+  ReferenceProcessor* _rp;
+
+public:
+  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
+          AbstractGangTask("Precleaning task"),
+          _rp(rp) {}
+
+  void work(uint worker_id) {
+    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+
+    ShenandoahHeap* sh = ShenandoahHeap::heap();
+
+    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);
+
+    ShenandoahCancelledGCYieldClosure yield;
+    ShenandoahPrecleanCompleteGCClosure complete_gc;
+
+    if (sh->has_forwarded_objects()) {
+      ShenandoahForwardedIsAliveClosure is_alive;
+      ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q);
+      ResourceMark rm;
+      _rp->preclean_discovered_references(&is_alive, &keep_alive,
+                                          &complete_gc, &yield,
+                                          NULL);
+    } else {
+      ShenandoahIsAliveClosure is_alive;
+      ShenandoahCMKeepAliveClosure keep_alive(q);
+      ResourceMark rm;
+      _rp->preclean_discovered_references(&is_alive, &keep_alive,
+                                          &complete_gc, &yield,
+                                          NULL);
+    }
+  }
+};
+
+void ShenandoahConcurrentMark::preclean_weak_refs() {
+  // Pre-cleaning weak references before diving into STW makes sense at the
+  // end of concurrent mark. This will filter out the references whose referents
+  // are alive. Note that ReferenceProcessor already filters these out during
+  // reference discovery, and the bulk of the work is done there. This phase
+  // processes the leftovers that missed the initial filtering, i.e. when the
+  // referent was marked alive after the reference was discovered by RP.
+
+  assert(_heap->process_references(), "sanity");
+
+  // Shortcut if no references were discovered to avoid winding up threads.
+  ReferenceProcessor* rp = _heap->ref_processor();
+  if (!rp->has_discovered_references()) {
+    return;
+  }
+
+  assert(task_queues()->is_empty(), "Should be empty");
+
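+  // Precleaning runs on a single worker (see below), so switch the
+  // ReferenceProcessor to single-threaded discovery for its duration.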
+  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
+
+  shenandoah_assert_rp_isalive_not_installed();
+  ShenandoahIsAliveSelector is_alive;
+  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
+
+  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
+  // queues and other goodies. When upstream ReferenceProcessor starts supporting
+  // parallel precleans, we can extend this to more threads.
+  WorkGang* workers = _heap->workers();
+  uint nworkers = workers->active_workers();
+  assert(nworkers == 1, "This code uses only a single worker");
+  task_queues()->reserve(nworkers);
+
+  ShenandoahPrecleanTask task(rp);
+  workers->run_task(&task);
+
+  assert(task_queues()->is_empty(), "Should be empty");
+}
+
+void ShenandoahConcurrentMark::cancel() {
+  // Clean up marking stacks.
+  ShenandoahObjToScanQueueSet* queues = task_queues();
+  queues->clear();
+
+  // Cancel SATB buffers.
+  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
+}
+
+ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
+  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
+  return _task_queues->queue(worker_id);
+}
+
+template <bool CANCELLABLE>
+void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
+                                                 bool strdedup) {
+  ShenandoahObjToScanQueue* q = get_queue(w);
+
+  jushort* ld = _heap->get_liveness_cache(w);
+
+  // TODO: We can clean this up if we figure out how to do templated oop closures that
+  // play nice with specialized_oop_iterators.
+  if (_heap->unload_classes()) {
+    if (_heap->has_forwarded_objects()) {
+      if (strdedup) {
+        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
+        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
+      } else {
+        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
+        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
+      }
+    } else {
+      if (strdedup) {
+        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
+        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
+      } else {
+        ShenandoahMarkRefsMetadataClosure cl(q, rp);
+        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
+      }
+    }
+  } else {
+    if (_heap->has_forwarded_objects()) {
+      if (strdedup) {
+        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
+        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
+      } else {
+        ShenandoahMarkUpdateRefsClosure cl(q, rp);
+        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
+      }
+    } else {
+      if (strdedup) {
+        ShenandoahMarkRefsDedupClosure cl(q, rp);
+        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
+      } else {
+        ShenandoahMarkRefsClosure cl(q, rp);
+        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
+      }
+    }
+  }
+
+  _heap->flush_liveness_cache(w);
+}
+
+template <class T, bool CANCELLABLE>
+void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
+  uintx stride = ShenandoahMarkLoopStride;
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ShenandoahObjToScanQueueSet* queues = task_queues();
+  ShenandoahObjToScanQueue* q;
+  ShenandoahMarkTask t;
+
+  /*
+   * Process outstanding queues, if any.
+   *
+   * There can be more queues than workers. To deal with the imbalance, we claim
+   * extra queues first. Since marking can push new tasks into the queue associated
+   * with this worker id, we come back to process this queue in the normal loop.
+   */
+  assert(queues->get_reserved() == heap->workers()->active_workers(),
+         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());
+
+  q = queues->claim_next();
+  while (q != NULL) {
+    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
+      ShenandoahCancelledTerminatorTerminator tt;
+      ShenandoahSuspendibleThreadSetLeaver stsl(ShenandoahSuspendibleWorkers);
+      while (!terminator->offer_termination(&tt));
+      return;
+    }
+
+    for (uint i = 0; i < stride; i++) {
+      if (q->pop(t)) {
+        do_task<T>(q, cl, live_data, &t);
+      } else {
+        assert(q->is_empty(), "Must be empty");
+        q = queues->claim_next();
+        break;
+      }
+    }
+  }
+  q = get_queue(worker_id);
+
+  ShenandoahSATBBufferClosure drain_satb(q);
+  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
+
+  /*
+   * Normal marking loop:
+   */
+  while (true) {
+    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
+      ShenandoahCancelledTerminatorTerminator tt;
+      ShenandoahSuspendibleThreadSetLeaver stsl(ShenandoahSuspendibleWorkers);
+      while (!terminator->offer_termination(&tt));
+      return;
+    }
+
+    while (satb_mq_set.completed_buffers_num() > 0) {
+      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
+    }
+
+    uint work = 0;
+    for (uint i = 0; i < stride; i++) {
+      if (q->pop(t) ||
+          queues->steal(worker_id, t)) {
+        do_task<T>(q, cl, live_data, &t);
+        work++;
+      } else {
+        break;
+      }
+    }
+
+    if (work == 0) {
+      // No work encountered in current stride, try to terminate.
+      // Need to leave the STS here otherwise it might block safepoints.
+      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
+      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
+      if (terminator->offer_termination()) return;
+    }
+  }
+}
+
+bool ShenandoahConcurrentMark::claim_codecache() {
+  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
+  return _claimed_codecache.try_set();
+}
+
+void ShenandoahConcurrentMark::clear_claim_codecache() {
+  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
+  _claimed_codecache.unset();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
+
+#include "gc/shared/taskqueue.hpp"
+#include "gc/shenandoah/shenandoahOopClosures.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+
+class ShenandoahStrDedupQueue;
+
+class ShenandoahConcurrentMark: public CHeapObj<mtGC> {
+  friend class ShenandoahTraversalGC;
+private:
+  ShenandoahHeap* _heap;
+  ShenandoahObjToScanQueueSet* _task_queues;
+
+public:
+  void initialize(uint workers);
+  void cancel();
+
+// ---------- Marking loop and tasks
+//
+private:
+  template <class T>
+  inline void do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task);
+
+  template <class T>
+  inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array);
+
+  template <class T>
+  inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow);
+
+  inline void count_liveness(jushort* live_data, oop obj);
+
+  template <class T, bool CANCELLABLE>
+  void mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *t);
+
+  template <bool CANCELLABLE>
+  void mark_loop_prework(uint worker_id, ShenandoahTaskTerminator *terminator, ReferenceProcessor *rp, bool strdedup);
+
+public:
+  void mark_loop(uint worker_id, ShenandoahTaskTerminator* terminator, ReferenceProcessor *rp,
+                 bool cancellable, bool strdedup) {
+    if (cancellable) {
+      mark_loop_prework<true>(worker_id, terminator, rp, strdedup);
+    } else {
+      mark_loop_prework<false>(worker_id, terminator, rp, strdedup);
+    }
+  }
+
+  template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
+  static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context);
+
+  void mark_from_roots();
+  void finish_mark_from_roots(bool full_gc);
+
+  void mark_roots(ShenandoahPhaseTimings::Phase root_phase);
+  void update_roots(ShenandoahPhaseTimings::Phase root_phase);
+
+// ---------- Weak references
+//
+private:
+  void weak_refs_work(bool full_gc);
+  void weak_refs_work_doit(bool full_gc);
+
+public:
+  void preclean_weak_refs();
+
+// ---------- Concurrent code cache
+//
+private:
+  ShenandoahSharedFlag _claimed_codecache;
+
+public:
+  void concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp);
+  bool claim_codecache();
+  void clear_claim_codecache();
+
+// ---------- Helpers
+// Used from closures, need to be public
+//
+public:
+  ShenandoahObjToScanQueue* get_queue(uint worker_id);
+  ShenandoahObjToScanQueueSet* task_queues() { return _task_queues; }
+
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
+
+template <class T>
+void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task) {
+  oop obj = task->obj();
+
+  shenandoah_assert_not_forwarded_except(NULL, obj, _heap->is_concurrent_traversal_in_progress() && _heap->cancelled_gc());
+  shenandoah_assert_marked(NULL, obj);
+  shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_gc());
+
+  if (task->is_not_chunked()) {
+    if (obj->is_instance()) {
+      // Case 1: Normal oop, process as usual.
+      obj->oop_iterate(cl);
+    } else if (obj->is_objArray()) {
+      // Case 2: Object array instance and no chunk is set. Must be the first
+      // time we visit it, start the chunked processing.
+      do_chunked_array_start<T>(q, cl, obj);
+    } else {
+      // Case 3: Primitive array. Do nothing, no oops there. We use the same
+      // performance tweak that TypeArrayKlass::oop_oop_iterate_impl uses:
+      // we skip iterating over the klass pointer since we know that
+      // Universe::TypeArrayKlass never moves.
+      assert (obj->is_typeArray(), "should be type array");
+    }
+    // Count liveness last: push the outstanding work to the queues first
+    count_liveness(live_data, obj);
+  } else {
+    // Case 4: Array chunk, has sensible chunk id. Process it.
+    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow());
+  }
+}
+
+inline void ShenandoahConcurrentMark::count_liveness(jushort* live_data, oop obj) {
+  size_t region_idx = _heap->heap_region_index_containing(obj);
+  ShenandoahHeapRegion* region = _heap->get_region(region_idx);
+  size_t size = obj->size() + ShenandoahBrooksPointer::word_size();
+
+  if (!region->is_humongous_start()) {
+    assert(!region->is_humongous(), "Cannot have continuations here");
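+    // Liveness is accumulated in a per-worker jushort cache; the saturating
+    // arithmetic below flushes to the region counter on overflow.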
+    size_t max = (1 << (sizeof(jushort) * 8)) - 1;
+    if (size >= max) {
+      // too big, add to region data directly
+      region->increase_live_data_gc_words(size);
+    } else {
+      jushort cur = live_data[region_idx];
+      size_t new_val = cur + size;
+      if (new_val >= max) {
+        // overflow, flush to region data
+        region->increase_live_data_gc_words(new_val);
+        live_data[region_idx] = 0;
+      } else {
+        // still good, remember in locals
+        live_data[region_idx] = (jushort) new_val;
+      }
+    }
+  } else {
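+    // Humongous start region: attribute live data to the entire chain of regions.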
+    shenandoah_assert_in_correct_region(NULL, obj);
+    size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
+
+    for (size_t i = region_idx; i < region_idx + num_regions; i++) {
+      ShenandoahHeapRegion* chain_reg = _heap->get_region(i);
+      assert(chain_reg->is_humongous(), "Expecting a humongous region");
+      chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
+    }
+  }
+}
+
+template <class T>
+inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj) {
+  assert(obj->is_objArray(), "expect object array");
+  objArrayOop array = objArrayOop(obj);
+  int len = array->length();
+
+  if (len <= (int) ObjArrayMarkingStride*2) {
+    // A few slices only, process directly
+    array->oop_iterate_range(cl, 0, len);
+  } else {
+    int bits = log2_long((size_t) len);
+    // Compensate for non-power-of-two arrays, cover the array in excess:
+    if (len != (1 << bits)) bits++;
+
+    // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
+    // boundaries against array->length(), touching the array header on every chunk.
+    //
+    // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
+    // If the array is not divided in chunk sizes, then there would be an irregular tail,
+    // which we will process separately.
+
+    int last_idx = 0;
+
+    int chunk = 1;
+    int pow = bits;
+
+    // Handle overflow
+    if (pow >= 31) {
+      assert (pow == 31, "sanity");
+      pow--;
+      chunk = 2;
+      last_idx = (1 << pow);
+      bool pushed = q->push(ShenandoahMarkTask(array, 1, pow));
+      assert(pushed, "overflow queue should always succeed pushing");
+    }
+
+    // Split out tasks, as suggested in ObjArrayChunkedTask docs. Record the last
+    // successful right boundary to figure out the irregular tail.
+    while ((1 << pow) > (int)ObjArrayMarkingStride &&
+           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
+      pow--;
+      int left_chunk = chunk*2 - 1;
+      int right_chunk = chunk*2;
+      int left_chunk_end = left_chunk * (1 << pow);
+      if (left_chunk_end < len) {
+        bool pushed = q->push(ShenandoahMarkTask(array, left_chunk, pow));
+        assert(pushed, "overflow queue should always succeed pushing");
+        chunk = right_chunk;
+        last_idx = left_chunk_end;
+      } else {
+        chunk = left_chunk;
+      }
+    }
+
+    // Process the irregular tail, if present
+    int from = last_idx;
+    if (from < len) {
+      array->oop_iterate_range(cl, from, len);
+    }
+  }
+}
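+// Illustrative walk-through of the chunking above, under assumed values that are
+// not part of this change: take an array of length 3000 and ObjArrayMarkingStride
+// of 1024. Then bits = 12 (rounded up from 11, since 3000 is not a power of two),
+// and we start with chunk = 1, pow = 12. The first split pushes a task for
+// chunk 1 at pow = 11, responsible for elements [0, 2048) (it is split further
+// when popped by do_chunked_array()). The next split boundary, 3 * 1024 = 3072,
+// overshoots the array, so nothing is pushed and we continue with chunk 3 at
+// pow = 10. Since (1 << 10) is not greater than the stride, the loop stops, and
+// the irregular tail [2048, 3000) is processed in-place via oop_iterate_range().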
+
+template <class T>
+inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow) {
+  assert(obj->is_objArray(), "expect object array");
+  objArrayOop array = objArrayOop(obj);
+
+  assert (ObjArrayMarkingStride > 0, "sanity");
+
+  // Split out tasks, as suggested in ObjArrayChunkedTask docs. Avoid pushing tasks that
+  // are known to start beyond the array.
+  while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
+    pow--;
+    chunk *= 2;
+    bool pushed = q->push(ShenandoahMarkTask(array, chunk - 1, pow));
+    assert(pushed, "overflow queue should always succeed pushing");
+  }
+
+  int chunk_size = 1 << pow;
+
+  int from = (chunk - 1) * chunk_size;
+  int to = chunk * chunk_size;
+
+#ifdef ASSERT
+  int len = array->length();
+  assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
+  assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
+#endif
+
+  array->oop_iterate_range(cl, from, to);
+}
+
+class ShenandoahSATBBufferClosure : public SATBBufferClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  ShenandoahHeap* _heap;
+  ShenandoahMarkingContext* const _mark_context;
+public:
+  ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q) :
+    _queue(q),
+    _heap(ShenandoahHeap::heap()),
+    _mark_context(_heap->marking_context())
+  {
+  }
+
+  void do_buffer(void **buffer, size_t size) {
+    if (_heap->has_forwarded_objects()) {
+      if (ShenandoahStringDedup::is_enabled()) {
+        do_buffer_impl<RESOLVE, ENQUEUE_DEDUP>(buffer, size);
+      } else {
+        do_buffer_impl<RESOLVE, NO_DEDUP>(buffer, size);
+      }
+    } else {
+      if (ShenandoahStringDedup::is_enabled()) {
+        do_buffer_impl<NONE, ENQUEUE_DEDUP>(buffer, size);
+      } else {
+        do_buffer_impl<NONE, NO_DEDUP>(buffer, size);
+      }
+    }
+  }
+
+  template<UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
+  void do_buffer_impl(void **buffer, size_t size) {
+    for (size_t i = 0; i < size; ++i) {
+      oop *p = (oop *) &buffer[i];
+      ShenandoahConcurrentMark::mark_through_ref<oop, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context);
+    }
+  }
+};
+
+template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
+inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context) {
+  T o = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(o)) {
+    oop obj = CompressedOops::decode_not_null(o);
+    switch (UPDATE_REFS) {
+    case NONE:
+      break;
+    case RESOLVE:
+      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      break;
+    case SIMPLE:
+      // We piggy-back reference updating to the marking tasks.
+      obj = heap->update_with_forwarded_not_null(p, obj);
+      break;
+    case CONCURRENT:
+      obj = heap->maybe_update_with_forwarded_not_null(p, obj);
+      break;
+    default:
+      ShouldNotReachHere();
+    }
+
+    // Note: Only when concurrently updating references can obj become NULL here.
+    // It happens when a mutator thread beats us by writing another value. In that
+    // case we don't need to do anything else.
+    if (UPDATE_REFS != CONCURRENT || !CompressedOops::is_null(obj)) {
+      shenandoah_assert_not_forwarded(p, obj);
+      shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc());
+
+      if (mark_context->mark(obj)) {
+        bool pushed = q->push(ShenandoahMarkTask(obj));
+        assert(pushed, "overflow queue should always succeed pushing");
+
+        if ((STRING_DEDUP == ENQUEUE_DEDUP) && ShenandoahStringDedup::is_candidate(obj)) {
+          assert(ShenandoahStringDedup::is_enabled(), "Must be enabled");
+          ShenandoahStringDedup::enqueue_candidate(obj);
+        }
+      }
+
+      shenandoah_assert_marked(p, obj);
+    }
+  }
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,624 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
+#include "gc/shenandoah/shenandoahControlThread.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVMOperations.hpp"
+#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
+#include "memory/iterator.hpp"
+#include "memory/universe.hpp"
+
+ShenandoahControlThread::ShenandoahControlThread() :
+  ConcurrentGCThread(),
+  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
+  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
+  _periodic_task(this),
+  _requested_gc_cause(GCCause::_no_cause_specified),
+  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
+  _allocs_seen(0) {
+
+  create_and_start();
+  _periodic_task.enroll();
+  _periodic_satb_flush_task.enroll();
+}
+
+ShenandoahControlThread::~ShenandoahControlThread() {
+  // This is here so that super is called.
+}
+
+void ShenandoahPeriodicTask::task() {
+  _thread->handle_force_counters_update();
+  _thread->handle_counters_update();
+}
+
+void ShenandoahPeriodicSATBFlushTask::task() {
+  ShenandoahHeap::heap()->force_satb_flush_all_threads();
+}
+
+void ShenandoahControlThread::run_service() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  int sleep = ShenandoahControlIntervalMin;
+
+  double last_shrink_time = os::elapsedTime();
+  double last_sleep_adjust_time = os::elapsedTime();
+
+  // Shrink period avoids constantly polling regions for shrinking.
+  // Having a period 10x lower than the delay would mean we hit the
+  // shrinking with a lag of less than 1/10th of the true delay.
+  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
+  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
+
+  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
+  ShenandoahHeuristics* heuristics = heap->heuristics();
+  while (!in_graceful_shutdown() && !should_terminate()) {
+    // Figure out if we have pending requests.
+    bool alloc_failure_pending = _alloc_failure_gc.is_set();
+    bool explicit_gc_requested = _gc_requested.is_set() &&  is_explicit_gc(_requested_gc_cause);
+    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);
+
+    // This control loop iteration has seen this many allocations.
+    size_t allocs_seen = Atomic::xchg<size_t>(0, &_allocs_seen);
+
+    // Choose which GC mode to run in. The block below should select a single mode.
+    GCMode mode = none;
+    GCCause::Cause cause = GCCause::_last_gc_cause;
+    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;
+
+    if (alloc_failure_pending) {
+      // Allocation failure takes precedence: we have to deal with it first thing
+      log_info(gc)("Trigger: Handle Allocation Failure");
+
+      cause = GCCause::_allocation_failure;
+
+      // Consume the degen point, and seed it with default value
+      degen_point = _degen_point;
+      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;
+
+      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
+        heuristics->record_allocation_failure_gc();
+        policy->record_alloc_failure_to_degenerated(degen_point);
+        mode = stw_degenerated;
+      } else {
+        heuristics->record_allocation_failure_gc();
+        policy->record_alloc_failure_to_full();
+        mode = stw_full;
+      }
+
+    } else if (explicit_gc_requested) {
+      cause = _requested_gc_cause;
+      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));
+
+      heuristics->record_requested_gc();
+
+      if (ExplicitGCInvokesConcurrent) {
+        policy->record_explicit_to_concurrent();
+        if (heuristics->can_do_traversal_gc()) {
+          mode = concurrent_traversal;
+        } else {
+          mode = concurrent_normal;
+        }
+        // Unload and clean up everything
+        heap->set_process_references(heuristics->can_process_references());
+        heap->set_unload_classes(heuristics->can_unload_classes());
+      } else {
+        policy->record_explicit_to_full();
+        mode = stw_full;
+      }
+    } else if (implicit_gc_requested) {
+      cause = _requested_gc_cause;
+      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));
+
+      heuristics->record_requested_gc();
+
+      if (ShenandoahImplicitGCInvokesConcurrent) {
+        policy->record_implicit_to_concurrent();
+        if (heuristics->can_do_traversal_gc()) {
+          mode = concurrent_traversal;
+        } else {
+          mode = concurrent_normal;
+        }
+
+        // Unload and clean up everything
+        heap->set_process_references(heuristics->can_process_references());
+        heap->set_unload_classes(heuristics->can_unload_classes());
+      } else {
+        policy->record_implicit_to_full();
+        mode = stw_full;
+      }
+    } else {
+      // Potential normal cycle: ask heuristics if it wants to act
+      if (heuristics->should_start_traversal_gc()) {
+        mode = concurrent_traversal;
+        cause = GCCause::_shenandoah_traversal_gc;
+      } else if (heuristics->should_start_normal_gc()) {
+        mode = concurrent_normal;
+        cause = GCCause::_shenandoah_concurrent_gc;
+      }
+
+      // Ask policy if this cycle wants to process references or unload classes
+      heap->set_process_references(heuristics->should_process_references());
+      heap->set_unload_classes(heuristics->should_unload_classes());
+    }
+
+    // Blow all soft references on this cycle, if handling allocation failure,
+    // or we are requested to do so unconditionally.
+    if (alloc_failure_pending || ShenandoahAlwaysClearSoftRefs) {
+      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
+    }
+
+    bool gc_requested = (mode != none);
+    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
+
+    if (gc_requested) {
+      heap->reset_bytes_allocated_since_gc_start();
+
+      // If GC was requested, we are sampling the counters even without actual triggers
+      // from allocation machinery. This captures GC phases more accurately.
+      set_forced_counters_update(true);
+
+      // If GC was requested, we better dump freeset data for performance debugging
+      {
+        ShenandoahHeapLocker locker(heap->lock());
+        heap->free_set()->log_status();
+      }
+    }
+
+    switch (mode) {
+      case none:
+        break;
+      case concurrent_traversal:
+        service_concurrent_traversal_cycle(cause);
+        break;
+      case concurrent_normal:
+        service_concurrent_normal_cycle(cause);
+        break;
+      case stw_degenerated:
+        service_stw_degenerated_cycle(cause, degen_point);
+        break;
+      case stw_full:
+        service_stw_full_cycle(cause);
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+
+    if (gc_requested) {
+      // If this was the requested GC cycle, notify waiters about it
+      if (explicit_gc_requested || implicit_gc_requested) {
+        notify_gc_waiters();
+      }
+
+      // If this was the allocation failure GC cycle, notify waiters about it
+      if (alloc_failure_pending) {
+        notify_alloc_failure_waiters();
+      }
+
+      // Report current free set state at the end of cycle, whether
+      // it is a normal completion, or the abort.
+      {
+        ShenandoahHeapLocker locker(heap->lock());
+        heap->free_set()->log_status();
+
+        // Notify Universe about new heap usage. This has implications for
+        // global soft refs policy, and we better report it every time heap
+        // usage goes down.
+        Universe::update_heap_info_at_gc();
+      }
+
+      // Disable forced counters update, and update counters one more time
+      // to capture the state at the end of GC session.
+      handle_force_counters_update();
+      set_forced_counters_update(false);
+
+      // Retract forceful part of soft refs policy
+      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);
+
+      // Clear metaspace oom flag, if current cycle unloaded classes
+      if (heap->unload_classes()) {
+        heuristics->clear_metaspace_oom();
+      }
+
+      // GC is over, we are at idle now
+      if (ShenandoahPacing) {
+        heap->pacer()->setup_for_idle();
+      }
+    } else {
+      // Let the pacer know how much allocation we have seen.
+      if (ShenandoahPacing && (allocs_seen > 0)) {
+        heap->pacer()->report_alloc(allocs_seen);
+      }
+    }
+
+    double current = os::elapsedTime();
+
+    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
+      // Try to uncommit enough stale regions. Explicit GC tries to uncommit everything.
+      // Regular paths uncommit only occasionally.
+      double shrink_before = explicit_gc_requested ?
+                             current :
+                             current - (ShenandoahUncommitDelay / 1000.0);
+      service_uncommit(shrink_before);
+      last_shrink_time = current;
+    }
+
+    // Wait before performing the next action. If allocation happened during this wait,
+    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
+    // back off exponentially.
+    if (_heap_changed.try_unset()) {
+      sleep = ShenandoahControlIntervalMin;
+    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
+      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
+      last_sleep_adjust_time = current;
+    }
+    os::naked_short_sleep(sleep);
+  }
+
+  // Wait for the actual stop(), can't leave run_service() earlier.
+  while (!should_terminate()) {
+    os::naked_short_sleep(ShenandoahControlIntervalMin);
+  }
+}
+
+void ShenandoahControlThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
+  GCIdMark gc_id_mark;
+  ShenandoahGCSession session(cause);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+
+  // Reset for upcoming cycle
+  heap->entry_reset();
+
+  heap->vmop_entry_init_traversal();
+
+  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;
+
+  heap->entry_traversal();
+  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;
+
+  heap->vmop_entry_final_traversal();
+
+  heap->entry_cleanup();
+
+  heap->heuristics()->record_success_concurrent();
+  heap->shenandoah_policy()->record_success_concurrent();
+}
+
+void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
+  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
+  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
+  // If a second allocation failure happens during the Degenerated GC cycle (for example, when GC
+  // tries to evac something and no memory is available), the cycle degrades to Full GC.
+  //
+  // There are also two shortcuts through the normal cycle: a) immediate garbage shortcut, when
+  // heuristics says there are no regions to compact, and all the collection comes from immediately
+  // reclaimable regions; b) coalesced UR shortcut, when heuristics decides to coalesce UR with the
+  // mark from the next cycle.
+  //
+  // ................................................................................................
+  //
+  //                                    (immediate garbage shortcut)                Concurrent GC
+  //                             /-------------------------------------------\
+  //                             |                       (coalesced UR)      v
+  //                             |                  /----------------------->o
+  //                             |                  |                        |
+  //                             |                  |                        v
+  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
+  //                   |                    |                 |              ^
+  //                   | (af)               | (af)            | (af)         |
+  // ..................|....................|.................|..............|.......................
+  //                   |                    |                 |              |
+  //                   |                    |                 |              |      Degenerated GC
+  //                   v                    v                 v              |
+  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
+  //                   |                    |                 |              ^
+  //                   | (af)               | (af)            | (af)         |
+  // ..................|....................|.................|..............|.......................
+  //                   |                    |                 |              |
+  //                   |                    v                 |              |      Full GC
+  //                   \------------------->o<----------------/              |
+  //                                        |                                |
+  //                                        v                                |
+  //                                      Full GC  --------------------------/
+  //
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;
+
+  GCIdMark gc_id_mark;
+  ShenandoahGCSession session(cause);
+
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+
+  // Reset for upcoming marking
+  heap->entry_reset();
+
+  // Start initial mark under STW
+  heap->vmop_entry_init_mark();
+
+  // Continue concurrent mark
+  heap->entry_mark();
+  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;
+
+  // If not cancelled, can try to concurrently pre-clean
+  heap->entry_preclean();
+
+  // Complete marking under STW, and start evacuation
+  heap->vmop_entry_final_mark();
+
+  // Continue the cycle with evacuation and optional update-refs.
+  // This may be skipped if there is nothing to evacuate.
+  // If so, evac_in_progress would be unset by collection set preparation code.
+  if (heap->is_evacuation_in_progress()) {
+    // Final mark had reclaimed some immediate garbage, kick cleanup to reclaim the space
+    // for the rest of the cycle, and report current state of free set.
+    heap->entry_cleanup();
+
+    {
+      ShenandoahHeapLocker locker(heap->lock());
+      heap->free_set()->log_status();
+    }
+
+    // Concurrently evacuate
+    heap->entry_evac();
+    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;
+
+    // Perform update-refs phase, if required. This phase can be skipped if heuristics
+    // decides to piggy-back the update-refs on the next marking cycle. On either path,
+    // we need to turn off evacuation: either in init-update-refs, or in final-evac.
+    if (heap->heuristics()->should_start_update_refs()) {
+      heap->vmop_entry_init_updaterefs();
+      heap->entry_updaterefs();
+      if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;
+
+      heap->vmop_entry_final_updaterefs();
+    } else {
+      heap->vmop_entry_final_evac();
+    }
+  }
+
+  // Reclaim space after cycle
+  heap->entry_cleanup();
+
+  // Cycle is complete
+  heap->heuristics()->record_success_concurrent();
+  heap->shenandoah_policy()->record_success_concurrent();
+}
+
+bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  if (heap->cancelled_gc()) {
+    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
+    if (!in_graceful_shutdown()) {
+      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
+              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
+      _degen_point = point;
+    }
+    return true;
+  }
+  return false;
+}
+
+void ShenandoahControlThread::stop_service() {
+  // Nothing to do here.
+}
+
+void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
+  GCIdMark gc_id_mark;
+  ShenandoahGCSession session(cause);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  heap->vmop_entry_full(cause);
+
+  heap->heuristics()->record_success_full();
+  heap->shenandoah_policy()->record_success_full();
+}
+
+void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
+  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");
+
+  GCIdMark gc_id_mark;
+  ShenandoahGCSession session(cause);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  heap->vmop_degenerated(point);
+
+  heap->heuristics()->record_success_degenerated();
+  heap->shenandoah_policy()->record_success_degenerated();
+}
+
+void ShenandoahControlThread::service_uncommit(double shrink_before) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  // Scan through the heap and determine if there is work to do. This avoids taking
+  // heap lock if there is no work available, avoids spamming logs with superfluous
+  // logging messages, and minimises the amount of work while locks are taken.
+
+  bool has_work = false;
+  for (size_t i = 0; i < heap->num_regions(); i++) {
+    ShenandoahHeapRegion *r = heap->get_region(i);
+    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
+      has_work = true;
+      break;
+    }
+  }
+
+  if (has_work) {
+    heap->entry_uncommit(shrink_before);
+  }
+}
+
+bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
+  return GCCause::is_user_requested_gc(cause) ||
+         GCCause::is_serviceability_requested_gc(cause);
+}
+
+void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
+  assert(GCCause::is_user_requested_gc(cause) ||
+         GCCause::is_serviceability_requested_gc(cause) ||
+         cause == GCCause::_metadata_GC_clear_soft_refs ||
+         cause == GCCause::_full_gc_alot ||
+         cause == GCCause::_wb_full_gc ||
+         cause == GCCause::_scavenge_alot,
+         "only requested GCs here");
+
+  if (is_explicit_gc(cause)) {
+    if (!DisableExplicitGC) {
+      handle_requested_gc(cause);
+    }
+  } else {
+    handle_requested_gc(cause);
+  }
+}
+
+void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
+  _requested_gc_cause = cause;
+  _gc_requested.set();
+  MonitorLockerEx ml(&_gc_waiters_lock);
+  while (_gc_requested.is_set()) {
+    ml.wait();
+  }
+}
+
+void ShenandoahControlThread::handle_alloc_failure(size_t words) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  assert(current()->is_Java_thread(), "expect Java thread here");
+
+  if (try_set_alloc_failure_gc()) {
+    // Only report the first allocation failure
+    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s",
+                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
+
+    // Now that alloc failure GC is scheduled, we can abort everything else
+    heap->cancel_gc(GCCause::_allocation_failure);
+  }
+
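+  // Block here until the control thread has run the alloc-failure GC cycle
+  // and notified the waiters.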
+  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
+  while (is_alloc_failure_gc()) {
+    ml.wait();
+  }
+}
+
+void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  if (try_set_alloc_failure_gc()) {
+    // Only report the first allocation failure
+    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
+                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
+  }
+
+  // Forcefully report allocation failure
+  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
+}
+
+void ShenandoahControlThread::notify_alloc_failure_waiters() {
+  _alloc_failure_gc.unset();
+  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
+  ml.notify_all();
+}
+
+bool ShenandoahControlThread::try_set_alloc_failure_gc() {
+  return _alloc_failure_gc.try_set();
+}
+
+bool ShenandoahControlThread::is_alloc_failure_gc() {
+  return _alloc_failure_gc.is_set();
+}
+
+void ShenandoahControlThread::notify_gc_waiters() {
+  _gc_requested.unset();
+  MonitorLockerEx ml(&_gc_waiters_lock);
+  ml.notify_all();
+}
+
+void ShenandoahControlThread::handle_counters_update() {
+  if (_do_counters_update.is_set()) {
+    _do_counters_update.unset();
+    ShenandoahHeap::heap()->monitoring_support()->update_counters();
+  }
+}
+
+void ShenandoahControlThread::handle_force_counters_update() {
+  if (_force_counters_update.is_set()) {
+    _do_counters_update.unset(); // reset these too, we do update now!
+    ShenandoahHeap::heap()->monitoring_support()->update_counters();
+  }
+}
+
+void ShenandoahControlThread::notify_heap_changed() {
+  // This is called from allocation path, and thus should be fast.
+
+  // Update monitoring counters when we took a new region. This amortizes the
+  // update costs on slow path.
+  if (_do_counters_update.is_unset()) {
+    _do_counters_update.set();
+  }
+  // Notify that something had changed.
+  if (_heap_changed.is_unset()) {
+    _heap_changed.set();
+  }
+}
+
+void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
+  assert(ShenandoahPacing, "should only call when pacing is enabled");
+  Atomic::add(words, &_allocs_seen);
+}
+
+void ShenandoahControlThread::set_forced_counters_update(bool value) {
+  _force_counters_update.set_cond(value);
+}
+
+void ShenandoahControlThread::print() const {
+  print_on(tty);
+}
+
+void ShenandoahControlThread::print_on(outputStream* st) const {
+  st->print("Shenandoah Concurrent Thread");
+  Thread::print_on(st);
+  st->cr();
+}
+
+void ShenandoahControlThread::start() {
+  create_and_start();
+}
+
+void ShenandoahControlThread::prepare_for_graceful_shutdown() {
+  _graceful_shutdown.set();
+}
+
+bool ShenandoahControlThread::in_graceful_shutdown() {
+  return _graceful_shutdown.is_set();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSCHEDULERTHREAD_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSCHEDULERTHREAD_HPP
+
+#include "gc/shared/gcCause.hpp"
+#include "gc/shared/concurrentGCThread.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahSharedVariables.hpp"
+#include "runtime/task.hpp"
+#include "utilities/ostream.hpp"
+
+// Periodic task is useful for doing asynchronous things that do not require (heap) locks,
+// or synchronization with other parts of the collector. These can run even when the
+// ShenandoahControlThread is busy driving the GC cycle.
+class ShenandoahPeriodicTask : public PeriodicTask {
+private:
+  ShenandoahControlThread* _thread;
+public:
+  ShenandoahPeriodicTask(ShenandoahControlThread* thread) :
+          PeriodicTask(100), _thread(thread) {}
+  virtual void task();
+};
+
+// Periodic task to flush SATB buffers periodically.
+class ShenandoahPeriodicSATBFlushTask : public PeriodicTask {
+public:
+  ShenandoahPeriodicSATBFlushTask() : PeriodicTask(ShenandoahSATBBufferFlushInterval) {}
+  virtual void task();
+};
+
+class ShenandoahControlThread: public ConcurrentGCThread {
+  friend class VMStructs;
+
+private:
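+  // GC modes the control loop can select for the next cycle.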
+  typedef enum {
+    none,
+    concurrent_traversal,
+    concurrent_normal,
+    stw_degenerated,
+    stw_full,
+  } GCMode;
+
+  // While we could have a single lock for these, it may risk unblocking
+  // GC waiters when the alloc failure GC cycle finishes. We instead want
+  // to run a complete explicit cycle for demanding customers.
+  Monitor _alloc_failure_waiters_lock;
+  Monitor _gc_waiters_lock;
+  ShenandoahPeriodicTask _periodic_task;
+  ShenandoahPeriodicSATBFlushTask _periodic_satb_flush_task;
+
+public:
+  void run_service();
+  void stop_service();
+
+private:
+  ShenandoahSharedFlag _gc_requested;
+  ShenandoahSharedFlag _alloc_failure_gc;
+  ShenandoahSharedFlag _graceful_shutdown;
+  ShenandoahSharedFlag _heap_changed;
+  ShenandoahSharedFlag _do_counters_update;
+  ShenandoahSharedFlag _force_counters_update;
+  GCCause::Cause       _requested_gc_cause;
+  ShenandoahHeap::ShenandoahDegenPoint _degen_point;
+
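+  // Pad the allocation counter onto its own cache line to avoid false
+  // sharing with the adjacent fields.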
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
+  volatile size_t _allocs_seen;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  bool check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point);
+  void service_concurrent_normal_cycle(GCCause::Cause cause);
+  void service_stw_full_cycle(GCCause::Cause cause);
+  void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point);
+  void service_concurrent_traversal_cycle(GCCause::Cause cause);
+  void service_uncommit(double shrink_before);
+
+  bool try_set_alloc_failure_gc();
+  void notify_alloc_failure_waiters();
+  bool is_alloc_failure_gc();
+
+  void notify_gc_waiters();
+
+  // Handle GC request.
+  // Blocks until GC is over.
+  void handle_requested_gc(GCCause::Cause cause);
+
+  bool is_explicit_gc(GCCause::Cause cause) const;
+public:
+  // Constructor
+  ShenandoahControlThread();
+  ~ShenandoahControlThread();
+
+  // Handle allocation failure from normal allocation.
+  // Blocks until memory is available.
+  void handle_alloc_failure(size_t words);
+
+  // Handle allocation failure from evacuation path.
+  // Optionally blocks while collector is handling the failure.
+  void handle_alloc_failure_evac(size_t words);
+
+  void request_gc(GCCause::Cause cause);
+
+  void handle_counters_update();
+  void handle_force_counters_update();
+  void set_forced_counters_update(bool value);
+
+  void notify_heap_changed();
+
+  void pacing_notify_alloc(size_t words);
+
+  void start();
+  void prepare_for_graceful_shutdown();
+  bool in_graceful_shutdown();
+
+  char* name() const { return (char*)"ShenandoahControlThread"; }
+
+  // Printing
+  void print_on(outputStream* st) const;
+  void print() const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSCHEDULERTHREAD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.hpp"
+
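+// The top bit of _threads_in_evac flags OOM-during-evac; the remaining bits
+// count the threads currently inside the evacuation path.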
+const jint ShenandoahEvacOOMHandler::OOM_MARKER_MASK = 0x80000000;
+
+ShenandoahEvacOOMHandler::ShenandoahEvacOOMHandler() :
+  _threads_in_evac(0) {
+}
+
+void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() {
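+  // Spin until the counter, sans the OOM marker bit, drops to zero,
+  // i.e. until all threads have left the evacuation path.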
+  while ((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) {
+    os::naked_short_sleep(1);
+  }
+  // At this point we are sure that no threads can evacuate anything. Raise
+  // the thread-local oom_during_evac flag to indicate that any attempt
+  // to evacuate should simply return the forwarding pointer instead (which is safe now).
+  ShenandoahThreadLocalData::set_oom_during_evac(Thread::current(), true);
+}
+
+void ShenandoahEvacOOMHandler::enter_evacuation() {
+  jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac);
+
+  assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity");
+  assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
+
+  if ((threads_in_evac & OOM_MARKER_MASK) != 0) {
+    wait_for_no_evac_threads();
+    return;
+  }
+
+  while (true) {
+    jint other = Atomic::cmpxchg(threads_in_evac + 1, &_threads_in_evac, threads_in_evac);
+    if (other == threads_in_evac) {
+      // Success: caller may safely enter evacuation
+      DEBUG_ONLY(ShenandoahThreadLocalData::set_evac_allowed(Thread::current(), true));
+      return;
+    } else {
+      // Failure:
+      //  - if offender has OOM_MARKER_MASK, then loop until no more threads in evac
+      //  - otherwise re-try CAS
+      if ((other & OOM_MARKER_MASK) != 0) {
+        wait_for_no_evac_threads();
+        return;
+      }
+      threads_in_evac = other;
+    }
+  }
+}
+
+void ShenandoahEvacOOMHandler::leave_evacuation() {
+  if (!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
+    assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity");
+    // NOTE: It's ok to simply decrement, even with mask set, because unmasked value is positive.
+    Atomic::dec(&_threads_in_evac);
+  } else {
+    // If we get here, the current thread has already gone through the
+    // OOM-during-evac protocol and has thus either never entered or successfully left
+    // the evacuation region. Simply flip its TL oom-during-evac flag back off.
+    ShenandoahThreadLocalData::set_oom_during_evac(Thread::current(), false);
+  }
+  DEBUG_ONLY(ShenandoahThreadLocalData::set_evac_allowed(Thread::current(), false));
+  assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must be turned off");
+}
+
+void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() {
+  assert(ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity");
+  assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
+
+  jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac);
+  while (true) {
+    jint other = Atomic::cmpxchg((threads_in_evac - 1) | OOM_MARKER_MASK,
+                                  &_threads_in_evac, threads_in_evac);
+    if (other == threads_in_evac) {
+      // Success: wait for other threads to get out of the protocol and return.
+      wait_for_no_evac_threads();
+      return;
+    } else {
+      // Failure: try again with updated new value.
+      threads_in_evac = other;
+    }
+  }
+}
+
+void ShenandoahEvacOOMHandler::clear() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
+  assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity");
+  OrderAccess::release_store_fence<jint>(&_threads_in_evac, 0);
+}
+
+ShenandoahEvacOOMScope::ShenandoahEvacOOMScope() {
+  ShenandoahHeap::heap()->enter_evacuation();
+}
+
+ShenandoahEvacOOMScope::~ShenandoahEvacOOMScope() {
+  ShenandoahHeap::heap()->leave_evacuation();
+}
+
+ShenandoahEvacOOMScopeLeaver::ShenandoahEvacOOMScopeLeaver() {
+  ShenandoahHeap::heap()->leave_evacuation();
+}
+
+ShenandoahEvacOOMScopeLeaver::~ShenandoahEvacOOMScopeLeaver() {
+  ShenandoahHeap::heap()->enter_evacuation();
+}
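+// A minimal usage sketch (illustrative only, not part of this change): code that
+// evacuates objects brackets the attempt with the RAII scope above, so the
+// OOM-during-evac protocol is honored. try_evacuate() is a hypothetical placeholder.
+//
+//   {
+//     ShenandoahEvacOOMScope scope;  // enter_evacuation(): counter++
+//     oop copy = try_evacuate(obj);  // may invoke handle_out_of_memory_during_evacuation()
+//     // ... use copy ...
+//   }                                // leave_evacuation(): counter--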
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+/**
+ * Provides safe handling of out-of-memory situations during evacuation.
+ *
+ * When a Java thread encounters out-of-memory while evacuating an object in a
+ * write-barrier (i.e. it cannot copy the object to to-space), it does not
+ * necessarily follow that it can return immediately from the WB (and store to
+ * from-space).
+ *
+ * In the very basic case, on such a failure we may wait until the evacuation is
+ * over, then resolve the forwarded copy and do the store there. This is possible
+ * because other threads might still have space in their GCLABs, and may
+ * successfully evacuate the object.
+ *
+ * But there is a race due to the non-atomic evac_in_progress transition. Consider
+ * that thread A is stuck waiting for the evacuation to be over -- it cannot leave
+ * with the from-space copy yet. The control thread drops evacuation_in_progress,
+ * preparing for the next STW phase that has to recover from OOME. Thread B misses
+ * that update, successfully evacuates the object, and does the write to the to-copy.
+ * But before thread B is able to install the fwdptr, thread A discovers that
+ * evac_in_progress is down, exits from here, reads the fwdptr, finds the old
+ * from-copy, and stores there. Thread B then wakes up and installs the to-copy.
+ * This breaks the to-space invariant, and silently corrupts the heap: we accepted
+ * two writes to separate copies of the object.
+ *
+ * The way it is solved here is to maintain a counter of threads inside the
+ * 'evacuation path'. The 'evacuation path' is the part of evacuation that does the actual
+ * allocation, copying and CASing of the copy object, and is protected by this
+ * OOM-during-evac-handler. The handler allows multiple threads to enter and exit
+ * the evacuation path, but on OOME it requires every thread that experienced OOME
+ * to wait until all current threads have left, and blocks further threads from entering.
+ *
+ * Detailed state change:
+ *
+ * Upon entry of the evac-path, entering thread will attempt to increase the counter,
+ * using a CAS. Depending on the result of the CAS:
+ * - success: carry on with evac
+ * - failure:
+ *   - if offending value is a valid counter, then try again
+ *   - if offending value is OOM-during-evac special value: loop until
+ *     counter drops to 0, then exit with read-barrier
+ *
+ * Upon exit, exiting thread will decrease the counter using atomic dec.
+ *
+ * Upon OOM-during-evac, any thread will attempt to CAS OOM-during-evac
+ * special value into the counter. Depending on result:
+ *   - success: busy-loop until counter drops to zero, then exit with RB
+ *   - failure:
+ *     - offender is valid counter update: try again
+ *     - offender is OOM-during-evac: busy loop until counter drops to
+ *       zero, then exit with RB
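+ *
+ * A minimal sketch of these transitions (illustration only, not the exact code
+ * below; OOM stands for the marker bit, and wait_for_zero() spins until the
+ * unmasked count reaches zero):
+ *
+ *   enter: do { v = count; if (v & OOM) { wait_for_zero(); return; } }
+ *          while (!CAS(&count, v, v + 1));
+ *   leave: count--;                          // marker bit survives the decrement
+ *   oom:   do { v = count; } while (!CAS(&count, v, (v - 1) | OOM));
+ *          wait_for_zero();                  // then continue with read-barrier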
+ */
+class ShenandoahEvacOOMHandler {
+private:
+  static const jint OOM_MARKER_MASK;
+
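+  // The counter is padded to a cache line on both sides to avoid false sharing;
+  // all threads in the evacuation path CAS on this single word.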
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile jint));
+  volatile jint _threads_in_evac;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  void wait_for_no_evac_threads();
+
+public:
+  ShenandoahEvacOOMHandler();
+
+  /**
+   * Attempt to enter the protected evacuation path.
+   *
+   * On return, either it is safe to continue with normal evacuation, or the
+   * thread's oom-during-evac flag has been set: in that case evacuation must not
+   * be entered, and the caller may safely continue with a read-barrier (if Java thread).
+   */
+  void enter_evacuation();
+
+  /**
+   * Leave evacuation path.
+   */
+  void leave_evacuation();
+
+  /**
+   * Signal out-of-memory during evacuation. It will prevent any other threads
+   * from entering the evacuation path, then wait until all threads have left the
+   * evacuation path, and then return. It is then safe to continue with a read-barrier.
+   */
+  void handle_out_of_memory_during_evacuation();
+
+  void clear();
+};
+
+class ShenandoahEvacOOMScope : public StackObj {
+public:
+  ShenandoahEvacOOMScope();
+  ~ShenandoahEvacOOMScope();
+};
+
+class ShenandoahEvacOOMScopeLeaver : public StackObj {
+public:
+  ShenandoahEvacOOMScopeLeaver();
+  ~ShenandoahEvacOOMScopeLeaver();
+};
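+
+// Typical usage (illustrative sketch): wrap the actual evacuation work in an
+// OOM scope, so that entry and exit are balanced on all paths:
+//
+//   {
+//     ShenandoahEvacOOMScope scope;   // enter_evacuation()
+//     // ... allocate, copy, CAS the forwarding pointer ...
+//   }                                 // leave_evacuation() on scope exit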
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,624 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "logging/logStream.hpp"
+
+ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
+  _heap(heap),
+  _mutator_free_bitmap(max_regions, mtGC),
+  _collector_free_bitmap(max_regions, mtGC),
+  _max(max_regions)
+{
+  clear_internal();
+}
+
+void ShenandoahFreeSet::increase_used(size_t num_bytes) {
+  assert_heaplock_owned_by_current_thread();
+  _used += num_bytes;
+
+  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
+         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
+}
+
+bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
+  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
+          idx, _max, _mutator_leftmost, _mutator_rightmost);
+  return _mutator_free_bitmap.at(idx);
+}
+
+bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
+  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
+          idx, _max, _collector_leftmost, _collector_rightmost);
+  return _collector_free_bitmap.at(idx);
+}
+
+HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
+  // Scan the bitmap looking for a first fit.
+  //
+  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently.
+  // Normally, we find the region to allocate in right away.
+  //
+  // Allocations are biased: new application allocs go to the beginning of the heap, and GC
+  // allocs go to the end. This makes application allocation faster, because we would clear
+  // lots of regions from the beginning most of the time.
+  //
+  // The free set maintains mutator and collector views, and normally each allocates in its
+  // own view only, unless we have special cases for stealing and mixed allocations.
+
+  switch (req.type()) {
+    case ShenandoahAllocRequest::_alloc_tlab:
+    case ShenandoahAllocRequest::_alloc_shared: {
+
+      // Try to allocate in the mutator view
+      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
+        if (is_mutator_free(idx)) {
+          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
+          if (result != NULL) {
+            return result;
+          }
+        }
+      }
+
+      // There is no recovery. Mutator does not touch collector view at all.
+      break;
+    }
+    case ShenandoahAllocRequest::_alloc_gclab:
+    case ShenandoahAllocRequest::_alloc_shared_gc: {
+      // size_t is unsigned, need to dodge underflow when _leftmost = 0
+
+      // Fast-path: try to allocate in the collector view first
+      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
+        size_t idx = c - 1;
+        if (is_collector_free(idx)) {
+          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
+          if (result != NULL) {
+            return result;
+          }
+        }
+      }
+
+      // No dice. Can we borrow space from mutator view?
+      if (!ShenandoahEvacReserveOverflow) {
+        return NULL;
+      }
+
+      // Try to steal an empty region from the mutator view
+      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
+        size_t idx = c - 1;
+        if (is_mutator_free(idx)) {
+          ShenandoahHeapRegion* r = _heap->get_region(idx);
+          if (is_empty_or_trash(r)) {
+            flip_to_gc(r);
+            HeapWord *result = try_allocate_in(r, req, in_new_region);
+            if (result != NULL) {
+              return result;
+            }
+          }
+        }
+      }
+
+      // Try to mix the allocation into the mutator view:
+      if (ShenandoahAllowMixedAllocs) {
+        for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
+          size_t idx = c - 1;
+          if (is_mutator_free(idx)) {
+            HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
+            if (result != NULL) {
+              return result;
+            }
+          }
+        }
+      }
+      break;
+    }
+    default:
+      ShouldNotReachHere();
+  }
+
+  return NULL;
+}
+
+HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
+  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->region_number());
+
+  try_recycle_trashed(r);
+
+  in_new_region = r->is_empty();
+
+  HeapWord* result = NULL;
+  size_t size = req.size();
+
+  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
+    size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
+    if (size > free) {
+      size = free;
+    }
+    if (size >= req.min_size()) {
+      result = r->allocate(size, req.type());
+      assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
+    }
+  } else {
+    result = r->allocate(size, req.type());
+  }
+
+  if (result != NULL) {
+    // Allocation successful, bump stats:
+    if (req.is_mutator_alloc()) {
+      increase_used(size * HeapWordSize);
+    }
+
+    // Record actual allocation size
+    req.set_actual_size(size);
+
+    if (req.is_gc_alloc() && _heap->is_concurrent_traversal_in_progress()) {
+      // Traversal needs to traverse through GC allocs. Adjust TAMS to the new top
+      // so that these allocations appear below TAMS, and thus get traversed.
+      // See top of shenandoahTraversal.cpp for an explanation.
+      _heap->marking_context()->capture_top_at_mark_start(r);
+      _heap->traversal_gc()->traversal_set()->add_region_check_for_duplicates(r);
+      OrderAccess::fence();
+    }
+  }
+
+  if (result == NULL || has_no_alloc_capacity(r)) {
+    // Region cannot afford this or future allocations. Retire it.
+    //
+    // While this may seem harsh, especially when this large allocation does not fit but the
+    // next small one would, not retiring risks inflating scan times when lots of almost-full
+    // regions precede the fully-empty region where we want to allocate an entire TLAB.
+    // TODO: Record first fully-empty region, and use that for large allocations
+
+    // Record the remainder as allocation waste
+    if (req.is_mutator_alloc()) {
+      size_t waste = r->free();
+      if (waste > 0) {
+        increase_used(waste);
+        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
+      }
+    }
+
+    size_t num = r->region_number();
+    _collector_free_bitmap.clear_bit(num);
+    _mutator_free_bitmap.clear_bit(num);
+    // Touched the bounds? Need to update:
+    if (touches_bounds(num)) {
+      adjust_bounds();
+    }
+    assert_bounds();
+  }
+  return result;
+}
+
+bool ShenandoahFreeSet::touches_bounds(size_t num) const {
+  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
+}
+
+void ShenandoahFreeSet::recompute_bounds() {
+  // Reset to the most pessimistic case:
+  _mutator_rightmost = _max - 1;
+  _mutator_leftmost = 0;
+  _collector_rightmost = _max - 1;
+  _collector_leftmost = 0;
+
+  // ...and adjust from there
+  adjust_bounds();
+}
+
+void ShenandoahFreeSet::adjust_bounds() {
+  // Rewind both mutator bounds until the next bit.
+  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
+    _mutator_leftmost++;
+  }
+  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
+    _mutator_rightmost--;
+  }
+  // Rewind both collector bounds until the next bit.
+  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
+    _collector_leftmost++;
+  }
+  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
+    _collector_rightmost--;
+  }
+}
+
+HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
+  assert_heaplock_owned_by_current_thread();
+
+  size_t words_size = req.size();
+  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
+
+  // No regions left to satisfy allocation, bye.
+  if (num > mutator_count()) {
+    return NULL;
+  }
+
+  // Find a contiguous interval of $num regions, starting at $beg and ending at $end,
+  // inclusive. Contiguous allocations are biased to the beginning of the heap.
+
+  size_t beg = _mutator_leftmost;
+  size_t end = beg;
+
+  while (true) {
+    if (end >= _max) {
+      // Hit the end, goodbye
+      return NULL;
+    }
+
+    // If the region at $end is not free, or not completely empty, the current [beg; end]
+    // interval is useless, and we may fast-forward past it.
+    if (!is_mutator_free(end) || !is_empty_or_trash(_heap->get_region(end))) {
+      end++;
+      beg = end;
+      continue;
+    }
+
+    if ((end - beg + 1) == num) {
+      // found the match
+      break;
+    }
+
+    end++;
+  }
+
+  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
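+  // Illustrative: with 32K-word regions, a 70K-word humongous request takes
+  // num = 3 regions and leaves remainder = 6K words used in the trailing one.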
+
+  // Initialize regions:
+  for (size_t i = beg; i <= end; i++) {
+    ShenandoahHeapRegion* r = _heap->get_region(i);
+    try_recycle_trashed(r);
+
+    assert(i == beg || _heap->get_region(i-1)->region_number() + 1 == r->region_number(), "Should be contiguous");
+    assert(r->is_empty(), "Should be empty");
+
+    if (i == beg) {
+      r->make_humongous_start();
+    } else {
+      r->make_humongous_cont();
+    }
+
+    // Trailing region may be non-full, record the remainder there
+    size_t used_words;
+    if ((i == end) && (remainder != 0)) {
+      used_words = remainder;
+    } else {
+      used_words = ShenandoahHeapRegion::region_size_words();
+    }
+
+    r->set_top(r->bottom() + used_words);
+    r->reset_alloc_metadata_to_shared();
+
+    _mutator_free_bitmap.clear_bit(r->region_number());
+  }
+
+  // While individual regions report their true use, all humongous regions are
+  // marked used in the free set.
+  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
+
+  if (remainder != 0) {
+    // Record this remainder as allocation waste
+    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
+  }
+
+  // Allocated at left/rightmost? Move the bounds appropriately.
+  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
+    adjust_bounds();
+  }
+  assert_bounds();
+
+  req.set_actual_size(words_size);
+  return _heap->get_region(beg)->bottom();
+}
+
+bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) {
+  return r->is_empty() || r->is_trash();
+}
+
+size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
+  if (r->is_trash()) {
+    // This would be recycled on allocation path
+    return ShenandoahHeapRegion::region_size_bytes();
+  } else {
+    return r->free();
+  }
+}
+
+bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
+  return alloc_capacity(r) == 0;
+}
+
+void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
+  if (r->is_trash()) {
+    _heap->decrease_used(r->used());
+    r->recycle();
+  }
+}
+
+void ShenandoahFreeSet::recycle_trash() {
+  // The heap lock is not reentrant; make sure we do not already hold it.
+  assert_heaplock_not_owned_by_current_thread();
+
+  for (size_t i = 0; i < _heap->num_regions(); i++) {
+    ShenandoahHeapRegion* r = _heap->get_region(i);
+    if (r->is_trash()) {
+      ShenandoahHeapLocker locker(_heap->lock());
+      try_recycle_trashed(r);
+    }
+    SpinPause(); // allow allocators to take the lock
+  }
+}
+
+void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
+  size_t idx = r->region_number();
+
+  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
+  assert(is_empty_or_trash(r), "Should not be allocated");
+
+  _mutator_free_bitmap.clear_bit(idx);
+  _collector_free_bitmap.set_bit(idx);
+  _collector_leftmost = MIN2(idx, _collector_leftmost);
+  _collector_rightmost = MAX2(idx, _collector_rightmost);
+
+  _capacity -= alloc_capacity(r);
+
+  if (touches_bounds(idx)) {
+    adjust_bounds();
+  }
+  assert_bounds();
+}
+
+void ShenandoahFreeSet::clear() {
+  assert_heaplock_owned_by_current_thread();
+  clear_internal();
+}
+
+void ShenandoahFreeSet::clear_internal() {
+  _mutator_free_bitmap.clear();
+  _collector_free_bitmap.clear();
+  _mutator_leftmost = _max;
+  _mutator_rightmost = 0;
+  _collector_leftmost = _max;
+  _collector_rightmost = 0;
+  _capacity = 0;
+  _used = 0;
+}
+
+void ShenandoahFreeSet::rebuild() {
+  assert_heaplock_owned_by_current_thread();
+  clear();
+
+  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
+    ShenandoahHeapRegion* region = _heap->get_region(idx);
+    if (region->is_alloc_allowed() || region->is_trash()) {
+      assert(!region->is_cset(), "Shouldn't be adding those to the free set");
+
+      // Do not add regions that would surely fail allocation
+      if (has_no_alloc_capacity(region)) continue;
+
+      _capacity += alloc_capacity(region);
+      assert(_used <= _capacity, "must not use more than we have");
+
+      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
+      _mutator_free_bitmap.set_bit(idx);
+    }
+  }
+
+  // Evac reserve: reserve trailing space for evacuations
+  size_t to_reserve = ShenandoahEvacReserve * _heap->capacity() / 100;
+  size_t reserved = 0;
+
+  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
+    if (reserved >= to_reserve) break;
+
+    ShenandoahHeapRegion* region = _heap->get_region(idx);
+    if (_mutator_free_bitmap.at(idx) && is_empty_or_trash(region)) {
+      _mutator_free_bitmap.clear_bit(idx);
+      _collector_free_bitmap.set_bit(idx);
+      size_t ac = alloc_capacity(region);
+      _capacity -= ac;
+      reserved += ac;
+    }
+  }
+
+  recompute_bounds();
+  assert_bounds();
+}
+
+void ShenandoahFreeSet::log_status() {
+  assert_heaplock_owned_by_current_thread();
+
+  LogTarget(Info, gc, ergo) lt;
+  if (lt.is_enabled()) {
+    ResourceMark rm;
+    LogStream ls(lt);
+
+    {
+      size_t last_idx = 0;
+      size_t max = 0;
+      size_t max_contig = 0;
+      size_t empty_contig = 0;
+
+      size_t total_used = 0;
+      size_t total_free = 0;
+
+      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
+        if (is_mutator_free(idx)) {
+          ShenandoahHeapRegion *r = _heap->get_region(idx);
+          size_t free = alloc_capacity(r);
+
+          max = MAX2(max, free);
+
+          if (r->is_empty() && (last_idx + 1 == idx)) {
+            empty_contig++;
+          } else {
+            empty_contig = 0;
+          }
+
+          total_used += r->used();
+          total_free += free;
+
+          max_contig = MAX2(max_contig, empty_contig);
+          last_idx = idx;
+        }
+      }
+
+      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
+      size_t free = capacity() - used();
+
+      ls.print("Free: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K, Max humongous: " SIZE_FORMAT "K, ",
+               total_free / M, mutator_count(), max / K, max_humongous / K);
+
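+      // External fragmentation: the share of free space that cannot back a single
+      // max-size humongous allocation. Illustrative: 100M free with a largest
+      // contiguous empty run of 40M gives 100 - 40 = 60% external fragmentation.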
+      size_t frag_ext;
+      if (free > 0) {
+        frag_ext = 100 - (100 * max_humongous / free);
+      } else {
+        frag_ext = 0;
+      }
+      ls.print("External frag: " SIZE_FORMAT "%%, ", frag_ext);
+
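+      // Internal fragmentation: average utilization of mutator-free regions,
+      // i.e. mean used bytes per region relative to the region size.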
+      size_t frag_int;
+      if (mutator_count() > 0) {
+        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
+      } else {
+        frag_int = 0;
+      }
+      ls.print("Internal frag: " SIZE_FORMAT "%%", frag_int);
+      ls.cr();
+    }
+
+    {
+      size_t max = 0;
+      size_t total_free = 0;
+
+      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
+        if (is_collector_free(idx)) {
+          ShenandoahHeapRegion *r = _heap->get_region(idx);
+          size_t free = alloc_capacity(r);
+          max = MAX2(max, free);
+          total_free += free;
+        }
+      }
+
+      ls.print_cr("Evacuation Reserve: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K",
+                  total_free / M, collector_count(), max / K);
+    }
+  }
+}
+
+HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
+  assert_heaplock_owned_by_current_thread();
+  assert_bounds();
+
+  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
+    switch (req.type()) {
+      case ShenandoahAllocRequest::_alloc_shared:
+      case ShenandoahAllocRequest::_alloc_shared_gc:
+        in_new_region = true;
+        return allocate_contiguous(req);
+      case ShenandoahAllocRequest::_alloc_gclab:
+      case ShenandoahAllocRequest::_alloc_tlab:
+        in_new_region = false;
+        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
+               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
+        return NULL;
+      default:
+        ShouldNotReachHere();
+        return NULL;
+    }
+  } else {
+    return allocate_single(req, in_new_region);
+  }
+}
+
+size_t ShenandoahFreeSet::unsafe_peek_free() const {
+  // Deliberately not locked; this method is unsafe when the free set is modified.
+
+  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
+    if (index < _max && is_mutator_free(index)) {
+      ShenandoahHeapRegion* r = _heap->get_region(index);
+      if (r->free() >= MinTLABSize) {
+        return r->free();
+      }
+    }
+  }
+
+  // It appears that no regions are left
+  return 0;
+}
+
+void ShenandoahFreeSet::print_on(outputStream* out) const {
+  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
+  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
+    if (is_mutator_free(index)) {
+      _heap->get_region(index)->print_on(out);
+    }
+  }
+  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
+  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
+    if (is_collector_free(index)) {
+      _heap->get_region(index)->print_on(out);
+    }
+  }
+}
+
+#ifdef ASSERT
+void ShenandoahFreeSet::assert_heaplock_owned_by_current_thread() const {
+  _heap->assert_heaplock_owned_by_current_thread();
+}
+
+void ShenandoahFreeSet::assert_heaplock_not_owned_by_current_thread() const {
+  _heap->assert_heaplock_not_owned_by_current_thread();
+}
+
+void ShenandoahFreeSet::assert_bounds() const {
+  // Performance invariants. Failing these would not break the free set, but performance
+  // would suffer.
+  assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, _mutator_leftmost,  _max);
+  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);
+
+  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
+  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);
+
+  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
+  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
+  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
+  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);
+
+  assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, _collector_leftmost,  _max);
+  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);
+
+  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
+  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);
+
+  beg_off = _collector_free_bitmap.get_next_one_offset(0);
+  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
+  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
+  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
+
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+
+class ShenandoahFreeSet : public CHeapObj<mtGC> {
+private:
+  ShenandoahHeap* const _heap;
+  CHeapBitMap _mutator_free_bitmap;
+  CHeapBitMap _collector_free_bitmap;
+  size_t _max;
+
+  // Left-most and right-most region indexes. There are no free regions outside
+  // of [left-most; right-most] index intervals
+  size_t _mutator_leftmost, _mutator_rightmost;
+  size_t _collector_leftmost, _collector_rightmost;
+
+  size_t _capacity;
+  size_t _used;
+
+  void assert_bounds() const PRODUCT_RETURN;
+  void assert_heaplock_owned_by_current_thread() const PRODUCT_RETURN;
+  void assert_heaplock_not_owned_by_current_thread() const PRODUCT_RETURN;
+
+  bool is_mutator_free(size_t idx) const;
+  bool is_collector_free(size_t idx) const;
+
+  HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region);
+  HeapWord* allocate_single(ShenandoahAllocRequest& req, bool& in_new_region);
+  HeapWord* allocate_contiguous(ShenandoahAllocRequest& req);
+
+  void flip_to_gc(ShenandoahHeapRegion* r);
+
+  void recompute_bounds();
+  void adjust_bounds();
+  bool touches_bounds(size_t num) const;
+
+  void increase_used(size_t amount);
+  void clear_internal();
+
+  size_t collector_count() const { return _collector_free_bitmap.count_one_bits(); }
+  size_t mutator_count()   const { return _mutator_free_bitmap.count_one_bits();   }
+
+  void try_recycle_trashed(ShenandoahHeapRegion *r);
+
+  bool is_empty_or_trash(ShenandoahHeapRegion *r);
+  size_t alloc_capacity(ShenandoahHeapRegion *r);
+  bool has_no_alloc_capacity(ShenandoahHeapRegion *r);
+
+public:
+  ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);
+
+  void clear();
+  void rebuild();
+
+  void recycle_trash();
+
+  void log_status();
+
+  size_t capacity()  const { return _capacity; }
+  size_t used()      const { return _used;     }
+  size_t available() const {
+    assert(_used <= _capacity, "must use less than capacity");
+    return _capacity - _used;
+  }
+
+  HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region);
+  size_t unsafe_peek_free() const;
+
+  void print_on(outputStream* out) const;
+};
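+
+// Typical allocation flow (illustrative sketch, assuming the heap exposes its
+// free set via a free_set() accessor; the real driver is the allocation path
+// in ShenandoahHeap):
+//
+//   ShenandoahHeapLocker locker(heap->lock());
+//   bool in_new_region = false;
+//   HeapWord* mem = heap->free_set()->allocate(req, in_new_region);
+//   if (mem == NULL) { /* handle allocation failure, possibly trigger GC */ }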
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,2796 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.hpp"
+
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/memAllocator.hpp"
+#include "gc/shared/parallelCleaning.hpp"
+#include "gc/shared/plab.hpp"
+
+#include "gc/shenandoah/shenandoahAllocTracker.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
+#include "gc/shenandoah/shenandoahControlThread.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahMemoryPool.hpp"
+#include "gc/shenandoah/shenandoahMetrics.hpp"
+#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
+#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahPacer.hpp"
+#include "gc/shenandoah/shenandoahPacer.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVerifier.hpp"
+#include "gc/shenandoah/shenandoahCodeRoots.hpp"
+#include "gc/shenandoah/shenandoahVMOperations.hpp"
+#include "gc/shenandoah/shenandoahWorkGroup.hpp"
+#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
+#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
+#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
+#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
+#include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
+#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
+#include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"
+
+#include "memory/metaspace.hpp"
+#include "runtime/vmThread.hpp"
+#include "services/mallocTracker.hpp"
+
+ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
+
+#ifdef ASSERT
+template <class T>
+void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
+  T o = RawAccess<>::oop_load(p);
+  if (! CompressedOops::is_null(o)) {
+    oop obj = CompressedOops::decode_not_null(o);
+    shenandoah_assert_not_forwarded(p, obj);
+  }
+}
+
+void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
+void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_work(p); }
+#endif
+
+class ShenandoahPretouchTask : public AbstractGangTask {
+private:
+  ShenandoahRegionIterator _regions;
+  const size_t _bitmap_size;
+  const size_t _page_size;
+  char* _bitmap_base;
+public:
+  ShenandoahPretouchTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
+    AbstractGangTask("Shenandoah PreTouch"),
+    _bitmap_size(bitmap_size),
+    _page_size(page_size),
+    _bitmap_base(bitmap_base) {
+  }
+
+  virtual void work(uint worker_id) {
+    ShenandoahHeapRegion* r = _regions.next();
+    while (r != NULL) {
+      os::pretouch_memory(r->bottom(), r->end(), _page_size);
+
+      size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
+      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
+      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
+
+      os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
+
+      r = _regions.next();
+    }
+  }
+};
+
+jint ShenandoahHeap::initialize() {
+  ShenandoahBrooksPointer::initial_checks();
+
+  initialize_heuristics();
+
+  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
+  size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  size_t heap_alignment = collector_policy()->heap_alignment();
+
+  if (ShenandoahAlwaysPreTouch) {
+    // Enabled pre-touch means the entire heap is committed right away.
+    init_byte_size = max_byte_size;
+  }
+
+  Universe::check_alignment(max_byte_size,
+                            ShenandoahHeapRegion::region_size_bytes(),
+                            "shenandoah heap");
+  Universe::check_alignment(init_byte_size,
+                            ShenandoahHeapRegion::region_size_bytes(),
+                            "shenandoah heap");
+
+  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
+                                                 heap_alignment);
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
+
+  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
+
+  _num_regions = ShenandoahHeapRegion::region_count();
+
+  size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
+  num_committed_regions = MIN2(num_committed_regions, _num_regions);
+  assert(num_committed_regions <= _num_regions, "sanity");
+
+  _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
+  _committed = _initial_size;
+
+  log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s",
+          byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size));
+  if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
+    vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
+  }
+
+  size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
+  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+
+  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
+  _free_set = new ShenandoahFreeSet(this, _num_regions);
+
+  _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
+
+  if (ShenandoahPacing) {
+    _pacer = new ShenandoahPacer(this);
+    _pacer->setup_for_idle();
+  } else {
+    _pacer = NULL;
+  }
+
+  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
+         "misaligned heap: " PTR_FORMAT, p2i(base()));
+
+  // The call below uses stuff (the SATB* things) that is in G1, but probably
+  // belongs in a shared location.
+  ShenandoahBarrierSet::satb_mark_queue_set().initialize(this,
+                                               SATB_Q_CBL_mon,
+                                               20 /*G1SATBProcessCompletedThreshold */,
+                                               60 /* G1SATBBufferEnqueueingThresholdPercent */,
+                                               Shared_SATB_Q_lock);
+
+  // Reserve space for prev and next bitmap.
+  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
+  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
+  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
+  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
+
+  size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
+
+  guarantee(bitmap_bytes_per_region != 0,
+            "Bitmap bytes per region should not be zero");
+  guarantee(is_power_of_2(bitmap_bytes_per_region),
+            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
+
+  if (bitmap_page_size > bitmap_bytes_per_region) {
+    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
+    _bitmap_bytes_per_slice = bitmap_page_size;
+  } else {
+    _bitmap_regions_per_slice = 1;
+    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
+  }
+
+  guarantee(_bitmap_regions_per_slice >= 1,
+            "Should have at least one region per slice: " SIZE_FORMAT,
+            _bitmap_regions_per_slice);
+
+  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
+            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
+            _bitmap_bytes_per_slice, bitmap_page_size);
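+
+  // Illustrative: with 2M large pages and 256K of bitmap per region, one slice
+  // covers 2M / 256K = 8 regions; with 4K pages, every region gets its own slice.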
+
+  ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
+  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
+  _bitmap_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
+
+  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
+                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
+  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
+  os::commit_memory_or_exit((char *) (_bitmap_region.start()), bitmap_init_commit, false,
+                            "couldn't allocate initial bitmap");
+
+  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
+
+  if (ShenandoahVerify) {
+    ReservedSpace verify_bitmap(_bitmap_size, page_size);
+    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
+                              "couldn't allocate verification bitmap");
+    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
+    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
+    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
+    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
+  }
+
+  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
+
+  {
+    ShenandoahHeapLocker locker(lock());
+    for (size_t i = 0; i < _num_regions; i++) {
+      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
+                                                         (HeapWord*) pgc_rs.base() + reg_size_words * i,
+                                                         reg_size_words,
+                                                         i,
+                                                         i < num_committed_regions);
+
+      _marking_context->initialize_top_at_mark_start(r);
+      _regions[i] = r;
+      assert(!collection_set()->is_in(i), "New region should not be in collection set");
+    }
+
+    // Initialize to complete
+    _marking_context->mark_complete();
+
+    _free_set->rebuild();
+  }
+
+  if (ShenandoahAlwaysPreTouch) {
+    assert (!AlwaysPreTouch, "Should have been overridden");
+
+    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
+    // before initialize() below zeroes it with the initializing thread. For any given region,
+    // we touch the region and the corresponding bitmaps from the same thread.
+    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
+
+    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
+                       _num_regions, page_size);
+    ShenandoahPretouchTask cl(bitmap0.base(), _bitmap_size, page_size);
+    _workers->run_task(&cl);
+  }
+
+  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
+  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
+  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
+  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
+  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
+
+  _traversal_gc = heuristics()->can_do_traversal_gc() ?
+                new ShenandoahTraversalGC(this, _num_regions) :
+                NULL;
+
+  _monitoring_support = new ShenandoahMonitoringSupport(this);
+
+  _phase_timings = new ShenandoahPhaseTimings();
+
+  if (ShenandoahAllocationTrace) {
+    _alloc_tracker = new ShenandoahAllocTracker();
+  }
+
+  ShenandoahStringDedup::initialize();
+
+  _control_thread = new ShenandoahControlThread();
+
+  ShenandoahCodeRoots::initialize();
+
+  log_info(gc, init)("Safepointing mechanism: %s",
+                     SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
+                     (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
+
+  _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
+  for (uint worker = 0; worker < _max_workers; worker++) {
+    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
+    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
+  }
+
+  return JNI_OK;
+}
+
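+// Selects the heuristics implementation named by -XX:ShenandoahGCHeuristics.
+// Example (illustrative command line):
+//   java -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC \
+//        -XX:ShenandoahGCHeuristics=compact ...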
+void ShenandoahHeap::initialize_heuristics() {
+  if (ShenandoahGCHeuristics != NULL) {
+    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
+      _heuristics = new ShenandoahAggressiveHeuristics();
+    } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
+      _heuristics = new ShenandoahStaticHeuristics();
+    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
+      _heuristics = new ShenandoahAdaptiveHeuristics();
+    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
+      _heuristics = new ShenandoahPassiveHeuristics();
+    } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
+      _heuristics = new ShenandoahCompactHeuristics();
+    } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
+      _heuristics = new ShenandoahTraversalHeuristics();
+    } else {
+      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
+    }
+
+    if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
+      vm_exit_during_initialization(
+              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
+                      _heuristics->name()));
+    }
+    if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
+      vm_exit_during_initialization(
+              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
+                      _heuristics->name()));
+    }
+
+    if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
+      vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
+    }
+    log_info(gc, init)("Shenandoah heuristics: %s",
+                       _heuristics->name());
+  } else {
+      ShouldNotReachHere();
+  }
+
+}
+
+#ifdef _MSC_VER
+#pragma warning( push )
+#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
+#endif
+
+ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
+  CollectedHeap(),
+  _initial_size(0),
+  _used(0),
+  _committed(0),
+  _bytes_allocated_since_gc_start(0),
+  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
+  _workers(NULL),
+  _safepoint_workers(NULL),
+  _num_regions(0),
+  _regions(NULL),
+  _update_refs_iterator(this),
+  _control_thread(NULL),
+  _shenandoah_policy(policy),
+  _heuristics(NULL),
+  _free_set(NULL),
+  _scm(new ShenandoahConcurrentMark()),
+  _traversal_gc(NULL),
+  _full_gc(new ShenandoahMarkCompact()),
+  _pacer(NULL),
+  _verifier(NULL),
+  _alloc_tracker(NULL),
+  _phase_timings(NULL),
+  _monitoring_support(NULL),
+  _memory_pool(NULL),
+  _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
+  _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
+  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
+  _soft_ref_policy(),
+  _ref_processor(NULL),
+  _marking_context(NULL),
+  _collection_set(NULL)
+{
+  log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
+  log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
+
+  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
+
+  _max_workers = MAX2(_max_workers, 1U);
+  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
+                            /* are_GC_task_threads */true,
+                            /* are_ConcurrentGC_threads */false);
+  if (_workers == NULL) {
+    vm_exit_during_initialization("Failed necessary allocation.");
+  } else {
+    _workers->initialize_workers();
+  }
+
+  if (ShenandoahParallelSafepointThreads > 1) {
+    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
+                                                ShenandoahParallelSafepointThreads,
+                                                false, false);
+    _safepoint_workers->initialize_workers();
+  }
+}
+
+#ifdef _MSC_VER
+#pragma warning( pop )
+#endif
+
+class ShenandoahResetBitmapTask : public AbstractGangTask {
+private:
+  ShenandoahRegionIterator _regions;
+
+public:
+  ShenandoahResetBitmapTask() :
+    AbstractGangTask("Parallel Reset Bitmap Task") {}
+
+  void work(uint worker_id) {
+    ShenandoahHeapRegion* region = _regions.next();
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahMarkingContext* const ctx = heap->marking_context();
+    while (region != NULL) {
+      if (heap->is_bitmap_slice_committed(region)) {
+        ctx->clear_bitmap(region);
+      }
+      region = _regions.next();
+    }
+  }
+};
+
+void ShenandoahHeap::reset_mark_bitmap() {
+  assert_gc_workers(_workers->active_workers());
+  mark_incomplete_marking_context();
+
+  ShenandoahResetBitmapTask task;
+  _workers->run_task(&task);
+}
+
+void ShenandoahHeap::print_on(outputStream* st) const {
+  st->print_cr("Shenandoah Heap");
+  st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
+               capacity() / K, committed() / K, used() / K);
+  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
+               num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
+
+  st->print("Status: ");
+  if (has_forwarded_objects())               st->print("has forwarded objects, ");
+  if (is_concurrent_mark_in_progress())      st->print("marking, ");
+  if (is_evacuation_in_progress())           st->print("evacuating, ");
+  if (is_update_refs_in_progress())          st->print("updating refs, ");
+  if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
+  if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
+  if (is_full_gc_in_progress())              st->print("full gc, ");
+  if (is_full_gc_move_in_progress())         st->print("full gc move, ");
+
+  if (cancelled_gc()) {
+    st->print("cancelled");
+  } else {
+    st->print("not cancelled");
+  }
+  st->cr();
+
+  st->print_cr("Reserved region:");
+  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
+               p2i(reserved_region().start()),
+               p2i(reserved_region().end()));
+
+  st->cr();
+  MetaspaceUtils::print_on(st);
+
+  if (Verbose) {
+    print_heap_regions_on(st);
+  }
+}
+
+class ShenandoahInitGCLABClosure : public ThreadClosure {
+public:
+  void do_thread(Thread* thread) {
+    if (thread != NULL && (thread->is_Java_thread() || thread->is_Worker_thread())) {
+      ShenandoahThreadLocalData::initialize_gclab(thread);
+    }
+  }
+};
+
+void ShenandoahHeap::post_initialize() {
+  CollectedHeap::post_initialize();
+  MutexLocker ml(Threads_lock);
+
+  ShenandoahInitGCLABClosure init_gclabs;
+  Threads::threads_do(&init_gclabs);
+  _workers->threads_do(&init_gclabs);
+  _safepoint_workers->threads_do(&init_gclabs);
+
+  // GCLABs cannot be initialized early during VM startup, because their max_size
+  // cannot be determined yet. Instead, we let the WorkGang initialize a GCLAB when
+  // a new worker is created.
+  _workers->set_initialize_gclab();
+
+  _scm->initialize(_max_workers);
+  _full_gc->initialize(_gc_timer);
+
+  ref_processing_init();
+
+  _heuristics->initialize();
+}
+
+size_t ShenandoahHeap::used() const {
+  return OrderAccess::load_acquire(&_used);
+}
+
+size_t ShenandoahHeap::committed() const {
+  OrderAccess::acquire();
+  return _committed;
+}
+
+void ShenandoahHeap::increase_committed(size_t bytes) {
+  assert_heaplock_or_safepoint();
+  _committed += bytes;
+}
+
+void ShenandoahHeap::decrease_committed(size_t bytes) {
+  assert_heaplock_or_safepoint();
+  _committed -= bytes;
+}
+
+void ShenandoahHeap::increase_used(size_t bytes) {
+  Atomic::add(bytes, &_used);
+}
+
+void ShenandoahHeap::set_used(size_t bytes) {
+  OrderAccess::release_store_fence(&_used, bytes);
+}
+
+void ShenandoahHeap::decrease_used(size_t bytes) {
+  assert(used() >= bytes, "never decrease heap size by more than we've left");
+  Atomic::sub(bytes, &_used);
+}
+
+void ShenandoahHeap::increase_allocated(size_t bytes) {
+  Atomic::add(bytes, &_bytes_allocated_since_gc_start);
+}
+
+void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
+  size_t bytes = words * HeapWordSize;
+  if (!waste) {
+    increase_used(bytes);
+  }
+  increase_allocated(bytes);
+  if (ShenandoahPacing) {
+    control_thread()->pacing_notify_alloc(words);
+    if (waste) {
+      pacer()->claim_for_alloc(words, true);
+    }
+  }
+}
+
+size_t ShenandoahHeap::capacity() const {
+  return num_regions() * ShenandoahHeapRegion::region_size_bytes();
+}
+
+size_t ShenandoahHeap::max_capacity() const {
+  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
+}
+
+size_t ShenandoahHeap::initial_capacity() const {
+  return _initial_size;
+}
+
+bool ShenandoahHeap::is_in(const void* p) const {
+  HeapWord* heap_base = (HeapWord*) base();
+  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
+  return p >= heap_base && p < last_region_end;
+}
+
+void ShenandoahHeap::op_uncommit(double shrink_before) {
+  assert (ShenandoahUncommit, "should be enabled");
+
+  size_t count = 0;
+  for (size_t i = 0; i < num_regions(); i++) {
+    ShenandoahHeapRegion* r = get_region(i);
+    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
+      ShenandoahHeapLocker locker(lock());
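+      // Re-check under the heap lock: an allocator may have claimed the region
+      // between the unlocked check above and taking the lock.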
+      if (r->is_empty_committed()) {
+        r->make_uncommitted();
+        count++;
+      }
+    }
+    SpinPause(); // allow allocators to take the lock
+  }
+
+  if (count > 0) {
+    log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
+                 count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
+    control_thread()->notify_heap_changed();
+  }
+}
+
+HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
+  // New object should fit the GCLAB size
+  size_t min_size = MAX2(size, PLAB::min_size());
+
+  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
+  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
+  new_size = MIN2(new_size, PLAB::max_size());
+  new_size = MAX2(new_size, PLAB::min_size());
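+  // Illustrative: a thread whose current GCLAB is 4K words asks for 8K words
+  // next time, clamped into [PLAB::min_size(), PLAB::max_size()].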
+
+  // Record new heuristic value even if we take any shortcut. This captures
+  // the case when moderately-sized objects always take a shortcut. At some point,
+  // heuristics should catch up with them.
+  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
+
+  if (new_size < size) {
+    // New size still does not fit the object. Fall back to shared allocation.
+    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
+    return NULL;
+  }
+
+  // Retire current GCLAB, and allocate a new one.
+  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
+  gclab->retire();
+
+  size_t actual_size = 0;
+  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
+  if (gclab_buf == NULL) {
+    return NULL;
+  }
+
+  assert (size <= actual_size, "allocation should fit");
+
+  if (ZeroTLAB) {
+    // ..and clear it.
+    Copy::zero_to_words(gclab_buf, actual_size);
+  } else {
+    // ...and zap just allocated object.
+#ifdef ASSERT
+    // Skip mangling the space corresponding to the object header to
+    // ensure that the returned space is not considered parsable by
+    // any concurrent GC thread.
+    size_t hdr_size = oopDesc::header_size();
+    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
+#endif // ASSERT
+  }
+  gclab->set_buf(gclab_buf, actual_size);
+  return gclab->allocate(size);
+}
+
+HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
+                                            size_t requested_size,
+                                            size_t* actual_size) {
+  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
+  HeapWord* res = allocate_memory(req);
+  if (res != NULL) {
+    *actual_size = req.actual_size();
+  } else {
+    *actual_size = 0;
+  }
+  return res;
+}
+
+HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
+                                             size_t word_size,
+                                             size_t* actual_size) {
+  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
+  HeapWord* res = allocate_memory(req);
+  if (res != NULL) {
+    *actual_size = req.actual_size();
+  } else {
+    *actual_size = 0;
+  }
+  return res;
+}
+
+ShenandoahHeap* ShenandoahHeap::heap() {
+  CollectedHeap* heap = Universe::heap();
+  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
+  assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
+  return (ShenandoahHeap*) heap;
+}
+
+ShenandoahHeap* ShenandoahHeap::heap_no_check() {
+  CollectedHeap* heap = Universe::heap();
+  return (ShenandoahHeap*) heap;
+}
+
+HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
+  ShenandoahAllocTrace trace_alloc(req.size(), req.type());
+
+  intptr_t pacer_epoch = 0;
+  bool in_new_region = false;
+  HeapWord* result = NULL;
+
+  if (req.is_mutator_alloc()) {
+    if (ShenandoahPacing) {
+      pacer()->pace_for_alloc(req.size());
+      pacer_epoch = pacer()->epoch();
+    }
+
+    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
+      result = allocate_memory_under_lock(req, in_new_region);
+    }
+
+    // If allocation failed, block until the control thread reacts, then retry
+    // the allocation.
+    //
+    // It might happen that a thread requesting allocation unblocks long after
+    // the GC happened, only to fail its second allocation because other threads
+    // have already depleted the free storage. In this case, a better strategy
+    // is to keep retrying, as long as GC makes progress.
+    //
+    // We also need to make sure the allocation is retried after at least one
+    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
+
+    size_t tries = 0;
+
+    while (result == NULL && _progress_last_gc.is_set()) {
+      tries++;
+      control_thread()->handle_alloc_failure(req.size());
+      result = allocate_memory_under_lock(req, in_new_region);
+    }
+
+    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
+      tries++;
+      control_thread()->handle_alloc_failure(req.size());
+      result = allocate_memory_under_lock(req, in_new_region);
+    }
+
+  } else {
+    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
+    result = allocate_memory_under_lock(req, in_new_region);
+    // Do not call handle_alloc_failure() here, because we cannot block.
+    // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
+  }
+
+  if (in_new_region) {
+    control_thread()->notify_heap_changed();
+  }
+
+  if (result != NULL) {
+    size_t requested = req.size();
+    size_t actual = req.actual_size();
+
+    assert (req.is_lab_alloc() || (requested == actual),
+            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
+            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
+
+    if (req.is_mutator_alloc()) {
+      notify_mutator_alloc_words(actual, false);
+
+      // If we requested more than we were granted, give the rest back to pacer.
+      // This only matters if we are in the same pacing epoch: do not try to unpace
+      // over the budget for the other phase.
+      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
+        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
+      }
+    } else {
+      increase_used(actual*HeapWordSize);
+    }
+  }
+
+  return result;
+}
+
+HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
+  ShenandoahHeapLocker locker(lock());
+  return _free_set->allocate(req, in_new_region);
+}
+
+class ShenandoahMemAllocator : public MemAllocator {
+private:
+  MemAllocator& _initializer;
+public:
+  ShenandoahMemAllocator(MemAllocator& initializer, Klass* klass, size_t word_size, Thread* thread) :
+  MemAllocator(klass, word_size + ShenandoahBrooksPointer::word_size(), thread),
+    _initializer(initializer) {}
+
+protected:
+  virtual HeapWord* mem_allocate(Allocation& allocation) const {
+    HeapWord* result = MemAllocator::mem_allocate(allocation);
+    // Initialize brooks-pointer
+    if (result != NULL) {
+      result += ShenandoahBrooksPointer::word_size();
+      ShenandoahBrooksPointer::initialize(oop(result));
+      assert(! ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targeted region");
+    }
+    return result;
+  }
+
+  virtual oop initialize(HeapWord* mem) const {
+    return _initializer.initialize(mem);
+  }
+};
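+
+// Layout sketch for the adjustment above: the Brooks forwarding pointer takes one
+// extra word immediately before the object, and mem_allocate() returns the address
+// past it (initially, the forwarding pointer refers back to the object itself):
+//
+//   [ fwd pointer word ][ object header | fields ... ]
+//   ^                   ^
+//   raw allocation      result returned (the oop)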
+
+oop ShenandoahHeap::obj_allocate(Klass* klass, int size, TRAPS) {
+  ObjAllocator initializer(klass, size, THREAD);
+  ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
+  return allocator.allocate();
+}
+
+oop ShenandoahHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
+  ObjArrayAllocator initializer(klass, size, length, do_zero, THREAD);
+  ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
+  return allocator.allocate();
+}
+
+oop ShenandoahHeap::class_allocate(Klass* klass, int size, TRAPS) {
+  ClassAllocator initializer(klass, size, THREAD);
+  ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
+  return allocator.allocate();
+}
+
+HeapWord* ShenandoahHeap::mem_allocate(size_t size,
+                                        bool*  gc_overhead_limit_was_exceeded) {
+  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
+  return allocate_memory(req);
+}
+
+MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
+                                                             size_t size,
+                                                             Metaspace::MetadataType mdtype) {
+  MetaWord* result;
+
+  // Inform metaspace OOM to GC heuristics if class unloading is possible.
+  if (heuristics()->can_unload_classes()) {
+    ShenandoahHeuristics* h = heuristics();
+    h->record_metaspace_oom();
+  }
+
+  // Expand and retry allocation
+  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
+  if (result != NULL) {
+    return result;
+  }
+
+  // Start full GC
+  collect(GCCause::_metadata_GC_clear_soft_refs);
+
+  // Retry allocation
+  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
+  if (result != NULL) {
+    return result;
+  }
+
+  // Expand and retry allocation
+  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
+  if (result != NULL) {
+    return result;
+  }
+
+  // Out of memory
+  return NULL;
+}
+
+void ShenandoahHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
+  HeapWord* obj = tlab_post_allocation_setup(start);
+  CollectedHeap::fill_with_object(obj, end);
+}
+
+size_t ShenandoahHeap::min_dummy_object_size() const {
+  return CollectedHeap::min_dummy_object_size() + ShenandoahBrooksPointer::word_size();
+}
+
+class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure {
+private:
+  ShenandoahHeap* _heap;
+  Thread* _thread;
+public:
+  ShenandoahEvacuateUpdateRootsClosure() :
+    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
+  }
+
+private:
+  template <class T>
+  void do_oop_work(T* p) {
+    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
+
+    T o = RawAccess<>::oop_load(p);
+    if (! CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      if (_heap->in_collection_set(obj)) {
+        shenandoah_assert_marked(p, obj);
+        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+        if (oopDesc::equals_raw(resolved, obj)) {
+          resolved = _heap->evacuate_object(obj, _thread);
+        }
+        RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
+      }
+    }
+  }
+
+public:
+  void do_oop(oop* p) {
+    do_oop_work(p);
+  }
+  void do_oop(narrowOop* p) {
+    do_oop_work(p);
+  }
+};
+
+class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
+private:
+  ShenandoahHeap* const _heap;
+  Thread* const _thread;
+public:
+  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
+    _heap(heap), _thread(Thread::current()) {}
+
+  void do_object(oop p) {
+    shenandoah_assert_marked(NULL, p);
+    if (oopDesc::equals_raw(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
+      _heap->evacuate_object(p, _thread);
+    }
+  }
+};
+
+class ShenandoahEvacuationTask : public AbstractGangTask {
+private:
+  ShenandoahHeap* const _sh;
+  ShenandoahCollectionSet* const _cs;
+  bool _concurrent;
+public:
+  ShenandoahEvacuationTask(ShenandoahHeap* sh,
+                           ShenandoahCollectionSet* cs,
+                           bool concurrent) :
+    AbstractGangTask("Parallel Evacuation Task"),
+    _sh(sh),
+    _cs(cs),
+    _concurrent(concurrent)
+  {}
+
+  void work(uint worker_id) {
+    if (_concurrent) {
+      ShenandoahConcurrentWorkerSession worker_session(worker_id);
+      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
+      ShenandoahEvacOOMScope oom_evac_scope;
+      do_work();
+    } else {
+      ShenandoahParallelWorkerSession worker_session(worker_id);
+      ShenandoahEvacOOMScope oom_evac_scope;
+      do_work();
+    }
+  }
+
+private:
+  void do_work() {
+    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
+    ShenandoahHeapRegion* r;
+    while ((r = _cs->claim_next()) != NULL) {
+      assert(r->has_live(), "all-garbage regions are reclaimed early");
+      _sh->marked_object_iterate(r, &cl);
+
+      if (ShenandoahPacing) {
+        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
+      }
+
+      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
+        break;
+      }
+    }
+  }
+};
+
+void ShenandoahHeap::trash_cset_regions() {
+  ShenandoahHeapLocker locker(lock());
+
+  ShenandoahCollectionSet* set = collection_set();
+  ShenandoahHeapRegion* r;
+  set->clear_current_index();
+  while ((r = set->next()) != NULL) {
+    r->make_trash();
+  }
+  collection_set()->clear();
+}
+
+void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
+  st->print_cr("Heap Regions:");
+  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
+  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
+  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
+  st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
+
+  for (size_t i = 0; i < num_regions(); i++) {
+    get_region(i)->print_on(st);
+  }
+}
+
+void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
+  assert(start->is_humongous_start(), "reclaim regions starting with the first one");
+
+  oop humongous_obj = oop(start->bottom() + ShenandoahBrooksPointer::word_size());
+  size_t size = humongous_obj->size() + ShenandoahBrooksPointer::word_size();
+  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
+  size_t index = start->region_number() + required_regions - 1;
+
+  assert(!start->has_live(), "liveness must be zero");
+
+  for (size_t i = 0; i < required_regions; i++) {
+    // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
+    // as it expects that every region belongs to a humongous region starting with a humongous start region.
+    ShenandoahHeapRegion* region = get_region(index--);
+
+    assert(region->is_humongous(), "expect correct humongous start or continuation");
+    assert(!region->is_cset(), "Humongous region should not be in collection set");
+
+    region->make_trash_immediate();
+  }
+}
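+
+// Illustrative example (hypothetical numbers): a humongous object whose size,
+// including the Brooks pointer word, spans 3 regions starting at region 10 is
+// reclaimed as regions 12, 11, 10, i.e. tail first, per the comment in the loop.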
+
+class ShenandoahRetireGCLABClosure : public ThreadClosure {
+public:
+  void do_thread(Thread* thread) {
+    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
+    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
+    gclab->retire();
+  }
+};
+
+void ShenandoahHeap::make_parsable(bool retire_tlabs) {
+  if (UseTLAB) {
+    CollectedHeap::ensure_parsability(retire_tlabs);
+  }
+  ShenandoahRetireGCLABClosure cl;
+  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+    cl.do_thread(t);
+  }
+  workers()->threads_do(&cl);
+  _safepoint_workers->threads_do(&cl);
+}
+
+void ShenandoahHeap::resize_tlabs() {
+  CollectedHeap::resize_all_tlabs();
+}
+
+class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
+private:
+  ShenandoahRootEvacuator* _rp;
+
+public:
+  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
+    AbstractGangTask("Shenandoah evacuate and update roots"),
+    _rp(rp) {}
+
+  void work(uint worker_id) {
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+    ShenandoahEvacOOMScope oom_evac_scope;
+    ShenandoahEvacuateUpdateRootsClosure cl;
+
+    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
+    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
+  }
+};
+
+void ShenandoahHeap::evacuate_and_update_roots() {
+#if defined(COMPILER2) || INCLUDE_JVMCI
+  DerivedPointerTable::clear();
+#endif
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
+
+  {
+    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
+    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
+    workers()->run_task(&roots_task);
+  }
+
+#if defined(COMPILER2) || INCLUDE_JVMCI
+  DerivedPointerTable::update_pointers();
+#endif
+}
+
+void ShenandoahHeap::roots_iterate(OopClosure* cl) {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
+
+  CodeBlobToOopClosure blobsCl(cl, false);
+  CLDToOopClosure cldCl(cl, ClassLoaderData::_claim_strong);
+
+  ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
+  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
+}
+
+// Returns size in bytes
+size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
+  if (ShenandoahElasticTLAB) {
+    // With Elastic TLABs, return the max allowed size, and let the allocation path
+    // figure out the safe size for current allocation.
+    return ShenandoahHeapRegion::max_tlab_size_bytes();
+  } else {
+    return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
+  }
+}
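+
+// Usage note (sketch): with ShenandoahElasticTLAB, callers may ask for up to the
+// maximum above; allocate_new_tlab() then reports the actually granted size via
+// req.actual_size(), which may be smaller than requested but not below min_size.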
+
+size_t ShenandoahHeap::max_tlab_size() const {
+  // Returns size in words
+  return ShenandoahHeapRegion::max_tlab_size_words();
+}
+
+class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
+public:
+  void do_thread(Thread* thread) {
+    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
+    gclab->retire();
+    if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
+      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
+    }
+  }
+};
+
+void ShenandoahHeap::retire_and_reset_gclabs() {
+  ShenandoahRetireAndResetGCLABClosure cl;
+  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+    cl.do_thread(t);
+  }
+  workers()->threads_do(&cl);
+  _safepoint_workers->threads_do(&cl);
+}
+
+void ShenandoahHeap::collect(GCCause::Cause cause) {
+  control_thread()->request_gc(cause);
+}
+
+void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
+  //assert(false, "Shouldn't need to do full collections");
+}
+
+CollectorPolicy* ShenandoahHeap::collector_policy() const {
+  return _shenandoah_policy;
+}
+
+HeapWord* ShenandoahHeap::block_start(const void* addr) const {
+  Space* sp = heap_region_containing(addr);
+  if (sp != NULL) {
+    return sp->block_start(addr);
+  }
+  return NULL;
+}
+
+size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
+  Space* sp = heap_region_containing(addr);
+  assert(sp != NULL, "block_size of address outside of heap");
+  return sp->block_size(addr);
+}
+
+bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
+  Space* sp = heap_region_containing(addr);
+  return sp->block_is_obj(addr);
+}
+
+jlong ShenandoahHeap::millis_since_last_gc() {
+  double v = heuristics()->time_since_last_gc() * 1000;
+  assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
+  return (jlong)v;
+}
+
+void ShenandoahHeap::prepare_for_verify() {
+  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
+    make_parsable(false);
+  }
+}
+
+void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
+  workers()->print_worker_threads_on(st);
+  if (ShenandoahStringDedup::is_enabled()) {
+    ShenandoahStringDedup::print_worker_threads_on(st);
+  }
+}
+
+void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
+  workers()->threads_do(tcl);
+  _safepoint_workers->threads_do(tcl);
+  if (ShenandoahStringDedup::is_enabled()) {
+    ShenandoahStringDedup::threads_do(tcl);
+  }
+}
+
+void ShenandoahHeap::print_tracing_info() const {
+  LogTarget(Info, gc, stats) lt;
+  if (lt.is_enabled()) {
+    ResourceMark rm;
+    LogStream ls(lt);
+
+    phase_timings()->print_on(&ls);
+
+    ls.cr();
+    ls.cr();
+
+    shenandoah_policy()->print_gc_stats(&ls);
+
+    ls.cr();
+    ls.cr();
+
+    if (ShenandoahPacing) {
+      pacer()->print_on(&ls);
+    }
+
+    ls.cr();
+    ls.cr();
+
+    if (ShenandoahAllocationTrace) {
+      assert(alloc_tracker() != NULL, "Must be");
+      alloc_tracker()->print_on(&ls);
+    } else {
+      ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
+    }
+  }
+}
+
+void ShenandoahHeap::verify(VerifyOption vo) {
+  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
+    if (ShenandoahVerify) {
+      verifier()->verify_generic(vo);
+    } else {
+      // TODO: Consider allocating verification bitmaps on demand,
+      // and turn this on unconditionally.
+    }
+  }
+}
+
+size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
+  return _free_set->capacity();
+}
+
+class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
+private:
+  MarkBitMap* _bitmap;
+  Stack<oop,mtGC>* _oop_stack;
+
+  template <class T>
+  void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      assert(oopDesc::is_oop(obj), "must be a valid oop");
+      if (!_bitmap->is_marked((HeapWord*) obj)) {
+        _bitmap->mark((HeapWord*) obj);
+        _oop_stack->push(obj);
+      }
+    }
+  }
+public:
+  ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
+    _bitmap(bitmap), _oop_stack(oop_stack) {}
+  void do_oop(oop* p)       { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+/*
+ * This is public API, used in preparation of object_iterate().
+ * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
+ * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
+ * control, we call SH::make_parsable().
+ */
+void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
+  // No-op.
+}
+
+/*
+ * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
+ *
+ * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
+ * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
+ * calling oopDesc::size() would crash), dangling reference fields, etc. Linear
+ * scanning therefore depends on having a valid marking bitmap to support it. However, we only
+ * have a valid marking bitmap after a successful marking. In particular, we *don't* have a valid
+ * marking bitmap during marking, after aborted marking, or during/after cleanup (when we just
+ * wiped the bitmap in preparation for the next marking).
+ *
+ * For all those reasons, we implement object iteration as a single marking traversal, reporting
+ * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
+ * is allowed to report dead objects, but is not required to do so.
+ */
+void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
+  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
+  if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
+    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
+    return;
+  }
+
+  // Reset bitmap
+  _aux_bit_map.clear();
+
+  Stack<oop,mtGC> oop_stack;
+
+  // First, we process all GC roots. This populates the work stack with initial objects.
+  ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
+  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
+  CLDToOopClosure clds(&oops, ClassLoaderData::_claim_none);
+  CodeBlobToOopClosure blobs(&oops, false);
+  rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
+
+  // Work through the oop stack to traverse heap.
+  while (! oop_stack.is_empty()) {
+    oop obj = oop_stack.pop();
+    assert(oopDesc::is_oop(obj), "must be a valid oop");
+    cl->do_object(obj);
+    obj->oop_iterate(&oops);
+  }
+
+  assert(oop_stack.is_empty(), "should be empty");
+
+  if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
+    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
+  }
+}
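+
+// Illustrative usage sketch (hypothetical closure, not part of this changeset):
+// driving the marking-traversal-based iteration above with a trivial counter.
+// Per the assert in object_iterate(), this must run at a safepoint.
+//
+//   class CountObjectsClosure : public ObjectClosure {
+//   public:
+//     size_t _count;
+//     CountObjectsClosure() : _count(0) {}
+//     void do_object(oop obj) { _count++; }
+//   };
+//
+//   CountObjectsClosure cl;
+//   ShenandoahHeap::heap()->object_iterate(&cl);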
+
+void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
+  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
+  object_iterate(cl);
+}
+
+void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
+  for (size_t i = 0; i < num_regions(); i++) {
+    ShenandoahHeapRegion* current = get_region(i);
+    blk->heap_region_do(current);
+  }
+}
+
+class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahHeapRegionClosure* const _blk;
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
+  volatile size_t _index;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+public:
+  ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
+          AbstractGangTask("Parallel Region Task"),
+          _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
+
+  void work(uint worker_id) {
+    size_t stride = ShenandoahParallelRegionStride;
+
+    size_t max = _heap->num_regions();
+    while (_index < max) {
+      size_t cur = Atomic::add(stride, &_index) - stride;
+      size_t start = cur;
+      size_t end = MIN2(cur + stride, max);
+      if (start >= max) break;
+
+      for (size_t i = cur; i < end; i++) {
+        ShenandoahHeapRegion* current = _heap->get_region(i);
+        _blk->heap_region_do(current);
+      }
+    }
+  }
+};
+
+void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
+  assert(blk->is_thread_safe(), "Only thread-safe closures here");
+  if (num_regions() > ShenandoahParallelRegionStride) {
+    ShenandoahParallelHeapRegionTask task(blk);
+    workers()->run_task(&task);
+  } else {
+    heap_region_iterate(blk);
+  }
+}
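+
+// Illustrative sketch (hypothetical closure, not in this changeset): a closure
+// eligible for parallel_heap_region_iterate() must override is_thread_safe(),
+// which the assert above checks, and must synchronize its own state:
+//
+//   class CountActiveRegionsClosure : public ShenandoahHeapRegionClosure {
+//   public:
+//     volatile size_t _count;
+//     CountActiveRegionsClosure() : _count(0) {}
+//     void heap_region_do(ShenandoahHeapRegion* r) {
+//       if (r->is_active()) Atomic::inc(&_count);
+//     }
+//     bool is_thread_safe() { return true; }
+//   };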
+
+class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahMarkingContext* const _ctx;
+public:
+  ShenandoahClearLivenessClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
+
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    if (r->is_active()) {
+      r->clear_live_data();
+      _ctx->capture_top_at_mark_start(r);
+    } else {
+      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number());
+      assert(_ctx->top_at_mark_start(r) == r->top(),
+             "Region " SIZE_FORMAT " should already have correct TAMS", r->region_number());
+    }
+  }
+
+  bool is_thread_safe() { return true; }
+};
+
+void ShenandoahHeap::op_init_mark() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
+  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
+
+  assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
+  assert(!marking_context()->is_complete(), "should not be complete");
+
+  if (ShenandoahVerify) {
+    verifier()->verify_before_concmark();
+  }
+
+  if (VerifyBeforeGC) {
+    Universe::verify();
+  }
+
+  set_concurrent_mark_in_progress(true);
+  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
+    make_parsable(true);
+  }
+
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
+    ShenandoahClearLivenessClosure clc;
+    parallel_heap_region_iterate(&clc);
+  }
+
+  // Make above changes visible to worker threads
+  OrderAccess::fence();
+
+  concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
+
+  if (UseTLAB) {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
+    resize_tlabs();
+  }
+
+  if (ShenandoahPacing) {
+    pacer()->setup_for_mark();
+  }
+}
+
+void ShenandoahHeap::op_mark() {
+  concurrent_mark()->mark_from_roots();
+}
+
+class ShenandoahCompleteLivenessClosure : public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahMarkingContext* const _ctx;
+public:
+  ShenandoahCompleteLivenessClosure() : _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
+
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    if (r->is_active()) {
+      HeapWord *tams = _ctx->top_at_mark_start(r);
+      HeapWord *top = r->top();
+      if (top > tams) {
+        r->increase_live_data_alloc_words(pointer_delta(top, tams));
+      }
+    } else {
+      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number());
+      assert(_ctx->top_at_mark_start(r) == r->top(),
+             "Region " SIZE_FORMAT " should have correct TAMS", r->region_number());
+    }
+  }
+
+  bool is_thread_safe() { return true; }
+};
+
+void ShenandoahHeap::op_final_mark() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
+
+  // It is critical that we evacuate roots right after finishing marking,
+  // so that we don't get unmarked objects in the roots.
+
+  if (!cancelled_gc()) {
+    concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
+
+    if (has_forwarded_objects()) {
+      concurrent_mark()->update_roots(ShenandoahPhaseTimings::update_roots);
+    }
+
+    stop_concurrent_marking();
+
+    {
+      ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
+
+      // All allocations past TAMS are implicitly live, adjust the region data.
+      // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
+      ShenandoahCompleteLivenessClosure cl;
+      parallel_heap_region_iterate(&cl);
+    }
+
+    {
+      ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
+
+      make_parsable(true);
+
+      trash_cset_regions();
+
+      {
+        ShenandoahHeapLocker locker(lock());
+        _collection_set->clear();
+        _free_set->clear();
+
+        heuristics()->choose_collection_set(_collection_set);
+
+        _free_set->rebuild();
+      }
+    }
+
+    // If collection set has candidates, start evacuation.
+    // Otherwise, bypass the rest of the cycle.
+    if (!collection_set()->is_empty()) {
+      ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
+
+      if (ShenandoahVerify) {
+        verifier()->verify_before_evacuation();
+      }
+
+      set_evacuation_in_progress(true);
+      // From here on, we need to update references.
+      set_has_forwarded_objects(true);
+
+      evacuate_and_update_roots();
+
+      if (ShenandoahPacing) {
+        pacer()->setup_for_evac();
+      }
+    } else {
+      if (ShenandoahVerify) {
+        verifier()->verify_after_concmark();
+      }
+
+      if (VerifyAfterGC) {
+        Universe::verify();
+      }
+    }
+
+  } else {
+    concurrent_mark()->cancel();
+    stop_concurrent_marking();
+
+    if (process_references()) {
+      // Abandon reference processing right away: pre-cleaning must have failed.
+      ReferenceProcessor *rp = ref_processor();
+      rp->disable_discovery();
+      rp->abandon_partial_discovery();
+      rp->verify_no_references_recorded();
+    }
+  }
+}
+
+void ShenandoahHeap::op_final_evac() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
+
+  set_evacuation_in_progress(false);
+
+  retire_and_reset_gclabs();
+
+  if (ShenandoahVerify) {
+    verifier()->verify_after_evacuation();
+  }
+
+  if (VerifyAfterGC) {
+    Universe::verify();
+  }
+}
+
+void ShenandoahHeap::op_conc_evac() {
+  ShenandoahEvacuationTask task(this, _collection_set, true);
+  workers()->run_task(&task);
+}
+
+void ShenandoahHeap::op_stw_evac() {
+  ShenandoahEvacuationTask task(this, _collection_set, false);
+  workers()->run_task(&task);
+}
+
+void ShenandoahHeap::op_updaterefs() {
+  update_heap_references(true);
+}
+
+void ShenandoahHeap::op_cleanup() {
+  free_set()->recycle_trash();
+}
+
+void ShenandoahHeap::op_reset() {
+  reset_mark_bitmap();
+}
+
+void ShenandoahHeap::op_preclean() {
+  concurrent_mark()->preclean_weak_refs();
+}
+
+void ShenandoahHeap::op_init_traversal() {
+  traversal_gc()->init_traversal_collection();
+}
+
+void ShenandoahHeap::op_traversal() {
+  traversal_gc()->concurrent_traversal_collection();
+}
+
+void ShenandoahHeap::op_final_traversal() {
+  traversal_gc()->final_traversal_collection();
+}
+
+void ShenandoahHeap::op_full(GCCause::Cause cause) {
+  ShenandoahMetricsSnapshot metrics;
+  metrics.snap_before();
+
+  full_gc()->do_it(cause);
+  if (UseTLAB) {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
+    resize_all_tlabs();
+  }
+
+  metrics.snap_after();
+  metrics.print();
+
+  if (metrics.is_good_progress("Full GC")) {
+    _progress_last_gc.set();
+  } else {
+    // Nothing to do. Tell the allocation path that we have failed to make
+    // progress, and it can finally fail.
+    _progress_last_gc.unset();
+  }
+}
+
+void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
+  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
+  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
+  // some phase, we have to upgrade the Degenerated GC to a Full GC.
+
+  clear_cancelled_gc();
+
+  ShenandoahMetricsSnapshot metrics;
+  metrics.snap_before();
+
+  switch (point) {
+    case _degenerated_traversal:
+      {
+        // Drop the collection set. Note: this leaves some already forwarded objects
+        // behind, which may be problematic, see comments for ShenandoahEvacAssist
+        // workarounds in ShenandoahTraversalHeuristics.
+
+        ShenandoahHeapLocker locker(lock());
+        collection_set()->clear_current_index();
+        for (size_t i = 0; i < collection_set()->count(); i++) {
+          ShenandoahHeapRegion* r = collection_set()->next();
+          r->make_regular_bypass();
+        }
+        collection_set()->clear();
+      }
+      op_final_traversal();
+      op_cleanup();
+      return;
+
+    // The cases below form a Duff's-device-like structure: it describes the actual GC cycle,
+    // but enters it at different points, depending on which concurrent phase had
+    // degenerated.
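+    //
+    // For example, entering at _degenerated_mark runs:
+    //   final-mark -> cleanup -> evac (if needed) -> update-refs (if needed) -> cleanup
+    // while entering at _degenerated_updaterefs runs only:
+    //   final-update-refs (if needed) -> cleanup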
+
+    case _degenerated_outside_cycle:
+      // We have degenerated from outside the cycle, which means something is bad with
+      // the heap, most probably heavy humongous fragmentation, or we are very low on free
+      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
+      // we can do the most aggressive degen cycle, which includes processing references and
+      // class unloading, unless those features are explicitly disabled.
+      //
+      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
+      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
+      set_process_references(heuristics()->can_process_references());
+      set_unload_classes(heuristics()->can_unload_classes());
+
+      if (heuristics()->can_do_traversal_gc()) {
+        // Not possible to degenerate from here, upgrade to Full GC right away.
+        cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
+        op_degenerated_fail();
+        return;
+      }
+
+      op_reset();
+
+      op_init_mark();
+      if (cancelled_gc()) {
+        op_degenerated_fail();
+        return;
+      }
+
+    case _degenerated_mark:
+      op_final_mark();
+      if (cancelled_gc()) {
+        op_degenerated_fail();
+        return;
+      }
+
+      op_cleanup();
+
+    case _degenerated_evac:
+      // If the heuristics decided we should do the cycle, this flag would be set,
+      // and we can do evacuation. Otherwise, this is the shortcut cycle.
+      if (is_evacuation_in_progress()) {
+
+        // Degeneration under oom-evac protocol might have left some objects in
+        // collection set un-evacuated. Restart evacuation from the beginning to
+        // capture all objects. For all the objects that are already evacuated,
+        // it would be a simple check, which is supposed to be fast. This is also
+        // safe to do even without degeneration, as CSet iterator is at beginning
+        // in preparation for evacuation anyway.
+        collection_set()->clear_current_index();
+
+        op_stw_evac();
+        if (cancelled_gc()) {
+          op_degenerated_fail();
+          return;
+        }
+      }
+
+      // If the heuristics decided we should do the cycle, this flag would be set,
+      // and we need to do update-refs. Otherwise, this is the shortcut cycle.
+      if (has_forwarded_objects()) {
+        op_init_updaterefs();
+        if (cancelled_gc()) {
+          op_degenerated_fail();
+          return;
+        }
+      }
+
+    case _degenerated_updaterefs:
+      if (has_forwarded_objects()) {
+        op_final_updaterefs();
+        if (cancelled_gc()) {
+          op_degenerated_fail();
+          return;
+        }
+      }
+
+      op_cleanup();
+      break;
+
+    default:
+      ShouldNotReachHere();
+  }
+
+  if (ShenandoahVerify) {
+    verifier()->verify_after_degenerated();
+  }
+
+  if (VerifyAfterGC) {
+    Universe::verify();
+  }
+
+  metrics.snap_after();
+  metrics.print();
+
+  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
+  // because that probably means the heap is overloaded and/or fragmented.
+  if (!metrics.is_good_progress("Degenerated GC")) {
+    _progress_last_gc.unset();
+    cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
+    op_degenerated_futile();
+  } else {
+    _progress_last_gc.set();
+  }
+}
+
+void ShenandoahHeap::op_degenerated_fail() {
+  log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
+  shenandoah_policy()->record_degenerated_upgrade_to_full();
+  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
+}
+
+void ShenandoahHeap::op_degenerated_futile() {
+  shenandoah_policy()->record_degenerated_upgrade_to_full();
+  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
+}
+
+void ShenandoahHeap::stop_concurrent_marking() {
+  assert(is_concurrent_mark_in_progress(), "How else could we get here?");
+  if (!cancelled_gc()) {
+    // Marking finished without being cancelled: clear the has-forwarded-objects
+    // flag and publish the now-complete marking context.
+    set_has_forwarded_objects(false);
+    mark_complete_marking_context();
+  }
+  set_concurrent_mark_in_progress(false);
+}
+
+void ShenandoahHeap::force_satb_flush_all_threads() {
+  if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
+    // No need to flush SATBs
+    return;
+  }
+
+  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+    ShenandoahThreadLocalData::set_force_satb_flush(t, true);
+  }
+  // The threads are not "acquiring" their thread-local data, but it does not
+  // hurt to "release" the updates here anyway.
+  OrderAccess::fence();
+}
+
+void ShenandoahHeap::set_gc_state_all_threads(char state) {
+  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+    ShenandoahThreadLocalData::set_gc_state(t, state);
+  }
+}
+
+void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
+  _gc_state.set_cond(mask, value);
+  set_gc_state_all_threads(_gc_state.raw_value());
+}
+
+void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
+  set_gc_state_mask(MARKING, in_progress);
+  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
+}
+
+void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
+  set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
+  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
+}
+
+void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
+  set_gc_state_mask(EVACUATION, in_progress);
+}
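+
+// Note (interpretation sketch): the per-thread gc_state is a bitmask, so during
+// traversal both TRAVERSAL and HAS_FORWARDED are set at once (see above), and a
+// reader only needs to test the single bit it cares about, e.g.
+// (gc_state & HAS_FORWARDED) != 0, without decoding the exact phase.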
+
+HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
+  // Initialize Brooks pointer for the next object
+  HeapWord* result = obj + ShenandoahBrooksPointer::word_size();
+  ShenandoahBrooksPointer::initialize(oop(result));
+  return result;
+}
+
+ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
+  _mark_context(ShenandoahHeap::heap()->marking_context()) {
+}
+
+ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
+  _mark_context(ShenandoahHeap::heap()->marking_context()) {
+}
+
+bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
+  if (CompressedOops::is_null(obj)) {
+    return false;
+  }
+  obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+  shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress() || ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
+  return _mark_context->is_marked(obj);
+}
+
+bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
+  if (CompressedOops::is_null(obj)) {
+    return false;
+  }
+  shenandoah_assert_not_forwarded(NULL, obj);
+  return _mark_context->is_marked(obj);
+}
+
+void ShenandoahHeap::ref_processing_init() {
+  assert(_max_workers > 0, "Sanity");
+
+  _ref_processor =
+    new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
+                           ParallelRefProcEnabled,  // MT processing
+                           _max_workers,            // Degree of MT processing
+                           true,                    // MT discovery
+                           _max_workers,            // Degree of MT discovery
+                           false,                   // Reference discovery is not atomic
+                           NULL,                    // No closure, should be installed before use
+                           true);                   // Scale worker threads
+
+  shenandoah_assert_rp_isalive_not_installed();
+}
+
+GCTracer* ShenandoahHeap::tracer() {
+  return shenandoah_policy()->tracer();
+}
+
+size_t ShenandoahHeap::tlab_used(Thread* thread) const {
+  return _free_set->used();
+}
+
+void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
+  if (try_cancel_gc()) {
+    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
+    log_info(gc)("%s", msg.buffer());
+    Events::log(Thread::current(), "%s", msg.buffer());
+  }
+}
+
+uint ShenandoahHeap::max_workers() {
+  return _max_workers;
+}
+
+void ShenandoahHeap::stop() {
+  // The shutdown sequence should be able to terminate when GC is running.
+
+  // Step 0. Notify policy to disable event recording.
+  _shenandoah_policy->record_shutdown();
+
+  // Step 1. Notify control thread that we are in shutdown.
+  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
+  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
+  control_thread()->prepare_for_graceful_shutdown();
+
+  // Step 2. Notify GC workers that we are cancelling GC.
+  cancel_gc(GCCause::_shenandoah_stop_vm);
+
+  // Step 3. Wait until GC worker exits normally.
+  control_thread()->stop();
+
+  // Step 4. Stop String Dedup thread if it is active
+  if (ShenandoahStringDedup::is_enabled()) {
+    ShenandoahStringDedup::stop();
+  }
+}
+
+void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
+  assert(heuristics()->can_unload_classes(), "Class unloading should be enabled");
+
+  ShenandoahGCPhase root_phase(full_gc ?
+                               ShenandoahPhaseTimings::full_gc_purge :
+                               ShenandoahPhaseTimings::purge);
+
+  ShenandoahIsAliveSelector alive;
+  BoolObjectClosure* is_alive = alive.is_alive_closure();
+
+  bool purged_class;
+
+  // Unload classes and purge SystemDictionary.
+  {
+    ShenandoahGCPhase phase(full_gc ?
+                            ShenandoahPhaseTimings::full_gc_purge_class_unload :
+                            ShenandoahPhaseTimings::purge_class_unload);
+    purged_class = SystemDictionary::do_unloading(gc_timer());
+  }
+
+  {
+    ShenandoahGCPhase phase(full_gc ?
+                            ShenandoahPhaseTimings::full_gc_purge_par :
+                            ShenandoahPhaseTimings::purge_par);
+    uint active = _workers->active_workers();
+    StringDedupUnlinkOrOopsDoClosure dedup_cl(is_alive, NULL);
+    ParallelCleaningTask unlink_task(is_alive, &dedup_cl, active, purged_class);
+    _workers->run_task(&unlink_task);
+  }
+
+  if (ShenandoahStringDedup::is_enabled()) {
+    ShenandoahGCPhase phase(full_gc ?
+                            ShenandoahPhaseTimings::full_gc_purge_string_dedup :
+                            ShenandoahPhaseTimings::purge_string_dedup);
+    ShenandoahStringDedup::parallel_cleanup();
+  }
+
+  {
+    ShenandoahGCPhase phase(full_gc ?
+                            ShenandoahPhaseTimings::full_gc_purge_cldg :
+                            ShenandoahPhaseTimings::purge_cldg);
+    ClassLoaderDataGraph::purge();
+  }
+}
+
+void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
+  set_gc_state_mask(HAS_FORWARDED, cond);
+}
+
+void ShenandoahHeap::set_process_references(bool pr) {
+  _process_references.set_cond(pr);
+}
+
+void ShenandoahHeap::set_unload_classes(bool uc) {
+  _unload_classes.set_cond(uc);
+}
+
+bool ShenandoahHeap::process_references() const {
+  return _process_references.is_set();
+}
+
+bool ShenandoahHeap::unload_classes() const {
+  return _unload_classes.is_set();
+}
+
+address ShenandoahHeap::in_cset_fast_test_addr() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  assert(heap->collection_set() != NULL, "Sanity");
+  return (address) heap->collection_set()->biased_map_address();
+}
+
+address ShenandoahHeap::cancelled_gc_addr() {
+  return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
+}
+
+address ShenandoahHeap::gc_state_addr() {
+  return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
+}
+
+size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
+  return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
+}
+
+void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
+  OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
+}
+
+void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
+  _degenerated_gc_in_progress.set_cond(in_progress);
+}
+
+void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
+  _full_gc_in_progress.set_cond(in_progress);
+}
+
+void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
+  assert (is_full_gc_in_progress(), "should be");
+  _full_gc_move_in_progress.set_cond(in_progress);
+}
+
+void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
+  set_gc_state_mask(UPDATEREFS, in_progress);
+}
+
+void ShenandoahHeap::register_nmethod(nmethod* nm) {
+  ShenandoahCodeRoots::add_nmethod(nm);
+}
+
+void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
+  ShenandoahCodeRoots::remove_nmethod(nm);
+}
+
+oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
+  o = ShenandoahBarrierSet::barrier_set()->write_barrier(o);
+  ShenandoahHeapLocker locker(lock());
+  heap_region_containing(o)->make_pinned();
+  return o;
+}
+
+void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
+  o = ShenandoahBarrierSet::barrier_set()->read_barrier(o);
+  ShenandoahHeapLocker locker(lock());
+  heap_region_containing(o)->make_unpinned();
+}
+
+GCTimer* ShenandoahHeap::gc_timer() const {
+  return _gc_timer;
+}
+
+#ifdef ASSERT
+void ShenandoahHeap::assert_gc_workers(uint nworkers) {
+  assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
+
+  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
+    if (UseDynamicNumberOfGCThreads ||
+        (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
+      assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
+    } else {
+      // Use ParallelGCThreads inside safepoints
+      assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
+    }
+  } else {
+    if (UseDynamicNumberOfGCThreads ||
+        (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
+      assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
+    } else {
+      // Use ConcGCThreads outside safepoints
+      assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
+    }
+  }
+}
+#endif
+
+ShenandoahVerifier* ShenandoahHeap::verifier() {
+  guarantee(ShenandoahVerify, "Should be enabled");
+  assert (_verifier != NULL, "sanity");
+  return _verifier;
+}
+
+template<class T>
+class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
+private:
+  T cl;
+  ShenandoahHeap* _heap;
+  ShenandoahRegionIterator* _regions;
+  bool _concurrent;
+public:
+  ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
+    AbstractGangTask("Concurrent Update References Task"),
+    cl(T()),
+    _heap(ShenandoahHeap::heap()),
+    _regions(regions),
+    _concurrent(concurrent) {
+  }
+
+  void work(uint worker_id) {
+    if (_concurrent) {
+      ShenandoahConcurrentWorkerSession worker_session(worker_id);
+      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
+      do_work();
+    } else {
+      ShenandoahParallelWorkerSession worker_session(worker_id);
+      do_work();
+    }
+  }
+
+private:
+  void do_work() {
+    ShenandoahHeapRegion* r = _regions->next();
+    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
+    while (r != NULL) {
+      HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
+      assert (top_at_start_ur >= r->bottom(), "sanity");
+      if (r->is_active() && !r->is_cset()) {
+        _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur);
+      }
+      if (ShenandoahPacing) {
+        _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
+      }
+      if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
+        return;
+      }
+      r = _regions->next();
+    }
+  }
+};
+
+void ShenandoahHeap::update_heap_references(bool concurrent) {
+  ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
+  workers()->run_task(&task);
+}
+
+void ShenandoahHeap::op_init_updaterefs() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
+
+  set_evacuation_in_progress(false);
+
+  retire_and_reset_gclabs();
+
+  if (ShenandoahVerify) {
+    verifier()->verify_before_updaterefs();
+  }
+
+  set_update_refs_in_progress(true);
+  make_parsable(true);
+  for (size_t i = 0; i < num_regions(); i++) {
+    ShenandoahHeapRegion* r = get_region(i);
+    r->set_concurrent_iteration_safe_limit(r->top());
+  }
+
+  // Reset iterator.
+  _update_refs_iterator.reset();
+
+  if (ShenandoahPacing) {
+    pacer()->setup_for_updaterefs();
+  }
+}
+
+void ShenandoahHeap::op_final_updaterefs() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
+
+  // Check if there is left-over work, and finish it
+  if (_update_refs_iterator.has_next()) {
+    ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
+
+    // Finish updating references where we left off.
+    clear_cancelled_gc();
+    update_heap_references(false);
+  }
+
+  // Clear cancelled GC, if set. On cancellation path, the block before would handle
+  // everything. On degenerated paths, cancelled gc would not be set anyway.
+  if (cancelled_gc()) {
+    clear_cancelled_gc();
+  }
+  assert(!cancelled_gc(), "Should have been done right before");
+
+  concurrent_mark()->update_roots(is_degenerated_gc_in_progress() ?
+                                  ShenandoahPhaseTimings::degen_gc_update_roots :
+                                  ShenandoahPhaseTimings::final_update_refs_roots);
+
+  ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
+
+  trash_cset_regions();
+  set_has_forwarded_objects(false);
+  set_update_refs_in_progress(false);
+
+  if (ShenandoahVerify) {
+    verifier()->verify_after_updaterefs();
+  }
+
+  if (VerifyAfterGC) {
+    Universe::verify();
+  }
+
+  {
+    ShenandoahHeapLocker locker(lock());
+    _free_set->rebuild();
+  }
+}
+
+#ifdef ASSERT
+void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
+  _lock.assert_owned_by_current_thread();
+}
+
+void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
+  _lock.assert_not_owned_by_current_thread();
+}
+
+void ShenandoahHeap::assert_heaplock_or_safepoint() {
+  _lock.assert_owned_by_current_thread_or_safepoint();
+}
+#endif
+
+void ShenandoahHeap::print_extended_on(outputStream *st) const {
+  print_on(st);
+  print_heap_regions_on(st);
+}
+
+bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
+  size_t slice = r->region_number() / _bitmap_regions_per_slice;
+
+  size_t regions_from = _bitmap_regions_per_slice * slice;
+  size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
+  for (size_t g = regions_from; g < regions_to; g++) {
+    assert (g / _bitmap_regions_per_slice == slice, "same slice");
+    if (skip_self && g == r->region_number()) continue;
+    if (get_region(g)->is_committed()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
+  assert_heaplock_owned_by_current_thread();
+
+  if (is_bitmap_slice_committed(r, true)) {
+    // Some other region from the group is already committed, meaning the bitmap
+    // slice is already committed; exit right away.
+    return true;
+  }
+
+  // Commit the bitmap slice:
+  size_t slice = r->region_number() / _bitmap_regions_per_slice;
+  size_t off = _bitmap_bytes_per_slice * slice;
+  size_t len = _bitmap_bytes_per_slice;
+  if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
+    return false;
+  }
+  return true;
+}
+
+bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
+  assert_heaplock_owned_by_current_thread();
+
+  if (is_bitmap_slice_committed(r, true)) {
+    // Some other region from the group is still committed, meaning the bitmap
+    // slice should stay committed; exit right away.
+    return true;
+  }
+
+  // Uncommit the bitmap slice:
+  size_t slice = r->region_number() / _bitmap_regions_per_slice;
+  size_t off = _bitmap_bytes_per_slice * slice;
+  size_t len = _bitmap_bytes_per_slice;
+  if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
+    return false;
+  }
+  return true;
+}
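+
+// Worked example (illustrative numbers): with _bitmap_regions_per_slice = 8,
+// regions 0..7 share bitmap slice 0, covering bytes [0, _bitmap_bytes_per_slice)
+// of _bitmap_region. Committing the bitmap for region 3 commits that whole slice
+// once; committing region 5 afterwards is a no-op, since is_bitmap_slice_committed(r, true)
+// already sees region 3 committed. The slice is uncommitted only after the last
+// committed region of the group is gone.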
+
+void ShenandoahHeap::safepoint_synchronize_begin() {
+  if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
+    SuspendibleThreadSet::synchronize();
+  }
+}
+
+void ShenandoahHeap::safepoint_synchronize_end() {
+  if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
+    SuspendibleThreadSet::desynchronize();
+  }
+}
+
+void ShenandoahHeap::vmop_entry_init_mark() {
+  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
+  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
+
+  try_inject_alloc_failure();
+  VM_ShenandoahInitMark op;
+  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
+}
+
+void ShenandoahHeap::vmop_entry_final_mark() {
+  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
+  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
+
+  try_inject_alloc_failure();
+  VM_ShenandoahFinalMarkStartEvac op;
+  VMThread::execute(&op); // jump to entry_final_mark under safepoint
+}
+
+void ShenandoahHeap::vmop_entry_final_evac() {
+  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
+  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
+
+  VM_ShenandoahFinalEvac op;
+  VMThread::execute(&op); // jump to entry_final_evac under safepoint
+}
+
+void ShenandoahHeap::vmop_entry_init_updaterefs() {
+  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
+  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
+
+  try_inject_alloc_failure();
+  VM_ShenandoahInitUpdateRefs op;
+  VMThread::execute(&op);
+}
+
+void ShenandoahHeap::vmop_entry_final_updaterefs() {
+  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
+  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
+
+  try_inject_alloc_failure();
+  VM_ShenandoahFinalUpdateRefs op;
+  VMThread::execute(&op);
+}
+
+void ShenandoahHeap::vmop_entry_init_traversal() {
+  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
+  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
+
+  try_inject_alloc_failure();
+  VM_ShenandoahInitTraversalGC op;
+  VMThread::execute(&op);
+}
+
+void ShenandoahHeap::vmop_entry_final_traversal() {
+  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
+  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
+
+  try_inject_alloc_failure();
+  VM_ShenandoahFinalTraversalGC op;
+  VMThread::execute(&op);
+}
+
+void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
+  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
+  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
+
+  try_inject_alloc_failure();
+  VM_ShenandoahFullGC op(cause);
+  VMThread::execute(&op);
+}
+
+void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
+  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
+  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
+
+  VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
+  VMThread::execute(&degenerated_gc);
+}
+
+void ShenandoahHeap::entry_init_mark() {
+  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
+  const char* msg = init_mark_event_message();
+  GCTraceTime(Info, gc) time(msg, gc_timer());
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
+                              "init marking");
+
+  op_init_mark();
+}
+
+void ShenandoahHeap::entry_final_mark() {
+  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
+  const char* msg = final_mark_event_message();
+  GCTraceTime(Info, gc) time(msg, gc_timer());
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
+                              "final marking");
+
+  op_final_mark();
+}
+
+void ShenandoahHeap::entry_final_evac() {
+  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
+  static const char* msg = "Pause Final Evac";
+  GCTraceTime(Info, gc) time(msg, gc_timer());
+  EventMark em("%s", msg);
+
+  op_final_evac();
+}
+
+void ShenandoahHeap::entry_init_updaterefs() {
+  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
+
+  static const char* msg = "Pause Init Update Refs";
+  GCTraceTime(Info, gc) time(msg, gc_timer());
+  EventMark em("%s", msg);
+
+  // No workers used in this phase, no setup required
+
+  op_init_updaterefs();
+}
+
+void ShenandoahHeap::entry_final_updaterefs() {
+  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
+
+  static const char* msg = "Pause Final Update Refs";
+  GCTraceTime(Info, gc) time(msg, gc_timer());
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
+                              "final reference update");
+
+  op_final_updaterefs();
+}
+
+void ShenandoahHeap::entry_init_traversal() {
+  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
+
+  static const char* msg = "Pause Init Traversal";
+  GCTraceTime(Info, gc) time(msg, gc_timer());
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
+                              "init traversal");
+
+  op_init_traversal();
+}
+
+void ShenandoahHeap::entry_final_traversal() {
+  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
+
+  static const char* msg = "Pause Final Traversal";
+  GCTraceTime(Info, gc) time(msg, gc_timer());
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
+                              "final traversal");
+
+  op_final_traversal();
+}
+
+void ShenandoahHeap::entry_full(GCCause::Cause cause) {
+  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
+
+  static const char* msg = "Pause Full";
+  GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
+                              "full gc");
+
+  op_full(cause);
+}
+
+void ShenandoahHeap::entry_degenerated(int point) {
+  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
+
+  ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
+  const char* msg = degen_event_message(dpoint);
+  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
+                              "stw degenerated gc");
+
+  set_degenerated_gc_in_progress(true);
+  op_degenerated(dpoint);
+  set_degenerated_gc_in_progress(false);
+}
+
+void ShenandoahHeap::entry_mark() {
+  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
+
+  const char* msg = conc_mark_event_message();
+  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
+                              "concurrent marking");
+
+  try_inject_alloc_failure();
+  op_mark();
+}
+
+void ShenandoahHeap::entry_evac() {
+  ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
+  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
+
+  static const char* msg = "Concurrent evacuation";
+  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
+                              "concurrent evacuation");
+
+  try_inject_alloc_failure();
+  op_conc_evac();
+}
+
+void ShenandoahHeap::entry_updaterefs() {
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
+
+  static const char* msg = "Concurrent update references";
+  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
+                              "concurrent reference update");
+
+  try_inject_alloc_failure();
+  op_updaterefs();
+}
+
+void ShenandoahHeap::entry_cleanup() {
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
+
+  static const char* msg = "Concurrent cleanup";
+  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
+  EventMark em("%s", msg);
+
+  // This phase does not use workers, no need for setup
+
+  try_inject_alloc_failure();
+  op_cleanup();
+}
+
+void ShenandoahHeap::entry_reset() {
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);
+
+  static const char* msg = "Concurrent reset";
+  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
+                              "concurrent reset");
+
+  try_inject_alloc_failure();
+  op_reset();
+}
+
+void ShenandoahHeap::entry_preclean() {
+  if (ShenandoahPreclean && process_references()) {
+    static const char* msg = "Concurrent precleaning";
+    GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
+    EventMark em("%s", msg);
+
+    ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
+
+    ShenandoahWorkerScope scope(workers(),
+                                ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
+                                "concurrent preclean",
+                                /* check_workers = */ false);
+
+    try_inject_alloc_failure();
+    op_preclean();
+  }
+}
+
+void ShenandoahHeap::entry_traversal() {
+  static const char* msg = "Concurrent traversal";
+  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
+  EventMark em("%s", msg);
+
+  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
+
+  ShenandoahWorkerScope scope(workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
+                              "concurrent traversal");
+
+  try_inject_alloc_failure();
+  op_traversal();
+}
+
+void ShenandoahHeap::entry_uncommit(double shrink_before) {
+  static const char *msg = "Concurrent uncommit";
+  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
+  EventMark em("%s", msg);
+
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);
+
+  op_uncommit(shrink_before);
+}
+
+void ShenandoahHeap::try_inject_alloc_failure() {
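+  // Testing-only hook: with ~5% probability per call ((os::random() % 1000) > 950),
+  // raise the injection flag, then sleep ~1ms so an allocation can trip over it.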
+  if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
+    _inject_alloc_failure.set();
+    os::naked_short_sleep(1);
+    if (cancelled_gc()) {
+      log_info(gc)("Allocation failure was successfully injected");
+    }
+  }
+}
+
+bool ShenandoahHeap::should_inject_alloc_failure() {
+  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
+}
+
+void ShenandoahHeap::initialize_serviceability() {
+  _memory_pool = new ShenandoahMemoryPool(this);
+  _cycle_memory_manager.add_pool(_memory_pool);
+  _stw_memory_manager.add_pool(_memory_pool);
+}
+
+GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
+  GrowableArray<GCMemoryManager*> memory_managers(2);
+  memory_managers.append(&_cycle_memory_manager);
+  memory_managers.append(&_stw_memory_manager);
+  return memory_managers;
+}
+
+GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
+  GrowableArray<MemoryPool*> memory_pools(1);
+  memory_pools.append(_memory_pool);
+  return memory_pools;
+}
+
+void ShenandoahHeap::enter_evacuation() {
+  _oom_evac_handler.enter_evacuation();
+}
+
+void ShenandoahHeap::leave_evacuation() {
+  _oom_evac_handler.leave_evacuation();
+}
+
+ShenandoahRegionIterator::ShenandoahRegionIterator() :
+  _heap(ShenandoahHeap::heap()),
+  _index(0) {}
+
+ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
+  _heap(heap),
+  _index(0) {}
+
+void ShenandoahRegionIterator::reset() {
+  _index = 0;
+}
+
+bool ShenandoahRegionIterator::has_next() const {
+  return _index < _heap->num_regions();
+}
+
+char ShenandoahHeap::gc_state() const {
+  return _gc_state.raw_value();
+}
+
+void ShenandoahHeap::deduplicate_string(oop str) {
+  assert(java_lang_String::is_instance(str), "invariant");
+
+  if (ShenandoahStringDedup::is_enabled()) {
+    ShenandoahStringDedup::deduplicate(str);
+  }
+}
+
+const char* ShenandoahHeap::init_mark_event_message() const {
+  bool update_refs = has_forwarded_objects();
+  bool proc_refs = process_references();
+  bool unload_cls = unload_classes();
+
+  if (update_refs && proc_refs && unload_cls) {
+    return "Pause Init Mark (update refs) (process weakrefs) (unload classes)";
+  } else if (update_refs && proc_refs) {
+    return "Pause Init Mark (update refs) (process weakrefs)";
+  } else if (update_refs && unload_cls) {
+    return "Pause Init Mark (update refs) (unload classes)";
+  } else if (proc_refs && unload_cls) {
+    return "Pause Init Mark (process weakrefs) (unload classes)";
+  } else if (update_refs) {
+    return "Pause Init Mark (update refs)";
+  } else if (proc_refs) {
+    return "Pause Init Mark (process weakrefs)";
+  } else if (unload_cls) {
+    return "Pause Init Mark (unload classes)";
+  } else {
+    return "Pause Init Mark";
+  }
+}
+
+const char* ShenandoahHeap::final_mark_event_message() const {
+  bool update_refs = has_forwarded_objects();
+  bool proc_refs = process_references();
+  bool unload_cls = unload_classes();
+
+  if (update_refs && proc_refs && unload_cls) {
+    return "Pause Final Mark (update refs) (process weakrefs) (unload classes)";
+  } else if (update_refs && proc_refs) {
+    return "Pause Final Mark (update refs) (process weakrefs)";
+  } else if (update_refs && unload_cls) {
+    return "Pause Final Mark (update refs) (unload classes)";
+  } else if (proc_refs && unload_cls) {
+    return "Pause Final Mark (process weakrefs) (unload classes)";
+  } else if (update_refs) {
+    return "Pause Final Mark (update refs)";
+  } else if (proc_refs) {
+    return "Pause Final Mark (process weakrefs)";
+  } else if (unload_cls) {
+    return "Pause Final Mark (unload classes)";
+  } else {
+    return "Pause Final Mark";
+  }
+}
+
+const char* ShenandoahHeap::conc_mark_event_message() const {
+  bool update_refs = has_forwarded_objects();
+  bool proc_refs = process_references();
+  bool unload_cls = unload_classes();
+
+  if (update_refs && proc_refs && unload_cls) {
+    return "Concurrent marking (update refs) (process weakrefs) (unload classes)";
+  } else if (update_refs && proc_refs) {
+    return "Concurrent marking (update refs) (process weakrefs)";
+  } else if (update_refs && unload_cls) {
+    return "Concurrent marking (update refs) (unload classes)";
+  } else if (proc_refs && unload_cls) {
+    return "Concurrent marking (process weakrefs) (unload classes)";
+  } else if (update_refs) {
+    return "Concurrent marking (update refs)";
+  } else if (proc_refs) {
+    return "Concurrent marking (process weakrefs)";
+  } else if (unload_cls) {
+    return "Concurrent marking (unload classes)";
+  } else {
+    return "Concurrent marking";
+  }
+}
+
+const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
+  switch (point) {
+    case _degenerated_unset:
+      return "Pause Degenerated GC (<UNSET>)";
+    case _degenerated_traversal:
+      return "Pause Degenerated GC (Traversal)";
+    case _degenerated_outside_cycle:
+      return "Pause Degenerated GC (Outside of Cycle)";
+    case _degenerated_mark:
+      return "Pause Degenerated GC (Mark)";
+    case _degenerated_evac:
+      return "Pause Degenerated GC (Evacuation)";
+    case _degenerated_updaterefs:
+      return "Pause Degenerated GC (Update Refs)";
+    default:
+      ShouldNotReachHere();
+      return "ERROR";
+  }
+}
+
+jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
+#ifdef ASSERT
+  assert(worker_id < _max_workers, "sanity");
+  for (uint i = 0; i < num_regions(); i++) {
+    assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
+  }
+#endif
+  return _liveness_cache[worker_id];
+}
+
+void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
+  assert(worker_id < _max_workers, "sanity");
+  jushort* ld = _liveness_cache[worker_id];
+  for (uint i = 0; i < num_regions(); i++) {
+    ShenandoahHeapRegion* r = get_region(i);
+    jushort live = ld[i];
+    if (live > 0) {
+      r->increase_live_data_gc_words(live);
+      ld[i] = 0;
+    }
+  }
+}
+
+size_t ShenandoahHeap::obj_size(oop obj) const {
+  return CollectedHeap::obj_size(obj) + ShenandoahBrooksPointer::word_size();
+}
+
+ptrdiff_t ShenandoahHeap::cell_header_size() const {
+  return ShenandoahBrooksPointer::byte_size();
+}
+
+BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
+  return ShenandoahHeap::heap()->has_forwarded_objects() ? reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl)
+                                                         : reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,757 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
+
+#include "gc/shared/markBitMap.hpp"
+#include "gc/shared/softRefPolicy.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahAllocRequest.hpp"
+#include "gc/shenandoah/shenandoahHeapLock.hpp"
+#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
+#include "gc/shenandoah/shenandoahSharedVariables.hpp"
+#include "services/memoryManager.hpp"
+
+class ConcurrentGCTimer;
+class ReferenceProcessor;
+class ShenandoahAllocTracker;
+class ShenandoahCollectorPolicy;
+class ShenandoahControlThread;
+class ShenandoahGCSession;
+class ShenandoahHeuristics;
+class ShenandoahMarkingContext;
+class ShenandoahPhaseTimings;
+class ShenandoahHeap;
+class ShenandoahHeapRegion;
+class ShenandoahHeapRegionClosure;
+class ShenandoahCollectionSet;
+class ShenandoahFreeSet;
+class ShenandoahConcurrentMark;
+class ShenandoahMarkCompact;
+class ShenandoahMonitoringSupport;
+class ShenandoahPacer;
+class ShenandoahTraversalGC;
+class ShenandoahVerifier;
+class ShenandoahWorkGang;
+class VMStructs;
+
+class ShenandoahRegionIterator : public StackObj {
+private:
+  ShenandoahHeap* _heap;
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
+  volatile size_t _index;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  // No implicit copying: iterators should be passed by reference to capture the state
+  ShenandoahRegionIterator(const ShenandoahRegionIterator& that);
+  ShenandoahRegionIterator& operator=(const ShenandoahRegionIterator& o);
+
+public:
+  ShenandoahRegionIterator();
+  ShenandoahRegionIterator(ShenandoahHeap* heap);
+
+  // Reset iterator to default state
+  void reset();
+
+  // Returns next region, or NULL if there are no more regions.
+  // This is multi-thread-safe.
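+  //
+  // Typical usage (a sketch): workers share one iterator and each loops
+  //   for (ShenandoahHeapRegion* r = it.next(); r != NULL; r = it.next()) { ... }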
+  inline ShenandoahHeapRegion* next();
+
+  // This is *not* MT safe. However, in the absence of multithreaded access, it
+  // can be used to determine if there is more work to do.
+  bool has_next() const;
+};
+
+class ShenandoahHeapRegionClosure : public StackObj {
+public:
+  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
+  virtual bool is_thread_safe() { return false; }
+};
+
+class ShenandoahUpdateRefsClosure: public OopClosure {
+private:
+  ShenandoahHeap* _heap;
+
+  template <class T>
+  inline void do_oop_work(T* p);
+
+public:
+  ShenandoahUpdateRefsClosure();
+  inline void do_oop(oop* p);
+  inline void do_oop(narrowOop* p);
+};
+
+#ifdef ASSERT
+class ShenandoahAssertToSpaceClosure : public OopClosure {
+private:
+  template <class T>
+  void do_oop_work(T* p);
+public:
+  void do_oop(narrowOop* p);
+  void do_oop(oop* p);
+};
+#endif
+
+class ShenandoahAlwaysTrueClosure : public BoolObjectClosure {
+public:
+  bool do_object_b(oop p) { return true; }
+};
+
+class ShenandoahForwardedIsAliveClosure: public BoolObjectClosure {
+private:
+  ShenandoahMarkingContext* const _mark_context;
+public:
+  ShenandoahForwardedIsAliveClosure();
+  bool do_object_b(oop obj);
+};
+
+class ShenandoahIsAliveClosure: public BoolObjectClosure {
+private:
+  ShenandoahMarkingContext* const _mark_context;
+public:
+  ShenandoahIsAliveClosure();
+  bool do_object_b(oop obj);
+};
+
+class ShenandoahIsAliveSelector : public StackObj {
+private:
+  ShenandoahIsAliveClosure _alive_cl;
+  ShenandoahForwardedIsAliveClosure _fwd_alive_cl;
+public:
+  BoolObjectClosure* is_alive_closure();
+};
+
+// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
+// to encode forwarding data. See ShenandoahBrooksPointer for details on forwarding data encoding.
+// See ShenandoahControlThread for GC cycle structure.
+//
+class ShenandoahHeap : public CollectedHeap {
+  friend class ShenandoahAsserts;
+  friend class VMStructs;
+  friend class ShenandoahGCSession;
+
+// ---------- Locks that guard important data structures in Heap
+//
+private:
+  ShenandoahHeapLock _lock;
+
+public:
+  ShenandoahHeapLock* lock() {
+    return &_lock;
+  }
+
+  void assert_heaplock_owned_by_current_thread()     PRODUCT_RETURN;
+  void assert_heaplock_not_owned_by_current_thread() PRODUCT_RETURN;
+  void assert_heaplock_or_safepoint()                PRODUCT_RETURN;
+
+// ---------- Initialization, termination, identification, printing routines
+//
+public:
+  static ShenandoahHeap* heap();
+  static ShenandoahHeap* heap_no_check();
+
+  const char* name()          const { return "Shenandoah"; }
+  ShenandoahHeap::Name kind() const { return CollectedHeap::Shenandoah; }
+
+  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
+  jint initialize();
+  void post_initialize();
+  void initialize_heuristics();
+
+  void initialize_serviceability();
+
+  void print_on(outputStream* st)              const;
+  void print_extended_on(outputStream *st)     const;
+  void print_tracing_info()                    const;
+  void print_gc_threads_on(outputStream* st)   const;
+  void print_heap_regions_on(outputStream* st) const;
+
+  void stop();
+
+  void prepare_for_verify();
+  void verify(VerifyOption vo);
+
+// ---------- Heap counters and metrics
+//
+private:
+           size_t _initial_size;
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
+  volatile size_t _used;
+  volatile size_t _committed;
+  volatile size_t _bytes_allocated_since_gc_start;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+public:
+  void increase_used(size_t bytes);
+  void decrease_used(size_t bytes);
+  void set_used(size_t bytes);
+
+  void increase_committed(size_t bytes);
+  void decrease_committed(size_t bytes);
+  void increase_allocated(size_t bytes);
+
+  size_t bytes_allocated_since_gc_start();
+  void reset_bytes_allocated_since_gc_start();
+
+  size_t max_capacity()     const;
+  size_t initial_capacity() const;
+  size_t capacity()         const;
+  size_t used()             const;
+  size_t committed()        const;
+
+// ---------- Workers handling
+//
+private:
+  uint _max_workers;
+  ShenandoahWorkGang* _workers;
+  ShenandoahWorkGang* _safepoint_workers;
+
+public:
+  uint max_workers();
+  void assert_gc_workers(uint nworker) PRODUCT_RETURN;
+
+  WorkGang* workers() const;
+  WorkGang* get_safepoint_workers();
+
+  void gc_threads_do(ThreadClosure* tcl) const;
+
+// ---------- Heap regions handling machinery
+//
+private:
+  MemRegion _heap_region;
+  size_t    _num_regions;
+  ShenandoahHeapRegion** _regions;
+  ShenandoahRegionIterator _update_refs_iterator;
+
+public:
+  inline size_t num_regions() const { return _num_regions; }
+
+  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
+  inline size_t heap_region_index_containing(const void* addr) const;
+
+  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;
+
+  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
+  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
+
+// ---------- GC state machinery
+//
+// GC state describes the important parts of the collector state that may be
+// used to make barrier selection decisions in both native and generated code.
+// Multiple bits can be set at once.
+//
+// Important invariant: when GC state is zero, the heap is stable, and no barriers
+// are required.
+//
+public:
+  enum GCStateBitPos {
+    // Heap has forwarded objects: need RB, ACMP, CAS barriers.
+    HAS_FORWARDED_BITPOS   = 0,
+
+    // Heap is under marking: needs SATB barriers.
+    MARKING_BITPOS    = 1,
+
+    // Heap is under evacuation: needs WB barriers. (Set together with UNSTABLE)
+    EVACUATION_BITPOS = 2,
+
+    // Heap is under updating: needs SVRB/SVWB barriers.
+    UPDATEREFS_BITPOS = 3,
+
+    // Heap is under traversal collection
+    TRAVERSAL_BITPOS  = 4,
+  };
+
+  enum GCState {
+    STABLE        = 0,
+    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
+    MARKING       = 1 << MARKING_BITPOS,
+    EVACUATION    = 1 << EVACUATION_BITPOS,
+    UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
+    TRAVERSAL     = 1 << TRAVERSAL_BITPOS,
+  };
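+
+  // A barrier fast path can, for instance, test the state byte and skip all
+  // barrier work while the heap is stable (a sketch, not the actual
+  // generated code):
+  //
+  //   if ((ShenandoahHeap::heap()->gc_state() & (MARKING | HAS_FORWARDED)) == 0) {
+  //     // no barrier work needed for this access
+  //   }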
+
+private:
+  ShenandoahSharedBitmap _gc_state;
+  ShenandoahSharedFlag   _degenerated_gc_in_progress;
+  ShenandoahSharedFlag   _full_gc_in_progress;
+  ShenandoahSharedFlag   _full_gc_move_in_progress;
+  ShenandoahSharedFlag   _progress_last_gc;
+
+  void set_gc_state_all_threads(char state);
+  void set_gc_state_mask(uint mask, bool value);
+
+public:
+  char gc_state() const;
+  static address gc_state_addr();
+
+  void set_concurrent_mark_in_progress(bool in_progress);
+  void set_evacuation_in_progress(bool in_progress);
+  void set_update_refs_in_progress(bool in_progress);
+  void set_degenerated_gc_in_progress(bool in_progress);
+  void set_full_gc_in_progress(bool in_progress);
+  void set_full_gc_move_in_progress(bool in_progress);
+  void set_concurrent_traversal_in_progress(bool in_progress);
+  void set_has_forwarded_objects(bool cond);
+
+  inline bool is_stable() const;
+  inline bool is_idle() const;
+  inline bool is_concurrent_mark_in_progress() const;
+  inline bool is_update_refs_in_progress() const;
+  inline bool is_evacuation_in_progress() const;
+  inline bool is_degenerated_gc_in_progress() const;
+  inline bool is_full_gc_in_progress() const;
+  inline bool is_full_gc_move_in_progress() const;
+  inline bool is_concurrent_traversal_in_progress() const;
+  inline bool has_forwarded_objects() const;
+  inline bool is_gc_in_progress_mask(uint mask) const;
+
+// ---------- GC cancellation and degeneration machinery
+//
+// Cancelled GC flag is used to notify concurrent phases that they should terminate.
+//
+public:
+  enum ShenandoahDegenPoint {
+    _degenerated_unset,
+    _degenerated_traversal,
+    _degenerated_outside_cycle,
+    _degenerated_mark,
+    _degenerated_evac,
+    _degenerated_updaterefs,
+    _DEGENERATED_LIMIT,
+  };
+
+  static const char* degen_point_to_string(ShenandoahDegenPoint point) {
+    switch (point) {
+      case _degenerated_unset:
+        return "<UNSET>";
+      case _degenerated_traversal:
+        return "Traversal";
+      case _degenerated_outside_cycle:
+        return "Outside of Cycle";
+      case _degenerated_mark:
+        return "Mark";
+      case _degenerated_evac:
+        return "Evacuation";
+      case _degenerated_updaterefs:
+        return "Update Refs";
+      default:
+        ShouldNotReachHere();
+        return "ERROR";
+    }
+  }
+
+private:
+  enum CancelState {
+    // Normal state. GC has not been cancelled and is open for cancellation.
+    // Worker threads can suspend for safepoint.
+    CANCELLABLE,
+
+    // GC has been cancelled. Worker threads cannot suspend for
+    // safepoint but must finish their work as soon as possible.
+    CANCELLED,
+
+    // GC has not been cancelled and must not be cancelled. At least
+    // one worker thread checks for pending safepoint and may suspend
+    // if a safepoint is pending.
+    NOT_CANCELLED
+  };
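+
+  // State transitions (a sketch):
+  //   CANCELLABLE --cancel_gc()--------------------> CANCELLED
+  //   CANCELLABLE --check_cancelled_gc_and_yield()--> NOT_CANCELLED --> CANCELLABLE
+  //   CANCELLED   --clear_cancelled_gc()-----------> CANCELLABLE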
+
+  ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
+  inline bool try_cancel_gc();
+
+public:
+  static address cancelled_gc_addr();
+
+  inline bool cancelled_gc() const;
+  inline bool check_cancelled_gc_and_yield(bool sts_active = true);
+
+  inline void clear_cancelled_gc();
+
+  void cancel_gc(GCCause::Cause cause);
+
+// ---------- GC operations entry points
+//
+public:
+  // Entry points to STW GC operations; these cause a related safepoint that
+  // then calls the matching entry method below
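+  //
+  // The typical chain for a pause, e.g. for final-mark:
+  //   vmop_entry_final_mark() -> VM_ShenandoahFinalMarkStartEvac (safepoint) -> entry_final_mark() -> op_final_mark()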
+  void vmop_entry_init_mark();
+  void vmop_entry_final_mark();
+  void vmop_entry_final_evac();
+  void vmop_entry_init_updaterefs();
+  void vmop_entry_final_updaterefs();
+  void vmop_entry_init_traversal();
+  void vmop_entry_final_traversal();
+  void vmop_entry_full(GCCause::Cause cause);
+  void vmop_degenerated(ShenandoahDegenPoint point);
+
+  // Entry methods to normally STW GC operations. These set up logging, monitoring,
+  // and workers for the net VM operation
+  void entry_init_mark();
+  void entry_final_mark();
+  void entry_final_evac();
+  void entry_init_updaterefs();
+  void entry_final_updaterefs();
+  void entry_init_traversal();
+  void entry_final_traversal();
+  void entry_full(GCCause::Cause cause);
+  void entry_degenerated(int point);
+
+  // Entry methods to normally concurrent GC operations. These set up logging
+  // and monitoring for the concurrent operation.
+  void entry_reset();
+  void entry_mark();
+  void entry_preclean();
+  void entry_cleanup();
+  void entry_evac();
+  void entry_updaterefs();
+  void entry_traversal();
+  void entry_uncommit(double shrink_before);
+
+private:
+  // Actual work for the phases
+  void op_init_mark();
+  void op_final_mark();
+  void op_final_evac();
+  void op_init_updaterefs();
+  void op_final_updaterefs();
+  void op_init_traversal();
+  void op_final_traversal();
+  void op_full(GCCause::Cause cause);
+  void op_degenerated(ShenandoahDegenPoint point);
+  void op_degenerated_fail();
+  void op_degenerated_futile();
+
+  void op_reset();
+  void op_mark();
+  void op_preclean();
+  void op_cleanup();
+  void op_conc_evac();
+  void op_stw_evac();
+  void op_updaterefs();
+  void op_traversal();
+  void op_uncommit(double shrink_before);
+
+  // Messages for GC trace events; they have to be immortal so they can be
+  // passed around the logging/tracing systems
+  const char* init_mark_event_message() const;
+  const char* final_mark_event_message() const;
+  const char* conc_mark_event_message() const;
+  const char* degen_event_message(ShenandoahDegenPoint point) const;
+
+// ---------- GC subsystems
+//
+private:
+  ShenandoahControlThread*   _control_thread;
+  ShenandoahCollectorPolicy* _shenandoah_policy;
+  ShenandoahHeuristics*      _heuristics;
+  ShenandoahFreeSet*         _free_set;
+  ShenandoahConcurrentMark*  _scm;
+  ShenandoahTraversalGC*     _traversal_gc;
+  ShenandoahMarkCompact*     _full_gc;
+  ShenandoahPacer*           _pacer;
+  ShenandoahVerifier*        _verifier;
+
+  ShenandoahAllocTracker*    _alloc_tracker;
+  ShenandoahPhaseTimings*    _phase_timings;
+
+  ShenandoahControlThread*   control_thread()          { return _control_thread;    }
+  ShenandoahMarkCompact*     full_gc()                 { return _full_gc;           }
+
+public:
+  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
+  ShenandoahHeuristics*      heuristics()        const { return _heuristics;        }
+  ShenandoahFreeSet*         free_set()          const { return _free_set;          }
+  ShenandoahConcurrentMark*  concurrent_mark()         { return _scm;               }
+  ShenandoahTraversalGC*     traversal_gc()            { return _traversal_gc;      }
+  ShenandoahPacer*           pacer() const             { return _pacer;             }
+
+  ShenandoahPhaseTimings*    phase_timings()     const { return _phase_timings;     }
+  ShenandoahAllocTracker*    alloc_tracker()     const { return _alloc_tracker;     }
+
+  ShenandoahVerifier*        verifier();
+
+// ---------- VM subsystem bindings
+//
+private:
+  ShenandoahMonitoringSupport* _monitoring_support;
+  MemoryPool*                  _memory_pool;
+  GCMemoryManager              _stw_memory_manager;
+  GCMemoryManager              _cycle_memory_manager;
+  ConcurrentGCTimer*           _gc_timer;
+  SoftRefPolicy                _soft_ref_policy;
+
+public:
+  ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support;    }
+  GCMemoryManager* cycle_memory_manager()           { return &_cycle_memory_manager; }
+  GCMemoryManager* stw_memory_manager()             { return &_stw_memory_manager;   }
+  SoftRefPolicy* soft_ref_policy()                  { return &_soft_ref_policy;      }
+
+  GrowableArray<GCMemoryManager*> memory_managers();
+  GrowableArray<MemoryPool*> memory_pools();
+  GCTracer* tracer();
+  GCTimer* gc_timer() const;
+  CollectorPolicy* collector_policy() const;
+
+// ---------- Reference processing
+//
+private:
+  AlwaysTrueClosure    _subject_to_discovery;
+  ReferenceProcessor*  _ref_processor;
+  ShenandoahSharedFlag _process_references;
+
+  void ref_processing_init();
+
+public:
+  ReferenceProcessor* ref_processor() { return _ref_processor; }
+  void set_process_references(bool pr);
+  bool process_references() const;
+
+// ---------- Class Unloading
+//
+private:
+  ShenandoahSharedFlag _unload_classes;
+
+public:
+  void set_unload_classes(bool uc);
+  bool unload_classes() const;
+
+  // Delete entries for dead interned strings and clean up unreferenced symbols
+  // in the symbol table, possibly in parallel.
+  void unload_classes_and_cleanup_tables(bool full_gc);
+
+// ---------- Generic interface hooks
+// Minor things that the super-interface expects us to implement to play nice with
+// the rest of the runtime. Some of the things here are not required to be implemented,
+// and can be stubbed out.
+//
+public:
+  AdaptiveSizePolicy* size_policy() shenandoah_not_implemented_return(NULL);
+  bool is_maximal_no_gc() const shenandoah_not_implemented_return(false);
+
+  bool is_in(const void* p) const;
+
+  size_t obj_size(oop obj) const;
+  virtual ptrdiff_t cell_header_size() const;
+
+  // All objects can potentially move
+  bool is_scavengable(oop obj) { return true; }
+
+  void collect(GCCause::Cause cause);
+  void do_full_collection(bool clear_all_soft_refs);
+
+  // Used for parsing heap during error printing
+  HeapWord* block_start(const void* addr) const;
+  size_t block_size(const HeapWord* addr) const;
+  bool block_is_obj(const HeapWord* addr) const;
+
+  // Used for native heap walkers: heap dumpers, mostly
+  void object_iterate(ObjectClosure* cl);
+  void safe_object_iterate(ObjectClosure* cl);
+
+  // Used by RMI
+  jlong millis_since_last_gc();
+
+// ---------- Safepoint interface hooks
+//
+public:
+  void safepoint_synchronize_begin();
+  void safepoint_synchronize_end();
+
+// ---------- Code roots handling hooks
+//
+public:
+  void register_nmethod(nmethod* nm);
+  void unregister_nmethod(nmethod* nm);
+
+// ---------- Pinning hooks
+//
+public:
+  // Shenandoah supports per-object (per-region) pinning
+  bool supports_object_pinning() const { return true; }
+
+  oop pin_object(JavaThread* thread, oop obj);
+  void unpin_object(JavaThread* thread, oop obj);
+
+// ---------- Allocation support
+//
+private:
+  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
+  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
+  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
+  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);
+  void retire_and_reset_gclabs();
+
+public:
+  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
+  HeapWord* mem_allocate(size_t size, bool* what);
+  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
+                                               size_t size,
+                                               Metaspace::MetadataType mdtype);
+
+  oop obj_allocate(Klass* klass, int size, TRAPS);
+  oop array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS);
+  oop class_allocate(Klass* klass, int size, TRAPS);
+
+  void notify_mutator_alloc_words(size_t words, bool waste);
+
+  // Shenandoah supports TLAB allocation
+  bool supports_tlab_allocation() const { return true; }
+
+  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size);
+  size_t tlab_capacity(Thread *thr) const;
+  size_t unsafe_max_tlab_alloc(Thread *thread) const;
+  size_t max_tlab_size() const;
+  size_t tlab_used(Thread* ignored) const;
+
+  HeapWord* tlab_post_allocation_setup(HeapWord* obj);
+  void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
+  size_t min_dummy_object_size() const;
+
+  void resize_tlabs();
+
+  void ensure_parsability(bool retire_tlabs);
+  void make_parsable(bool retire_tlabs);
+
+// ---------- Marking support
+//
+private:
+  ShenandoahMarkingContext* _marking_context;
+  MemRegion  _bitmap_region;
+  MemRegion  _aux_bitmap_region;
+  MarkBitMap _verification_bit_map;
+  MarkBitMap _aux_bit_map;
+
+  size_t _bitmap_size;
+  size_t _bitmap_regions_per_slice;
+  size_t _bitmap_bytes_per_slice;
+
+  // Used for buffering per-region liveness data.
+  // Needed since ShenandoahHeapRegion uses atomics to update liveness.
+  //
+  // The array has max-workers elements, each of which is an array of
+  // max_regions jushorts. The choice of jushort is not accidental:
+  // there is a tradeoff between static/dynamic footprint that translates
+  // into cache pressure (which is already high during marking), and
+  // too many atomic updates. size_t/jint is too large, jbyte is too small.
+  jushort** _liveness_cache;
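+
+  // Usage during marking (a sketch): a worker grabs its zeroed row via
+  // get_liveness_cache(worker_id), accumulates live words per region index,
+  // and flush_liveness_cache(worker_id) later pushes the non-zero counts
+  // into their regions and re-zeroes the row.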
+
+public:
+  inline ShenandoahMarkingContext* complete_marking_context() const;
+  inline ShenandoahMarkingContext* marking_context() const;
+  inline void mark_complete_marking_context();
+  inline void mark_incomplete_marking_context();
+
+  template<class T>
+  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
+
+  template<class T>
+  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
+
+  template<class T>
+  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
+
+  void reset_mark_bitmap();
+
+  // SATB barriers hooks
+  inline bool requires_marking(const void* entry) const;
+  void force_satb_flush_all_threads();
+
+  // Support for bitmap uncommits
+  bool commit_bitmap_slice(ShenandoahHeapRegion *r);
+  bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
+  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);
+
+  // Liveness caching support
+  jushort* get_liveness_cache(uint worker_id);
+  void flush_liveness_cache(uint worker_id);
+
+// ---------- Evacuation support
+//
+private:
+  ShenandoahCollectionSet* _collection_set;
+  ShenandoahEvacOOMHandler _oom_evac_handler;
+
+  void evacuate_and_update_roots();
+
+public:
+  static address in_cset_fast_test_addr();
+
+  ShenandoahCollectionSet* collection_set() const { return _collection_set; }
+
+  template <class T>
+  inline bool in_collection_set(T obj) const;
+
+  // Avoid accidentally calling the method above with ShenandoahHeapRegion*, which would be *wrong*.
+  inline bool in_collection_set(ShenandoahHeapRegion* r) shenandoah_not_implemented_return(false);
+
+  // Evacuates object src. Returns the evacuated object, either evacuated
+  // by this thread, or by some other thread.
+  inline oop evacuate_object(oop src, Thread* thread);
+
+  // Call before/after evacuation.
+  void enter_evacuation();
+  void leave_evacuation();
+
+// ---------- Helper functions
+//
+public:
+  template <class T>
+  inline oop evac_update_with_forwarded(T* p);
+
+  template <class T>
+  inline oop maybe_update_with_forwarded(T* p);
+
+  template <class T>
+  inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);
+
+  template <class T>
+  inline oop update_with_forwarded_not_null(T* p, oop obj);
+
+  inline oop atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c);
+  inline oop atomic_compare_exchange_oop(oop n, oop* addr, oop c);
+
+  void trash_humongous_region_at(ShenandoahHeapRegion *r);
+
+  void deduplicate_string(oop str);
+
+  void stop_concurrent_marking();
+
+  void roots_iterate(OopClosure* cl);
+
+private:
+  void trash_cset_regions();
+  void update_heap_references(bool concurrent);
+
+// ---------- Testing helpers functions
+//
+private:
+  ShenandoahSharedFlag _inject_alloc_failure;
+
+  void try_inject_alloc_failure();
+  bool should_inject_alloc_failure();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,552 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
+
+#include "classfile/javaClasses.inline.hpp"
+#include "gc/shared/markBitMap.inline.hpp"
+#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.inline.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahWorkGroup.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
+#include "gc/shenandoah/shenandoahControlThread.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/prefetch.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/copy.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+template <class T>
+void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
+  T o = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(o)) {
+    oop obj = CompressedOops::decode_not_null(o);
+    _heap->update_with_forwarded_not_null(p, obj);
+  }
+}
+
+void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
+void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
+
+inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
+  size_t new_index = Atomic::add((size_t) 1, &_index);
+  // get_region() provides the bounds-check and returns NULL on OOB.
+  return _heap->get_region(new_index - 1);
+}
+
+inline bool ShenandoahHeap::has_forwarded_objects() const {
+  return _gc_state.is_set(HAS_FORWARDED);
+}
+
+inline WorkGang* ShenandoahHeap::workers() const {
+  return _workers;
+}
+
+inline WorkGang* ShenandoahHeap::get_safepoint_workers() {
+  return _safepoint_workers;
+}
+
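+// The region index is the offset of the address from the heap base, shifted
+// right by log2 of the region size.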
+inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
+  uintptr_t region_start = ((uintptr_t) addr);
+  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
+  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
+  return index;
+}
+
+inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
+  size_t index = heap_region_index_containing(addr);
+  ShenandoahHeapRegion* const result = get_region(index);
+  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
+  return result;
+}
+
+template <class T>
+inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
+  if (in_collection_set(obj)) {
+    shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress());
+    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
+  }
+#ifdef ASSERT
+  else {
+    shenandoah_assert_not_forwarded(p, obj);
+  }
+#endif
+  return obj;
+}
+
+template <class T>
+inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
+  T o = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(o)) {
+    oop obj = CompressedOops::decode_not_null(o);
+    return maybe_update_with_forwarded_not_null(p, obj);
+  } else {
+    return NULL;
+  }
+}
+
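+// Evacuation-time flavor of the update: if the referent is in the collection
+// set and not yet forwarded, evacuate it right here, then CAS the updated
+// reference into the slot. Returns NULL for a NULL slot or a lost CAS race.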
+template <class T>
+inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {
+  T o = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(o)) {
+    oop heap_oop = CompressedOops::decode_not_null(o);
+    if (in_collection_set(heap_oop)) {
+      oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
+      if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
+        forwarded_oop = evacuate_object(heap_oop, Thread::current());
+      }
+      oop prev = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
+      if (oopDesc::equals_raw(prev, heap_oop)) {
+        return forwarded_oop;
+      } else {
+        return NULL;
+      }
+    }
+    return heap_oop;
+  } else {
+    return NULL;
+  }
+}
+
+inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
+  return (oop) Atomic::cmpxchg(n, addr, c);
+}
+
+inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
+  narrowOop cmp = CompressedOops::encode(c);
+  narrowOop val = CompressedOops::encode(n);
+  return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp));
+}
+
+template <class T>
+inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
+  shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress());
+  shenandoah_assert_correct(p, heap_oop);
+
+  if (in_collection_set(heap_oop)) {
+    oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
+    if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
+      // E.g. during evacuation.
+      return forwarded_oop;
+    }
+
+    shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
+    shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());
+
+    // If this fails, another thread wrote to p before us: that write will be
+    // logged in SATB, and the reference will be updated later.
+    oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
+
+    if (oopDesc::equals_raw(result, heap_oop)) { // CAS successful.
+      return forwarded_oop;
+    } else {
+      // Note: we used to assert the following here. This doesn't work because sometimes,
+      // during marking/updating-refs, a Java thread can beat us with an arraycopy that
+      // first copies the array (potentially containing from-space refs) and only afterwards
+      // updates all from-space refs to to-space refs. This leaves a short window where the
+      // new array elements can be from-space.
+      // assert(CompressedOops::is_null(result) ||
+      //        oopDesc::equals_raw(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
+      //       "expect not forwarded");
+      return NULL;
+    }
+  } else {
+    shenandoah_assert_not_forwarded(p, heap_oop);
+    return heap_oop;
+  }
+}
+
+inline bool ShenandoahHeap::cancelled_gc() const {
+  return _cancelled_gc.get() == CANCELLED;
+}
+
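+// Check for cancellation and, when running with suspendible workers, also
+// offer a safepoint yield. The thread briefly flips CANCELLABLE to
+// NOT_CANCELLED so cancel_gc() cannot succeed while it is parked in the
+// yield; the thread that poked NOT_CANCELLED first restores CANCELLABLE.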
+inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
+  if (! (sts_active && ShenandoahSuspendibleWorkers)) {
+    return cancelled_gc();
+  }
+
+  jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
+  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
+    if (SuspendibleThreadSet::should_yield()) {
+      SuspendibleThreadSet::yield();
+    }
+
+    // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
+    // to restore to CANCELLABLE.
+    if (prev == CANCELLABLE) {
+      _cancelled_gc.set(CANCELLABLE);
+    }
+    return false;
+  } else {
+    return true;
+  }
+}
+
+inline bool ShenandoahHeap::try_cancel_gc() {
+  while (true) {
+    jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
+    if (prev == CANCELLABLE) return true;
+    else if (prev == CANCELLED) return false;
+    assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
+    assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
+    {
+      // We need to provide a safepoint here, otherwise we might
+      // spin forever if a safepoint is pending.
+      ThreadBlockInVM sp(JavaThread::current());
+      SpinPause();
+    }
+  }
+}
+
+inline void ShenandoahHeap::clear_cancelled_gc() {
+  _cancelled_gc.set(CANCELLABLE);
+  _oom_evac_handler.clear();
+}
+
+inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
+  assert(UseTLAB, "TLABs should be enabled");
+
+  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
+  if (gclab == NULL) {
+    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
+           "Performance: thread should have GCLAB: %s", thread->name());
+    // No GCLABs in this thread, fallback to shared allocation
+    return NULL;
+  }
+  HeapWord* obj = gclab->allocate(size);
+  if (obj != NULL) {
+    return obj;
+  }
+  // Otherwise...
+  return allocate_from_gclab_slow(thread, size);
+}
+
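+// Evacuate a single object: speculatively copy it into a GCLAB (or shared
+// space), then try to CAS-install the forwarding pointer. Exactly one racing
+// thread wins; losers roll back or overwrite their stale copy, see below.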
+inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
+  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
+    // This thread went through the OOM-during-evac protocol, so it is safe to return
+    // the forwarding pointer. It must not attempt to evacuate any more.
+    return ShenandoahBarrierSet::resolve_forwarded(p);
+  }
+
+  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
+
+  size_t size_no_fwdptr = (size_t) p->size();
+  size_t size_with_fwdptr = size_no_fwdptr + ShenandoahBrooksPointer::word_size();
+
+  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
+
+  bool alloc_from_gclab = true;
+  HeapWord* filler = NULL;
+
+#ifdef ASSERT
+  if (ShenandoahOOMDuringEvacALot &&
+      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
+    filler = NULL;
+  } else {
+#endif
+    if (UseTLAB) {
+      filler = allocate_from_gclab(thread, size_with_fwdptr);
+    }
+    if (filler == NULL) {
+      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size_with_fwdptr);
+      filler = allocate_memory(req);
+      alloc_from_gclab = false;
+    }
+#ifdef ASSERT
+  }
+#endif
+
+  if (filler == NULL) {
+    control_thread()->handle_alloc_failure_evac(size_with_fwdptr);
+
+    _oom_evac_handler.handle_out_of_memory_during_evacuation();
+
+    return ShenandoahBarrierSet::resolve_forwarded(p);
+  }
+
+  // Copy the object and initialize its forwarding ptr:
+  HeapWord* copy = filler + ShenandoahBrooksPointer::word_size();
+  oop copy_val = oop(copy);
+
+  Copy::aligned_disjoint_words((HeapWord*) p, copy, size_no_fwdptr);
+  ShenandoahBrooksPointer::initialize(oop(copy));
+
+  // Try to install the new forwarding pointer.
+  oop result = ShenandoahBrooksPointer::try_update_forwardee(p, copy_val);
+
+  if (oopDesc::equals_raw(result, p)) {
+    // Successfully evacuated. Our copy is now the public one!
+    shenandoah_assert_correct(NULL, copy_val);
+    return copy_val;
+  } else {
+    // Failed to evacuate. We need to deal with the object that is left behind. Since this
+    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
+    // But if it happens to contain references to evacuated regions, those references would
+    // not get updated for this stale copy during this cycle, and we will crash while scanning
+    // it the next cycle.
+    //
+    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
+    // object will overwrite this stale copy, or the filler object on LAB retirement will
+    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
+    // have to explicitly overwrite the copy with the filler object. With that overwrite,
+    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
+    if (alloc_from_gclab) {
+      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(filler, size_with_fwdptr);
+    } else {
+      fill_with_object(copy, size_no_fwdptr);
+    }
+    shenandoah_assert_correct(NULL, copy_val);
+    shenandoah_assert_correct(NULL, result);
+    return result;
+  }
+}
+
+inline bool ShenandoahHeap::requires_marking(const void* entry) const {
+  return !_marking_context->is_marked(oop(entry));
+}
+
+template <class T>
+inline bool ShenandoahHeap::in_collection_set(T p) const {
+  HeapWord* obj = (HeapWord*) p;
+  assert(collection_set() != NULL, "Sanity");
+  assert(is_in(obj), "should be in heap");
+
+  return collection_set()->is_in(obj);
+}
+
+inline bool ShenandoahHeap::is_stable() const {
+  return _gc_state.is_clear();
+}
+
+inline bool ShenandoahHeap::is_idle() const {
+  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | TRAVERSAL);
+}
+
+inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
+  return _gc_state.is_set(MARKING);
+}
+
+inline bool ShenandoahHeap::is_concurrent_traversal_in_progress() const {
+  return _gc_state.is_set(TRAVERSAL);
+}
+
+inline bool ShenandoahHeap::is_evacuation_in_progress() const {
+  return _gc_state.is_set(EVACUATION);
+}
+
+inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
+  return _gc_state.is_set(mask);
+}
+
+inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
+  return _degenerated_gc_in_progress.is_set();
+}
+
+inline bool ShenandoahHeap::is_full_gc_in_progress() const {
+  return _full_gc_in_progress.is_set();
+}
+
+inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
+  return _full_gc_move_in_progress.is_set();
+}
+
+inline bool ShenandoahHeap::is_update_refs_in_progress() const {
+  return _gc_state.is_set(UPDATEREFS);
+}
+
+template<class T>
+inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
+  marked_object_iterate(region, cl, region->top());
+}
+
+template<class T>
+inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
+  assert(ShenandoahBrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
+  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
+
+  ShenandoahMarkingContext* const ctx = complete_marking_context();
+  assert(ctx->is_complete(), "sanity");
+
+  MarkBitMap* mark_bit_map = ctx->mark_bit_map();
+  HeapWord* tams = ctx->top_at_mark_start(region);
+
+  size_t skip_bitmap_delta = ShenandoahBrooksPointer::word_size() + 1;
+  size_t skip_objsize_delta = ShenandoahBrooksPointer::word_size() /* + actual obj.size() below */;
+  HeapWord* start = region->bottom() + ShenandoahBrooksPointer::word_size();
+  HeapWord* end = MIN2(tams + ShenandoahBrooksPointer::word_size(), region->end());
+
+  // Step 1. Scan below the TAMS based on bitmap data.
+  HeapWord* limit_bitmap = MIN2(limit, tams);
+
+  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
+  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
+  HeapWord* cb = mark_bit_map->get_next_marked_addr(start, end);
+
+  intx dist = ShenandoahMarkScanPrefetch;
+  if (dist > 0) {
+    // Batched scan that prefetches the oop data, anticipating the access to
+    // either header, oop field, or forwarding pointer. Note that we cannot
+    // touch anything in the oop while it is still being prefetched, to give
+    // the prefetch enough time to work. This is why we try to scan the bitmap
+    // linearly, disregarding the object size. However, since we know the
+    // forwarding pointer precedes the object, we can skip over it. Once we
+    // cannot trust the bitmap, there is no point in prefetching the oop
+    // contents, as oop->size() will touch it prematurely.
+
+    // No variable-length arrays in standard C++, so have enough slots to fit
+    // the prefetch distance.
+    static const int SLOT_COUNT = 256;
+    guarantee(dist <= SLOT_COUNT, "adjust slot count");
+    HeapWord* slots[SLOT_COUNT];
+
+    int avail;
+    do {
+      avail = 0;
+      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
+        Prefetch::read(cb, ShenandoahBrooksPointer::byte_offset());
+        slots[avail++] = cb;
+        cb += skip_bitmap_delta;
+        if (cb < limit_bitmap) {
+          cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
+        }
+      }
+
+      for (int c = 0; c < avail; c++) {
+        assert (slots[c] < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
+        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
+        oop obj = oop(slots[c]);
+        assert(oopDesc::is_oop(obj), "sanity");
+        assert(ctx->is_marked(obj), "object expected to be marked");
+        cl->do_object(obj);
+      }
+    } while (avail > 0);
+  } else {
+    while (cb < limit_bitmap) {
+      assert (cb < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
+      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
+      oop obj = oop(cb);
+      assert(oopDesc::is_oop(obj), "sanity");
+      assert(ctx->is_marked(obj), "object expected to be marked");
+      cl->do_object(obj);
+      cb += skip_bitmap_delta;
+      if (cb < limit_bitmap) {
+        cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
+      }
+    }
+  }
+
+  // Step 2. Accurate size-based traversal, happens past the TAMS.
+  // This restarts the scan at TAMS, which makes sure we traverse all objects,
+  // regardless of what happened at Step 1.
+  HeapWord* cs = tams + ShenandoahBrooksPointer::word_size();
+  while (cs < limit) {
+    assert (cs > tams,  "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
+    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
+    oop obj = oop(cs);
+    assert(oopDesc::is_oop(obj), "sanity");
+    assert(ctx->is_marked(obj), "object expected to be marked");
+    int size = obj->size();
+    cl->do_object(obj);
+    cs += size + skip_objsize_delta;
+  }
+}
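+
+// Usage sketch (illustrative only; the closure below is hypothetical and not
+// part of this change). marked_object_iterate() visits every object that the
+// complete marking context considers live:
+//
+//   class ShenandoahCountLiveClosure : public ObjectClosure {
+//   public:
+//     size_t _count;
+//     ShenandoahCountLiveClosure() : _count(0) {}
+//     void do_object(oop obj) { _count++; }
+//   };
+//
+//   ShenandoahCountLiveClosure cl;
+//   heap->marked_object_iterate(region, &cl);  // requires complete marking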
+
+template <class T>
+class ShenandoahObjectToOopClosure : public ObjectClosure {
+  T* _cl;
+public:
+  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
+
+  void do_object(oop obj) {
+    obj->oop_iterate(_cl);
+  }
+};
+
+template <class T>
+class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
+  T* _cl;
+  MemRegion _bounds;
+public:
+  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
+    _cl(cl), _bounds(bottom, top) {}
+
+  void do_object(oop obj) {
+    obj->oop_iterate(_cl, _bounds);
+  }
+};
+
+template<class T>
+inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
+  if (region->is_humongous()) {
+    HeapWord* bottom = region->bottom();
+    if (top > bottom) {
+      region = region->humongous_start_region();
+      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
+      marked_object_iterate(region, &objs);
+    }
+  } else {
+    ShenandoahObjectToOopClosure<T> objs(cl);
+    marked_object_iterate(region, &objs, top);
+  }
+}
+
+inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
+  if (region_idx < _num_regions) {
+    return _regions[region_idx];
+  } else {
+    return NULL;
+  }
+}
+
+inline void ShenandoahHeap::mark_complete_marking_context() {
+  _marking_context->mark_complete();
+}
+
+inline void ShenandoahHeap::mark_incomplete_marking_context() {
+  _marking_context->mark_incomplete();
+}
+
+inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
+  assert(_marking_context->is_complete(), "sanity");
+  return _marking_context;
+}
+
+inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
+  return _marking_context;
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapLock.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPLOCK_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPLOCK_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/thread.hpp"
+
+class ShenandoahHeapLock  {
+private:
+  enum LockState { unlocked = 0, locked = 1 };
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
+  volatile int _state;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile Thread*));
+  volatile Thread* _owner;
+  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, 0);
+
+public:
+  ShenandoahHeapLock() : _state(unlocked), _owner(NULL) {}
+
+  void lock() {
+    Thread::SpinAcquire(&_state, "Shenandoah Heap Lock");
+#ifdef ASSERT
+    assert(_state == locked, "must be locked");
+    assert(_owner == NULL, "must not be owned");
+    _owner = Thread::current();
+#endif
+  }
+
+  void unlock() {
+#ifdef ASSERT
+    assert (_owner == Thread::current(), "sanity");
+    _owner = NULL;
+#endif
+    Thread::SpinRelease(&_state);
+  }
+
+#ifdef ASSERT
+  void assert_owned_by_current_thread() {
+    assert(_state == locked, "must be locked");
+    assert(_owner == Thread::current(), "must be owned by current thread");
+  }
+
+  void assert_not_owned_by_current_thread() {
+    assert(_owner != Thread::current(), "must be not owned by current thread");
+  }
+
+  void assert_owned_by_current_thread_or_safepoint() {
+    Thread* thr = Thread::current();
+    assert((_state == locked && _owner == thr) ||
+           (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
+           "must own heap lock or by VM thread at safepoint");
+  }
+#endif
+};
+
+class ShenandoahHeapLocker : public StackObj {
+private:
+  ShenandoahHeapLock* _lock;
+public:
+  ShenandoahHeapLocker(ShenandoahHeapLock* lock) {
+    _lock = lock;
+    _lock->lock();
+  }
+
+  ~ShenandoahHeapLocker() {
+    _lock->unlock();
+  }
+};
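+
+// Usage sketch (hypothetical caller, for illustration): the locker acquires
+// the heap lock for the enclosing scope and releases it on scope exit, even
+// on early returns:
+//
+//   {
+//     ShenandoahHeapLocker locker(heap->lock());
+//     // ... lock-protected work, e.g. region state transitions ...
+//   } // unlocked here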
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPLOCK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,681 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "gc/shared/space.inline.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/java.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
+
+size_t ShenandoahHeapRegion::RegionCount = 0;
+size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
+size_t ShenandoahHeapRegion::RegionSizeWords = 0;
+size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
+size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
+size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
+size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
+size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
+size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
+size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
+size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
+
+ShenandoahHeapRegion::PaddedAllocSeqNum ShenandoahHeapRegion::_alloc_seq_num;
+
+ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
+                                           size_t size_words, size_t index, bool committed) :
+  _heap(heap),
+  _pacer(ShenandoahPacing ? heap->pacer() : NULL),
+  _reserved(MemRegion(start, size_words)),
+  _region_number(index),
+  _new_top(NULL),
+  _critical_pins(0),
+  _empty_time(os::elapsedTime()),
+  _state(committed ? _empty_committed : _empty_uncommitted),
+  _tlab_allocs(0),
+  _gclab_allocs(0),
+  _shared_allocs(0),
+  _seqnum_first_alloc_mutator(0),
+  _seqnum_first_alloc_gc(0),
+  _seqnum_last_alloc_mutator(0),
+  _seqnum_last_alloc_gc(0),
+  _live_data(0) {
+
+  ContiguousSpace::initialize(_reserved, true, committed);
+}
+
+size_t ShenandoahHeapRegion::region_number() const {
+  return _region_number;
+}
+
+void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
+  ResourceMark rm;
+  stringStream ss;
+  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
+  print_on(&ss);
+  fatal("%s", ss.as_string());
+}
+
+void ShenandoahHeapRegion::make_regular_allocation() {
+  _heap->assert_heaplock_owned_by_current_thread();
+
+  switch (_state) {
+    case _empty_uncommitted:
+      do_commit();
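+      // fall through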
+    case _empty_committed:
+      _state = _regular;
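+      // fall through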
+    case _regular:
+    case _pinned:
+      return;
+    default:
+      report_illegal_transition("regular allocation");
+  }
+}
+
+void ShenandoahHeapRegion::make_regular_bypass() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  assert (_heap->is_full_gc_in_progress() || _heap->is_degenerated_gc_in_progress(),
+          "only for full or degen GC");
+
+  switch (_state) {
+    case _empty_uncommitted:
+      do_commit();
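+      // fall through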
+    case _empty_committed:
+    case _cset:
+    case _humongous_start:
+    case _humongous_cont:
+      _state = _regular;
+      return;
+    case _pinned_cset:
+      _state = _pinned;
+      return;
+    case _regular:
+    case _pinned:
+      return;
+    default:
+      report_illegal_transition("regular bypass");
+  }
+}
+
+void ShenandoahHeapRegion::make_humongous_start() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  switch (_state) {
+    case _empty_uncommitted:
+      do_commit();
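+      // fall through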
+    case _empty_committed:
+      _state = _humongous_start;
+      return;
+    default:
+      report_illegal_transition("humongous start allocation");
+  }
+}
+
+void ShenandoahHeapRegion::make_humongous_start_bypass() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  assert (_heap->is_full_gc_in_progress(), "only for full GC");
+
+  switch (_state) {
+    case _empty_committed:
+    case _regular:
+    case _humongous_start:
+    case _humongous_cont:
+      _state = _humongous_start;
+      return;
+    default:
+      report_illegal_transition("humongous start bypass");
+  }
+}
+
+void ShenandoahHeapRegion::make_humongous_cont() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  switch (_state) {
+    case _empty_uncommitted:
+      do_commit();
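+      // fall through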
+    case _empty_committed:
+      _state = _humongous_cont;
+      return;
+    default:
+      report_illegal_transition("humongous continuation allocation");
+  }
+}
+
+void ShenandoahHeapRegion::make_humongous_cont_bypass() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  assert (_heap->is_full_gc_in_progress(), "only for full GC");
+
+  switch (_state) {
+    case _empty_committed:
+    case _regular:
+    case _humongous_start:
+    case _humongous_cont:
+      _state = _humongous_cont;
+      return;
+    default:
+      report_illegal_transition("humongous continuation bypass");
+  }
+}
+
+void ShenandoahHeapRegion::make_pinned() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  switch (_state) {
+    case _regular:
+      assert (_critical_pins == 0, "sanity");
+      _state = _pinned;
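+      // fall through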
+    case _pinned_cset:
+    case _pinned:
+      _critical_pins++;
+      return;
+    case _humongous_start:
+      assert (_critical_pins == 0, "sanity");
+      _state = _pinned_humongous_start;
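+      // fall through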
+    case _pinned_humongous_start:
+      _critical_pins++;
+      return;
+    case _cset:
+      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
+      assert (_critical_pins == 0, "sanity");
+      _state = _pinned_cset;
+      _critical_pins++;
+      return;
+    default:
+      report_illegal_transition("pinning");
+  }
+}
+
+void ShenandoahHeapRegion::make_unpinned() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  switch (_state) {
+    case _pinned:
+      assert (_critical_pins > 0, "sanity");
+      _critical_pins--;
+      if (_critical_pins == 0) {
+        _state = _regular;
+      }
+      return;
+    case _regular:
+    case _humongous_start:
+      assert (_critical_pins == 0, "sanity");
+      return;
+    case _pinned_cset:
+      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
+      assert (_critical_pins > 0, "sanity");
+      _critical_pins--;
+      if (_critical_pins == 0) {
+        _state = _cset;
+      }
+      return;
+    case _pinned_humongous_start:
+      assert (_critical_pins > 0, "sanity");
+      _critical_pins--;
+      if (_critical_pins == 0) {
+        _state = _humongous_start;
+      }
+      return;
+    default:
+      report_illegal_transition("unpinning");
+  }
+}
+
+void ShenandoahHeapRegion::make_cset() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  switch (_state) {
+    case _regular:
+      _state = _cset;
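+      // fall through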
+    case _cset:
+      return;
+    default:
+      report_illegal_transition("cset");
+  }
+}
+
+void ShenandoahHeapRegion::make_trash() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  switch (_state) {
+    case _cset:
+      // Reclaiming cset regions
+    case _humongous_start:
+    case _humongous_cont:
+      // Reclaiming humongous regions
+    case _regular:
+      // Immediate region reclaim
+      _state = _trash;
+      return;
+    default:
+      report_illegal_transition("trashing");
+  }
+}
+
+void ShenandoahHeapRegion::make_trash_immediate() {
+  make_trash();
+
+  // On this path, we know there are no marked objects in the region,
+  // so tell the marking context about it to bypass bitmap resets.
+  _heap->complete_marking_context()->reset_top_bitmap(this);
+}
+
+void ShenandoahHeapRegion::make_empty() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  switch (_state) {
+    case _trash:
+      _state = _empty_committed;
+      _empty_time = os::elapsedTime();
+      return;
+    default:
+      report_illegal_transition("emptying");
+  }
+}
+
+void ShenandoahHeapRegion::make_uncommitted() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  switch (_state) {
+    case _empty_committed:
+      do_uncommit();
+      _state = _empty_uncommitted;
+      return;
+    default:
+      report_illegal_transition("uncommiting");
+  }
+}
+
+void ShenandoahHeapRegion::make_committed_bypass() {
+  _heap->assert_heaplock_owned_by_current_thread();
+  assert (_heap->is_full_gc_in_progress(), "only for full GC");
+
+  switch (_state) {
+    case _empty_uncommitted:
+      do_commit();
+      _state = _empty_committed;
+      return;
+    default:
+      report_illegal_transition("commit bypass");
+  }
+}
+
+void ShenandoahHeapRegion::clear_live_data() {
+  OrderAccess::release_store_fence<size_t>(&_live_data, 0);
+}
+
+void ShenandoahHeapRegion::reset_alloc_metadata() {
+  _tlab_allocs = 0;
+  _gclab_allocs = 0;
+  _shared_allocs = 0;
+  _seqnum_first_alloc_mutator = 0;
+  _seqnum_last_alloc_mutator = 0;
+  _seqnum_first_alloc_gc = 0;
+  _seqnum_last_alloc_gc = 0;
+}
+
+void ShenandoahHeapRegion::reset_alloc_metadata_to_shared() {
+  if (used() > 0) {
+    _tlab_allocs = 0;
+    _gclab_allocs = 0;
+    _shared_allocs = used() >> LogHeapWordSize;
+    uint64_t next = _alloc_seq_num.value++;
+    _seqnum_first_alloc_mutator = next;
+    _seqnum_last_alloc_mutator = next;
+    _seqnum_first_alloc_gc = 0;
+    _seqnum_last_alloc_gc = 0;
+  } else {
+    reset_alloc_metadata();
+  }
+}
+
+size_t ShenandoahHeapRegion::get_shared_allocs() const {
+  return _shared_allocs * HeapWordSize;
+}
+
+size_t ShenandoahHeapRegion::get_tlab_allocs() const {
+  return _tlab_allocs * HeapWordSize;
+}
+
+size_t ShenandoahHeapRegion::get_gclab_allocs() const {
+  return _gclab_allocs * HeapWordSize;
+}
+
+void ShenandoahHeapRegion::set_live_data(size_t s) {
+  assert(Thread::current()->is_VM_thread(), "by VM thread");
+  _live_data = (s >> LogHeapWordSize);
+}
+
+size_t ShenandoahHeapRegion::get_live_data_words() const {
+  return OrderAccess::load_acquire(&_live_data);
+}
+
+size_t ShenandoahHeapRegion::get_live_data_bytes() const {
+  return get_live_data_words() * HeapWordSize;
+}
+
+bool ShenandoahHeapRegion::has_live() const {
+  return get_live_data_words() != 0;
+}
+
+size_t ShenandoahHeapRegion::garbage() const {
+  assert(used() >= get_live_data_bytes(), "Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT,
+         get_live_data_bytes(), used());
+
+  size_t result = used() - get_live_data_bytes();
+  return result;
+}
+
+void ShenandoahHeapRegion::print_on(outputStream* st) const {
+  st->print("|");
+  st->print(SIZE_FORMAT_W(5), this->_region_number);
+
+  switch (_state) {
+    case _empty_uncommitted:
+      st->print("|EU ");
+      break;
+    case _empty_committed:
+      st->print("|EC ");
+      break;
+    case _regular:
+      st->print("|R  ");
+      break;
+    case _humongous_start:
+      st->print("|H  ");
+      break;
+    case _pinned_humongous_start:
+      st->print("|HP ");
+      break;
+    case _humongous_cont:
+      st->print("|HC ");
+      break;
+    case _cset:
+      st->print("|CS ");
+      break;
+    case _trash:
+      st->print("|T  ");
+      break;
+    case _pinned:
+      st->print("|P  ");
+      break;
+    case _pinned_cset:
+      st->print("|CSP");
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+  st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
+            p2i(bottom()), p2i(top()), p2i(end()));
+  st->print("|TAMS " INTPTR_FORMAT_W(12),
+            p2i(_heap->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
+  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
+  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
+  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
+  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
+  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
+  st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);
+  st->print("|SN " UINT64_FORMAT_X_W(12) ", " UINT64_FORMAT_X_W(8) ", " UINT64_FORMAT_X_W(8) ", " UINT64_FORMAT_X_W(8),
+            seqnum_first_alloc_mutator(), seqnum_last_alloc_mutator(),
+            seqnum_first_alloc_gc(), seqnum_last_alloc_gc());
+  st->cr();
+}
+
+void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) {
+  if (!is_active()) return;
+  if (is_humongous()) {
+    oop_iterate_humongous(blk);
+  } else {
+    oop_iterate_objects(blk);
+  }
+}
+
+void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) {
+  assert(! is_humongous(), "no humongous region here");
+  HeapWord* obj_addr = bottom() + ShenandoahBrooksPointer::word_size();
+  HeapWord* t = top();
+  // Could use the generic object iteration, but a manual walk that accounts
+  // for the Brooks forwarding pointers is easier here.
+  while (obj_addr < t) {
+    oop obj = oop(obj_addr);
+    obj_addr += obj->oop_iterate_size(blk) + ShenandoahBrooksPointer::word_size();
+  }
+}
+
+void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
+  assert(is_humongous(), "only humongous region here");
+  // Find head.
+  ShenandoahHeapRegion* r = humongous_start_region();
+  assert(r->is_humongous_start(), "need humongous head here");
+  oop obj = oop(r->bottom() + ShenandoahBrooksPointer::word_size());
+  obj->oop_iterate(blk, MemRegion(bottom(), top()));
+}
+
+ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
+  assert(is_humongous(), "Must be a part of the humongous region");
+  size_t reg_num = region_number();
+  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
+  while (!r->is_humongous_start()) {
+    assert(reg_num > 0, "Sanity");
+    reg_num--;
+    r = _heap->get_region(reg_num);
+    assert(r->is_humongous(), "Must be a part of the humongous region");
+  }
+  assert(r->is_humongous_start(), "Must be");
+  return r;
+}
+
+void ShenandoahHeapRegion::recycle() {
+  ContiguousSpace::clear(false);
+  if (ZapUnusedHeapArea) {
+    ContiguousSpace::mangle_unused_area_complete();
+  }
+  clear_live_data();
+
+  reset_alloc_metadata();
+
+  _heap->marking_context()->reset_top_at_mark_start(this);
+
+  make_empty();
+}
+
+HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
+  assert(MemRegion(bottom(), end()).contains(p),
+         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
+         p2i(p), p2i(bottom()), p2i(end()));
+  if (p >= top()) {
+    return top();
+  } else {
+    HeapWord* last = bottom() + ShenandoahBrooksPointer::word_size();
+    HeapWord* cur = last;
+    while (cur <= p) {
+      last = cur;
+      cur += oop(cur)->size() + ShenandoahBrooksPointer::word_size();
+    }
+    shenandoah_assert_correct(NULL, oop(last));
+    return last;
+  }
+}
+
+void ShenandoahHeapRegion::setup_sizes(size_t initial_heap_size, size_t max_heap_size) {
+  // Absolute minimums we should not ever break.
+  static const size_t MIN_REGION_SIZE = 256*K;
+
+  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
+    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
+  }
+
+  size_t region_size;
+  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
+    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
+      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
+                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
+                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
+      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
+    }
+    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
+      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
+                      ShenandoahMinRegionSize/K,  MIN_REGION_SIZE/K);
+      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
+    }
+    if (ShenandoahMinRegionSize < MinTLABSize) {
+      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
+                      ShenandoahMinRegionSize/K,  MinTLABSize/K);
+      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
+    }
+    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
+      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
+                      ShenandoahMaxRegionSize/K,  MIN_REGION_SIZE/K);
+      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
+    }
+    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
+      err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
+                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
+      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
+    }
+
+    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
+    // for usual heap sizes. Do not depend on initial_heap_size here.
+    region_size = max_heap_size / ShenandoahTargetNumRegions;
+
+    // Now make sure that we don't go over or under our limits.
+    region_size = MAX2(ShenandoahMinRegionSize, region_size);
+    region_size = MIN2(ShenandoahMaxRegionSize, region_size);
+
+  } else {
+    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
+      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
+                              "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
+                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
+      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
+    }
+    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
+      err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
+                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
+      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
+    }
+    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
+      err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
+                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
+      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
+    }
+    region_size = ShenandoahHeapRegionSize;
+  }
+
+  // Make sure region size is at least one large page, if enabled.
+  // Otherwise, uncommitting one region may falsely uncommit the adjacent
+  // regions too.
+  // Also see shenandoahArguments.cpp, where it handles UseLargePages.
+  if (UseLargePages && ShenandoahUncommit) {
+    region_size = MAX2(region_size, os::large_page_size());
+  }
+
+  int region_size_log = log2_long((jlong) region_size);
+  // Recalculate the region size to make sure it's a power of
+  // 2. This means that region_size is the largest power of 2 that's
+  // <= what we've calculated so far.
+  region_size = size_t(1) << region_size_log;
+
+  // Now, set up the globals.
+  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
+  RegionSizeBytesShift = (size_t)region_size_log;
+
+  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
+  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;
+
+  guarantee(RegionSizeBytes == 0, "we should only set it once");
+  RegionSizeBytes = region_size;
+  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
+  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");
+
+  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
+  RegionSizeWordsMask = RegionSizeWords - 1;
+
+  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
+  RegionSizeBytesMask = RegionSizeBytes - 1;
+
+  guarantee(RegionCount == 0, "we should only set it once");
+  RegionCount = max_heap_size / RegionSizeBytes;
+  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");
+
+  guarantee(HumongousThresholdWords == 0, "we should only set it once");
+  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
+  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");
+
+  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
+  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
+  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");
+
+  // The rationale for trimming the TLAB sizes has to do with the raciness in the
+  // TLAB allocation machinery. It may happen that the TLAB sizing policy polls
+  // Shenandoah about the next free size, gets the answer for region #N, goes away
+  // for a while, then tries to allocate in region #N, and fails because some other
+  // thread has claimed part of region #N in the meantime. The freeset allocation
+  // code then has to retire region #N before moving the allocation to region #N+1.
+  //
+  // The worst case is realized when the answer is "region size", which means the
+  // race could prematurely retire an entire region. Having smaller TLABs does not
+  // fix that completely, but reduces the probability of too wasteful a retirement.
+  // With the current divisor, we waste no more than 1/8 of the region size in the
+  // worst case. This also has a secondary effect on collection set selection: even
+  // under the race, the regions would be at least 7/8 used, which allows relying on
+  // "used" - "live" for cset selection. Otherwise, we could get a fragmented region
+  // below the garbage threshold that would never be considered for collection.
+  //
+  // The whole thing is mitigated if Elastic TLABs are enabled.
+  //
+  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
+  MaxTLABSizeBytes = MIN2(ShenandoahElasticTLAB ? RegionSizeBytes : (RegionSizeBytes / 8), HumongousThresholdBytes);
+  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");
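+  // Worked example (hypothetical numbers): with 4M regions and the humongous
+  // threshold at region size, the non-elastic setting gives MaxTLABSizeBytes =
+  // MIN2(4M / 8, 4M) = 512K, i.e. at most 1/8 of a region wasted per race.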
+
+  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
+  MaxTLABSizeWords = MaxTLABSizeBytes / HeapWordSize;
+
+  log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
+                     RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
+  log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
+                     byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
+  log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
+                     byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
+}
+
+void ShenandoahHeapRegion::do_commit() {
+  if (!os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
+    report_java_out_of_memory("Unable to commit region");
+  }
+  if (!_heap->commit_bitmap_slice(this)) {
+    report_java_out_of_memory("Unable to commit bitmaps for region");
+  }
+  _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
+}
+
+void ShenandoahHeapRegion::do_uncommit() {
+  if (!os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) {
+    report_java_out_of_memory("Unable to uncommit region");
+  }
+  if (!_heap->uncommit_bitmap_slice(this)) {
+    report_java_out_of_memory("Unable to uncommit bitmaps for region");
+  }
+  _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
+
+#include "gc/shared/space.hpp"
+#include "gc/shenandoah/shenandoahAllocRequest.hpp"
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahPacer.hpp"
+#include "memory/universe.hpp"
+#include "utilities/sizes.hpp"
+
+class VMStructs;
+
+class ShenandoahHeapRegion : public ContiguousSpace {
+  friend class VMStructs;
+private:
+  /*
+    Region state is described by a state machine. Transitions are guarded by
+    heap lock, which allows changing the state of several regions atomically.
+    Region states can be logically aggregated in groups.
+
+      "Empty":
+      .................................................................
+      .                                                               .
+      .                                                               .
+      .         Uncommitted  <-------  Committed <------------------------\
+      .              |                     |                          .   |
+      .              \---------v-----------/                          .   |
+      .                        |                                      .   |
+      .........................|.......................................   |
+                               |                                          |
+      "Active":                |                                          |
+      .........................|.......................................   |
+      .                        |                                      .   |
+      .      /-----------------^-------------------\                  .   |
+      .      |                                     |                  .   |
+      .      v                                     v    "Humongous":  .   |
+      .   Regular ---\-----\     ..................O................  .   |
+      .     |  ^     |     |     .                 |               .  .   |
+      .     |  |     |     |     .                 *---------\     .  .   |
+      .     v  |     |     |     .                 v         v     .  .   |
+      .    Pinned  Cset    |     .  HStart <--> H/Start   H/Cont   .  .   |
+      .       ^    / |     |     .  Pinned         v         |     .  .   |
+      .       |   /  |     |     .                 *<--------/     .  .   |
+      .       |  v   |     |     .                 |               .  .   |
+      .  CsetPinned  |     |     ..................O................  .   |
+      .              |     |                       |                  .   |
+      .              \-----\---v-------------------/                  .   |
+      .                        |                                      .   |
+      .........................|.......................................   |
+                               |                                          |
+      "Trash":                 |                                          |
+      .........................|.......................................   |
+      .                        |                                      .   |
+      .                        v                                      .   |
+      .                      Trash ---------------------------------------/
+      .                                                               .
+      .                                                               .
+      .................................................................
+
+    Transition from "Empty" to "Active" is first allocation. It can go from {Uncommitted, Committed}
+    to {Regular, "Humongous"}. The allocation may happen in Regular regions too, but not in Humongous.
+
+    Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
+    and from {Regular, "Humongous"} for immediate reclamation. The existence of Trash state allows
+    quick reclamation without actual cleaning up.
+
+    Transition from "Trash" to "Empty" is recycling. It cleans up the regions and corresponding metadata.
+    Can be done asynchronously and in bulk.
+
+    Note how internal transitions disallow logic bugs:
+      a) No region can go Empty, unless properly reclaimed/recycled;
+      b) No region can go Uncommitted, unless reclaimed/recycled first;
+      c) Only Regular regions can go to CSet;
+      d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned;
+      e) Pinned cannot go CSet, thus it never moves;
+      f) Humongous cannot be used for regular allocations;
+      g) Humongous cannot go CSet, thus it never moves;
+      h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
+         follow associated humongous starts, not pinnable/movable by themselves);
+      i) Empty cannot go Trash, avoiding useless work;
+      j) ...
+   */
+
+  enum RegionState {
+    _empty_uncommitted,       // region is empty and has memory uncommitted
+    _empty_committed,         // region is empty and has memory committed
+    _regular,                 // region is for regular allocations
+    _humongous_start,         // region is the humongous start
+    _humongous_cont,          // region is the humongous continuation
+    _pinned_humongous_start,  // region is both humongous start and pinned
+    _cset,                    // region is in collection set
+    _pinned,                  // region is pinned
+    _pinned_cset,             // region is pinned and in cset (evac failure path)
+    _trash,                   // region contains only trash
+  };
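+
+  // Illustrative lifecycle (one common path through the state machine above):
+  //   _empty_uncommitted -> _regular         (first allocation commits the region)
+  //   _regular           -> _cset            (selected into the collection set)
+  //   _cset              -> _trash           (reclaimed, awaiting recycling)
+  //   _trash             -> _empty_committed (recycled, ready for reuse)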
+
+  const char* region_state_to_string(RegionState s) const {
+    switch (s) {
+      case _empty_uncommitted:       return "Empty Uncommitted";
+      case _empty_committed:         return "Empty Committed";
+      case _regular:                 return "Regular";
+      case _humongous_start:         return "Humongous Start";
+      case _humongous_cont:          return "Humongous Continuation";
+      case _pinned_humongous_start:  return "Humongous Start, Pinned";
+      case _cset:                    return "Collection Set";
+      case _pinned:                  return "Pinned";
+      case _pinned_cset:             return "Collection Set, Pinned";
+      case _trash:                   return "Trash";
+      default:
+        ShouldNotReachHere();
+        return "";
+    }
+  }
+
+  // This method protects against accidental changes in enum order:
+  int region_state_to_ordinal(RegionState s) const {
+    switch (s) {
+      case _empty_uncommitted:      return 0;
+      case _empty_committed:        return 1;
+      case _regular:                return 2;
+      case _humongous_start:        return 3;
+      case _humongous_cont:         return 4;
+      case _cset:                   return 5;
+      case _pinned:                 return 6;
+      case _trash:                  return 7;
+      case _pinned_cset:            return 8;
+      case _pinned_humongous_start: return 9;
+      default:
+        ShouldNotReachHere();
+        return -1;
+    }
+  }
+
+  void report_illegal_transition(const char* method);
+
+public:
+  // Allowed transitions from the outside code:
+  void make_regular_allocation();
+  void make_regular_bypass();
+  void make_humongous_start();
+  void make_humongous_cont();
+  void make_humongous_start_bypass();
+  void make_humongous_cont_bypass();
+  void make_pinned();
+  void make_unpinned();
+  void make_cset();
+  void make_trash();
+  void make_trash_immediate();
+  void make_empty();
+  void make_uncommitted();
+  void make_committed_bypass();
+
+  // Individual states:
+  bool is_empty_uncommitted()      const { return _state == _empty_uncommitted; }
+  bool is_empty_committed()        const { return _state == _empty_committed; }
+  bool is_regular()                const { return _state == _regular; }
+  bool is_humongous_continuation() const { return _state == _humongous_cont; }
+
+  // Participation in logical groups:
+  bool is_empty()                  const { return is_empty_committed() || is_empty_uncommitted(); }
+  bool is_active()                 const { return !is_empty() && !is_trash(); }
+  bool is_trash()                  const { return _state == _trash; }
+  bool is_humongous_start()        const { return _state == _humongous_start || _state == _pinned_humongous_start; }
+  bool is_humongous()              const { return is_humongous_start() || is_humongous_continuation(); }
+  bool is_committed()              const { return !is_empty_uncommitted(); }
+  bool is_cset()                   const { return _state == _cset   || _state == _pinned_cset; }
+  bool is_pinned()                 const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }
+
+  // Macro-properties:
+  bool is_alloc_allowed()          const { return is_empty() || is_regular() || _state == _pinned; }
+  bool is_move_allowed()           const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }
+
+  RegionState state()              const { return _state; }
+  int  state_ordinal()             const { return region_state_to_ordinal(_state); }
+
+private:
+  static size_t RegionCount;
+  static size_t RegionSizeBytes;
+  static size_t RegionSizeWords;
+  static size_t RegionSizeBytesShift;
+  static size_t RegionSizeWordsShift;
+  static size_t RegionSizeBytesMask;
+  static size_t RegionSizeWordsMask;
+  static size_t HumongousThresholdBytes;
+  static size_t HumongousThresholdWords;
+  static size_t MaxTLABSizeBytes;
+  static size_t MaxTLABSizeWords;
+
+  // Global allocation counter, increased for each allocation under Shenandoah heap lock.
+  // Padded to avoid false sharing with the read-only fields above.
+  struct PaddedAllocSeqNum {
+    DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(uint64_t));
+    uint64_t value;
+    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+    PaddedAllocSeqNum() {
+      // start with 1, reserve 0 for uninitialized value
+      value = 1;
+    }
+  };
+
+  static PaddedAllocSeqNum _alloc_seq_num;
+
+  // Never updated fields
+  ShenandoahHeap* _heap;
+  ShenandoahPacer* _pacer;
+  MemRegion _reserved;
+  size_t _region_number;
+
+  // Rarely updated fields
+  HeapWord* _new_top;
+  size_t _critical_pins;
+  double _empty_time;
+
+  // Seldom updated fields
+  RegionState _state;
+
+  // Frequently updated fields
+  size_t _tlab_allocs;
+  size_t _gclab_allocs;
+  size_t _shared_allocs;
+
+  uint64_t _seqnum_first_alloc_mutator;
+  uint64_t _seqnum_first_alloc_gc;
+  uint64_t _seqnum_last_alloc_mutator;
+  uint64_t _seqnum_last_alloc_gc;
+
+  volatile size_t _live_data;
+
+  // Claim some space at the end to protect next region
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
+
+public:
+  ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start, size_t size_words, size_t index, bool committed);
+
+  static const size_t MIN_NUM_REGIONS = 10;
+
+  static void setup_sizes(size_t initial_heap_size, size_t max_heap_size);
+
+  double empty_time() {
+    return _empty_time;
+  }
+
+  inline static size_t required_regions(size_t bytes) {
+    return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
+  }
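+  // (Ceiling division: with hypothetical 4M regions, required_regions(9M) == 3.)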
+
+  inline static size_t region_count() {
+    return ShenandoahHeapRegion::RegionCount;
+  }
+
+  inline static size_t region_size_bytes() {
+    return ShenandoahHeapRegion::RegionSizeBytes;
+  }
+
+  inline static size_t region_size_words() {
+    return ShenandoahHeapRegion::RegionSizeWords;
+  }
+
+  inline static size_t region_size_bytes_shift() {
+    return ShenandoahHeapRegion::RegionSizeBytesShift;
+  }
+
+  inline static size_t region_size_words_shift() {
+    return ShenandoahHeapRegion::RegionSizeWordsShift;
+  }
+
+  inline static size_t region_size_bytes_mask() {
+    return ShenandoahHeapRegion::RegionSizeBytesMask;
+  }
+
+  inline static size_t region_size_words_mask() {
+    return ShenandoahHeapRegion::RegionSizeWordsMask;
+  }
+
+  // Convert to jint with sanity checking
+  inline static jint region_size_bytes_jint() {
+    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
+    return (jint)ShenandoahHeapRegion::RegionSizeBytes;
+  }
+
+  // Convert to jint with sanity checking
+  inline static jint region_size_words_jint() {
+    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
+    return (jint)ShenandoahHeapRegion::RegionSizeWords;
+  }
+
+  // Convert to jint with sanity checking
+  inline static jint region_size_bytes_shift_jint() {
+    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
+    return (jint)ShenandoahHeapRegion::RegionSizeBytesShift;
+  }
+
+  // Convert to jint with sanity checking
+  inline static jint region_size_words_shift_jint() {
+    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
+    return (jint)ShenandoahHeapRegion::RegionSizeWordsShift;
+  }
+
+  inline static size_t humongous_threshold_bytes() {
+    return ShenandoahHeapRegion::HumongousThresholdBytes;
+  }
+
+  inline static size_t humongous_threshold_words() {
+    return ShenandoahHeapRegion::HumongousThresholdWords;
+  }
+
+  inline static size_t max_tlab_size_bytes() {
+    return ShenandoahHeapRegion::MaxTLABSizeBytes;
+  }
+
+  inline static size_t max_tlab_size_words() {
+    return ShenandoahHeapRegion::MaxTLABSizeWords;
+  }
+
+  static uint64_t seqnum_current_alloc() {
+    // Last used seq number
+    return _alloc_seq_num.value - 1;
+  }
+
+  size_t region_number() const;
+
+  // Allocation (return NULL if full)
+  inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type);
+
+  HeapWord* allocate(size_t word_size) shenandoah_not_implemented_return(NULL)
+
+  void clear_live_data();
+  void set_live_data(size_t s);
+
+  // Increase live data for newly allocated region
+  inline void increase_live_data_alloc_words(size_t s);
+
+  // Increase live data for region scanned with GC
+  inline void increase_live_data_gc_words(size_t s);
+
+  bool has_live() const;
+  size_t get_live_data_bytes() const;
+  size_t get_live_data_words() const;
+
+  void print_on(outputStream* st) const;
+
+  size_t garbage() const;
+
+  void recycle();
+
+  void oop_iterate(OopIterateClosure* cl);
+
+  HeapWord* block_start_const(const void* p) const;
+
+  bool in_collection_set() const;
+
+  // Find humongous start region that this region belongs to
+  ShenandoahHeapRegion* humongous_start_region() const;
+
+  CompactibleSpace* next_compaction_space() const shenandoah_not_implemented_return(NULL);
+  void prepare_for_compaction(CompactPoint* cp)   shenandoah_not_implemented;
+  void adjust_pointers()                          shenandoah_not_implemented;
+  void compact()                                  shenandoah_not_implemented;
+
+  void set_new_top(HeapWord* new_top) { _new_top = new_top; }
+  HeapWord* new_top() const { return _new_top; }
+
+  inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
+  void reset_alloc_metadata_to_shared();
+  void reset_alloc_metadata();
+  size_t get_shared_allocs() const;
+  size_t get_tlab_allocs() const;
+  size_t get_gclab_allocs() const;
+
+  uint64_t seqnum_first_alloc() const {
+    if (_seqnum_first_alloc_mutator == 0) return _seqnum_first_alloc_gc;
+    if (_seqnum_first_alloc_gc == 0)      return _seqnum_first_alloc_mutator;
+    return MIN2(_seqnum_first_alloc_mutator, _seqnum_first_alloc_gc);
+  }
+
+  uint64_t seqnum_last_alloc() const {
+    return MAX2(_seqnum_last_alloc_mutator, _seqnum_last_alloc_gc);
+  }
+
+  uint64_t seqnum_first_alloc_mutator() const {
+    return _seqnum_first_alloc_mutator;
+  }
+
+  uint64_t seqnum_last_alloc_mutator()  const {
+    return _seqnum_last_alloc_mutator;
+  }
+
+  uint64_t seqnum_first_alloc_gc() const {
+    return _seqnum_first_alloc_gc;
+  }
+
+  uint64_t seqnum_last_alloc_gc()  const {
+    return _seqnum_last_alloc_gc;
+  }
+
+private:
+  void do_commit();
+  void do_uncommit();
+
+  void oop_iterate_objects(OopIterateClosure* cl);
+  void oop_iterate_humongous(OopIterateClosure* cl);
+
+  inline void internal_increase_live_data(size_t s);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahPacer.inline.hpp"
+#include "runtime/atomic.hpp"
+
+HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest::Type type) {
+  _heap->assert_heaplock_or_safepoint();
+
+  HeapWord* obj = top();
+  if (pointer_delta(end(), obj) >= size) {
+    make_regular_allocation();
+    adjust_alloc_metadata(type, size);
+
+    HeapWord* new_top = obj + size;
+    set_top(new_top);
+    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+
+    return obj;
+  } else {
+    return NULL;
+  }
+}
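+
+// Usage sketch (hypothetical caller; real callers live in the free set code):
+// allocation is a plain top-pointer bump under the heap lock, and NULL means
+// the region cannot satisfy the request and should be retired:
+//
+//   ShenandoahHeapLocker locker(heap->lock());
+//   HeapWord* mem = region->allocate(word_size, ShenandoahAllocRequest::_alloc_tlab);
+//   if (mem == NULL) { /* region is full, move on to the next one */ }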
+
+inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t size) {
+  bool is_first_alloc = (top() == bottom());
+
+  switch (type) {
+    case ShenandoahAllocRequest::_alloc_shared:
+    case ShenandoahAllocRequest::_alloc_tlab:
+      _seqnum_last_alloc_mutator = _alloc_seq_num.value++;
+      if (is_first_alloc) {
+        assert (_seqnum_first_alloc_mutator == 0, "Region " SIZE_FORMAT " metadata is correct", _region_number);
+        _seqnum_first_alloc_mutator = _seqnum_last_alloc_mutator;
+      }
+      break;
+    case ShenandoahAllocRequest::_alloc_shared_gc:
+    case ShenandoahAllocRequest::_alloc_gclab:
+      _seqnum_last_alloc_gc = _alloc_seq_num.value++;
+      if (is_first_alloc) {
+        assert (_seqnum_first_alloc_gc == 0, "Region " SIZE_FORMAT " metadata is correct", _region_number);
+        _seqnum_first_alloc_gc = _seqnum_last_alloc_gc;
+      }
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+
+  switch (type) {
+    case ShenandoahAllocRequest::_alloc_shared:
+    case ShenandoahAllocRequest::_alloc_shared_gc:
+      _shared_allocs += size;
+      break;
+    case ShenandoahAllocRequest::_alloc_tlab:
+      _tlab_allocs += size;
+      break;
+    case ShenandoahAllocRequest::_alloc_gclab:
+      _gclab_allocs += size;
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+inline void ShenandoahHeapRegion::increase_live_data_alloc_words(size_t s) {
+  internal_increase_live_data(s);
+}
+
+inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) {
+  internal_increase_live_data(s);
+  if (ShenandoahPacing) {
+    _pacer->report_mark(s);
+  }
+}
+
+inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
+  size_t new_live_data = Atomic::add(s, &_live_data);
+#ifdef ASSERT
+  size_t live_bytes = new_live_data * HeapWordSize;
+  size_t used_bytes = used();
+  assert(live_bytes <= used_bytes,
+         "can't have more live data than used: " SIZE_FORMAT ", " SIZE_FORMAT, live_bytes, used_bytes);
+#endif
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionCounters.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/perfData.inline.hpp"
+
+ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() :
+  _last_sample_millis(0)
+{
+  if (UsePerfData && ShenandoahRegionSampling) {
+    EXCEPTION_MARK;
+    ResourceMark rm;
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    size_t num_regions = heap->num_regions();
+    const char* cns = PerfDataManager::name_space("shenandoah", "regions");
+    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
+    strcpy(_name_space, cns);
+
+    const char* cname = PerfDataManager::counter_name(_name_space, "timestamp");
+    _timestamp = PerfDataManager::create_long_variable(SUN_GC, cname, PerfData::U_None, CHECK);
+
+    cname = PerfDataManager::counter_name(_name_space, "max_regions");
+    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, num_regions, CHECK);
+
+    cname = PerfDataManager::counter_name(_name_space, "region_size");
+    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, ShenandoahHeapRegion::region_size_bytes() >> 10, CHECK);
+
+    cname = PerfDataManager::counter_name(_name_space, "status");
+    _status = PerfDataManager::create_long_variable(SUN_GC, cname,
+                                                    PerfData::U_None, CHECK);
+
+    _regions_data = NEW_C_HEAP_ARRAY(PerfVariable*, num_regions, mtGC);
+    for (uint i = 0; i < num_regions; i++) {
+      const char* reg_name = PerfDataManager::name_space(_name_space, "region", i);
+      const char* data_name = PerfDataManager::counter_name(reg_name, "data");
+      const char* ns = PerfDataManager::ns_to_string(SUN_GC);
+      const char* fullname = PerfDataManager::counter_name(ns, data_name);
+      assert(!PerfDataManager::exists(fullname), "must not exist");
+      _regions_data[i] = PerfDataManager::create_long_variable(SUN_GC, data_name,
+                                                               PerfData::U_None, CHECK);
+    }
+  }
+}
+
+ShenandoahHeapRegionCounters::~ShenandoahHeapRegionCounters() {
+  if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
+}
+
+void ShenandoahHeapRegionCounters::update() {
+  if (ShenandoahRegionSampling) {
+    jlong current = os::javaTimeMillis();
+    jlong last = _last_sample_millis;
+    if (current - last > ShenandoahRegionSamplingRate &&
+            Atomic::cmpxchg(current, &_last_sample_millis, last) == last) {
+
+      ShenandoahHeap* heap = ShenandoahHeap::heap();
+      jlong status = 0;
+      if (heap->is_concurrent_mark_in_progress())      status |= 1 << 0;
+      if (heap->is_evacuation_in_progress())           status |= 1 << 1;
+      if (heap->is_update_refs_in_progress())          status |= 1 << 2;
+      if (heap->is_concurrent_traversal_in_progress()) status |= 1 << 3;
+      _status->set_value(status);
+
+      _timestamp->set_value(os::elapsed_counter());
+
+      size_t num_regions = heap->num_regions();
+
+      {
+        ShenandoahHeapLocker locker(heap->lock());
+        size_t rs = ShenandoahHeapRegion::region_size_bytes();
+        for (uint i = 0; i < num_regions; i++) {
+          ShenandoahHeapRegion* r = heap->get_region(i);
+          jlong data = 0;
+          data |= ((100 * r->used() / rs)                & PERCENT_MASK) << USED_SHIFT;
+          data |= ((100 * r->get_live_data_bytes() / rs) & PERCENT_MASK) << LIVE_SHIFT;
+          data |= ((100 * r->get_tlab_allocs() / rs)     & PERCENT_MASK) << TLAB_SHIFT;
+          data |= ((100 * r->get_gclab_allocs() / rs)    & PERCENT_MASK) << GCLAB_SHIFT;
+          data |= ((100 * r->get_shared_allocs() / rs)   & PERCENT_MASK) << SHARED_SHIFT;
+          data |= (r->state_ordinal() & STATUS_MASK) << STATUS_SHIFT;
+          _regions_data[i]->set_value(data);
+        }
+      }
+
+    }
+  }
+}
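
update() throttles itself with a compare-exchange on the last-sample timestamp,
so concurrent callers agree on who samples a given window without taking a
lock. The same shape in standalone form (illustrative names; not part of this
changeset):

  #include <atomic>
  #include <chrono>
  #include <cstdio>

  std::atomic<long long> last_sample_ms{0};
  const long long sampling_rate_ms = 100;   // stands in for ShenandoahRegionSamplingRate

  long long now_ms() {
    using namespace std::chrono;
    return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
  }

  void maybe_sample() {
    long long current = now_ms();
    long long last = last_sample_ms.load();
    // Only the caller that wins the compare-exchange does the sampling work
    // for this window; everyone else sees a fresh timestamp and backs off.
    if (current - last > sampling_rate_ms &&
        last_sample_ms.compare_exchange_strong(last, current)) {
      std::printf("sampling at %lld ms\n", current);
    }
  }

  int main() {
    maybe_sample();   // samples: window has elapsed
    maybe_sample();   // skipped: within the same window
    return 0;
  }
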
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP
+
+#include "memory/allocation.hpp"
+
+/**
+ * This provides the following in JVMStat:
+ *
+ * constants:
+ * - sun.gc.shenandoah.regions.max_regions  maximum number of regions
+ * - sun.gc.shenandoah.regions.region_size  size per region, in kilobytes
+ *
+ * variables:
+ * - sun.gc.shenandoah.regions.timestamp    the timestamp of the last sample
+ * - sun.gc.shenandoah.regions.status       current GC status:
+ *     - bit 0 set when marking in progress
+ *     - bit 1 set when evacuation in progress
+ *     - bit 2 set when update refs in progress
+ *     - bit 3 set when traversal in progress
+ *
+ * one variable counter per region, with $max_regions (see above) counters:
+ * - sun.gc.shenandoah.regions.region.$i.data
+ * where $i is the region number, 0 <= i < $max_regions
+ *
+ * .data is in the following format:
+ * - bits 0-6    used memory in percent
+ * - bits 7-13   live memory in percent
+ * - bits 14-20  tlab allocated memory in percent
+ * - bits 21-27  gclab allocated memory in percent
+ * - bits 28-34  shared allocated memory in percent
+ * - bits 35-41  <reserved>
+ * - bits 42-50  <reserved>
+ * - bits 51-57  <reserved>
+ * - bits 58-63  status
+ *      - bits describe the state as recorded in ShenandoahHeapRegion
+ */
+class ShenandoahHeapRegionCounters : public CHeapObj<mtGC> {
+private:
+  static const jlong PERCENT_MASK = 0x7f;
+  static const jlong STATUS_MASK  = 0x3f;
+
+  static const jlong USED_SHIFT   = 0;
+  static const jlong LIVE_SHIFT   = 7;
+  static const jlong TLAB_SHIFT   = 14;
+  static const jlong GCLAB_SHIFT  = 21;
+  static const jlong SHARED_SHIFT = 28;
+
+  static const jlong STATUS_SHIFT = 58;
+
+  char* _name_space;
+  PerfLongVariable** _regions_data;
+  PerfLongVariable* _timestamp;
+  PerfLongVariable* _status;
+  volatile jlong _last_sample_millis;
+
+public:
+  ShenandoahHeapRegionCounters();
+  ~ShenandoahHeapRegionCounters();
+  void update();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP
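
A consumer-side sketch of the packed layout documented above: given one
region's 64-bit .data value, the five percentages and the status ordinal
unpack with the same masks and shifts. The encoding is from this header; the
decoder itself is illustrative and not part of this changeset:

  #include <cstdint>
  #include <cstdio>

  const int64_t PERCENT_MASK = 0x7f;   // 7-bit fields, as above
  const int64_t STATUS_MASK  = 0x3f;   // 6-bit status, bits 58-63

  void decode_region_data(int64_t data) {
    int used   = (int)((data >> 0)  & PERCENT_MASK);
    int live   = (int)((data >> 7)  & PERCENT_MASK);
    int tlab   = (int)((data >> 14) & PERCENT_MASK);
    int gclab  = (int)((data >> 21) & PERCENT_MASK);
    int shared = (int)((data >> 28) & PERCENT_MASK);
    int status = (int)((data >> 58) & STATUS_MASK);
    std::printf("used=%d%% live=%d%% tlab=%d%% gclab=%d%% shared=%d%% status=%d\n",
                used, live, tlab, gclab, shared, status);
  }

  int main() {
    // Pack an illustrative sample, then decode it.
    int64_t data = (50LL << 0) | (30LL << 7) | (20LL << 14) |
                   (5LL << 21) | (5LL << 28) | (3LL << 58);
    decode_region_data(data);
    return 0;
  }
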
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/copy.hpp"
+
+ShenandoahHeapRegionSetIterator::ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set) :
+        _set(set), _heap(ShenandoahHeap::heap()), _current_index(0) {}
+
+void ShenandoahHeapRegionSetIterator::reset(const ShenandoahHeapRegionSet* const set) {
+  _set = set;
+  _current_index = 0;
+}
+
+ShenandoahHeapRegionSet::ShenandoahHeapRegionSet() :
+  _heap(ShenandoahHeap::heap()),
+  _map_size(_heap->num_regions()),
+  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
+  _set_map(NEW_C_HEAP_ARRAY(jbyte, _map_size, mtGC)),
+  _biased_set_map(_set_map - ((uintx)_heap->base() >> _region_size_bytes_shift)),
+  _region_count(0)
+{
+  // Use 1-byte data type
+  STATIC_ASSERT(sizeof(jbyte) == 1);
+
+  // Initialize the set map
+  Copy::zero_to_bytes(_set_map, _map_size);
+}
+
+ShenandoahHeapRegionSet::~ShenandoahHeapRegionSet() {
+  FREE_C_HEAP_ARRAY(jbyte, _set_map);
+}
+
+void ShenandoahHeapRegionSet::add_region(ShenandoahHeapRegion* r) {
+  assert(!is_in(r), "Already in region set");
+  _set_map[r->region_number()] = 1;
+  _region_count++;
+}
+
+bool ShenandoahHeapRegionSet::add_region_check_for_duplicates(ShenandoahHeapRegion* r) {
+  if (!is_in(r)) {
+    add_region(r);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+void ShenandoahHeapRegionSet::remove_region(ShenandoahHeapRegion* r) {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
+  assert(is_in(r), "Not in region set");
+  _set_map[r->region_number()] = 0;
+  _region_count--;
+}
+
+void ShenandoahHeapRegionSet::clear() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+  Copy::zero_to_bytes(_set_map, _map_size);
+
+  _region_count = 0;
+}
+
+ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::claim_next() {
+  size_t num_regions = _heap->num_regions();
+  if (_current_index >= (jint)num_regions) {
+    return NULL;
+  }
+
+  jint saved_current = _current_index;
+  size_t index = (size_t)saved_current;
+
+  while (index < num_regions) {
+    if (_set->is_in(index)) {
+      jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current);
+      assert(cur >= (jint)saved_current, "Must move forward");
+      if (cur == saved_current) {
+        assert(_set->is_in(index), "Invariant");
+        return _heap->get_region(index);
+      } else {
+        index = (size_t)cur;
+        saved_current = cur;
+      }
+    } else {
+      index++;
+    }
+  }
+  return NULL;
+}
+
+ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::next() {
+  size_t num_regions = _heap->num_regions();
+  for (size_t index = (size_t)_current_index; index < num_regions; index++) {
+    if (_set->is_in(index)) {
+      _current_index = (jint)(index + 1);
+      return _heap->get_region(index);
+    }
+  }
+
+  return NULL;
+}
+
+void ShenandoahHeapRegionSet::print_on(outputStream* out) const {
+  out->print_cr("Region Set : " SIZE_FORMAT, count());
+
+  debug_only(size_t regions = 0;)
+  for (size_t index = 0; index < _heap->num_regions(); index++) {
+    if (is_in(index)) {
+      _heap->get_region(index)->print_on(out);
+      debug_only(regions++;)
+    }
+  }
+  assert(regions == count(), "Must match");
+}
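
claim_next() above is a lock-free work-claiming loop: threads race to advance
a shared cursor with compare-exchange, the winner owns the region it claimed,
and losers restart from the freshly published cursor. The shape in miniature
(illustrative, not part of this changeset; a plain bool array stands in for
the membership map):

  #include <atomic>
  #include <cstddef>
  #include <cstdio>

  struct ClaimingIterator {
    const bool* in_set;
    size_t size;
    std::atomic<size_t> cursor{0};

    ClaimingIterator(const bool* f, size_t n) : in_set(f), size(n) {}

    // Returns a claimed index, or `size` when the set is exhausted.
    size_t claim_next() {
      size_t cur = cursor.load();
      size_t idx = cur;
      while (idx < size) {
        if (!in_set[idx]) { idx++; continue; }
        if (cursor.compare_exchange_strong(cur, idx + 1)) {
          return idx;   // won the race for this element
        }
        idx = cur;      // lost: restart from the freshly published cursor
      }
      return size;
    }
  };

  int main() {
    bool flags[6] = {false, true, false, true, true, false};
    ClaimingIterator it(flags, 6);
    for (size_t i; (i = it.claim_next()) < 6; ) {
      std::printf("claimed region %zu\n", i);   // prints 1, 3, 4
    }
    return 0;
  }
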
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
+
+#include "memory/allocation.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+
+class ShenandoahHeapRegionSet;
+
+class ShenandoahHeapRegionSetIterator : public StackObj {
+private:
+  const ShenandoahHeapRegionSet* _set;
+  ShenandoahHeap* const _heap;
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile jint));
+  volatile jint _current_index;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  // No implicit copying: iterators should be passed by reference to capture the state
+  ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSetIterator& that);
+  ShenandoahHeapRegionSetIterator& operator=(const ShenandoahHeapRegionSetIterator& o);
+
+public:
+  ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set);
+
+  // Reset existing iterator to new set
+  void reset(const ShenandoahHeapRegionSet* const set);
+
+  // MT version
+  ShenandoahHeapRegion* claim_next();
+
+  // Single-thread version
+  ShenandoahHeapRegion* next();
+};
+
+class ShenandoahHeapRegionSet : public CHeapObj<mtGC> {
+  friend class ShenandoahHeap;
+private:
+  ShenandoahHeap* const _heap;
+  size_t const          _map_size;
+  size_t const          _region_size_bytes_shift;
+  jbyte* const          _set_map;
+  // Bias set map's base address for fast test if an oop is in set
+  jbyte* const          _biased_set_map;
+  size_t                _region_count;
+
+public:
+  ShenandoahHeapRegionSet();
+  ~ShenandoahHeapRegionSet();
+
+  // Add region to set
+  void add_region(ShenandoahHeapRegion* r);
+  bool add_region_check_for_duplicates(ShenandoahHeapRegion* r);
+
+  // Remove region from set
+  void remove_region(ShenandoahHeapRegion* r);
+
+  size_t count()  const { return _region_count; }
+  bool is_empty() const { return _region_count == 0; }
+
+  inline bool is_in(ShenandoahHeapRegion* r) const;
+  inline bool is_in(size_t region_number)    const;
+  inline bool is_in(HeapWord* p)             const;
+
+  void print_on(outputStream* out) const;
+
+  void clear();
+
+private:
+  jbyte* biased_map_address() const {
+    return _biased_set_map;
+  }
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+
+bool ShenandoahHeapRegionSet::is_in(size_t region_number) const {
+  assert(region_number < _heap->num_regions(), "Sanity");
+  return _set_map[region_number] == 1;
+}
+
+bool ShenandoahHeapRegionSet::is_in(ShenandoahHeapRegion* r) const {
+  return is_in(r->region_number());
+}
+
+bool ShenandoahHeapRegionSet::is_in(HeapWord* p) const {
+  assert(_heap->is_in(p), "Must be in the heap");
+  uintx index = ((uintx) p) >> _region_size_bytes_shift;
+  // no need to subtract the bottom of the heap from p,
+  // _biased_set_map is biased
+  return _biased_set_map[index] == 1;
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP
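
The biased map above pre-subtracts (heap_base >> region_shift) from the map's
base pointer once, so each membership query is just a shift and a load, with
no per-query subtraction. A standalone rendition (illustrative names and
addresses; it relies on flat pointer arithmetic exactly as the patch does, so
treat it as a sketch, not portable library code):

  #include <cassert>
  #include <cstdint>

  struct BiasedByteMap {
    int8_t*  map;         // one byte per region
    int8_t*  biased_map;  // = map - (heap_base >> shift)
    unsigned shift;       // log2 of the region size in bytes

    BiasedByteMap(int8_t* m, uintptr_t heap_base, unsigned s)
      : map(m), biased_map(m - (heap_base >> s)), shift(s) {}

    bool is_in(uintptr_t addr) const {
      // Equivalent to map[(addr - heap_base) >> shift], one subtraction cheaper.
      return biased_map[addr >> shift] == 1;
    }
  };

  int main() {
    const unsigned shift = 20;              // 1 MiB "regions", illustrative
    static int8_t map[16] = {0};
    map[3] = 1;                             // region 3 is in the set
    uintptr_t heap_base = 0x40000000;       // illustrative heap base
    BiasedByteMap set(map, heap_base, shift);
    assert(set.is_in(heap_base + 3 * (1u << shift) + 12345));
    return 0;
  }
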
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/gcCause.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
+
+int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) {
+  if (a._garbage > b._garbage)
+    return -1;
+  else if (a._garbage < b._garbage)
+    return 1;
+  else return 0;
+}
+
+int ShenandoahHeuristics::compare_by_garbage_then_alloc_seq_ascending(RegionData a, RegionData b) {
+  int r = compare_by_garbage(a, b);
+  if (r != 0) {
+    return r;
+  }
+  return compare_by_alloc_seq_ascending(a, b);
+}
+
+int ShenandoahHeuristics::compare_by_alloc_seq_ascending(RegionData a, RegionData b) {
+  if (a._seqnum_last_alloc == b._seqnum_last_alloc)
+    return 0;
+  else if (a._seqnum_last_alloc < b._seqnum_last_alloc)
+    return -1;
+  else return 1;
+}
+
+int ShenandoahHeuristics::compare_by_alloc_seq_descending(RegionData a, RegionData b) {
+  return -compare_by_alloc_seq_ascending(a, b);
+}
+
+ShenandoahHeuristics::ShenandoahHeuristics() :
+  _update_refs_early(false),
+  _update_refs_adaptive(false),
+  _region_data(NULL),
+  _region_data_size(0),
+  _degenerated_cycles_in_a_row(0),
+  _successful_cycles_in_a_row(0),
+  _bytes_in_cset(0),
+  _cycle_start(os::elapsedTime()),
+  _last_cycle_end(0),
+  _gc_times_learned(0),
+  _gc_time_penalties(0),
+  _gc_time_history(new TruncatedSeq(5)),
+  _metaspace_oom()
+{
+  if (strcmp(ShenandoahUpdateRefsEarly, "on") == 0 ||
+      strcmp(ShenandoahUpdateRefsEarly, "true") == 0 ) {
+    _update_refs_early = true;
+  } else if (strcmp(ShenandoahUpdateRefsEarly, "off") == 0 ||
+             strcmp(ShenandoahUpdateRefsEarly, "false") == 0 ) {
+    _update_refs_early = false;
+  } else if (strcmp(ShenandoahUpdateRefsEarly, "adaptive") == 0) {
+    _update_refs_adaptive = true;
+    _update_refs_early = true;
+  } else {
+    vm_exit_during_initialization("Unknown -XX:ShenandoahUpdateRefsEarly option: %s", ShenandoahUpdateRefsEarly);
+  }
+
+  // No unloading during concurrent mark? Communicate that to heuristics
+  if (!ClassUnloadingWithConcurrentMark) {
+    FLAG_SET_DEFAULT(ShenandoahUnloadClassesFrequency, 0);
+  }
+}
+
+ShenandoahHeuristics::~ShenandoahHeuristics() {
+  if (_region_data != NULL) {
+    FREE_C_HEAP_ARRAY(RegionData, _region_data);
+  }
+}
+
+ShenandoahHeuristics::RegionData* ShenandoahHeuristics::get_region_data_cache(size_t num) {
+  RegionData* res = _region_data;
+  if (res == NULL) {
+    res = NEW_C_HEAP_ARRAY(RegionData, num, mtGC);
+    _region_data = res;
+    _region_data_size = num;
+  } else if (_region_data_size < num) {
+    res = REALLOC_C_HEAP_ARRAY(RegionData, _region_data, num, mtGC);
+    _region_data = res;
+    _region_data_size = num;
+  }
+  return res;
+}
+
+void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
+  assert(collection_set->count() == 0, "Must be empty");
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.
+
+  size_t num_regions = heap->num_regions();
+
+  RegionData* candidates = get_region_data_cache(num_regions);
+
+  size_t cand_idx = 0;
+
+  size_t total_garbage = 0;
+
+  size_t immediate_garbage = 0;
+  size_t immediate_regions = 0;
+
+  size_t free = 0;
+  size_t free_regions = 0;
+
+  ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
+
+  for (size_t i = 0; i < num_regions; i++) {
+    ShenandoahHeapRegion* region = heap->get_region(i);
+
+    size_t garbage = region->garbage();
+    total_garbage += garbage;
+
+    if (region->is_empty()) {
+      free_regions++;
+      free += ShenandoahHeapRegion::region_size_bytes();
+    } else if (region->is_regular()) {
+      if (!region->has_live()) {
+        // We can recycle it right away and put it in the free set.
+        immediate_regions++;
+        immediate_garbage += garbage;
+        region->make_trash_immediate();
+      } else {
+        // This is our candidate for later consideration.
+        candidates[cand_idx]._region = region;
+        candidates[cand_idx]._garbage = garbage;
+        cand_idx++;
+      }
+    } else if (region->is_humongous_start()) {
+      // Reclaim humongous regions here, and count them as the immediate garbage
+#ifdef ASSERT
+      bool reg_live = region->has_live();
+      bool bm_live = ctx->is_marked(oop(region->bottom() + ShenandoahBrooksPointer::word_size()));
+      assert(reg_live == bm_live,
+             "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT,
+             BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words());
+#endif
+      if (!region->has_live()) {
+        heap->trash_humongous_region_at(region);
+
+        // Count only the start. Continuations would be counted on "trash" path
+        immediate_regions++;
+        immediate_garbage += garbage;
+      }
+    } else if (region->is_trash()) {
+      // Count in just trashed collection set, during coalesced CM-with-UR
+      immediate_regions++;
+      immediate_garbage += garbage;
+    }
+  }
+
+  // Step 2. Look back at garbage statistics, and decide if we want to collect anything,
+  // given the amount of immediately reclaimable garbage. If we do, figure out the collection set.
+
+  assert (immediate_garbage <= total_garbage,
+          "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "M vs " SIZE_FORMAT "M",
+          immediate_garbage / M, total_garbage / M);
+
+  size_t immediate_percent = total_garbage == 0 ? 0 : (immediate_garbage * 100 / total_garbage);
+
+  if (immediate_percent <= ShenandoahImmediateThreshold) {
+    choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
+    collection_set->update_region_status();
+
+    size_t cset_percent = total_garbage == 0 ? 0 : (collection_set->garbage() * 100 / total_garbage);
+    log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
+                       collection_set->garbage() / M, cset_percent, collection_set->live_data() / M, collection_set->count());
+  }
+
+  log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions",
+                     immediate_garbage / M, immediate_percent, immediate_regions);
+}
+
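
The immediate-garbage shortcut in choose_collection_set() is worth seeing in
isolation: when enough garbage sits in fully-dead regions, reclaiming those
regions alone is a good-enough cycle and building a collection set is skipped.
A standalone sketch of just the threshold decision (the counts are
illustrative; ShenandoahImmediateThreshold is the real knob):

  #include <cstdio>

  int main() {
    const size_t immediate_threshold_pct = 90;   // stands in for ShenandoahImmediateThreshold
    size_t total_garbage = 1000;                 // illustrative byte counts
    size_t immediate_garbage = 940;
    size_t immediate_pct = total_garbage == 0 ? 0 : immediate_garbage * 100 / total_garbage;
    if (immediate_pct <= immediate_threshold_pct) {
      std::printf("build a collection set (only %zu%% reclaimable immediately)\n", immediate_pct);
    } else {
      std::printf("skip the cset: %zu%% of garbage is in fully-dead regions\n", immediate_pct);
    }
    return 0;
  }
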
+void ShenandoahHeuristics::record_gc_start() {
+  // Do nothing
+}
+
+void ShenandoahHeuristics::record_gc_end() {
+  // Do nothing
+}
+
+void ShenandoahHeuristics::record_cycle_start() {
+  _cycle_start = os::elapsedTime();
+}
+
+void ShenandoahHeuristics::record_cycle_end() {
+  _last_cycle_end = os::elapsedTime();
+}
+
+void ShenandoahHeuristics::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
+  // Do nothing
+}
+
+bool ShenandoahHeuristics::should_start_update_refs() {
+  return _update_refs_early;
+}
+
+bool ShenandoahHeuristics::should_start_normal_gc() const {
+  // Perform GC to cleanup metaspace
+  if (has_metaspace_oom()) {
+    // Some of vmTestbase/metaspace tests depend on the following line to count GC cycles
+    log_info(gc)("Trigger: %s", GCCause::to_string(GCCause::_metadata_GC_threshold));
+    return true;
+  }
+
+  double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
+  bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
+  if (periodic_gc) {
+    log_info(gc)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)",
+                  last_time_ms, ShenandoahGuaranteedGCInterval);
+  }
+  return periodic_gc;
+}
+
+bool ShenandoahHeuristics::should_start_traversal_gc() {
+  return false;
+}
+
+bool ShenandoahHeuristics::can_do_traversal_gc() {
+  return false;
+}
+
+bool ShenandoahHeuristics::should_degenerate_cycle() {
+  return _degenerated_cycles_in_a_row <= ShenandoahFullGCThreshold;
+}
+
+void ShenandoahHeuristics::record_success_concurrent() {
+  _degenerated_cycles_in_a_row = 0;
+  _successful_cycles_in_a_row++;
+
+  _gc_time_history->add(time_since_last_gc());
+  _gc_times_learned++;
+  _gc_time_penalties -= MIN2<size_t>(_gc_time_penalties, Concurrent_Adjust);
+}
+
+void ShenandoahHeuristics::record_success_degenerated() {
+  _degenerated_cycles_in_a_row++;
+  _successful_cycles_in_a_row = 0;
+  _gc_time_penalties += Degenerated_Penalty;
+}
+
+void ShenandoahHeuristics::record_success_full() {
+  _degenerated_cycles_in_a_row = 0;
+  _successful_cycles_in_a_row++;
+  _gc_time_penalties += Full_Penalty;
+}
+
+void ShenandoahHeuristics::record_allocation_failure_gc() {
+  _bytes_in_cset = 0;
+}
+
+void ShenandoahHeuristics::record_requested_gc() {
+  _bytes_in_cset = 0;
+
+  // Assume users call System.gc() when external state changes significantly,
+  // which forces us to re-learn the GC timings and allocation rates.
+  _gc_times_learned = 0;
+}
+
+bool ShenandoahHeuristics::can_process_references() {
+  if (ShenandoahRefProcFrequency == 0) return false;
+  return true;
+}
+
+bool ShenandoahHeuristics::should_process_references() {
+  if (!can_process_references()) return false;
+  size_t cycle = ShenandoahHeap::heap()->shenandoah_policy()->cycle_counter();
+  // Process references every Nth GC cycle.
+  return cycle % ShenandoahRefProcFrequency == 0;
+}
+
+bool ShenandoahHeuristics::can_unload_classes() {
+  if (!ClassUnloading) return false;
+  return true;
+}
+
+bool ShenandoahHeuristics::can_unload_classes_normal() {
+  if (!can_unload_classes()) return false;
+  if (has_metaspace_oom()) return true;
+  if (!ClassUnloadingWithConcurrentMark) return false;
+  if (ShenandoahUnloadClassesFrequency == 0) return false;
+  return true;
+}
+
+bool ShenandoahHeuristics::should_unload_classes() {
+  if (!can_unload_classes_normal()) return false;
+  if (has_metaspace_oom()) return true;
+  size_t cycle = ShenandoahHeap::heap()->shenandoah_policy()->cycle_counter();
+  // Unload classes every Nth GC cycle.
+  // This should not happen in the same cycle as process_references to amortize costs.
+  // Offsetting by one is enough to break the rendezvous when periods are equal.
+  // When periods are not equal, offsetting by one is just as good as any other guess.
+  return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
+}
+
+void ShenandoahHeuristics::initialize() {
+  // Nothing to do by default.
+}
+
+double ShenandoahHeuristics::time_since_last_gc() const {
+  return os::elapsedTime() - _cycle_start;
+}
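
The offset-by-one in should_unload_classes() is worth seeing concretely: with
equal periods, cycle % N == 0 and (cycle + 1) % N == 0 can never hold in the
same cycle, so reference processing and class unloading never pile into one
cycle. A standalone check (the periods are illustrative stand-ins for
ShenandoahRefProcFrequency and ShenandoahUnloadClassesFrequency):

  #include <cstdio>

  int main() {
    const unsigned ref_proc_freq = 5;
    const unsigned unload_freq   = 5;
    for (unsigned cycle = 0; cycle < 20; cycle++) {
      bool refs   = (cycle % ref_proc_freq) == 0;        // cycles 0, 5, 10, 15
      bool unload = ((cycle + 1) % unload_freq) == 0;    // cycles 4, 9, 14, 19
      if (refs || unload) {
        std::printf("cycle %2u: refs=%d unload=%d\n", cycle, refs, unload);
      }
    }
    return 0;   // refs and unload never both fire in the same cycle
  }
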
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEURISTICS_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEURISTICS_HPP
+
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahSharedVariables.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/globals_extension.hpp"
+
+#define SHENANDOAH_ERGO_DISABLE_FLAG(name)                                  \
+  do {                                                                      \
+    if (FLAG_IS_DEFAULT(name) && (name)) {                                  \
+      log_info(gc)("Heuristics ergonomically sets -XX:-" #name);            \
+      FLAG_SET_DEFAULT(name, false);                                        \
+    }                                                                       \
+  } while (0)
+
+#define SHENANDOAH_ERGO_ENABLE_FLAG(name)                                   \
+  do {                                                                      \
+    if (FLAG_IS_DEFAULT(name) && !(name)) {                                 \
+      log_info(gc)("Heuristics ergonomically sets -XX:+" #name);            \
+      FLAG_SET_DEFAULT(name, true);                                         \
+    }                                                                       \
+  } while (0)
+
+#define SHENANDOAH_ERGO_OVERRIDE_DEFAULT(name, value)                       \
+  do {                                                                      \
+    if (FLAG_IS_DEFAULT(name)) {                                            \
+      log_info(gc)("Heuristics ergonomically sets -XX:" #name "=" #value);  \
+      FLAG_SET_DEFAULT(name, value);                                        \
+    }                                                                       \
+  } while (0)
+
+#define SHENANDOAH_CHECK_FLAG_SET(name)                                     \
+  do {                                                                      \
+    if (!name) {                                                            \
+      err_msg message("Heuristics needs -XX:+" #name " to work correctly"); \
+      vm_exit_during_initialization("Error", message);                      \
+    }                                                                       \
+  } while (0)
+
+class ShenandoahCollectionSet;
+class ShenandoahHeapRegion;
+
+class ShenandoahHeuristics : public CHeapObj<mtGC> {
+  static const intx Concurrent_Adjust   =  1; // recover from penalties
+  static const intx Degenerated_Penalty = 10; // how much to penalize average GC duration history on Degenerated GC
+  static const intx Full_Penalty        = 20; // how much to penalize average GC duration history on Full GC
+
+protected:
+  typedef struct {
+    ShenandoahHeapRegion* _region;
+    size_t _garbage;
+    uint64_t _seqnum_last_alloc;
+  } RegionData;
+
+  bool _update_refs_early;
+  bool _update_refs_adaptive;
+
+  RegionData* _region_data;
+  size_t _region_data_size;
+
+  uint _degenerated_cycles_in_a_row;
+  uint _successful_cycles_in_a_row;
+
+  size_t _bytes_in_cset;
+
+  double _cycle_start;
+  double _last_cycle_end;
+
+  size_t _gc_times_learned;
+  size_t _gc_time_penalties;
+  TruncatedSeq* _gc_time_history;
+
+  // There may be many threads that contend to set this flag
+  ShenandoahSharedFlag _metaspace_oom;
+
+  static int compare_by_garbage(RegionData a, RegionData b);
+  static int compare_by_garbage_then_alloc_seq_ascending(RegionData a, RegionData b);
+  static int compare_by_alloc_seq_ascending(RegionData a, RegionData b);
+  static int compare_by_alloc_seq_descending(RegionData a, RegionData b);
+
+  RegionData* get_region_data_cache(size_t num);
+
+  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
+                                                     RegionData* data, size_t data_size,
+                                                     size_t free) = 0;
+
+public:
+  ShenandoahHeuristics();
+  virtual ~ShenandoahHeuristics();
+
+  void record_gc_start();
+
+  void record_gc_end();
+
+  void record_metaspace_oom()     { _metaspace_oom.set(); }
+  void clear_metaspace_oom()      { _metaspace_oom.unset(); }
+  bool has_metaspace_oom() const  { return _metaspace_oom.is_set(); }
+
+  virtual void record_cycle_start();
+
+  virtual void record_cycle_end();
+
+  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs);
+
+  virtual bool should_start_normal_gc() const;
+
+  virtual bool should_start_update_refs();
+
+  virtual bool should_start_traversal_gc();
+
+  virtual bool can_do_traversal_gc();
+
+  virtual bool should_degenerate_cycle();
+
+  virtual void record_success_concurrent();
+
+  virtual void record_success_degenerated();
+
+  virtual void record_success_full();
+
+  virtual void record_allocation_failure_gc();
+
+  virtual void record_requested_gc();
+
+  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
+
+  virtual bool can_process_references();
+  virtual bool should_process_references();
+
+  virtual bool can_unload_classes();
+  virtual bool can_unload_classes_normal();
+  virtual bool should_unload_classes();
+
+  virtual const char* name() = 0;
+  virtual bool is_diagnostic() = 0;
+  virtual bool is_experimental() = 0;
+  virtual void initialize();
+
+  double time_since_last_gc() const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEURISTICS_HPP
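
The do { ... } while (0) wrapper in the SHENANDOAH_ERGO_* macros makes each
expansion behave as a single statement, safe under an unbraced if/else, and
the FLAG_IS_DEFAULT guard means ergonomics never override an explicit user
setting. A standalone rendition of the "override only while still default"
pattern, with a plain struct standing in for HotSpot's flag machinery
(illustrative, not part of this changeset):

  #include <cstdio>

  struct Flag { bool value; bool is_default; };

  #define ERGO_ENABLE_FLAG(flag)                                    \
    do {                                                            \
      if ((flag).is_default && !(flag).value) {                     \
        std::printf("Heuristics ergonomically sets +" #flag "\n");  \
        (flag).value = true;                                        \
      }                                                             \
    } while (0)

  int main() {
    Flag concurrent_explicit_gc = { false, true };   // illustrative flag
    if (true)
      ERGO_ENABLE_FLAG(concurrent_explicit_gc);      // fires once; expands as one statement
    ERGO_ENABLE_FLAG(concurrent_explicit_gc);        // silent: value already set
    return 0;
  }
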
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,844 @@
+/*
+ * Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "code/codeCache.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVerifier.hpp"
+#include "gc/shenandoah/shenandoahVMOperations.hpp"
+#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
+#include "memory/metaspace.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/copy.hpp"
+#include "utilities/growableArray.hpp"
+#include "gc/shared/workgroup.hpp"
+
+void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
+  _gc_timer = gc_timer;
+}
+
+void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  if (ShenandoahVerify) {
+    heap->verifier()->verify_before_fullgc();
+  }
+
+  if (VerifyBeforeGC) {
+    Universe::verify();
+  }
+
+  heap->set_full_gc_in_progress(true);
+
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
+  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
+
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
+    heap->pre_full_gc_dump(_gc_timer);
+  }
+
+  {
+    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
+    // Full GC is supposed to recover from any GC state:
+
+    // a0. Remember if we have forwarded objects
+    bool has_forwarded_objects = heap->has_forwarded_objects();
+
+    // a1. Cancel evacuation, if in progress
+    if (heap->is_evacuation_in_progress()) {
+      heap->set_evacuation_in_progress(false);
+    }
+    assert(!heap->is_evacuation_in_progress(), "sanity");
+
+    // a2. Cancel update-refs, if in progress
+    if (heap->is_update_refs_in_progress()) {
+      heap->set_update_refs_in_progress(false);
+    }
+    assert(!heap->is_update_refs_in_progress(), "sanity");
+
+    // a3. Cancel concurrent traversal GC, if in progress
+    if (heap->is_concurrent_traversal_in_progress()) {
+      heap->traversal_gc()->reset();
+      heap->set_concurrent_traversal_in_progress(false);
+    }
+
+    // b. Cancel concurrent mark, if in progress
+    if (heap->is_concurrent_mark_in_progress()) {
+      heap->concurrent_mark()->cancel();
+      heap->stop_concurrent_marking();
+    }
+    assert(!heap->is_concurrent_mark_in_progress(), "sanity");
+
+    // c. Reset the bitmaps for new marking
+    heap->reset_mark_bitmap();
+    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
+    assert(!heap->marking_context()->is_complete(), "sanity");
+
+    // d. Abandon reference discovery and clear all discovered references.
+    ReferenceProcessor* rp = heap->ref_processor();
+    rp->disable_discovery();
+    rp->abandon_partial_discovery();
+    rp->verify_no_references_recorded();
+
+    // e. Set the has-forwarded-objects bit back, in case some steps above dropped it.
+    heap->set_has_forwarded_objects(has_forwarded_objects);
+  }
+
+  heap->make_parsable(true);
+
+  CodeCache::gc_prologue();
+
+  OrderAccess::fence();
+
+  phase1_mark_heap();
+
+  // Once marking is done, we can drop the forwarded-objects flag: marking has fixed
+  // up any forwarded objects, and coming out of Full GC there are none left anyway.
+  // This also prevents the read barrier from kicking in while adjusting pointers in phase3.
+  heap->set_has_forwarded_objects(false);
+
+  heap->set_full_gc_move_in_progress(true);
+
+  // Set up workers for the rest
+  OrderAccess::fence();
+
+  // Initialize worker slices
+  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
+  for (uint i = 0; i < heap->max_workers(); i++) {
+    worker_slices[i] = new ShenandoahHeapRegionSet();
+  }
+
+  {
+    // The rest of the code performs region moves, where region status is undefined
+    // until all phases have run.
+    ShenandoahHeapLocker lock(heap->lock());
+
+    phase2_calculate_target_addresses(worker_slices);
+
+    OrderAccess::fence();
+
+    phase3_update_references();
+
+    phase4_compact_objects(worker_slices);
+  }
+
+  // Resize metaspace
+  MetaspaceGC::compute_new_size();
+
+  // Free worker slices
+  for (uint i = 0; i < heap->max_workers(); i++) {
+    delete worker_slices[i];
+  }
+  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
+
+  CodeCache::gc_epilogue();
+  JvmtiExport::gc_epilogue();
+
+  heap->set_full_gc_move_in_progress(false);
+  heap->set_full_gc_in_progress(false);
+
+  if (ShenandoahVerify) {
+    heap->verifier()->verify_after_fullgc();
+  }
+
+  if (VerifyAfterGC) {
+    Universe::verify();
+  }
+
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
+    heap->post_full_gc_dump(_gc_timer);
+  }
+}
+
+class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahMarkingContext* const _ctx;
+
+public:
+  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
+
+  void heap_region_do(ShenandoahHeapRegion *r) {
+    _ctx->capture_top_at_mark_start(r);
+    r->clear_live_data();
+    r->set_concurrent_iteration_safe_limit(r->top());
+  }
+};
+
+void ShenandoahMarkCompact::phase1_mark_heap() {
+  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
+  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  ShenandoahPrepareForMarkClosure cl;
+  heap->heap_region_iterate(&cl);
+
+  ShenandoahConcurrentMark* cm = heap->concurrent_mark();
+
+  heap->set_process_references(heap->heuristics()->can_process_references());
+  heap->set_unload_classes(heap->heuristics()->can_unload_classes());
+
+  ReferenceProcessor* rp = heap->ref_processor();
+  // enable ("weak") refs discovery
+  rp->enable_discovery(true /*verify_no_refs*/);
+  rp->setup_policy(true); // forcefully purge all soft references
+  rp->set_active_mt_degree(heap->workers()->active_workers());
+
+  cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
+  cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
+  cm->finish_mark_from_roots(/* full_gc = */ true);
+
+  heap->mark_complete_marking_context();
+}
+
+class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
+private:
+  ShenandoahHeap*          const _heap;
+  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
+  int _empty_regions_pos;
+  ShenandoahHeapRegion*          _to_region;
+  ShenandoahHeapRegion*          _from_region;
+  HeapWord* _compact_point;
+
+public:
+  ShenandoahPrepareForCompactionObjectClosure(GrowableArray<ShenandoahHeapRegion*>& empty_regions, ShenandoahHeapRegion* to_region) :
+    _heap(ShenandoahHeap::heap()),
+    _empty_regions(empty_regions),
+    _empty_regions_pos(0),
+    _to_region(to_region),
+    _from_region(NULL),
+    _compact_point(to_region->bottom()) {}
+
+  void set_from_region(ShenandoahHeapRegion* from_region) {
+    _from_region = from_region;
+  }
+
+  void finish_region() {
+    assert(_to_region != NULL, "should not happen");
+    _to_region->set_new_top(_compact_point);
+  }
+
+  bool is_compact_same_region() {
+    return _from_region == _to_region;
+  }
+
+  int empty_regions_pos() {
+    return _empty_regions_pos;
+  }
+
+  void do_object(oop p) {
+    assert(_from_region != NULL, "must set before work");
+    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
+    assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");
+
+    size_t obj_size = p->size() + ShenandoahBrooksPointer::word_size();
+    if (_compact_point + obj_size > _to_region->end()) {
+      finish_region();
+
+      // Object doesn't fit. Pick next empty region and start compacting there.
+      ShenandoahHeapRegion* new_to_region;
+      if (_empty_regions_pos < _empty_regions.length()) {
+        new_to_region = _empty_regions.at(_empty_regions_pos);
+        _empty_regions_pos++;
+      } else {
+        // Out of empty regions? Compact within the same region.
+        new_to_region = _from_region;
+      }
+
+      assert(new_to_region != _to_region, "must not reuse same to-region");
+      assert(new_to_region != NULL, "must not be NULL");
+      _to_region = new_to_region;
+      _compact_point = _to_region->bottom();
+    }
+
+    // Object fits into current region, record new location:
+    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
+    shenandoah_assert_not_forwarded(NULL, p);
+    ShenandoahBrooksPointer::set_raw(p, _compact_point + ShenandoahBrooksPointer::word_size());
+    _compact_point += obj_size;
+  }
+};
+
+class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
+private:
+  ShenandoahHeap*           const _heap;
+  ShenandoahHeapRegionSet** const _worker_slices;
+  ShenandoahRegionIterator        _heap_regions;
+
+  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) {
+    ShenandoahHeapRegion* from_region = _heap_regions.next();
+
+    while (from_region != NULL && (!from_region->is_move_allowed() || from_region->is_humongous())) {
+      from_region = _heap_regions.next();
+    }
+
+    if (from_region != NULL) {
+      assert(slice != NULL, "sanity");
+      assert(!from_region->is_humongous(), "this path cannot handle humongous regions");
+      assert(from_region->is_move_allowed(), "only regions that can be moved in mark-compact");
+      slice->add_region(from_region);
+    }
+
+    return from_region;
+  }
+
+public:
+  ShenandoahPrepareForCompactionTask(ShenandoahHeapRegionSet** worker_slices) :
+    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
+    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
+    ShenandoahHeapRegion* from_region = next_from_region(slice);
+    // No work?
+    if (from_region == NULL) {
+      return;
+    }
+
+    // Sliding compaction. Walk all regions in the slice, and compact them.
+    // Remember empty regions and reuse them as needed.
+    ResourceMark rm;
+    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
+    ShenandoahPrepareForCompactionObjectClosure cl(empty_regions, from_region);
+    while (from_region != NULL) {
+      cl.set_from_region(from_region);
+      if (from_region->has_live()) {
+        _heap->marked_object_iterate(from_region, &cl);
+      }
+
+      // Compacted the region to somewhere else? From-region is empty then.
+      if (!cl.is_compact_same_region()) {
+        empty_regions.append(from_region);
+      }
+      from_region = next_from_region(slice);
+    }
+    cl.finish_region();
+
+    // Mark all remaining regions as empty
+    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
+      ShenandoahHeapRegion* r = empty_regions.at(pos);
+      r->set_new_top(r->bottom());
+    }
+  }
+};
+
+void ShenandoahMarkCompact::calculate_target_humongous_objects() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  // Compute the new addresses for humongous objects. We need to do this after addresses
+  // for regular objects are calculated, and we know what regions in the heap suffix are
+  // available for humongous moves.
+  //
+  // Scan the heap backwards, because we are compacting humongous regions towards the end.
+  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
+  // humongous start there.
+  //
+  // The complication is potential non-movable regions during the scan. If such a region is
+  // detected, then sliding restarts towards that non-movable region.
+
+  size_t to_begin = heap->num_regions();
+  size_t to_end = heap->num_regions();
+
+  for (size_t c = heap->num_regions() - 1; c > 0; c--) {
+    ShenandoahHeapRegion *r = heap->get_region(c);
+    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
+      // To-region candidate: record this, and continue scan
+      to_begin = r->region_number();
+      continue;
+    }
+
+    if (r->is_humongous_start() && r->is_move_allowed()) {
+      // From-region candidate: movable humongous region
+      oop old_obj = oop(r->bottom() + ShenandoahBrooksPointer::word_size());
+      size_t words_size = old_obj->size() + ShenandoahBrooksPointer::word_size();
+      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
+
+      size_t start = to_end - num_regions;
+
+      if (start >= to_begin && start != r->region_number()) {
+        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
+        ShenandoahBrooksPointer::set_raw(old_obj, heap->get_region(start)->bottom() + ShenandoahBrooksPointer::word_size());
+        to_end = start;
+        continue;
+      }
+    }
+
+    // Failed to fit. Scan starting from current region.
+    to_begin = r->region_number();
+    to_end = r->region_number();
+  }
+}
+
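
The backward window scan above compacts humongous objects toward the top of
the heap, restarting the window whenever it hits something immovable. In
miniature, with one-region "objects" ('H' movable humongous, 'X' pinned,
'.' free), the same loop structure slides each H to the highest slot its
window allows (illustrative, not part of this changeset):

  #include <cstdio>
  #include <vector>

  int main() {
    std::vector<char> regions = {'.', 'H', '.', '.', 'X', 'H', '.', '.'};
    size_t to_begin = regions.size(), to_end = regions.size();
    for (size_t c = regions.size(); c-- > 0; ) {
      if (regions[c] == '.') {                 // free: window grows downwards
        to_begin = c;
        continue;
      }
      if (regions[c] == 'H') {                 // movable, one region in this model
        size_t start = to_end - 1;
        if (to_end > 0 && start >= to_begin && start != c) {
          regions[start] = 'H';                // record the slide target
          regions[c] = '.';
          to_end = start;
          continue;
        }
      }
      to_begin = to_end = c;                   // pinned or no fit: restart below
    }
    for (char r : regions) std::printf("%c", r);
    std::printf("\n");                         // prints "...HX..H"
    return 0;
  }
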
+class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahHeap* const _heap;
+
+public:
+  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    if (r->is_trash()) {
+      r->recycle();
+    }
+    if (r->is_cset()) {
+      r->make_regular_bypass();
+    }
+    if (r->is_empty_uncommitted()) {
+      r->make_committed_bypass();
+    }
+    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->region_number());
+
+    // Record current region occupancy: this communicates empty regions are free
+    // to the rest of Full GC code.
+    r->set_new_top(r->top());
+  }
+};
+
+class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahMarkingContext* const _ctx;
+
+public:
+  ShenandoahTrashImmediateGarbageClosure() :
+    _heap(ShenandoahHeap::heap()),
+    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
+
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    if (r->is_humongous_start()) {
+      oop humongous_obj = oop(r->bottom() + ShenandoahBrooksPointer::word_size());
+      if (!_ctx->is_marked(humongous_obj)) {
+        assert(!r->has_live(),
+               "Region " SIZE_FORMAT " is not marked, should not have live", r->region_number());
+        _heap->trash_humongous_region_at(r);
+      } else {
+        assert(r->has_live(),
+               "Region " SIZE_FORMAT " should have live", r->region_number());
+      }
+    } else if (r->is_humongous_continuation()) {
+      // If we hit a continuation, the non-live humongous starts should have been trashed already
+      assert(r->humongous_start_region()->has_live(),
+             "Region " SIZE_FORMAT " should have live", r->region_number());
+    } else if (r->is_regular()) {
+      if (!r->has_live()) {
+        r->make_trash_immediate();
+      }
+    }
+  }
+};
+
+void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
+  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
+  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  {
+    // Trash the immediately collectible regions before computing addresses
+    ShenandoahTrashImmediateGarbageClosure tigcl;
+    heap->heap_region_iterate(&tigcl);
+
+    // Make sure regions are in good state: committed, active, clean.
+    // This is needed because we are potentially sliding the data through them.
+    ShenandoahEnsureHeapActiveClosure ecl;
+    heap->heap_region_iterate(&ecl);
+  }
+
+  // Compute the new addresses for regular objects
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
+    ShenandoahPrepareForCompactionTask prepare_task(worker_slices);
+    heap->workers()->run_task(&prepare_task);
+  }
+
+  // Compute the new addresses for humongous objects
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
+    calculate_target_humongous_objects();
+  }
+}
+
+class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahMarkingContext* const _ctx;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      assert(_ctx->is_marked(obj), "must be marked");
+      oop forw = oop(ShenandoahBrooksPointer::get_raw(obj));
+      RawAccess<IS_NOT_NULL>::oop_store(p, forw);
+    }
+  }
+
+public:
+  ShenandoahAdjustPointersClosure() :
+    _heap(ShenandoahHeap::heap()),
+    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
+
+  void do_oop(oop* p)       { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahAdjustPointersClosure _cl;
+
+public:
+  ShenandoahAdjustPointersObjectClosure() :
+    _heap(ShenandoahHeap::heap()) {
+  }
+  void do_object(oop p) {
+    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
+    // Adjust all reference fields in p through the forwarding pointers.
+    p->oop_iterate(&_cl);
+  }
+};
+
+class ShenandoahAdjustPointersTask : public AbstractGangTask {
+private:
+  ShenandoahHeap*          const _heap;
+  ShenandoahRegionIterator       _regions;
+
+public:
+  ShenandoahAdjustPointersTask() :
+    AbstractGangTask("Shenandoah Adjust Pointers Task"),
+    _heap(ShenandoahHeap::heap()) {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahAdjustPointersObjectClosure obj_cl;
+    ShenandoahHeapRegion* r = _regions.next();
+    while (r != NULL) {
+      if (!r->is_humongous_continuation() && r->has_live()) {
+        _heap->marked_object_iterate(r, &obj_cl);
+      }
+      r = _regions.next();
+    }
+  }
+};
+
+class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
+private:
+  ShenandoahRootProcessor* _rp;
+
+public:
+  ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp) :
+    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
+    _rp(rp) {}
+
+  void work(uint worker_id) {
+    ShenandoahAdjustPointersClosure cl;
+    CLDToOopClosure adjust_cld_closure(&cl, ClassLoaderData::_claim_strong);
+    MarkingCodeBlobClosure adjust_code_closure(&cl,
+                                             CodeBlobToOopClosure::FixRelocations);
+
+    _rp->process_all_roots(&cl, &cl,
+                           &adjust_cld_closure,
+                           &adjust_code_closure, NULL, worker_id);
+  }
+};
+
+void ShenandoahMarkCompact::phase3_update_references() {
+  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
+  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  WorkGang* workers = heap->workers();
+  uint nworkers = workers->active_workers();
+  {
+#if COMPILER2_OR_JVMCI
+    DerivedPointerTable::clear();
+#endif
+    ShenandoahRootProcessor rp(heap, nworkers, ShenandoahPhaseTimings::full_gc_roots);
+    ShenandoahAdjustRootPointersTask task(&rp);
+    workers->run_task(&task);
+#if COMPILER2_OR_JVMCI
+    DerivedPointerTable::update_pointers();
+#endif
+  }
+
+  ShenandoahAdjustPointersTask adjust_pointers_task;
+  workers->run_task(&adjust_pointers_task);
+}
+
+class ShenandoahCompactObjectsClosure : public ObjectClosure {
+private:
+  ShenandoahHeap* const _heap;
+  uint            const _worker_id;
+
+public:
+  ShenandoahCompactObjectsClosure(uint worker_id) :
+    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
+
+  void do_object(oop p) {
+    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
+    size_t size = (size_t)p->size();
+    HeapWord* compact_to = ShenandoahBrooksPointer::get_raw(p);
+    HeapWord* compact_from = (HeapWord*) p;
+    if (compact_from != compact_to) {
+      Copy::aligned_conjoint_words(compact_from, compact_to, size);
+    }
+    oop new_obj = oop(compact_to);
+    ShenandoahBrooksPointer::initialize(new_obj);
+  }
+};
+
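
The closure above is the last leg of the three-step sliding compaction this
file implements: phase 2 stored each live object's target address in its
Brooks forwarding slot, phase 3 rewrote every reference through those slots,
and here the bodies finally move. The whole dance in miniature, with array
indices standing in for addresses and a fwd field for the forwarding pointer
(illustrative, not part of this changeset):

  #include <cstdio>
  #include <vector>

  struct Obj { size_t fwd; bool live; int payload; };

  int main() {
    std::vector<Obj> heap = { {0, true, 10}, {0, false, 0}, {0, true, 20},
                              {0, false, 0}, {0, true, 30} };
    std::vector<size_t> refs = {0, 2, 4};        // "pointers" into the heap

    size_t compact_point = 0;                    // phase 2: assign target slots
    for (size_t i = 0; i < heap.size(); i++)
      if (heap[i].live) heap[i].fwd = compact_point++;

    for (size_t& r : refs) r = heap[r].fwd;      // phase 3: adjust pointers

    for (size_t i = 0; i < heap.size(); i++)     // phase 4: slide the bodies
      if (heap[i].live && heap[i].fwd != i) heap[heap[i].fwd] = heap[i];

    for (size_t r : refs) std::printf("%d ", heap[r].payload);
    std::printf("\n");                           // prints "10 20 30"
    return 0;
  }
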
+class ShenandoahCompactObjectsTask : public AbstractGangTask {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahHeapRegionSet** const _worker_slices;
+
+public:
+  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
+    AbstractGangTask("Shenandoah Compact Objects Task"),
+    _heap(ShenandoahHeap::heap()),
+    _worker_slices(worker_slices) {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
+
+    ShenandoahCompactObjectsClosure cl(worker_id);
+    ShenandoahHeapRegion* r = slice.next();
+    while (r != NULL) {
+      assert(!r->is_humongous(), "must not get humongous regions here");
+      if (r->has_live()) {
+        _heap->marked_object_iterate(r, &cl);
+      }
+      r->set_top(r->new_top());
+      r = slice.next();
+    }
+  }
+};
+
+class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahHeap* const _heap;
+  size_t _live;
+
+public:
+  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
+    _heap->free_set()->clear();
+  }
+
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    assert (!r->is_cset(), "cset regions should have been demoted already");
+
+    // Need to reset the complete-top-at-mark-start pointer here because
+    // the complete marking bitmap is no longer valid. This ensures
+    // size-based iteration in marked_object_iterate().
+    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
+    // pinned regions.
+    if (!r->is_pinned()) {
+      _heap->complete_marking_context()->reset_top_at_mark_start(r);
+    }
+
+    size_t live = r->used();
+
+    // Make empty regions that have been allocated into regular
+    if (r->is_empty() && live > 0) {
+      r->make_regular_bypass();
+    }
+
+    // Reclaim regular regions that became empty
+    if (r->is_regular() && live == 0) {
+      r->make_trash();
+    }
+
+    // Recycle all trash regions
+    if (r->is_trash()) {
+      live = 0;
+      r->recycle();
+    }
+
+    r->set_live_data(live);
+    r->reset_alloc_metadata_to_shared();
+    _live += live;
+  }
+
+  size_t get_live() {
+    return _live;
+  }
+};
+
+void ShenandoahMarkCompact::compact_humongous_objects() {
+  // Compact humongous regions, based on their fwdptr objects.
+  //
+  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
+  // humongous regions are already compacted, and do not require further moves, which alleviates
+  // sliding costs. We may consider doing this in parallel in the future.
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  for (size_t c = heap->num_regions() - 1; c > 0; c--) {
+    ShenandoahHeapRegion* r = heap->get_region(c);
+    if (r->is_humongous_start()) {
+      oop old_obj = oop(r->bottom() + ShenandoahBrooksPointer::word_size());
+      size_t words_size = old_obj->size() + ShenandoahBrooksPointer::word_size();
+      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
+
+      size_t old_start = r->region_number();
+      size_t old_end   = old_start + num_regions - 1;
+      size_t new_start = heap->heap_region_index_containing(ShenandoahBrooksPointer::get_raw(old_obj));
+      size_t new_end   = new_start + num_regions - 1;
+
+      if (old_start == new_start) {
+        // No need to move the object, it stays at the same slot
+        continue;
+      }
+
+      assert (r->is_move_allowed(), "should be movable");
+
+      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
+                                   heap->get_region(new_start)->bottom(),
+                                   ShenandoahHeapRegion::region_size_words()*num_regions);
+
+      oop new_obj = oop(heap->get_region(new_start)->bottom() + ShenandoahBrooksPointer::word_size());
+      ShenandoahBrooksPointer::initialize(new_obj);
+
+      {
+        for (size_t c = old_start; c <= old_end; c++) {
+          ShenandoahHeapRegion* r = heap->get_region(c);
+          r->make_regular_bypass();
+          r->set_top(r->bottom());
+        }
+
+        for (size_t c = new_start; c <= new_end; c++) {
+          ShenandoahHeapRegion* r = heap->get_region(c);
+          if (c == new_start) {
+            r->make_humongous_start_bypass();
+          } else {
+            r->make_humongous_cont_bypass();
+          }
+
+          // Trailing region may be non-full, record the remainder there
+          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
+          if ((c == new_end) && (remainder != 0)) {
+            r->set_top(r->bottom() + remainder);
+          } else {
+            r->set_top(r->end());
+          }
+
+          r->reset_alloc_metadata_to_shared();
+        }
+      }
+    }
+  }
+}
+
+// This is slightly different from ShHeap::reset_next_mark_bitmap:
+// we need to remain able to walk pinned regions.
+// Since pinned regions do not move and do not get compacted, we will get holes with
+// unreachable objects in them (which may have pointers to unloaded Klasses and thus
+// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
+// a valid marking bitmap and a valid TAMS pointer. This class only resets marking
+// bitmaps for un-pinned regions, and later we only reset TAMS for un-pinned regions.
+class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
+private:
+  ShenandoahRegionIterator _regions;
+
+public:
+  ShenandoahMCResetCompleteBitmapTask() :
+    AbstractGangTask("Parallel Reset Bitmap Task") {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahHeapRegion* region = _regions.next();
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
+    while (region != NULL) {
+      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
+        ctx->clear_bitmap(region);
+      }
+      region = _regions.next();
+    }
+  }
+};
+
+void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
+  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
+  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  // Compact regular objects first
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
+    ShenandoahCompactObjectsTask compact_task(worker_slices);
+    heap->workers()->run_task(&compact_task);
+  }
+
+  // Compact humongous objects after regular object moves
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
+    compact_humongous_objects();
+  }
+
+  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
+  // and must ensure the bitmap is in sync.
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
+    ShenandoahMCResetCompleteBitmapTask task;
+    heap->workers()->run_task(&task);
+  }
+
+  // Bring regions in proper states after the collection, and set heap properties.
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
+
+    ShenandoahPostCompactClosure post_compact;
+    heap->heap_region_iterate(&post_compact);
+    heap->set_used(post_compact.get_live());
+
+    heap->collection_set()->clear();
+    heap->free_set()->rebuild();
+  }
+
+  heap->clear_cancelled_gc();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
+
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+
+/**
+ * This implements Full GC (e.g. when invoking System.gc()) using a mark-compact algorithm.
+ *
+ * The current implementation is a parallel sliding Lisp-2-style algorithm, based on
+ * "Parallel Garbage Collection for Shared Memory Multiprocessors", by Christine Flood et al.
+ * http://people.csail.mit.edu/shanir/publications/dfsz2001.pdf
+ *
+ * It is implemented in four phases:
+ *
+ * 1. Mark all live objects of the heap by traversing objects starting at GC roots.
+ * 2. Calculate the new location of each live object. This is done by sequentially scanning
+ *    the heap, keeping track of a next-location-pointer, which is then written to each
+ *    object's fwdptr field.
+ * 3. Update all references. This is implemented by another scan of the heap, and updates
+ *    each reference in live objects with the value stored in the target object's fwdptr.
+ * 4. Compact the heap by copying all live objects to their new location.
+ *
+ * Parallelization is handled by assigning each GC worker a slice of the heap (a set of regions)
+ * in which it does sliding compaction, without interfering with other threads.
+ */
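A rough standalone illustration of phases 2-4 above (an editorial aside, not patch content: toy types, a side forwarding table instead of the Brooks pointer slot, and phase 1 liveness assumed already computed):

    #include <cstdio>
    #include <map>
    #include <vector>

    // Toy object: old address, size, liveness, and one outgoing reference
    // (the referent's old address, or -1 for none).
    struct Obj { int addr; int size; bool live; int ref; };

    int main() {
      std::vector<Obj> heap = { {0,2,true,5}, {2,3,false,-1}, {5,4,true,0} };

      // Phase 2: scan in address order, assigning new (slid-down) addresses.
      std::map<int,int> fwd;                  // old address -> new address
      int next = 0;
      for (Obj& o : heap) if (o.live) { fwd[o.addr] = next; next += o.size; }

      // Phase 3: update references through the forwarding information.
      for (Obj& o : heap) if (o.live && o.ref >= 0) o.ref = fwd[o.ref];

      // Phase 4: slide the objects themselves; the real code does the move
      // with Copy::aligned_conjoint_words, region by region.
      for (Obj& o : heap) if (o.live) {
        std::printf("move [%d..%d) -> %d\n", o.addr, o.addr + o.size, fwd[o.addr]);
        o.addr = fwd[o.addr];
      }
      return 0;
    }

The address-ordered scan is what makes the sliding safe: a live object's destination is never above its source, so the copies proceed low-to-high without clobbering data that has not been moved yet.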
+
+class ShenandoahMarkCompact : public CHeapObj<mtGC> {
+private:
+  GCTimer* _gc_timer;
+
+public:
+  void initialize(GCTimer* gc_timer);
+  void do_it(GCCause::Cause gc_cause);
+
+private:
+  void phase1_mark_heap();
+  void phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices);
+  void phase3_update_references();
+  void phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices);
+
+  void calculate_target_humongous_objects();
+  void compact_humongous_objects();
+
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/markBitMap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.hpp"
+
+ShenandoahMarkingContext::ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions) :
+  _top_bitmaps(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)),
+  _top_at_mark_starts_base(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)),
+  _top_at_mark_starts(_top_at_mark_starts_base -
+                      ((uintx) heap_region.start() >> ShenandoahHeapRegion::region_size_bytes_shift())) {
+  _mark_bit_map.initialize(heap_region, bitmap_region);
+}
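The _top_at_mark_starts initializer above biases the array base by the heap start, so that a raw heap address shifted by the region size indexes the array directly (see allocated_after_mark_start() in the .inline.hpp). A toy illustration of the arithmetic, with assumed values:

    #include <cstdio>

    int main() {
      const unsigned shift = 4;                   // toy region size: 16 address units
      const unsigned heap_start = 0x100;          // toy heap base
      int tams_base[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
      const unsigned bias = heap_start >> shift;  // what the constructor subtracts
      unsigned addr = 0x123;                      // region (0x123 - 0x100) >> 4 == 2
      // The biased pointer makes tams[addr >> shift] resolve to:
      std::printf("%d\n", tams_base[(addr >> shift) - bias]);  // tams_base[2] == 3
      return 0;
    }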
+
+bool ShenandoahMarkingContext::is_bitmap_clear() const {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  size_t num_regions = heap->num_regions();
+  for (size_t idx = 0; idx < num_regions; idx++) {
+    ShenandoahHeapRegion* r = heap->get_region(idx);
+    if (heap->is_bitmap_slice_committed(r) && !is_bitmap_clear_range(r->bottom(), r->end())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool ShenandoahMarkingContext::is_bitmap_clear_range(HeapWord* start, HeapWord* end) const {
+  return _mark_bit_map.get_next_marked_addr(start, end) == end;
+}
+
+void ShenandoahMarkingContext::initialize_top_at_mark_start(ShenandoahHeapRegion* r) {
+  size_t idx = r->region_number();
+  HeapWord *bottom = r->bottom();
+  _top_at_mark_starts_base[idx] = bottom;
+  _top_bitmaps[idx] = bottom;
+}
+
+void ShenandoahMarkingContext::capture_top_at_mark_start(ShenandoahHeapRegion *r) {
+  size_t region_number = r->region_number();
+  HeapWord* old_tams = _top_at_mark_starts_base[region_number];
+  HeapWord* new_tams = r->top();
+
+  assert(new_tams >= old_tams,
+         "Region " SIZE_FORMAT", TAMS updates should be monotonic: " PTR_FORMAT " -> " PTR_FORMAT,
+         region_number, p2i(old_tams), p2i(new_tams));
+  assert(is_bitmap_clear_range(old_tams, new_tams),
+         "Region " SIZE_FORMAT ", bitmap should be clear while adjusting TAMS: " PTR_FORMAT " -> " PTR_FORMAT,
+         region_number, p2i(old_tams), p2i(new_tams));
+
+  _top_at_mark_starts_base[region_number] = new_tams;
+  _top_bitmaps[region_number] = new_tams;
+}
+
+void ShenandoahMarkingContext::reset_top_at_mark_start(ShenandoahHeapRegion* r) {
+  _top_at_mark_starts_base[r->region_number()] = r->bottom();
+}
+
+HeapWord* ShenandoahMarkingContext::top_at_mark_start(ShenandoahHeapRegion* r) const {
+  return _top_at_mark_starts_base[r->region_number()];
+}
+
+void ShenandoahMarkingContext::reset_top_bitmap(ShenandoahHeapRegion* r) {
+  assert(is_bitmap_clear_range(r->bottom(), r->end()),
+         "Region " SIZE_FORMAT " should have no marks in bitmap", r->region_number());
+  _top_bitmaps[r->region_number()] = r->bottom();
+}
+
+void ShenandoahMarkingContext::clear_bitmap(ShenandoahHeapRegion* r) {
+  HeapWord* bottom = r->bottom();
+  HeapWord* top_bitmap = _top_bitmaps[r->region_number()];
+  if (top_bitmap > bottom) {
+    _mark_bit_map.clear_range_large(MemRegion(bottom, top_bitmap));
+    _top_bitmaps[r->region_number()] = bottom;
+  }
+  assert(is_bitmap_clear_range(bottom, r->end()),
+         "Region " SIZE_FORMAT " should have no marks in bitmap", r->region_number());
+}
+
+bool ShenandoahMarkingContext::is_complete() {
+  return _is_complete.is_set();
+}
+
+void ShenandoahMarkingContext::mark_complete() {
+  _is_complete.set();
+}
+
+void ShenandoahMarkingContext::mark_incomplete() {
+  _is_complete.unset();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP
+
+#include "gc/shared/markBitMap.hpp"
+#include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
+#include "oops/oopsHierarchy.hpp"
+
+class HeapWord;
+
+/**
+ * Encapsulates a marking bitmap with the top-at-mark-start and top-bitmaps arrays.
+ */
+class ShenandoahMarkingContext : public CHeapObj<mtGC> {
+private:
+  MarkBitMap _mark_bit_map;
+
+  HeapWord** const _top_bitmaps;
+  HeapWord** const _top_at_mark_starts_base;
+  HeapWord** const _top_at_mark_starts;
+
+  ShenandoahSharedFlag _is_complete;
+
+public:
+  ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions);
+
+  /*
+   * Marks the object. Returns true if the object has not been marked before and has
+   * been marked by this thread. Returns false if the object has already been marked,
+   * or if a competing thread succeeded in marking this object.
+   */
+  inline bool mark(oop obj);
+
+  inline bool is_marked(oop obj) const;
+
+  inline bool allocated_after_mark_start(HeapWord* addr) const;
+
+  inline MarkBitMap* mark_bit_map();
+
+  HeapWord* top_at_mark_start(ShenandoahHeapRegion* r) const;
+  void capture_top_at_mark_start(ShenandoahHeapRegion* r);
+  void reset_top_at_mark_start(ShenandoahHeapRegion* r);
+  void initialize_top_at_mark_start(ShenandoahHeapRegion* r);
+
+  void reset_top_bitmap(ShenandoahHeapRegion *r);
+  void clear_bitmap(ShenandoahHeapRegion *r);
+
+  bool is_bitmap_clear() const;
+  bool is_bitmap_clear_range(HeapWord* start, HeapWord* end) const;
+
+  bool is_complete();
+  void mark_complete();
+  void mark_incomplete();
+
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahMarkingContext.hpp"
+
+inline MarkBitMap* ShenandoahMarkingContext::mark_bit_map() {
+  return &_mark_bit_map;
+}
+
+inline bool ShenandoahMarkingContext::mark(oop obj) {
+  shenandoah_assert_not_forwarded(NULL, obj);
+  HeapWord* addr = (HeapWord*) obj;
+  return (! allocated_after_mark_start(addr)) && _mark_bit_map.par_mark(addr);
+}
+
+inline bool ShenandoahMarkingContext::is_marked(oop obj) const {
+  HeapWord* addr = (HeapWord*) obj;
+  return allocated_after_mark_start(addr) || _mark_bit_map.is_marked(addr);
+}
+
+inline bool ShenandoahMarkingContext::allocated_after_mark_start(HeapWord* addr) const {
+  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
+  HeapWord* top_at_mark_start = _top_at_mark_starts[index];
+  bool alloc_after_mark_start = addr >= top_at_mark_start;
+  return alloc_after_mark_start;
+}
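The net effect, combined with mark() and is_marked() above: objects at or above the region's top-at-mark-start are treated as implicitly live and never touch the bitmap. A minimal standalone sketch of that rule (toy region, hypothetical names):

    #include <cstdio>

    // Toy region of 16 "words": a TAMS index plus a per-word mark bitmap.
    struct Region { int tams; bool bitmap[16]; };

    // At/above TAMS: implicitly live (allocated during marking).
    // Below TAMS: must have been marked explicitly by the marker.
    static bool is_marked(const Region& r, int addr) {
      return addr >= r.tams || r.bitmap[addr];
    }

    int main() {
      Region r = { 8, {false} };
      r.bitmap[3] = true;                          // marked before TAMS
      std::printf("%d %d %d\n", is_marked(r, 3),   // 1: bitmap hit
                                is_marked(r, 5),   // 0: below TAMS, unmarked
                                is_marked(r, 12)); // 1: allocated after TAMS
      return 0;
    }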
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahMemoryPool.hpp"
+
+ShenandoahMemoryPool::ShenandoahMemoryPool(ShenandoahHeap* heap) :
+        CollectedMemoryPool("Shenandoah",
+                            heap->capacity(),
+                            heap->max_capacity(),
+                            true /* support_usage_threshold */),
+                            _heap(heap) {}
+
+MemoryUsage ShenandoahMemoryPool::get_memory_usage() {
+  size_t initial   = initial_size();
+  size_t max       = max_size();
+  size_t used      = used_in_bytes();
+  size_t committed = _heap->committed();
+
+  assert(initial <= max,    "initial: "   SIZE_FORMAT ", max: "       SIZE_FORMAT, initial,   max);
+  assert(used <= max,       "used: "      SIZE_FORMAT ", max: "       SIZE_FORMAT, used,      max);
+  assert(committed <= max,  "committed: " SIZE_FORMAT ", max: "       SIZE_FORMAT, committed, max);
+  assert(used <= committed, "used: "      SIZE_FORMAT ", committed: " SIZE_FORMAT, used,      committed);
+
+  return MemoryUsage(initial, used, committed, max);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_SHENANDOAHMEMORYPOOL_HPP
+#define SHARE_VM_SERVICES_SHENANDOAHMEMORYPOOL_HPP
+
+#ifndef SERIALGC
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "services/memoryPool.hpp"
+#include "services/memoryUsage.hpp"
+#endif
+
+class ShenandoahMemoryPool : public CollectedMemoryPool {
+private:
+   ShenandoahHeap* _heap;
+
+public:
+  ShenandoahMemoryPool(ShenandoahHeap* heap);
+  MemoryUsage get_memory_usage();
+  size_t used_in_bytes()              { return _heap->used(); }
+  size_t max_size() const             { return _heap->max_capacity(); }
+};
+
+#endif //SHARE_VM_SERVICES_SHENANDOAHMEMORYPOOL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMetrics.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahMetrics.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+
+/*
+ * Internal fragmentation metric: describes how fragmented the heap regions are.
+ *
+ * It is derived as:
+ *
+ *               sum(used[i]^2, i=0..k)
+ *   IF = 1 - ------------------------------
+ *              C * sum(used[i], i=0..k)
+ *
+ * ...where k is the number of regions in computation, C is the region capacity, and
+ * used[i] is the used space in the region.
+ *
+ * The non-linearity causes IF to be lower for the cases where the same total heap
+ * used is densely packed. For example:
+ *   a) Heap is completely full  => IF = 0
+ *   b) Heap is half full, first 50% regions are completely full => IF = 0
+ *   c) Heap is half full, each region is 50% full => IF = 1/2
+ *   d) Heap is quarter full, first 25% regions are completely full => IF = 0
+ *   e) Heap is quarter full, each region is 25% full => IF = 3/4
+ *   f) Heap has one small object in each region => IF =~ 1
+ */
+double ShenandoahMetrics::internal_fragmentation() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  double squared = 0;
+  double linear = 0;
+  int count = 0;
+  for (size_t c = 0; c < heap->num_regions(); c++) {
+    ShenandoahHeapRegion* r = heap->get_region(c);
+    size_t used = r->used();
+    squared += used * used;
+    linear += used;
+    count++;
+  }
+
+  if (count > 0) {
+    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
+    return 1 - s;
+  } else {
+    return 0;
+  }
+}
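A quick numeric check of the formula, reproducing cases b) and c) from the comment above (a standalone sketch, not the HotSpot code):

    #include <cstdio>
    #include <vector>

    static double internal_frag(const std::vector<double>& used, double C) {
      double squared = 0.0, linear = 0.0;
      for (double u : used) { squared += u * u; linear += u; }
      return (linear > 0.0) ? 1.0 - squared / (C * linear) : 0.0;
    }

    int main() {
      const double C = 100.0;  // region capacity
      // The same 100 units of used space, densely packed vs. spread out:
      std::printf("packed: IF = %.2f\n", internal_frag({100.0, 0.0}, C)); // 0.00
      std::printf("spread: IF = %.2f\n", internal_frag({50.0, 50.0}, C)); // 0.50
      return 0;
    }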
+
+/*
+ * External fragmentation metric: describes how fragmented the heap is.
+ *
+ * It is derived as:
+ *
+ *   EF = 1 - largest_contiguous_free / total_free
+ *
+ * For example:
+ *   a) Heap is completely empty => EF = 0
+ *   b) Heap is completely full => EF = 1
+ *   c) Heap is half full, free space split into two equal runs => EF = 1/2
+ *   d) Heap is half full, full and empty regions interleave => EF =~ 1
+ */
+double ShenandoahMetrics::external_fragmentation() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  size_t last_idx = 0;
+  size_t max_contig = 0;
+  size_t empty_contig = 0;
+
+  size_t free = 0;
+  for (size_t c = 0; c < heap->num_regions(); c++) {
+    ShenandoahHeapRegion* r = heap->get_region(c);
+
+    if (r->is_empty() && (last_idx + 1 == c)) {
+      empty_contig++;
+    } else {
+      empty_contig = 0;
+    }
+
+    free += r->free();
+    max_contig = MAX2(max_contig, empty_contig);
+    last_idx = c;
+  }
+
+  if (free > 0) {
+    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
+  } else {
+    return 1;
+  }
+}
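The same kind of check for EF, reproducing case d): with 100 equal regions, half of them free but never adjacent, the largest free run is a single region (standalone sketch):

    #include <cstdio>

    int main() {
      const int num_regions = 100;
      double total_free = 0.0, max_contig = 0.0, empty_contig = 0.0;
      for (int c = 0; c < num_regions; c++) {
        bool empty = (c % 2 == 1);            // full and empty regions interleave
        empty_contig = empty ? empty_contig + 1.0 : 0.0;
        if (empty) total_free += 1.0;         // one unit of free space per region
        if (empty_contig > max_contig) max_contig = empty_contig;
      }
      // EF = 1 - 1/50 = 0.98: close to 1, although half the heap is free.
      std::printf("EF = %.2f\n", 1.0 - max_contig / total_free);
      return 0;
    }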
+
+ShenandoahMetricsSnapshot::ShenandoahMetricsSnapshot() {
+  _heap = ShenandoahHeap::heap();
+}
+
+void ShenandoahMetricsSnapshot::snap_before() {
+  _used_before = _heap->used();
+  _if_before = ShenandoahMetrics::internal_fragmentation();
+  _ef_before = ShenandoahMetrics::external_fragmentation();
+}
+void ShenandoahMetricsSnapshot::snap_after() {
+  _used_after = _heap->used();
+  _if_after = ShenandoahMetrics::internal_fragmentation();
+  _ef_after = ShenandoahMetrics::external_fragmentation();
+}
+
+void ShenandoahMetricsSnapshot::print() {
+  log_info(gc, ergo)("Used: before: " SIZE_FORMAT "M, after: " SIZE_FORMAT "M", _used_before/M, _used_after/M);
+  log_info(gc, ergo)("Internal frag: before: %.1f%%, after: %.1f%%", _if_before * 100, _if_after * 100);
+  log_info(gc, ergo)("External frag: before: %.1f%%, after: %.1f%%", _ef_before * 100, _ef_after * 100);
+}
+
+bool ShenandoahMetricsSnapshot::is_good_progress(const char *label) {
+  // Under the critical threshold? Declare failure.
+  size_t free_actual   = _heap->free_set()->available();
+  size_t free_expected = _heap->max_capacity() / 100 * ShenandoahCriticalFreeThreshold;
+  if (free_actual < free_expected) {
+    log_info(gc, ergo)("Not enough free space (" SIZE_FORMAT "M, need " SIZE_FORMAT "M) after %s",
+                       free_actual / M, free_expected / M, label);
+    return false;
+  }
+
+  // Freed up enough? Good! Declare victory.
+  size_t progress_actual   = (_used_before > _used_after) ? _used_before - _used_after : 0;
+  size_t progress_expected = ShenandoahHeapRegion::region_size_bytes();
+  if (progress_actual >= progress_expected) {
+    return true;
+  }
+  log_info(gc,ergo)("Not enough progress (" SIZE_FORMAT "M, need " SIZE_FORMAT "M) after %s",
+                    progress_actual / M, progress_expected / M, label);
+
+  // Internal fragmentation is down? Good! Declare victory.
+  double if_actual = _if_before - _if_after;
+  double if_expected = 0.01; // 1% should be enough
+  if (if_actual > if_expected) {
+    return true;
+  }
+  log_info(gc,ergo)("Not enough internal fragmentation improvement (%.1f%%, need %.1f%%) after %s",
+                    if_actual * 100, if_expected * 100, label);
+
+  // External fragmentation is down? Good! Declare victory.
+  double ef_actual = _ef_before - _ef_after;
+  double ef_expected = 0.01; // 1% should be enough
+  if (ef_actual > ef_expected) {
+    return true;
+  }
+  log_info(gc,ergo)("Not enough external fragmentation improvement (%.1f%%, need %.1f%%) after %s",
+                    if_actual * 100, if_expected * 100, label);
+
+  // Nothing good has happened.
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMetrics.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMETRICS_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMETRICS_HPP
+
+#include "gc/shenandoah/shenandoahHeap.hpp"
+
+class ShenandoahMetrics {
+private:
+  ShenandoahMetrics() {}
+
+public:
+  static double internal_fragmentation();
+  static double external_fragmentation();
+};
+
+class ShenandoahMetricsSnapshot : public StackObj {
+private:
+  ShenandoahHeap* _heap;
+  size_t _used_before, _used_after;
+  double _if_before, _if_after;
+  double _ef_before, _ef_after;
+
+public:
+  ShenandoahMetricsSnapshot();
+
+  void snap_before();
+  void snap_after();
+  void print();
+
+  bool is_good_progress(const char *label);
+};
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHMETRICS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/collectorCounters.hpp"
+#include "gc/shared/generationCounters.hpp"
+#include "gc/shared/hSpaceCounters.hpp"
+#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionCounters.hpp"
+#include "memory/metaspaceCounters.hpp"
+#include "services/memoryService.hpp"
+
+class ShenandoahYoungGenerationCounters : public GenerationCounters {
+public:
+  ShenandoahYoungGenerationCounters() :
+          GenerationCounters("Young", 0, 0, 0, (size_t)0, (size_t)0) {};
+
+  virtual void update_all() {
+    // no update
+  }
+};
+
+class ShenandoahGenerationCounters : public GenerationCounters {
+private:
+  ShenandoahHeap* _heap;
+public:
+  ShenandoahGenerationCounters(ShenandoahHeap* heap) :
+          GenerationCounters("Heap", 1, 1, heap->initial_capacity(), heap->max_capacity(), heap->committed()),
+          _heap(heap)
+  {};
+
+  virtual void update_all() {
+    _current_size->set_value(_heap->committed());
+  }
+};
+
+ShenandoahMonitoringSupport::ShenandoahMonitoringSupport(ShenandoahHeap* heap) :
+        _partial_counters(NULL),
+        _full_counters(NULL)
+{
+  // Collection counters do not fit Shenandoah very well.
+  // We record partial cycles as "young", and full cycles (including full STW GC) as "old".
+  _partial_counters  = new CollectorCounters("Shenandoah partial", 0);
+  _full_counters     = new CollectorCounters("Shenandoah full",    1);
+
+  // We report young gen as unused.
+  _young_counters = new ShenandoahYoungGenerationCounters();
+  _heap_counters  = new ShenandoahGenerationCounters(heap);
+  _space_counters = new HSpaceCounters(_heap_counters->name_space(), "Heap", 0, heap->max_capacity(), heap->initial_capacity());
+
+  _heap_region_counters = new ShenandoahHeapRegionCounters();
+}
+
+CollectorCounters* ShenandoahMonitoringSupport::stw_collection_counters() {
+  return _full_counters;
+}
+
+CollectorCounters* ShenandoahMonitoringSupport::full_stw_collection_counters() {
+  return _full_counters;
+}
+
+CollectorCounters* ShenandoahMonitoringSupport::concurrent_collection_counters() {
+  return _full_counters;
+}
+
+CollectorCounters* ShenandoahMonitoringSupport::partial_collection_counters() {
+  return _partial_counters;
+}
+
+void ShenandoahMonitoringSupport::update_counters() {
+  MemoryService::track_memory_usage();
+
+  if (UsePerfData) {
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    size_t used = heap->used();
+    size_t capacity = heap->capacity();
+    _heap_counters->update_all();
+    _space_counters->update_all(capacity, used);
+    _heap_region_counters->update();
+
+    MetaspaceCounters::update_performance_counters();
+    CompressedClassSpaceCounters::update_performance_counters();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMonitoringSupport.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMONITORINGSUPPORT_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMONITORINGSUPPORT_HPP
+
+#include "memory/allocation.hpp"
+
+class GenerationCounters;
+class HSpaceCounters;
+class ShenandoahHeap;
+class CollectorCounters;
+class ShenandoahHeapRegionCounters;
+
+class ShenandoahMonitoringSupport : public CHeapObj<mtGC> {
+private:
+  CollectorCounters*   _partial_counters;
+  CollectorCounters*   _full_counters;
+
+  GenerationCounters* _young_counters;
+  GenerationCounters* _heap_counters;
+
+  HSpaceCounters* _space_counters;
+
+  ShenandoahHeapRegionCounters* _heap_region_counters;
+
+public:
+ ShenandoahMonitoringSupport(ShenandoahHeap* heap);
+ CollectorCounters* stw_collection_counters();
+ CollectorCounters* full_stw_collection_counters();
+ CollectorCounters* concurrent_collection_counters();
+ CollectorCounters* partial_collection_counters();
+ void update_counters();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMONITORINGSUPPORT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahNumberSeq.hpp"
+#include "runtime/atomic.hpp"
+
+HdrSeq::HdrSeq() {
+  _hdr = NEW_C_HEAP_ARRAY(int*, MagBuckets, mtInternal);
+  for (int c = 0; c < MagBuckets; c++) {
+    _hdr[c] = NULL;
+  }
+}
+
+HdrSeq::~HdrSeq() {
+  for (int c = 0; c < MagBuckets; c++) {
+    int* sub = _hdr[c];
+    if (sub != NULL) {
+      FREE_C_HEAP_ARRAY(int, sub);
+    }
+  }
+  FREE_C_HEAP_ARRAY(int*, _hdr);
+}
+
+void HdrSeq::add(double val) {
+  if (val < 0) {
+    assert (false, "value (%8.2f) must not be negative", val);
+    val = 0;
+  }
+
+  NumberSeq::add(val);
+
+  double v = val;
+  int mag;
+  if (v > 0) {
+    mag = 0;
+    while (v > 1) {
+      mag++;
+      v /= 10;
+    }
+    while (v < 0.1) {
+      mag--;
+      v *= 10;
+    }
+  } else {
+    mag = MagMinimum;
+  }
+
+  int bucket = -MagMinimum + mag;
+  int sub_bucket = (int) (v * ValBuckets);
+
+  // Defensively saturate for product bits:
+  if (bucket < 0) {
+    assert (false, "bucket index (%d) underflow for value (%8.2f)", bucket, val);
+    bucket = 0;
+  }
+
+  if (bucket >= MagBuckets) {
+    assert (false, "bucket index (%d) overflow for value (%8.2f)", bucket, val);
+    bucket = MagBuckets - 1;
+  }
+
+  if (sub_bucket < 0) {
+    assert (false, "sub-bucket index (%d) underflow for value (%8.2f)", sub_bucket, val);
+    sub_bucket = 0;
+  }
+
+  if (sub_bucket >= ValBuckets) {
+    assert (false, "sub-bucket index (%d) overflow for value (%8.2f)", sub_bucket, val);
+    sub_bucket = ValBuckets - 1;
+  }
+
+  int* b = _hdr[bucket];
+  if (b == NULL) {
+    b = NEW_C_HEAP_ARRAY(int, ValBuckets, mtInternal);
+    for (int c = 0; c < ValBuckets; c++) {
+      b[c] = 0;
+    }
+    _hdr[bucket] = b;
+  }
+  b[sub_bucket]++;
+}
+
+double HdrSeq::percentile(double level) const {
+  // target should be non-zero to find the first sample
+  int target = MAX2(1, (int) (level * num() / 100));
+  int cnt = 0;
+  for (int mag = 0; mag < MagBuckets; mag++) {
+    if (_hdr[mag] != NULL) {
+      for (int val = 0; val < ValBuckets; val++) {
+        cnt += _hdr[mag][val];
+        if (cnt >= target) {
+          return pow(10.0, MagMinimum + mag) * val / ValBuckets;
+        }
+      }
+    }
+  }
+  return maximum();
+}
+
+BinaryMagnitudeSeq::BinaryMagnitudeSeq() {
+  _mags = NEW_C_HEAP_ARRAY(size_t, BitsPerSize_t, mtInternal);
+  for (int c = 0; c < BitsPerSize_t; c++) {
+    _mags[c] = 0;
+  }
+  _sum = 0;
+}
+
+BinaryMagnitudeSeq::~BinaryMagnitudeSeq() {
+  FREE_C_HEAP_ARRAY(size_t, _mags);
+}
+
+void BinaryMagnitudeSeq::add(size_t val) {
+  Atomic::add(val, &_sum);
+
+  int mag = log2_intptr(val) + 1;
+
+  // Defensively saturate for product bits:
+  if (mag < 0) {
+    assert (false, "bucket index (%d) underflow for value (" SIZE_FORMAT ")", mag, val);
+    mag = 0;
+  }
+
+  if (mag >= BitsPerSize_t) {
+    assert (false, "bucket index (%d) overflow for value (" SIZE_FORMAT ")", mag, val);
+    mag = BitsPerSize_t - 1;
+  }
+
+  Atomic::add((size_t)1, &_mags[mag]);
+}
+
+size_t BinaryMagnitudeSeq::level(int level) const {
+  if (0 <= level && level < BitsPerSize_t) {
+    return _mags[level];
+  } else {
+    return 0;
+  }
+}
+
+size_t BinaryMagnitudeSeq::num() const {
+  size_t r = 0;
+  for (int c = 0; c < BitsPerSize_t; c++) {
+    r += _mags[c];
+  }
+  return r;
+}
+
+size_t BinaryMagnitudeSeq::sum() const {
+  return _sum;
+}
+
+int BinaryMagnitudeSeq::min_level() const {
+  for (int c = 0; c < BitsPerSize_t; c++) {
+    if (_mags[c] != 0) {
+      return c;
+    }
+  }
+  return BitsPerSize_t - 1;
+}
+
+int BinaryMagnitudeSeq::max_level() const {
+  for (int c = BitsPerSize_t - 1; c > 0; c--) {
+    if (_mags[c] != 0) {
+      return c;
+    }
+  }
+  return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP
+
+#include "utilities/numberSeq.hpp"
+
+// HDR sequence stores low-resolution, high-dynamic-range values.
+// It does so by maintaining a two-level array, where the first level selects
+// the magnitude of the value being stored, and the second level maintains
+// a low-resolution histogram within that magnitude. For example, storing
+// 4.352819 * 10^3 conceptually increments the bucket _hdr[3][435]. This allows
+// memory-efficient storage of huge amounts of samples.
+//
+// Accepts positive numbers only.
+class HdrSeq: public NumberSeq {
+private:
+  enum PrivateConstants {
+    ValBuckets = 512,
+    MagBuckets = 24,
+    MagMinimum = -12,
+  };
+  int** _hdr;
+
+public:
+  HdrSeq();
+  ~HdrSeq();
+
+  virtual void add(double val);
+  double percentile(double level) const;
+};
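Tracing add() with the actual constants makes the conceptual _hdr[3][435] from the comment concrete: 4.352819 * 10^3 normalizes to 0.4352819 at magnitude 4, and after biasing by -MagMinimum and scaling by ValBuckets it lands in _hdr[16][222]. A standalone sketch of that normalization:

    #include <cstdio>

    int main() {
      const int ValBuckets = 512, MagMinimum = -12;  // constants from HdrSeq
      double v = 4352.819;
      int mag = 0;
      while (v > 1)   { mag++; v /= 10; }            // ends with v in (0.1, 1]
      while (v < 0.1) { mag--; v *= 10; }
      int bucket     = -MagMinimum + mag;            // 16
      int sub_bucket = (int)(v * ValBuckets);        // 222
      std::printf("_hdr[%d][%d]\n", bucket, sub_bucket);
      return 0;
    }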
+
+// Binary magnitude sequence stores a power-of-two histogram.
+// It has very low memory requirements, and is thread-safe. When accuracy
+// is not needed, it is preferred over HdrSeq.
+class BinaryMagnitudeSeq {
+private:
+  size_t  _sum;
+  size_t* _mags;
+
+public:
+  BinaryMagnitudeSeq();
+  ~BinaryMagnitudeSeq();
+
+  void add(size_t val);
+  size_t num() const;
+  size_t level(int level) const;
+  size_t sum() const;
+  int min_level() const;
+  int max_level() const;
+};
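For BinaryMagnitudeSeq the bucketing reduces to floor(log2(val)) + 1, assuming log2_intptr rounds down (a sketch, not the HotSpot helper):

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t val = 96;                       // 2^6 <= 96 < 2^7
      int mag = 0;
      while ((val >> mag) > 1) mag++;        // floor(log2(val)) == 6
      std::printf("bucket %d\n", mag + 1);   // 7, as in BinaryMagnitudeSeq::add
      return 0;
    }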
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP
+
+#include "gc/shared/referenceProcessor.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahStrDedupQueue.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "memory/iterator.hpp"
+#include "runtime/thread.hpp"
+
+enum UpdateRefsMode {
+  NONE,       // No reference updating
+  RESOLVE,    // Only a read-barrier (no reference updating)
+  SIMPLE,     // Reference updating using simple store
+  CONCURRENT  // Reference updating using CAS
+};
+
+enum StringDedupMode {
+  NO_DEDUP,      // Do not do anything for String deduplication
+  ENQUEUE_DEDUP, // Enqueue candidate Strings for deduplication
+};
+
+class ShenandoahMarkRefsSuperClosure : public MetadataVisitingOopIterateClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  ShenandoahHeap* _heap;
+  ShenandoahMarkingContext* const _mark_context;
+
+protected:
+  template <class T, UpdateRefsMode UPDATE_MODE, StringDedupMode STRING_DEDUP>
+  void work(T *p);
+
+public:
+  ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp);
+};
+
+class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, CONCURRENT, NO_DEDUP>(p); }
+
+public:
+  ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahMarkUpdateRefsDedupClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, CONCURRENT, ENQUEUE_DEDUP>(p); }
+
+public:
+  ShenandoahMarkUpdateRefsDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+          ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahMarkUpdateRefsMetadataClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, CONCURRENT, NO_DEDUP>(p); }
+
+public:
+  ShenandoahMarkUpdateRefsMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahMarkUpdateRefsMetadataDedupClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, CONCURRENT, ENQUEUE_DEDUP>(p); }
+
+public:
+  ShenandoahMarkUpdateRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahMarkRefsClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, NONE, NO_DEDUP>(p); }
+
+public:
+  ShenandoahMarkRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahMarkRefsDedupClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, NONE, ENQUEUE_DEDUP>(p); }
+
+public:
+  ShenandoahMarkRefsDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahMarkResolveRefsClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, RESOLVE, NO_DEDUP>(p); }
+
+public:
+  ShenandoahMarkResolveRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahMarkRefsMetadataClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, NONE, NO_DEDUP>(p); }
+
+public:
+  ShenandoahMarkRefsMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahMarkRefsMetadataDedupClosure : public ShenandoahMarkRefsSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, NONE, ENQUEUE_DEDUP>(p); }
+
+public:
+  ShenandoahMarkRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahMarkRefsSuperClosure(q, rp) {};
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahUpdateHeapRefsSuperClosure : public BasicOopIterateClosure {
+private:
+  ShenandoahHeap* _heap;
+public:
+  ShenandoahUpdateHeapRefsSuperClosure() :
+    _heap(ShenandoahHeap::heap()) {}
+
+  template <class T>
+  void work(T *p);
+};
+
+class ShenandoahUpdateHeapRefsClosure : public ShenandoahUpdateHeapRefsSuperClosure {
+private:
+  template <class T>
+  inline  void do_oop_work(T* p)    { work<T>(p); }
+
+public:
+  ShenandoahUpdateHeapRefsClosure() : ShenandoahUpdateHeapRefsSuperClosure() {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahTraversalSuperClosure : public MetadataVisitingOopIterateClosure {
+private:
+  ShenandoahTraversalGC* const _traversal_gc;
+  Thread* const _thread;
+  ShenandoahObjToScanQueue* const _queue;
+  ShenandoahMarkingContext* const _mark_context;
+protected:
+  ShenandoahTraversalSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    MetadataVisitingOopIterateClosure(rp),
+    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
+    _thread(Thread::current()),
+    _queue(q),
+    _mark_context(ShenandoahHeap::heap()->marking_context()) {
+  }
+
+  template <class T, bool STRING_DEDUP, bool DEGEN>
+  void work(T* p);
+
+};
+
+class ShenandoahTraversalClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, false, false>(p); }
+
+public:
+  ShenandoahTraversalClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahTraversalMetadataClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, false, false>(p); }
+
+public:
+  ShenandoahTraversalMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahTraversalDedupClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, true, false>(p); }
+
+public:
+  ShenandoahTraversalDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahTraversalMetadataDedupClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, true, false>(p); }
+
+public:
+  ShenandoahTraversalMetadataDedupClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahTraversalDegenClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, false, true>(p); }
+
+public:
+  ShenandoahTraversalDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahTraversalMetadataDegenClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, false, true>(p); }
+
+public:
+  ShenandoahTraversalMetadataDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata()        { return true; }
+};
+
+class ShenandoahTraversalDedupDegenClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, true, true>(p); }
+
+public:
+  ShenandoahTraversalDedupDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata()        { return false; }
+};
+
+class ShenandoahTraversalMetadataDedupDegenClosure : public ShenandoahTraversalSuperClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p)     { work<T, true, true>(p); }
+
+public:
+  ShenandoahTraversalMetadataDedupDegenClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
+    ShenandoahTraversalSuperClosure(q, rp) {}
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  virtual bool do_metadata()        { return true; }
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.inline.hpp"
+
+template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
+inline void ShenandoahMarkRefsSuperClosure::work(T *p) {
+  ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context);
+}
+
+template <class T>
+inline void ShenandoahUpdateHeapRefsSuperClosure::work(T* p) {
+  _heap->maybe_update_with_forwarded(p);
+}
+
+template <class T, bool STRING_DEDUP, bool DEGEN>
+inline void ShenandoahTraversalSuperClosure::work(T* p) {
+  _traversal_gc->process_oop<T, STRING_DEDUP, DEGEN>(p, _thread, _queue, _mark_context);
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahPacer.hpp"
+
+/*
+ * In a normal concurrent cycle, we have to pace the application to let GC finish.
+ *
+ * Here, we do not know how large the collection set will be, or what the relative
+ * performance of each stage in the concurrent cycle is, and so we have to make
+ * some assumptions.
+ *
+ * For concurrent mark, there is no clear notion of progress. The moderately accurate
+ * and easy-to-get metric is the amount of live objects the mark has encountered. But
+ * that does not directly correlate with the used heap, because the heap might be fully
+ * dead or fully alive. We cannot assume either extreme: if we assume the heap is fully
+ * dead when it is not, we would let the application run out of memory; conversely, if
+ * we assume the heap is fully alive when it is not, we would pacify the application
+ * excessively. So we need to guesstimate the expected value for heap liveness, and the
+ * best way to do that is apparently to record the past history.
+ *
+ * For concurrent evac and update-refs, we walk the heap per-region, and so the notion
+ * of progress is clear: processed regions report their "used" size, and we take the
+ * global heap-used as the baseline.
+ *
+ * The allocatable space when GC is running is "free" at the start of the cycle, but the
+ * accounted budget is based on "used". So, we need to adjust the tax knowing that.
+ * Also, since we effectively count the used space three times (mark, evac, update-refs),
+ * we need to multiply the tax by 3. Example: for 10 MB free and 90 MB used, GC would
+ * come back with a 3*90 MB budget, and thus for each 1 MB of allocation, we have to pay
+ * 3*90 / 10 MB. In the end, we would pay back the entire budget.
+ */
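
To make the arithmetic above concrete, here is a minimal standalone sketch (outside the patch) that reproduces the example from the comment; the 10 MB / 90 MB figures are the hypothetical numbers quoted there, not measured values:

    #include <cstdio>

    int main() {
      double free_mb = 10.0;             // allocatable space at cycle start
      double used_mb = 90.0;             // space the GC has to process
      double budget  = 3 * used_mb;      // "used" is paid for in mark, evac, and update-refs
      double tax     = budget / free_mb; // GC work paid per 1 MB of allocation
      printf("Alloc tax rate: %.1fx\n", tax);  // prints 27.0
      // Allocating all 10 MB at 27x pays back 270 MB, i.e. the entire 3*90 MB budget.
      return 0;
    }
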
+
+void ShenandoahPacer::setup_for_mark() {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+
+  size_t live = update_and_get_progress_history();
+  size_t free = _heap->free_set()->available();
+
+  size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
+  size_t taxable = free - non_taxable;
+
+  double tax = 1.0 * live / taxable; // base tax for available free space
+  tax *= 3;                          // mark is phase 1 of 3, claim 1/3 of free for it
+  tax *= ShenandoahPacingSurcharge;  // additional surcharge to help unclutter heap
+
+  restart_with(non_taxable, tax);
+
+  log_info(gc, ergo)("Pacer for Mark. Expected Live: " SIZE_FORMAT "M, Free: " SIZE_FORMAT
+                     "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx",
+                     live / M, free / M, non_taxable / M, tax);
+}
+
+void ShenandoahPacer::setup_for_evac() {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+
+  size_t used = _heap->collection_set()->used();
+  size_t free = _heap->free_set()->available();
+
+  size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
+  size_t taxable = free - non_taxable;
+
+  double tax = 1.0 * used / taxable; // base tax for available free space
+  tax *= 2;                          // evac is phase 2 of 3, claim 1/2 of remaining free
+  tax = MAX2<double>(1, tax);        // never allocate more than GC processes during the phase
+  tax *= ShenandoahPacingSurcharge;  // additional surcharge to help unclutter heap
+
+  restart_with(non_taxable, tax);
+
+  log_info(gc, ergo)("Pacer for Evacuation. Used CSet: " SIZE_FORMAT "M, Free: " SIZE_FORMAT
+                     "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx",
+                     used / M, free / M, non_taxable / M, tax);
+}
+
+void ShenandoahPacer::setup_for_updaterefs() {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+
+  size_t used = _heap->used();
+  size_t free = _heap->free_set()->available();
+
+  size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
+  size_t taxable = free - non_taxable;
+
+  double tax = 1.0 * used / taxable; // base tax for available free space
+  tax *= 1;                          // update-refs is phase 3 of 3, claim the remaining free
+  tax = MAX2<double>(1, tax);        // never allocate more than GC processes during the phase
+  tax *= ShenandoahPacingSurcharge;  // additional surcharge to help unclutter heap
+
+  restart_with(non_taxable, tax);
+
+  log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "M, Free: " SIZE_FORMAT
+                     "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx",
+                     used / M, free / M, non_taxable / M, tax);
+}
+
+/*
+ * Traversal walks the entire heap once, and therefore we have to make assumptions about its
+ * liveness, like concurrent mark does.
+ */
+
+void ShenandoahPacer::setup_for_traversal() {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+
+  size_t live = update_and_get_progress_history();
+  size_t free = _heap->free_set()->available();
+
+  size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
+  size_t taxable = free - non_taxable;
+
+  double tax = 1.0 * live / taxable; // base tax for available free space
+  tax *= ShenandoahPacingSurcharge;  // additional surcharge to help unclutter heap
+
+  restart_with(non_taxable, tax);
+
+  log_info(gc, ergo)("Pacer for Traversal. Expected Live: " SIZE_FORMAT "M, Free: " SIZE_FORMAT
+                     "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx",
+                     live / M, free / M, non_taxable / M, tax);
+}
+
+/*
+ * In the idle phase, we have to pace the application to let the control thread react and start GC.
+ *
+ * Here, we rendezvous with the concurrent thread, which adds up the budget as it acknowledges
+ * that it has seen recent allocations. This naturally paces the allocations if the control thread
+ * is not catching up. To bootstrap this feedback cycle, we need to start with some initial budget
+ * for the application to allocate against.
+ */
+
+void ShenandoahPacer::setup_for_idle() {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+
+  size_t initial = _heap->capacity() * ShenandoahPacingIdleSlack / 100;
+  double tax = 1;
+
+  restart_with(initial, tax);
+
+  log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx",
+                     initial / M, tax);
+}
+
+size_t ShenandoahPacer::update_and_get_progress_history() {
+  if (_progress == PACING_PROGRESS_UNINIT) {
+    // First initialization, report some prior
+    Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
+    return (size_t) (_heap->capacity() * 0.1);
+  } else {
+    // Record history, and reply with the historical average
+    _progress_history->add(_progress);
+    Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
+    return (size_t) (_progress_history->avg() * HeapWordSize);
+  }
+}
+
+void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
+  size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
+  STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
+  Atomic::xchg((intptr_t)initial, &_budget);
+  Atomic::store(tax_rate, &_tax_rate);
+  Atomic::inc(&_epoch);
+}
+
+bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+
+  intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));
+
+  intptr_t cur = 0;
+  intptr_t new_val = 0;
+  do {
+    cur = Atomic::load(&_budget);
+    if (cur < tax && !force) {
+      // Progress depleted, alas.
+      return false;
+    }
+    new_val = cur - tax;
+  } while (Atomic::cmpxchg(new_val, &_budget, cur) != cur);
+  return true;
+}
+
+void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+
+  if (_epoch != epoch) {
+    // Stale ticket, no need to unpace.
+    return;
+  }
+
+  intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));
+  Atomic::add(tax, &_budget);
+}
+
+intptr_t ShenandoahPacer::epoch() {
+  return Atomic::load(&_epoch);
+}
+
+void ShenandoahPacer::pace_for_alloc(size_t words) {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+
+  // Fast path: try to allocate right away
+  if (claim_for_alloc(words, false)) {
+    return;
+  }
+
+  size_t max = ShenandoahPacingMaxDelay;
+  double start = os::elapsedTime();
+
+  size_t total = 0;
+  size_t cur = 0;
+
+  while (true) {
+    // We could instead assist GC, but this would suffice for now.
+    // This code should also participate in safepointing.
+    // Perform the exponential backoff, limited by max.
+
+    cur = cur * 2;
+    if (total + cur > max) {
+      cur = (max > total) ? (max - total) : 0;
+    }
+    cur = MAX2<size_t>(1, cur);
+
+    os::sleep(Thread::current(), cur, true);
+
+    double end = os::elapsedTime();
+    total = (size_t)((end - start) * 1000);
+
+    if (total > max) {
+      // Spent local time budget to wait for enough GC progress.
+      // Breaking out and allocating anyway, which may mean we outpace GC,
+      // and start Degenerated GC cycle.
+      _delays.add(total);
+
+      // Forcefully claim the budget: it may go negative at this point, and
+      // GC should replenish for this and subsequent allocations
+      claim_for_alloc(words, true);
+      break;
+    }
+
+    if (claim_for_alloc(words, false)) {
+      // Acquired enough permit, nice. Can allocate now.
+      _delays.add(total);
+      break;
+    }
+  }
+}
+
+void ShenandoahPacer::print_on(outputStream* out) const {
+  out->print_cr("ALLOCATION PACING:");
+  out->cr();
+
+  out->print_cr("Max pacing delay is set for " UINTX_FORMAT " ms.", ShenandoahPacingMaxDelay);
+  out->cr();
+
+  out->print_cr("Higher delay would prevent application outpacing the GC, but it will hide the GC latencies");
+  out->print_cr("from the STW pause times. Pacing affects the individual threads, and so it would also be");
+  out->print_cr("invisible to the usual profiling tools, but would add up to end-to-end application latency.");
+  out->print_cr("Raise max pacing delay with care.");
+  out->cr();
+
+  out->print_cr("Actual pacing delays histogram:");
+  out->cr();
+
+  out->print_cr("%10s - %10s  %12s%12s", "From", "To", "Count", "Sum");
+
+  size_t total_count = 0;
+  size_t total_sum = 0;
+  for (int c = _delays.min_level(); c <= _delays.max_level(); c++) {
+    int l = (c == 0) ? 0 : 1 << (c - 1);
+    int r = 1 << c;
+    size_t count = _delays.level(c);
+    size_t sum   = count * (r - l) / 2;
+    total_count += count;
+    total_sum   += sum;
+
+    out->print_cr("%7d ms - %7d ms: " SIZE_FORMAT_W(12) SIZE_FORMAT_W(12) " ms", l, r, count, sum);
+  }
+  out->print_cr("%23s: " SIZE_FORMAT_W(12) SIZE_FORMAT_W(12) " ms", "Total", total_count, total_sum);
+  out->cr();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_HPP
+
+#include "gc/shenandoah/shenandoahNumberSeq.hpp"
+#include "memory/allocation.hpp"
+
+class ShenandoahHeap;
+
+#define PACING_PROGRESS_UNINIT (-1)
+#define PACING_PROGRESS_ZERO   ( 0)
+
+/**
+ * ShenandoahPacer provides the allocation pacing mechanism.
+ *
+ * Currently it implements a simple tax-and-spend pacing policy: GC threads provide
+ * the credit, allocating threads spend the credit, or stall when no credit is available.
+ */
+class ShenandoahPacer : public CHeapObj<mtGC> {
+private:
+  ShenandoahHeap* _heap;
+  BinaryMagnitudeSeq _delays;
+  TruncatedSeq* _progress_history;
+
+  // Set once per phase
+  volatile intptr_t _epoch;
+  volatile double _tax_rate;
+
+  // Heavily updated, protect from accidental false sharing
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile intptr_t));
+  volatile intptr_t _budget;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  // Heavily updated, protect from accidental false sharing
+  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile intptr_t));
+  volatile intptr_t _progress;
+  DEFINE_PAD_MINUS_SIZE(3, DEFAULT_CACHE_LINE_SIZE, 0);
+
+public:
+  ShenandoahPacer(ShenandoahHeap* heap) :
+          _heap(heap),
+          _progress_history(new TruncatedSeq(5)),
+          _epoch(0),
+          _tax_rate(1),
+          _budget(0),
+          _progress(PACING_PROGRESS_UNINIT) {}
+
+  void setup_for_idle();
+  void setup_for_mark();
+  void setup_for_evac();
+  void setup_for_updaterefs();
+  void setup_for_traversal();
+
+  inline void report_mark(size_t words);
+  inline void report_evac(size_t words);
+  inline void report_updaterefs(size_t words);
+
+  inline void report_alloc(size_t words);
+
+  bool claim_for_alloc(size_t words, bool force);
+  void pace_for_alloc(size_t words);
+  void unpace_for_alloc(intptr_t epoch, size_t words);
+
+  intptr_t epoch();
+
+  void print_on(outputStream* out) const;
+
+private:
+  inline void report_internal(size_t words);
+  inline void report_progress_internal(size_t words);
+
+  void restart_with(size_t non_taxable_bytes, double tax_rate);
+
+  size_t update_and_get_progress_history();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_HPP
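
The header above only declares the pacing entry points; the intended call pattern pairs pace_for_alloc() on the allocation path with unpace_for_alloc() when the allocation subsequently fails. A hedged sketch follows, assuming a hypothetical allocate_in_heap() helper that stands in for the real allocation routine:

    // Sketch only: allocate_in_heap() is a hypothetical stand-in for the heap's
    // actual allocation path; the pacer calls below are the ones declared above.
    HeapWord* paced_allocate(ShenandoahPacer* pacer, size_t words) {
      intptr_t ticket = pacer->epoch();  // remember which pacing epoch we paid in
      pacer->pace_for_alloc(words);      // stall until the budget permits (or force through)
      HeapWord* mem = allocate_in_heap(words);
      if (mem == NULL) {
        // Paid the tax but got no memory: return the credit, unless the epoch
        // changed in the meantime and the ticket went stale.
        pacer->unpace_for_alloc(ticket, words);
      }
      return mem;
    }
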
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahPacer.hpp"
+#include "runtime/atomic.hpp"
+
+inline void ShenandoahPacer::report_mark(size_t words) {
+  report_internal(words);
+  report_progress_internal(words);
+}
+
+inline void ShenandoahPacer::report_evac(size_t words) {
+  report_internal(words);
+}
+
+inline void ShenandoahPacer::report_updaterefs(size_t words) {
+  report_internal(words);
+}
+
+inline void ShenandoahPacer::report_alloc(size_t words) {
+  report_internal(words);
+}
+
+inline void ShenandoahPacer::report_internal(size_t words) {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+  STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
+  Atomic::add((intptr_t)words, &_budget);
+}
+
+inline void ShenandoahPacer::report_progress_internal(size_t words) {
+  assert(ShenandoahPacing, "Only be here when pacing is enabled");
+  STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
+  Atomic::add((intptr_t)words, &_progress);
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/workerDataArray.inline.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "utilities/ostream.hpp"
+
+#define GC_PHASE_DECLARE_NAME(type, title) \
+  title,
+
+const char* ShenandoahPhaseTimings::_phase_names[] = {
+  SHENANDOAH_GC_PHASE_DO(GC_PHASE_DECLARE_NAME)
+};
+
+#undef GC_PHASE_DECLARE_NAME
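
This is the X-macro pattern: SHENANDOAH_GC_PHASE_DO takes a macro f and applies it to every (enum, title) pair, so one list drives both the Phase enum in the header and the name table here, keeping the two in lockstep by construction. A self-contained miniature of the same pattern, with made-up phase names:

    #include <cstdio>

    #define PHASE_DO(f)            \
      f(init_mark,  "Init Mark")   \
      f(final_mark, "Final Mark")

    #define DECLARE_ENUM(type, title) type,
    enum Phase { PHASE_DO(DECLARE_ENUM) NumPhases };
    #undef DECLARE_ENUM

    #define DECLARE_NAME(type, title) title,
    static const char* phase_names[] = { PHASE_DO(DECLARE_NAME) };
    #undef DECLARE_NAME

    int main() {
      for (int i = 0; i < NumPhases; i++) {
        printf("%d: %s\n", i, phase_names[i]);  // 0: Init Mark, 1: Final Mark
      }
      return 0;
    }
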
+
+ShenandoahPhaseTimings::ShenandoahPhaseTimings() : _policy(NULL) {
+  uint max_workers = MAX2(ConcGCThreads, ParallelGCThreads);
+  _worker_times = new ShenandoahWorkerTimings(max_workers);
+  _termination_times = new ShenandoahTerminationTimings(max_workers);
+  _policy = ShenandoahHeap::heap()->shenandoah_policy();
+  assert(_policy != NULL, "Can not be NULL");
+}
+
+void ShenandoahPhaseTimings::record_phase_start(Phase phase) {
+  _timing_data[phase]._start = os::elapsedTime();
+}
+
+void ShenandoahPhaseTimings::record_phase_end(Phase phase) {
+  assert(_policy != NULL, "Not yet initialized");
+  double end = os::elapsedTime();
+  double elapsed = end - _timing_data[phase]._start;
+  if (!_policy->is_at_shutdown()) {
+    _timing_data[phase]._secs.add(elapsed);
+  }
+  ShenandoahHeap::heap()->heuristics()->record_phase_time(phase, elapsed);
+}
+
+void ShenandoahPhaseTimings::record_phase_time(Phase phase, double time) {
+  assert(_policy != NULL, "Not yet initialized");
+  if (!_policy->is_at_shutdown()) {
+    _timing_data[phase]._secs.add(time);
+  }
+}
+
+void ShenandoahPhaseTimings::record_workers_start(Phase phase) {
+  for (uint i = 0; i < GCParPhasesSentinel; i++) {
+    _worker_times->reset(i);
+  }
+}
+
+void ShenandoahPhaseTimings::record_workers_end(Phase phase) {
+  if (_policy->is_at_shutdown()) {
+    // Do not record the past-shutdown events
+    return;
+  }
+
+  guarantee(phase == init_evac ||
+            phase == scan_roots ||
+            phase == update_roots ||
+            phase == init_traversal_gc_work ||
+            phase == final_traversal_gc_work ||
+            phase == final_traversal_update_roots ||
+            phase == final_update_refs_roots ||
+            phase == full_gc_roots ||
+            phase == degen_gc_update_roots ||
+            phase == _num_phases,
+            "only in these phases we can add per-thread phase times");
+  if (phase != _num_phases) {
+    // Merge the per-worker phase averages into the counters right below the given phase.
+    for (uint i = 0; i < GCParPhasesSentinel; i++) {
+      double t = _worker_times->average(i);
+      _timing_data[phase + i + 1]._secs.add(t);
+    }
+  }
+}
+
+void ShenandoahPhaseTimings::print_on(outputStream* out) const {
+  out->cr();
+  out->print_cr("GC STATISTICS:");
+  out->print_cr("  \"(G)\" (gross) pauses include VM time: time to notify and block threads, do the pre-");
+  out->print_cr("        and post-safepoint housekeeping. Use -XX:+PrintSafepointStatistics to dissect.");
+  out->print_cr("  \"(N)\" (net) pauses are the times spent in the actual GC code.");
+  out->print_cr("  \"a\" is average time for each phase, look at levels to see if average makes sense.");
+  out->print_cr("  \"lvls\" are quantiles: 0%% (minimum), 25%%, 50%% (median), 75%%, 100%% (maximum).");
+  out->cr();
+
+  for (uint i = 0; i < _num_phases; i++) {
+    if (_timing_data[i]._secs.maximum() != 0) {
+      print_summary_sd(out, _phase_names[i], &(_timing_data[i]._secs));
+    }
+  }
+}
+
+void ShenandoahPhaseTimings::print_summary_sd(outputStream* out, const char* str, const HdrSeq* seq) const {
+  out->print_cr("%-27s = %8.2lf s (a = %8.0lf us) (n = " INT32_FORMAT_W(5) ") (lvls, us = %8.0lf, %8.0lf, %8.0lf, %8.0lf, %8.0lf)",
+          str,
+          seq->sum(),
+          seq->avg() * 1000000.0,
+          seq->num(),
+          seq->percentile(0)  * 1000000.0,
+          seq->percentile(25) * 1000000.0,
+          seq->percentile(50) * 1000000.0,
+          seq->percentile(75) * 1000000.0,
+          seq->maximum() * 1000000.0
+  );
+}
+
+ShenandoahWorkerTimings::ShenandoahWorkerTimings(uint max_gc_threads) :
+        _max_gc_threads(max_gc_threads)
+{
+  assert(max_gc_threads > 0, "Must have some GC threads");
+
+#define GC_PAR_PHASE_DECLARE_WORKER_DATA(type, title) \
+  _gc_par_phases[ShenandoahPhaseTimings::type] = new WorkerDataArray<double>(max_gc_threads, title);
+  // Root scanning phases
+  SHENANDOAH_GC_PAR_PHASE_DO(GC_PAR_PHASE_DECLARE_WORKER_DATA)
+#undef GC_PAR_PHASE_DECLARE_WORKER_DATA
+}
+
+// record the time a phase took in seconds
+void ShenandoahWorkerTimings::record_time_secs(ShenandoahPhaseTimings::GCParPhases phase, uint worker_i, double secs) {
+  _gc_par_phases[phase]->set(worker_i, secs);
+}
+
+double ShenandoahWorkerTimings::average(uint i) const {
+  return _gc_par_phases[i]->average();
+}
+
+void ShenandoahWorkerTimings::reset(uint i) {
+  _gc_par_phases[i]->reset();
+}
+
+void ShenandoahWorkerTimings::print() const {
+  for (uint i = 0; i < ShenandoahPhaseTimings::GCParPhasesSentinel; i++) {
+    _gc_par_phases[i]->print_summary_on(tty);
+  }
+}
+
+
+ShenandoahTerminationTimings::ShenandoahTerminationTimings(uint max_gc_threads) {
+  _gc_termination_phase = new WorkerDataArray<double>(max_gc_threads, "Task Termination (ms):");
+}
+
+void ShenandoahTerminationTimings::record_time_secs(uint worker_id, double secs) {
+  if (_gc_termination_phase->get(worker_id) == WorkerDataArray<double>::uninitialized()) {
+    _gc_termination_phase->set(worker_id, secs);
+  } else {
+    // worker may re-enter termination phase
+    _gc_termination_phase->add(worker_id, secs);
+  }
+}
+
+void ShenandoahTerminationTimings::print() const {
+  _gc_termination_phase->print_summary_on(tty);
+}
+
+double ShenandoahTerminationTimings::average() const {
+  return _gc_termination_phase->average();
+}
+
+void ShenandoahTerminationTimings::reset() {
+  _gc_termination_phase->reset();
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPHASETIMINGS_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPHASETIMINGS_HPP
+
+#include "gc/shenandoah/shenandoahNumberSeq.hpp"
+#include "gc/shared/workerDataArray.hpp"
+#include "memory/allocation.hpp"
+
+class ShenandoahCollectorPolicy;
+class ShenandoahWorkerTimings;
+class ShenandoahTerminationTimings;
+class outputStream;
+
+#define SHENANDOAH_GC_PHASE_DO(f)                                                       \
+  f(total_pause_gross,                              "Total Pauses (G)")                 \
+  f(total_pause,                                    "Total Pauses (N)")                 \
+  f(init_mark_gross,                                "Pause Init Mark (G)")              \
+  f(init_mark,                                      "Pause Init Mark (N)")              \
+  f(make_parsable,                                  "  Make Parsable")                  \
+  f(clear_liveness,                                 "  Clear Liveness")                 \
+                                                                                        \
+  /* Per-thread timer block, should have "roots" counters in consistent order */        \
+  f(scan_roots,                                     "  Scan Roots")                     \
+  f(scan_thread_roots,                              "    S: Thread Roots")              \
+  f(scan_code_roots,                                "    S: Code Cache Roots")          \
+  f(scan_string_table_roots,                        "    S: String Table Roots")        \
+  f(scan_universe_roots,                            "    S: Universe Roots")            \
+  f(scan_jni_roots,                                 "    S: JNI Roots")                 \
+  f(scan_jni_weak_roots,                            "    S: JNI Weak Roots")            \
+  f(scan_synchronizer_roots,                        "    S: Synchronizer Roots")        \
+  f(scan_management_roots,                          "    S: Management Roots")          \
+  f(scan_system_dictionary_roots,                   "    S: System Dict Roots")         \
+  f(scan_cldg_roots,                                "    S: CLDG Roots")                \
+  f(scan_jvmti_roots,                               "    S: JVMTI Roots")               \
+  f(scan_string_dedup_table_roots,                  "    S: Dedup Table Roots")         \
+  f(scan_string_dedup_queue_roots,                  "    S: Dedup Queue Roots")         \
+  f(scan_finish_queues,                             "    S: Finish Queues" )            \
+                                                                                        \
+  f(resize_tlabs,                                   "  Resize TLABs")                   \
+                                                                                        \
+  f(final_mark_gross,                               "Pause Final Mark (G)")             \
+  f(final_mark,                                     "Pause Final Mark (N)")             \
+                                                                                        \
+  /* Per-thread timer block, should have "roots" counters in consistent order */        \
+  f(update_roots,                                   "  Update Roots")                   \
+  f(update_thread_roots,                            "    U: Thread Roots")              \
+  f(update_code_roots,                              "    U: Code Cache Roots")          \
+  f(update_string_table_roots,                      "    U: String Table Roots")        \
+  f(update_universe_roots,                          "    U: Universe Roots")            \
+  f(update_jni_roots,                               "    U: JNI Roots")                 \
+  f(update_jni_weak_roots,                          "    U: JNI Weak Roots")            \
+  f(update_synchronizer_roots,                      "    U: Synchronizer Roots")        \
+  f(update_management_roots,                        "    U: Management Roots")          \
+  f(update_system_dictionary_roots,                 "    U: System Dict Roots")         \
+  f(update_cldg_roots,                              "    U: CLDG Roots")                \
+  f(update_jvmti_roots,                             "    U: JVMTI Roots")               \
+  f(update_string_dedup_table_roots,                "    U: Dedup Table Roots")         \
+  f(update_string_dedup_queue_roots,                "    U: Dedup Queue Roots")         \
+  f(update_finish_queues,                           "    U: Finish Queues")             \
+                                                                                        \
+  f(finish_queues,                                  "  Finish Queues")                  \
+  f(termination,                                    "    Termination")                  \
+  f(weakrefs,                                       "  Weak References")                \
+  f(weakrefs_process,                               "    Process")                      \
+  f(weakrefs_termination,                           "      Termination")                \
+  f(purge,                                          "  System Purge")                   \
+  f(purge_class_unload,                             "    Unload Classes")               \
+  f(purge_par,                                      "    Parallel Cleanup")             \
+  f(purge_cldg,                                     "    CLDG")                         \
+  f(purge_string_dedup,                             "    String Dedup")                 \
+  f(complete_liveness,                              "  Complete Liveness")              \
+  f(prepare_evac,                                   "  Prepare Evacuation")             \
+  f(recycle_regions,                                "  Recycle regions")                \
+                                                                                        \
+  /* Per-thread timer block, should have "roots" counters in consistent order */        \
+  f(init_evac,                                      "  Initial Evacuation")             \
+  f(evac_thread_roots,                              "    E: Thread Roots")              \
+  f(evac_code_roots,                                "    E: Code Cache Roots")          \
+  f(evac_string_table_roots,                        "    E: String Table Roots")        \
+  f(evac_universe_roots,                            "    E: Universe Roots")            \
+  f(evac_jni_roots,                                 "    E: JNI Roots")                 \
+  f(evac_jni_weak_roots,                            "    E: JNI Weak Roots")            \
+  f(evac_synchronizer_roots,                        "    E: Synchronizer Roots")        \
+  f(evac_management_roots,                          "    E: Management Roots")          \
+  f(evac_system_dictionary_roots,                   "    E: System Dict Roots")         \
+  f(evac_cldg_roots,                                "    E: CLDG Roots")                \
+  f(evac_jvmti_roots,                               "    E: JVMTI Roots")               \
+  f(evac_string_dedup_table_roots,                  "    E: String Dedup Table Roots")  \
+  f(evac_string_dedup_queue_roots,                  "    E: String Dedup Queue Roots")  \
+  f(evac_finish_queues,                             "    E: Finish Queues")             \
+                                                                                        \
+  f(final_evac_gross,                               "Pause Final Evac (G)")             \
+  f(final_evac,                                     "Pause Final Evac (N)")             \
+                                                                                        \
+  f(init_update_refs_gross,                         "Pause Init Update Refs (G)")       \
+  f(init_update_refs,                               "Pause Init Update Refs (N)")       \
+                                                                                        \
+  f(final_update_refs_gross,                         "Pause Final Update Refs (G)")     \
+  f(final_update_refs,                               "Pause Final Update Refs (N)")     \
+  f(final_update_refs_finish_work,                   "  Finish Work")                   \
+                                                                                        \
+  /* Per-thread timer block, should have "roots" counters in consistent order */        \
+  f(final_update_refs_roots,                         "  Update Roots")                  \
+  f(final_update_refs_thread_roots,                  "    UR: Thread Roots")            \
+  f(final_update_refs_code_roots,                    "    UR: Code Cache Roots")        \
+  f(final_update_refs_string_table_roots,            "    UR: String Table Roots")      \
+  f(final_update_refs_universe_roots,                "    UR: Universe Roots")          \
+  f(final_update_refs_jni_roots,                     "    UR: JNI Roots")               \
+  f(final_update_refs_jni_weak_roots,                "    UR: JNI Weak Roots")          \
+  f(final_update_refs_synchronizer_roots,            "    UR: Synchronizer Roots")      \
+  f(final_update_refs_management_roots,              "    UR: Management Roots")        \
+  f(final_update_refs_system_dict_roots,             "    UR: System Dict Roots")       \
+  f(final_update_refs_cldg_roots,                    "    UR: CLDG Roots")              \
+  f(final_update_refs_jvmti_roots,                   "    UR: JVMTI Roots")             \
+  f(final_update_refs_string_dedup_table_roots,      "    UR: Dedup Table Roots")       \
+  f(final_update_refs_string_dedup_queue_roots,      "    UR: Dedup Queue Roots")       \
+  f(final_update_refs_finish_queues,                 "    UR: Finish Queues")           \
+                                                                                        \
+  f(final_update_refs_recycle,                       "  Recycle")                       \
+                                                                                        \
+  f(degen_gc_gross,                                  "Pause Degenerated GC (G)")        \
+  f(degen_gc,                                        "Pause Degenerated GC (N)")        \
+                                                                                        \
+  /* Per-thread timer block, should have "roots" counters in consistent order */        \
+  f(degen_gc_update_roots,                           "  Degen Update Roots")            \
+  f(degen_gc_update_thread_roots,                    "    DU: Thread Roots")            \
+  f(degen_gc_update_code_roots,                      "    DU: Code Cache Roots")        \
+  f(degen_gc_update_string_table_roots,              "    DU: String Table Roots")      \
+  f(degen_gc_update_universe_roots,                  "    DU: Universe Roots")          \
+  f(degen_gc_update_jni_roots,                       "    DU: JNI Roots")               \
+  f(degen_gc_update_jni_weak_roots,                  "    DU: JNI Weak Roots")          \
+  f(degen_gc_update_synchronizer_roots,              "    DU: Synchronizer Roots")      \
+  f(degen_gc_update_management_roots,                "    DU: Management Roots")        \
+  f(degen_gc_update_system_dict_roots,               "    DU: System Dict Roots")       \
+  f(degen_gc_update_cldg_roots,                      "    DU: CLDG Roots")              \
+  f(degen_gc_update_jvmti_roots,                     "    DU: JVMTI Roots")             \
+  f(degen_gc_update_string_dedup_table_roots,        "    DU: Dedup Table Roots")       \
+  f(degen_gc_update_string_dedup_queue_roots,        "    DU: Dedup Queue Roots")       \
+  f(degen_gc_update_finish_queues,                   "    DU: Finish Queues")           \
+                                                                                        \
+  f(init_traversal_gc_gross,                         "Pause Init Traversal (G)")        \
+  f(init_traversal_gc,                               "Pause Init Traversal (N)")        \
+  f(traversal_gc_prepare,                            "  Prepare")                       \
+  f(traversal_gc_make_parsable,                      "    Make Parsable")               \
+  f(traversal_gc_resize_tlabs,                       "    Resize TLABs")                \
+                                                                                        \
+  /* Per-thread timer block, should have "roots" counters in consistent order */        \
+  f(init_traversal_gc_work,                          "  Work")                          \
+  f(init_traversal_gc_thread_roots,                  "    TI: Thread Roots")            \
+  f(init_traversal_gc_code_roots,                    "    TI: Code Cache Roots")        \
+  f(init_traversal_gc_string_table_roots,            "    TI: String Table Roots")      \
+  f(init_traversal_gc_universe_roots,                "    TI: Universe Roots")          \
+  f(init_traversal_gc_jni_roots,                     "    TI: JNI Roots")               \
+  f(init_traversal_gc_jni_weak_roots,                "    TI: JNI Weak Roots")          \
+  f(init_traversal_gc_synchronizer_roots,            "    TI: Synchronizer Roots")      \
+  f(init_traversal_gc_management_roots,              "    TI: Management Roots")        \
+  f(init_traversal_gc_system_dict_roots,             "    TI: System Dict Roots")       \
+  f(init_traversal_gc_cldg_roots,                    "    TI: CLDG Roots")              \
+  f(init_traversal_gc_jvmti_roots,                   "    TI: JVMTI Roots")             \
+  f(init_traversal_gc_string_dedup_table_roots,      "    TI: Dedup Table Roots")       \
+  f(init_traversal_gc_string_dedup_queue_roots,      "    TI: Dedup Queue Roots")       \
+  f(init_traversal_gc_finish_queues,                 "    TI: Finish Queues")           \
+                                                                                        \
+  f(final_traversal_gc_gross,                        "Pause Final Traversal (G)")       \
+  f(final_traversal_gc,                              "Pause Final Traversal (N)")       \
+                                                                                        \
+  /* Per-thread timer block, should have "roots" counters in consistent order */        \
+  f(final_traversal_gc_work,                         "  Work")                          \
+  f(final_traversal_gc_thread_roots,                 "    TF: Thread Roots")            \
+  f(final_traversal_gc_code_roots,                   "    TF: Code Cache Roots")        \
+  f(final_traversal_gc_string_table_roots,           "    TF: String Table Roots")      \
+  f(final_traversal_gc_universe_roots,               "    TF: Universe Roots")          \
+  f(final_traversal_gc_jni_roots,                    "    TF: JNI Roots")               \
+  f(final_traversal_gc_jni_weak_roots,               "    TF: JNI Weak Roots")          \
+  f(final_traversal_gc_synchronizer_roots,           "    TF: Synchronizer Roots")      \
+  f(final_traversal_gc_management_roots,             "    TF: Management Roots")        \
+  f(final_traversal_gc_system_dict_roots,            "    TF: System Dict Roots")       \
+  f(final_traversal_gc_cldg_roots,                   "    TF: CLDG Roots")              \
+  f(final_traversal_gc_jvmti_roots,                  "    TF: JVMTI Roots")             \
+  f(final_traversal_gc_string_dedup_table_roots,     "    TF: Dedup Table Roots")       \
+  f(final_traversal_gc_string_dedup_queue_roots,     "    TF: Dedup Queue Roots")       \
+  f(final_traversal_gc_finish_queues,                "    TF: Finish Queues")           \
+  f(final_traversal_gc_termination,                  "    TF:   Termination")           \
+                                                                                        \
+  /* Per-thread timer block, should have "roots" counters in consistent order */        \
+  f(final_traversal_update_roots,                    "  Update Roots")                  \
+  f(final_traversal_update_thread_roots,             "    TU: Thread Roots")            \
+  f(final_traversal_update_code_roots,               "    TU: Code Cache Roots")        \
+  f(final_traversal_update_string_table_roots,       "    TU: String Table Roots")      \
+  f(final_traversal_update_universe_roots,           "    TU: Universe Roots")          \
+  f(final_traversal_update_jni_roots,                "    TU: JNI Roots")               \
+  f(final_traversal_update_jni_weak_roots,           "    TU: JNI Weak Roots")          \
+  f(final_traversal_update_synchronizer_roots,       "    TU: Synchronizer Roots")      \
+  f(final_traversal_update_management_roots,         "    TU: Management Roots")        \
+  f(final_traversal_update_system_dict_roots,        "    TU: System Dict Roots")       \
+  f(final_traversal_update_cldg_roots,               "    TU: CLDG Roots")              \
+  f(final_traversal_update_jvmti_roots,              "    TU: JVMTI Roots")             \
+  f(final_traversal_update_string_dedup_table_roots, "    TU: Dedup Table Roots")       \
+  f(final_traversal_update_string_dedup_queue_roots, "    TU: Dedup Queue Roots")       \
+  f(final_traversal_update_finish_queues,            "    TU: Finish Queues")           \
+                                                                                        \
+  f(traversal_gc_cleanup,                            "  Cleanup")                       \
+                                                                                        \
+  f(full_gc_gross,                                   "Pause Full GC (G)")               \
+  f(full_gc,                                         "Pause Full GC (N)")               \
+  f(full_gc_heapdumps,                               "  Heap Dumps")                    \
+  f(full_gc_prepare,                                 "  Prepare")                       \
+                                                                                        \
+  /* Per-thread timer block, should have "roots" counters in consistent order */        \
+  f(full_gc_roots,                                   "  Roots")                         \
+  f(full_gc_thread_roots,                            "    F: Thread Roots")             \
+  f(full_gc_code_roots,                              "    F: Code Cache Roots")         \
+  f(full_gc_string_table_roots,                      "    F: String Table Roots")       \
+  f(full_gc_universe_roots,                          "    F: Universe Roots")           \
+  f(full_gc_jni_roots,                               "    F: JNI Roots")                \
+  f(full_gc_jni_weak_roots,                          "    F: JNI Weak Roots")           \
+  f(full_gc_synchronizer_roots,                      "    F: Synchronizer Roots")       \
+  f(full_gc_management_roots,                        "    F: Management Roots")         \
+  f(full_gc_system_dictionary_roots,                 "    F: System Dict Roots")        \
+  f(full_gc_cldg_roots,                              "    F: CLDG Roots")               \
+  f(full_gc_jvmti_roots,                             "    F: JVMTI Roots")              \
+  f(full_gc_string_dedup_table_roots,                "    F: Dedup Table Roots")        \
+  f(full_gc_string_dedup_queue_roots,                "    F: Dedup Queue Roots")        \
+  f(full_gc_finish_queues,                           "    F: Finish Queues")            \
+                                                                                        \
+  f(full_gc_mark,                                    "  Mark")                          \
+  f(full_gc_mark_finish_queues,                      "    Finish Queues")               \
+  f(full_gc_mark_termination,                        "      Termination")               \
+  f(full_gc_weakrefs,                                "    Weak References")             \
+  f(full_gc_weakrefs_process,                        "      Process")                   \
+  f(full_gc_weakrefs_termination,                    "        Termination")             \
+  f(full_gc_purge,                                   "    System Purge")                \
+  f(full_gc_purge_class_unload,                      "      Unload Classes")            \
+  f(full_gc_purge_par,                               "    Parallel Cleanup")            \
+  f(full_gc_purge_cldg,                              "    CLDG")                        \
+  f(full_gc_purge_string_dedup,                      "    String Dedup")                \
+  f(full_gc_calculate_addresses,                     "  Calculate Addresses")           \
+  f(full_gc_calculate_addresses_regular,             "    Regular Objects")             \
+  f(full_gc_calculate_addresses_humong,              "    Humongous Objects")           \
+  f(full_gc_adjust_pointers,                         "  Adjust Pointers")               \
+  f(full_gc_copy_objects,                            "  Copy Objects")                  \
+  f(full_gc_copy_objects_regular,                    "    Regular Objects")             \
+  f(full_gc_copy_objects_humong,                     "    Humongous Objects")           \
+  f(full_gc_copy_objects_reset_complete,             "    Reset Complete Bitmap")       \
+  f(full_gc_copy_objects_rebuild,                    "    Rebuild Region Sets")         \
+  f(full_gc_resize_tlabs,                            "  Resize TLABs")                  \
+                                                                                        \
+  /* Longer concurrent phases at the end */                                             \
+  f(conc_reset,                                      "Concurrent Reset")                \
+  f(conc_mark,                                       "Concurrent Marking")              \
+  f(conc_termination,                                "  Termination")                   \
+  f(conc_preclean,                                   "Concurrent Precleaning")          \
+  f(conc_evac,                                       "Concurrent Evacuation")           \
+  f(conc_update_refs,                                "Concurrent Update Refs")          \
+  f(conc_cleanup,                                    "Concurrent Cleanup")              \
+  f(conc_traversal,                                  "Concurrent Traversal")            \
+  f(conc_traversal_termination,                      "  Termination")                   \
+                                                                                        \
+  f(conc_uncommit,                                   "Concurrent Uncommit")             \
+                                                                                        \
+  /* Unclassified */                                                                    \
+  f(pause_other,                                     "Pause Other")                     \
+  f(conc_other,                                      "Concurrent Other")                \
+  // end
+
+#define SHENANDOAH_GC_PAR_PHASE_DO(f)                           \
+  f(ThreadRoots,             "Thread Roots (ms):")              \
+  f(CodeCacheRoots,          "CodeCache Roots (ms):")           \
+  f(StringTableRoots,        "StringTable Roots (ms):")         \
+  f(UniverseRoots,           "Universe Roots (ms):")            \
+  f(JNIRoots,                "JNI Handles Roots (ms):")         \
+  f(JNIWeakRoots,            "JNI Weak Roots (ms):")            \
+  f(ObjectSynchronizerRoots, "ObjectSynchronizer Roots (ms):")  \
+  f(ManagementRoots,         "Management Roots (ms):")          \
+  f(SystemDictionaryRoots,   "SystemDictionary Roots (ms):")    \
+  f(CLDGRoots,               "CLDG Roots (ms):")                \
+  f(JVMTIRoots,              "JVMTI Roots (ms):")               \
+  f(StringDedupTableRoots,   "String Dedup Table Roots (ms):")  \
+  f(StringDedupQueueRoots,   "String Dedup Queue Roots (ms):")  \
+  f(FinishQueues,            "Finish Queues (ms):")             \
+  // end
+
+class ShenandoahPhaseTimings : public CHeapObj<mtGC> {
+public:
+#define GC_PHASE_DECLARE_ENUM(type, title)   type,
+
+  enum Phase {
+    SHENANDOAH_GC_PHASE_DO(GC_PHASE_DECLARE_ENUM)
+    _num_phases
+  };
+
+  // These are the subphases of GC phases (scan_roots, update_roots,
+  // init_evac, final_update_refs_roots and full_gc_roots).
+  // Make sure they follow this order.
+  enum GCParPhases {
+    SHENANDOAH_GC_PAR_PHASE_DO(GC_PHASE_DECLARE_ENUM)
+    GCParPhasesSentinel
+  };
+
+#undef GC_PHASE_DECLARE_ENUM
+
+private:
+  struct TimingData {
+    HdrSeq _secs;
+    double _start;
+  };
+
+private:
+  TimingData          _timing_data[_num_phases];
+  static const char*  _phase_names[_num_phases];
+
+  ShenandoahWorkerTimings*      _worker_times;
+  ShenandoahTerminationTimings* _termination_times;
+
+  ShenandoahCollectorPolicy* _policy;
+
+public:
+  ShenandoahPhaseTimings();
+
+  ShenandoahWorkerTimings* worker_times() const { return _worker_times; }
+  ShenandoahTerminationTimings* termination_times() const { return _termination_times; }
+
+  // record phase start
+  void record_phase_start(Phase phase);
+  // record phase end and return elapsed time in seconds for the phase
+  void record_phase_end(Phase phase);
+  // record an elapsed time for the phase
+  void record_phase_time(Phase phase, double time);
+
+  void record_workers_start(Phase phase);
+  void record_workers_end(Phase phase);
+
+  static const char* phase_name(Phase phase) {
+    assert(phase >= 0 && phase < _num_phases, "Out of bound");
+    return _phase_names[phase];
+  }
+
+  void print_on(outputStream* out) const;
+
+private:
+  void init_phase_names();
+  void print_summary_sd(outputStream* out, const char* str, const HdrSeq* seq) const;
+};
+
+class ShenandoahWorkerTimings : public CHeapObj<mtGC> {
+private:
+  uint _max_gc_threads;
+  WorkerDataArray<double>* _gc_par_phases[ShenandoahPhaseTimings::GCParPhasesSentinel];
+
+public:
+  ShenandoahWorkerTimings(uint max_gc_threads);
+
+  // record the time a phase took in seconds
+  void record_time_secs(ShenandoahPhaseTimings::GCParPhases phase, uint worker_i, double secs);
+
+  double average(uint i) const;
+  void reset(uint i);
+  void print() const;
+};
+
+class ShenandoahTerminationTimings : public CHeapObj<mtGC> {
+private:
+  WorkerDataArray<double>* _gc_termination_phase;
+public:
+  ShenandoahTerminationTimings(uint max_gc_threads);
+
+  // record the time a phase took in seconds
+  void record_time_secs(uint worker_i, double secs);
+
+  double average() const;
+  void reset();
+
+  void print() const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHGCPHASETIMINGS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/classLoaderDataGraph.hpp"
+#include "classfile/stringTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahTimingTracker.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVMOperations.hpp"
+#include "gc/shared/weakProcessor.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/iterator.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/thread.hpp"
+#include "services/management.hpp"
+
+ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers,
+                                                 ShenandoahPhaseTimings::Phase phase) :
+  _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
+  _srs(n_workers),
+  _par_state_string(StringTable::weak_storage()),
+  _phase(phase),
+  _coderoots_all_iterator(ShenandoahCodeRoots::iterator())
+{
+  heap->phase_timings()->record_workers_start(_phase);
+
+  if (ShenandoahStringDedup::is_enabled()) {
+    StringDedup::gc_prologue(false);
+  }
+}
+
+ShenandoahRootProcessor::~ShenandoahRootProcessor() {
+  delete _process_strong_tasks;
+  if (ShenandoahStringDedup::is_enabled()) {
+    StringDedup::gc_epilogue();
+  }
+
+  ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
+}
+
+void ShenandoahRootProcessor::process_all_roots_slow(OopClosure* oops) {
+  CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
+  CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
+
+  CodeCache::blobs_do(&blobs);
+  ClassLoaderDataGraph::cld_do(&clds);
+  Universe::oops_do(oops);
+  Management::oops_do(oops);
+  JvmtiExport::oops_do(oops);
+  JNIHandles::oops_do(oops);
+  WeakProcessor::oops_do(oops);
+  ObjectSynchronizer::oops_do(oops);
+  SystemDictionary::oops_do(oops);
+  StringTable::oops_do(oops);
+
+  if (ShenandoahStringDedup::is_enabled()) {
+    ShenandoahStringDedup::oops_do_slow(oops);
+  }
+
+  // Do thread roots last. This allows verification code to find
+  // any broken objects in those special roots first, rather than
+  // accidental dangling references from the thread roots.
+  Threads::possibly_parallel_oops_do(false, oops, &blobs);
+}
+
+void ShenandoahRootProcessor::process_strong_roots(OopClosure* oops,
+                                                   OopClosure* weak_oops,
+                                                   CLDClosure* clds,
+                                                   CLDClosure* weak_clds,
+                                                   CodeBlobClosure* blobs,
+                                                   ThreadClosure* thread_cl,
+                                                   uint worker_id) {
+
+  process_java_roots(oops, clds, weak_clds, blobs, thread_cl, worker_id);
+  process_vm_roots(oops, NULL, weak_oops, worker_id);
+
+  _process_strong_tasks->all_tasks_completed(n_workers());
+}
+
+void ShenandoahRootProcessor::process_all_roots(OopClosure* oops,
+                                                OopClosure* weak_oops,
+                                                CLDClosure* clds,
+                                                CodeBlobClosure* blobs,
+                                                ThreadClosure* thread_cl,
+                                                uint worker_id) {
+
+  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+  process_java_roots(oops, clds, clds, blobs, thread_cl, worker_id);
+  process_vm_roots(oops, oops, weak_oops, worker_id);
+
+  if (blobs != NULL) {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
+    _coderoots_all_iterator.possibly_parallel_blobs_do(blobs);
+  }
+
+  _process_strong_tasks->all_tasks_completed(n_workers());
+}
+
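+// Applies an optional extra per-thread closure before walking each thread's
+// oops and nmethods, so both happen in a single pass over the threads.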
+class ShenandoahParallelOopsDoThreadClosure : public ThreadClosure {
+private:
+  OopClosure* _f;
+  CodeBlobClosure* _cf;
+  ThreadClosure* _thread_cl;
+public:
+  ShenandoahParallelOopsDoThreadClosure(OopClosure* f, CodeBlobClosure* cf, ThreadClosure* thread_cl) :
+    _f(f), _cf(cf), _thread_cl(thread_cl) {}
+
+  void do_thread(Thread* t) {
+    if (_thread_cl != NULL) {
+      _thread_cl->do_thread(t);
+    }
+    t->oops_do(_f, _cf);
+  }
+};
+
+void ShenandoahRootProcessor::process_java_roots(OopClosure* strong_roots,
+                                                 CLDClosure* strong_clds,
+                                                 CLDClosure* weak_clds,
+                                                 CodeBlobClosure* strong_code,
+                                                 ThreadClosure* thread_cl,
+                                                 uint worker_id)
+{
+  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+  // Iterating over the CLDG and the Threads is done early to allow us to
+  // first process the strong CLDs and nmethods and then, after a barrier,
+  // let the threads process the weak CLDs and nmethods.
+  {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CLDGRoots, worker_id);
+    _cld_iterator.root_cld_do(strong_clds, weak_clds);
+  }
+
+  {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
+    bool is_par = n_workers() > 1;
+    ResourceMark rm;
+    ShenandoahParallelOopsDoThreadClosure cl(strong_roots, strong_code, thread_cl);
+    Threads::possibly_parallel_threads_do(is_par, &cl);
+  }
+}
+
+void ShenandoahRootProcessor::process_vm_roots(OopClosure* strong_roots,
+                                               OopClosure* weak_roots,
+                                               OopClosure* jni_weak_roots,
+                                               uint worker_id)
+{
+  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+  if (_process_strong_tasks->try_claim_task(SHENANDOAH_RP_PS_Universe_oops_do)) {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::UniverseRoots, worker_id);
+    Universe::oops_do(strong_roots);
+  }
+
+  if (_process_strong_tasks->try_claim_task(SHENANDOAH_RP_PS_JNIHandles_oops_do)) {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JNIRoots, worker_id);
+    JNIHandles::oops_do(strong_roots);
+  }
+  if (_process_strong_tasks->try_claim_task(SHENANDOAH_RP_PS_Management_oops_do)) {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ManagementRoots, worker_id);
+    Management::oops_do(strong_roots);
+  }
+  if (_process_strong_tasks->try_claim_task(SHENANDOAH_RP_PS_jvmti_oops_do)) {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JVMTIRoots, worker_id);
+    JvmtiExport::oops_do(strong_roots);
+  }
+  if (_process_strong_tasks->try_claim_task(SHENANDOAH_RP_PS_SystemDictionary_oops_do)) {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::SystemDictionaryRoots, worker_id);
+    SystemDictionary::oops_do(strong_roots);
+  }
+  if (jni_weak_roots != NULL) {
+    if (_process_strong_tasks->try_claim_task(SHENANDOAH_RP_PS_JNIHandles_weak_oops_do)) {
+      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JNIWeakRoots, worker_id);
+      WeakProcessor::oops_do(jni_weak_roots);
+    }
+  }
+
+  if (ShenandoahStringDedup::is_enabled() && weak_roots != NULL) {
+    ShenandoahStringDedup::parallel_oops_do(weak_roots, worker_id);
+  }
+
+  {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ObjectSynchronizerRoots, worker_id);
+    if (_process_strong_tasks->try_claim_task(SHENANDOAH_RP_PS_ObjectSynchronizer_oops_do)) {
+      ObjectSynchronizer::oops_do(strong_roots);
+    }
+  }
+
+  // All threads execute the following. A specific chunk of buckets
+  // from the StringTable are the individual tasks.
+  if (weak_roots != NULL) {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::StringTableRoots, worker_id);
+    StringTable::possibly_parallel_oops_do(&_par_state_string, weak_roots);
+  }
+}
+
+uint ShenandoahRootProcessor::n_workers() const {
+  return _srs.n_threads();
+}
+
+ShenandoahRootEvacuator::ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_workers, ShenandoahPhaseTimings::Phase phase) :
+  _evacuation_tasks(new SubTasksDone(SHENANDOAH_EVAC_NumElements)),
+  _srs(n_workers),
+  _phase(phase),
+  _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator())
+{
+  heap->phase_timings()->record_workers_start(_phase);
+}
+
+ShenandoahRootEvacuator::~ShenandoahRootEvacuator() {
+  delete _evacuation_tasks;
+  ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
+}
+
+void ShenandoahRootEvacuator::process_evacuate_roots(OopClosure* oops,
+                                                     CodeBlobClosure* blobs,
+                                                     uint worker_id) {
+
+  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+  {
+    bool is_par = n_workers() > 1;
+    ResourceMark rm;
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
+
+    Threads::possibly_parallel_oops_do(is_par, oops, NULL);
+  }
+
+  if (blobs != NULL) {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
+    _coderoots_cset_iterator.possibly_parallel_blobs_do(blobs);
+  }
+
+  if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_jvmti_oops_do)) {
+    ShenandoahForwardedIsAliveClosure is_alive;
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JVMTIRoots, worker_id);
+    JvmtiExport::weak_oops_do(&is_alive, oops);
+  }
+}
+
+uint ShenandoahRootEvacuator::n_workers() const {
+  return _srs.n_threads();
+}
+
+// Implementation of ParallelCLDRootIterator
+ParallelCLDRootIterator::ParallelCLDRootIterator() {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+  ClassLoaderDataGraph::clear_claimed_marks();
+}
+
+void ParallelCLDRootIterator::root_cld_do(CLDClosure* strong, CLDClosure* weak) {
+  ClassLoaderDataGraph::roots_cld_do(strong, weak);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
+
+#include "code/codeCache.hpp"
+#include "gc/shared/oopStorageParState.hpp"
+#include "gc/shenandoah/shenandoahCodeRoots.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+
+class ParallelCLDRootIterator {
+public:
+  ParallelCLDRootIterator();
+  void root_cld_do(CLDClosure* strong, CLDClosure* weak);
+};
+
+enum Shenandoah_process_roots_tasks {
+  SHENANDOAH_RP_PS_Universe_oops_do,
+  SHENANDOAH_RP_PS_JNIHandles_oops_do,
+  SHENANDOAH_RP_PS_JNIHandles_weak_oops_do,
+  SHENANDOAH_RP_PS_ObjectSynchronizer_oops_do,
+  SHENANDOAH_RP_PS_Management_oops_do,
+  SHENANDOAH_RP_PS_SystemDictionary_oops_do,
+  SHENANDOAH_RP_PS_jvmti_oops_do,
+  // Leave this one last.
+  SHENANDOAH_RP_PS_NumElements
+};
+
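+// Walks the root set in parallel: Java roots (CLDG, thread stacks) are
+// processed by all workers, while singleton VM root groups are claimed
+// once via SubTasksDone, with each category timed per worker.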
+class ShenandoahRootProcessor : public StackObj {
+  SubTasksDone* _process_strong_tasks;
+  StrongRootsScope _srs;
+  OopStorage::ParState<false, false> _par_state_string;
+  ShenandoahPhaseTimings::Phase _phase;
+  ParallelCLDRootIterator   _cld_iterator;
+  ShenandoahAllCodeRootsIterator _coderoots_all_iterator;
+
+  void process_java_roots(OopClosure* scan_non_heap_roots,
+                          CLDClosure* scan_strong_clds,
+                          CLDClosure* scan_weak_clds,
+                          CodeBlobClosure* scan_strong_code,
+                          ThreadClosure* thread_cl,
+                          uint worker_i);
+
+  void process_vm_roots(OopClosure* scan_non_heap_roots,
+                        OopClosure* scan_non_heap_weak_roots,
+                        OopClosure* weak_jni_roots,
+                        uint worker_i);
+
+public:
+  ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers,
+                          ShenandoahPhaseTimings::Phase phase);
+  ~ShenandoahRootProcessor();
+
+  // Apply oops, clds and blobs to all strongly reachable roots in the system
+  void process_strong_roots(OopClosure* oops, OopClosure* weak_oops,
+                            CLDClosure* clds,
+                            CLDClosure* weak_clds,
+                            CodeBlobClosure* blobs,
+                            ThreadClosure* thread_cl,
+                            uint worker_id);
+
+  // Apply oops, clds and blobs to strongly and weakly reachable roots in the system
+  void process_all_roots(OopClosure* oops, OopClosure* weak_oops,
+                         CLDClosure* clds,
+                         CodeBlobClosure* blobs,
+                         ThreadClosure* thread_cl,
+                         uint worker_id);
+
+  // For slow debug/verification code
+  void process_all_roots_slow(OopClosure* oops);
+
+  // Number of worker threads used by the root processor.
+  uint n_workers() const;
+};
+
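+// Evacuates roots that may point into the collection set: thread stacks,
+// collection-set code roots, and JVMTI weak oops.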
+class ShenandoahRootEvacuator : public StackObj {
+  SubTasksDone* _evacuation_tasks;
+  StrongRootsScope _srs;
+  ShenandoahPhaseTimings::Phase _phase;
+  ShenandoahCsetCodeRootsIterator _coderoots_cset_iterator;
+
+  enum Shenandoah_evacuate_roots_tasks {
+      SHENANDOAH_EVAC_jvmti_oops_do,
+      // Leave this one last.
+      SHENANDOAH_EVAC_NumElements
+  };
+public:
+  ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_workers,
+                          ShenandoahPhaseTimings::Phase phase);
+  ~ShenandoahRootEvacuator();
+
+  void process_evacuate_roots(OopClosure* oops,
+                              CodeBlobClosure* blobs,
+                              uint worker_id);
+
+  // Number of worker threads used by the root evacuator.
+  uint n_workers() const;
+};
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahRuntime.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "oops/oop.inline.hpp"
+
+void ShenandoahRuntime::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  bs->write_ref_array_pre(dst, length, false);
+}
+
+void ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) {
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  bs->write_ref_array_pre(dst, length, false);
+}
+
+void ShenandoahRuntime::write_ref_array_post_entry(HeapWord* dst, size_t length) {
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  bs->ShenandoahBarrierSet::write_ref_array(dst, length);
+}
+
+// Shenandoah pre write barrier slowpath
+JRT_LEAF(void, ShenandoahRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread))
+  if (orig == NULL) {
+    assert(false, "should be optimized out");
+    return;
+  }
+  shenandoah_assert_correct(NULL, orig);
+  // Store the original value that was in the reference field.
+  ShenandoahThreadLocalData::satb_mark_queue(thread).enqueue(orig);
+JRT_END
+
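+// Shenandoah write barrier slowpath: called when the mutator hits an object
+// in the collection set; returns the to-space copy of src.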
+JRT_LEAF(oopDesc*, ShenandoahRuntime::write_barrier_JRT(oopDesc* src))
+  oop result = ShenandoahBarrierSet::barrier_set()->write_barrier_mutator(src);
+  return (oopDesc*) result;
+JRT_END
+
+// Shenandoah clone barrier: makes sure that references point to to-space
+// in cloned objects.
+JRT_LEAF(void, ShenandoahRuntime::shenandoah_clone_barrier(oopDesc* obj))
+  ShenandoahBarrierSet::barrier_set()->write_region(MemRegion((HeapWord*) obj, obj->size()));
+JRT_END
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+
+class HeapWord;
+class JavaThread;
+class oopDesc;
+
+class ShenandoahRuntime : public AllStatic {
+public:
+  static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
+  static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
+  static void write_ref_array_post_entry(HeapWord* dst, size_t length);
+  static void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread);
+
+  static oopDesc* write_barrier_JRT(oopDesc* src);
+
+  static void shenandoah_clone_barrier(oopDesc* obj);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+
+ShenandoahSATBMarkQueueSet::ShenandoahSATBMarkQueueSet() :
+  _heap(NULL),
+  _satb_mark_queue_buffer_allocator(ShenandoahSATBBufferSize, SATB_Q_FL_lock)
+{}
+
+void ShenandoahSATBMarkQueueSet::initialize(ShenandoahHeap* const heap,
+                                            Monitor* cbl_mon,
+                                            int process_completed_threshold,
+                                            uint buffer_enqueue_threshold_percentage,
+                                            Mutex* lock) {
+  SATBMarkQueueSet::initialize(cbl_mon,
+                               &_satb_mark_queue_buffer_allocator,
+                               process_completed_threshold,
+                               buffer_enqueue_threshold_percentage,
+                               lock);
+  _heap = heap;
+}
+
+SATBMarkQueue& ShenandoahSATBMarkQueueSet::satb_queue_for_thread(JavaThread* const t) const {
+  return ShenandoahThreadLocalData::satb_mark_queue(t);
+}
+
+static inline bool discard_entry(const void* entry, ShenandoahHeap* heap) {
+  return !heap->requires_marking(entry);
+}
+
+class ShenandoahSATBMarkQueueFilterFn {
+  ShenandoahHeap* _heap;
+
+public:
+  ShenandoahSATBMarkQueueFilterFn(ShenandoahHeap* heap) : _heap(heap) {}
+
+  // Return true if entry should be filtered out (removed), false if
+  // it should be retained.
+  bool operator()(const void* entry) const {
+    return discard_entry(entry, _heap);
+  }
+};
+
+void ShenandoahSATBMarkQueueSet::filter(SATBMarkQueue* queue) {
+  assert(_heap != NULL, "SATB queue set not initialized");
+  apply_filter(ShenandoahSATBMarkQueueFilterFn(_heap), queue);
+}
+
+bool ShenandoahSATBMarkQueue::should_enqueue_buffer() {
+  bool should_enqueue = SATBMarkQueue::should_enqueue_buffer();
+  size_t cap = capacity();
+  Thread* t = Thread::current();
+  if (ShenandoahThreadLocalData::is_force_satb_flush(t)) {
+    if (!should_enqueue && cap != index()) {
+      // Non-empty buffer is compacted, and we decided not to enqueue it.
+      // We still want to know about leftover work in that buffer eventually.
+      // This avoids dealing with these leftovers during the final-mark, after
+      // the buffers are drained completely. See JDK-8205353 for more discussion.
+      should_enqueue = true;
+    }
+    ShenandoahThreadLocalData::set_force_satb_flush(t, false);
+  }
+  return should_enqueue;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSATBMARKQUEUESET_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSATBMARKQUEUESET_HPP
+
+#include "gc/shared/satbMarkQueue.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/thread.hpp"
+
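+// SATB mark queue that also honors the per-thread force-flush request when
+// deciding whether to enqueue a filtered buffer (see should_enqueue_buffer()).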
+class ShenandoahSATBMarkQueue: public SATBMarkQueue {
+public:
+  ShenandoahSATBMarkQueue(SATBMarkQueueSet* qset) : SATBMarkQueue(qset, /* permanent = */ false) {}
+  virtual bool should_enqueue_buffer();
+};
+
+class ShenandoahSATBMarkQueueSet : public SATBMarkQueueSet {
+private:
+  ShenandoahHeap* _heap;
+  BufferNode::Allocator _satb_mark_queue_buffer_allocator;
+public:
+  ShenandoahSATBMarkQueueSet();
+
+  void initialize(ShenandoahHeap* const heap,
+                  Monitor* cbl_mon,
+                  int process_completed_threshold,
+                  uint buffer_enqueue_threshold_percentage,
+                  Mutex* lock);
+
+  virtual SATBMarkQueue& satb_queue_for_thread(JavaThread* const t) const;
+  virtual void filter(SATBMarkQueue* queue);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSATBMARKQUEUESET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSHAREDVARIABLES_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSHAREDVARIABLES_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/orderAccess.hpp"
+
+typedef jbyte ShenandoahSharedValue;
+
+// Needed for cooperation with generated code.
+STATIC_ASSERT(sizeof(ShenandoahSharedValue) == 1);
+
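+// A single byte of GC state, padded out to a cache line on both sides to
+// avoid false sharing between flags polled and updated concurrently by
+// mutator and GC threads.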
+typedef struct ShenandoahSharedFlag {
+  enum {
+    UNSET = 0,
+    SET = 1,
+  };
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile ShenandoahSharedValue));
+  volatile ShenandoahSharedValue value;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  ShenandoahSharedFlag() {
+    unset();
+  }
+
+  void set() {
+    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)SET);
+  }
+
+  void unset() {
+    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)UNSET);
+  }
+
+  bool is_set() const {
+    return OrderAccess::load_acquire(&value) == SET;
+  }
+
+  bool is_unset() const {
+    return OrderAccess::load_acquire(&value) == UNSET;
+  }
+
+  void set_cond(bool val) {
+    if (val) {
+      set();
+    } else {
+      unset();
+    }
+  }
+
+  bool try_set() {
+    if (is_set()) {
+      return false;
+    }
+    ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)SET, &value, (ShenandoahSharedValue)UNSET);
+    return old == UNSET; // success
+  }
+
+  bool try_unset() {
+    if (!is_set()) {
+      return false;
+    }
+    ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)UNSET, &value, (ShenandoahSharedValue)SET);
+    return old == SET; // success
+  }
+
+  volatile ShenandoahSharedValue* addr_of() {
+    return &value;
+  }
+
+private:
+  volatile ShenandoahSharedValue* operator&() {
+    fatal("Use addr_of() instead");
+    return NULL;
+  }
+
+  bool operator==(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator!=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator> (ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator>=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator< (ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+  bool operator<=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
+
+} ShenandoahSharedFlag;
+
+typedef struct ShenandoahSharedBitmap {
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile ShenandoahSharedValue));
+  volatile ShenandoahSharedValue value;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  ShenandoahSharedBitmap() {
+    clear();
+  }
+
+  void set(uint mask) {
+    assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
+    ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
+    while (true) {
+      ShenandoahSharedValue ov = OrderAccess::load_acquire(&value);
+      if ((ov & mask_val) != 0) {
+        // already set
+        return;
+      }
+
+      ShenandoahSharedValue nv = ov | mask_val;
+      if (Atomic::cmpxchg(nv, &value, ov) == ov) {
+        // successfully set
+        return;
+      }
+    }
+  }
+
+  void unset(uint mask) {
+    assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
+    ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
+    while (true) {
+      ShenandoahSharedValue ov = OrderAccess::load_acquire(&value);
+      if ((ov & mask_val) == 0) {
+        // already unset
+        return;
+      }
+
+      ShenandoahSharedValue nv = ov & ~mask_val;
+      if (Atomic::cmpxchg(nv, &value, ov) == ov) {
+        // successfully unset
+        return;
+      }
+    }
+  }
+
+  void clear() {
+    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)0);
+  }
+
+  bool is_set(uint mask) const {
+    return !is_unset(mask);
+  }
+
+  bool is_unset(uint mask) const {
+    assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
+    return (OrderAccess::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0;
+  }
+
+  bool is_clear() const {
+    return (OrderAccess::load_acquire(&value)) == 0;
+  }
+
+  void set_cond(uint mask, bool val) {
+    if (val) {
+      set(mask);
+    } else {
+      unset(mask);
+    }
+  }
+
+  volatile ShenandoahSharedValue* addr_of() {
+    return &value;
+  }
+
+  ShenandoahSharedValue raw_value() const {
+    return value;
+  }
+
+private:
+  volatile ShenandoahSharedValue* operator&() {
+    fatal("Use addr_of() instead");
+    return NULL;
+  }
+
+  bool operator==(ShenandoahSharedBitmap& other) { fatal("Use is_set() instead"); return false; }
+  bool operator!=(ShenandoahSharedBitmap& other) { fatal("Use is_set() instead"); return false; }
+  bool operator> (ShenandoahSharedBitmap& other) { fatal("Use is_set() instead"); return false; }
+  bool operator>=(ShenandoahSharedBitmap& other) { fatal("Use is_set() instead"); return false; }
+  bool operator< (ShenandoahSharedBitmap& other) { fatal("Use is_set() instead"); return false; }
+  bool operator<=(ShenandoahSharedBitmap& other) { fatal("Use is_set() instead"); return false; }
+
+} ShenandoahSharedBitmap;
+
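+// Cache-line-padded holder for a small enum value, with release/acquire
+// accessors and CAS-based updates.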
+template<class T>
+struct ShenandoahSharedEnumFlag {
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile ShenandoahSharedValue));
+  volatile ShenandoahSharedValue value;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  ShenandoahSharedEnumFlag() {
+    value = 0;
+  }
+
+  void set(T v) {
+    assert (v >= 0, "sanity");
+    assert (v < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
+    OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)v);
+  }
+
+  T get() const {
+    return (T)OrderAccess::load_acquire(&value);
+  }
+
+  T cmpxchg(T new_value, T expected) {
+    assert (new_value >= 0, "sanity");
+    assert (new_value < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
+    return (T)Atomic::cmpxchg((ShenandoahSharedValue)new_value, &value, (ShenandoahSharedValue)expected);
+  }
+
+  volatile ShenandoahSharedValue* addr_of() {
+    return &value;
+  }
+
+private:
+  volatile T* operator&() {
+    fatal("Use addr_of() instead");
+    return NULL;
+  }
+
+  bool operator==(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+  bool operator!=(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+  bool operator> (ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+  bool operator>=(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+  bool operator< (ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+  bool operator<=(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
+
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSHAREDVARIABLES_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shared/stringdedup/stringDedupThread.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahStrDedupQueue.hpp"
+#include "gc/shenandoah/shenandoahStrDedupQueue.inline.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "logging/log.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
+
+ShenandoahStrDedupQueue::ShenandoahStrDedupQueue() :
+  _consumer_queue(NULL),
+  _num_producer_queue(ShenandoahHeap::heap()->max_workers()),
+  _published_queues(NULL),
+  _free_list(NULL),
+  _num_free_buffer(0),
+  _max_free_buffer(ShenandoahHeap::heap()->max_workers() * 2),
+  _cancel(false),
+  _total_buffers(0) {
+  _producer_queues = NEW_C_HEAP_ARRAY(ShenandoahQueueBuffer*, _num_producer_queue, mtGC);
+  for (size_t index = 0; index < _num_producer_queue; index ++) {
+    _producer_queues[index] = NULL;
+  }
+}
+
+ShenandoahStrDedupQueue::~ShenandoahStrDedupQueue() {
+  MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+  for (size_t index = 0; index < num_queues(); index ++) {
+    release_buffers(queue_at(index));
+  }
+
+  release_buffers(_free_list);
+  FREE_C_HEAP_ARRAY(ShenandoahQueueBuffer*, _producer_queues);
+}
+
+void ShenandoahStrDedupQueue::wait_impl() {
+  MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+  while (_consumer_queue == NULL && !_cancel) {
+    ml.wait(Mutex::_no_safepoint_check_flag);
+    assert(_consumer_queue == NULL, "Why wait?");
+    _consumer_queue = _published_queues;
+    _published_queues = NULL;
+  }
+}
+
+void ShenandoahStrDedupQueue::cancel_wait_impl() {
+  MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+  _cancel = true;
+  ml.notify();
+}
+
+void ShenandoahStrDedupQueue::unlink_or_oops_do_impl(StringDedupUnlinkOrOopsDoClosure* cl, size_t queue) {
+  ShenandoahQueueBuffer* q = queue_at(queue);
+  while (q != NULL) {
+    q->unlink_or_oops_do(cl);
+    q = q->next();
+  }
+}
+
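+// Queue ids map as follows: ids in [0, _num_producer_queue) are the
+// per-worker producer queues, _num_producer_queue is the consumer queue,
+// and _num_producer_queue + 1 is the published queue list.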
+ShenandoahQueueBuffer* ShenandoahStrDedupQueue::queue_at(size_t queue_id) const {
+  assert(queue_id < num_queues(), "Invalid queue id");
+  if (queue_id < _num_producer_queue) {
+    return _producer_queues[queue_id];
+  } else if (queue_id == _num_producer_queue) {
+    return _consumer_queue;
+  } else {
+    assert(queue_id == _num_producer_queue + 1, "Must be");
+    return _published_queues;
+  }
+}
+
+void ShenandoahStrDedupQueue::set_producer_buffer(ShenandoahQueueBuffer* buf, size_t queue_id) {
+  assert(queue_id < _num_producer_queue, "Not a producer queue id");
+  _producer_queues[queue_id] = buf;
+}
+
+void ShenandoahStrDedupQueue::push_impl(uint worker_id, oop string_oop) {
+  assert(worker_id < _num_producer_queue, "Invalid queue id. Can only push to producer queue");
+  assert(ShenandoahStringDedup::is_candidate(string_oop), "Not a candidate");
+
+  ShenandoahQueueBuffer* buf = queue_at((size_t)worker_id);
+
+  if (buf == NULL) {
+    MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+    buf = new_buffer();
+    set_producer_buffer(buf, worker_id);
+  } else if (buf->is_full()) {
+    MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+    buf->set_next(_published_queues);
+    _published_queues = buf;
+    buf = new_buffer();
+    set_producer_buffer(buf, worker_id);
+    ml.notify();
+  }
+
+  assert(!buf->is_full(), "Sanity");
+  buf->push(string_oop);
+}
+
+oop ShenandoahStrDedupQueue::pop_impl() {
+  assert(Thread::current() == StringDedupThread::thread(), "Must be dedup thread");
+  while (true) {
+    if (_consumer_queue == NULL) {
+      MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+      _consumer_queue = _published_queues;
+      _published_queues = NULL;
+    }
+
+    // Still nothing published; no candidates to pop.
+    if (_consumer_queue == NULL) {
+      return NULL;
+    }
+
+    oop obj = NULL;
+    if (pop_candidate(obj)) {
+      assert(ShenandoahStringDedup::is_candidate(obj), "Must be a candidate");
+      return obj;
+    }
+    assert(obj == NULL, "No more candidates");
+  }
+}
+
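+// Pop a candidate from the consumer queue, releasing drained buffers along
+// the way. Returns false when the consumer queue is exhausted.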
+bool ShenandoahStrDedupQueue::pop_candidate(oop& obj) {
+  ShenandoahQueueBuffer* to_release = NULL;
+  bool success = true;
+  do {
+    if (_consumer_queue->is_empty()) {
+      ShenandoahQueueBuffer* buf = _consumer_queue;
+      _consumer_queue = _consumer_queue->next();
+      buf->set_next(to_release);
+      to_release = buf;
+
+      if (_consumer_queue == NULL) {
+        success = false;
+        break;
+      }
+    }
+    obj = _consumer_queue->pop();
+  } while (obj == NULL);
+
+  if (to_release != NULL) {
+    MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+    release_buffers(to_release);
+  }
+
+  return success;
+}
+
+ShenandoahQueueBuffer* ShenandoahStrDedupQueue::new_buffer() {
+  assert_lock_strong(StringDedupQueue_lock);
+  if (_free_list != NULL) {
+    assert(_num_free_buffer > 0, "Sanity");
+    ShenandoahQueueBuffer* buf = _free_list;
+    _free_list = _free_list->next();
+    _num_free_buffer --;
+    buf->reset();
+    return buf;
+  } else {
+    assert(_num_free_buffer == 0, "Sanity");
+    _total_buffers ++;
+    return new ShenandoahQueueBuffer;
+  }
+}
+
+void ShenandoahStrDedupQueue::release_buffers(ShenandoahQueueBuffer* list) {
+  assert_lock_strong(StringDedupQueue_lock);
+  while (list != NULL) {
+    ShenandoahQueueBuffer* tmp = list;
+    list = list->next();
+    if (_num_free_buffer < _max_free_buffer) {
+      tmp->set_next(_free_list);
+      _free_list = tmp;
+      _num_free_buffer ++;
+    } else {
+      _total_buffers --;
+      delete tmp;
+    }
+  }
+}
+
+void ShenandoahStrDedupQueue::print_statistics_impl() {
+  Log(gc, stringdedup) log;
+  log.debug("  Queue:");
+  log.debug("    Total buffers: " SIZE_FORMAT " (" SIZE_FORMAT " K). " SIZE_FORMAT " buffers are on free list",
+    _total_buffers, (_total_buffers * sizeof(ShenandoahQueueBuffer) / K), _num_free_buffer);
+}
+
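+// Verifies that every non-NULL oop in the dedup queues is correct
+// and refers to a java.lang.String instance.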
+class VerifyQueueClosure : public OopClosure {
+private:
+  ShenandoahHeap* _heap;
+public:
+  VerifyQueueClosure();
+
+  void do_oop(oop* o);
+  void do_oop(narrowOop* o) {
+    ShouldNotCallThis();
+  }
+};
+
+VerifyQueueClosure::VerifyQueueClosure() :
+  _heap(ShenandoahHeap::heap()) {
+}
+
+void VerifyQueueClosure::do_oop(oop* o) {
+  if (*o != NULL) {
+    oop obj = *o;
+    shenandoah_assert_correct(o, obj);
+    assert(java_lang_String::is_instance(obj), "Object must be a String");
+  }
+}
+
+void ShenandoahStrDedupQueue::verify_impl() {
+  VerifyQueueClosure vcl;
+  for (size_t index = 0; index < num_queues(); index ++) {
+    ShenandoahQueueBuffer* buf = queue_at(index);
+    while (buf != NULL) {
+      buf->oops_do(&vcl);
+      buf = buf->next();
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_HPP
+
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "oops/oop.hpp"
+
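+// Fixed-capacity LIFO buffer of oops that can be chained into a
+// singly-linked list of buffers.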
+template <uint buffer_size>
+class ShenandoahOopBuffer : public CHeapObj<mtGC> {
+private:
+  oop   _buf[buffer_size];
+  uint  _index;
+  ShenandoahOopBuffer<buffer_size>* _next;
+
+public:
+  ShenandoahOopBuffer();
+
+  bool is_full()  const;
+  bool is_empty() const;
+  uint size()     const;
+
+  void push(oop obj);
+  oop pop();
+
+  void reset();
+
+  void set_next(ShenandoahOopBuffer<buffer_size>* next);
+  ShenandoahOopBuffer<buffer_size>* next() const;
+
+  void unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl);
+  void oops_do(OopClosure* cl);
+};
+
+typedef ShenandoahOopBuffer<64> ShenandoahQueueBuffer;
+
+// Multi-producer, single-consumer queue set
+class ShenandoahStrDedupQueue : public StringDedupQueue {
+private:
+  ShenandoahQueueBuffer** _producer_queues;
+  ShenandoahQueueBuffer*  _consumer_queue;
+  size_t                  _num_producer_queue;
+
+  // The queue is used for producers to publish completed buffers
+  ShenandoahQueueBuffer* _published_queues;
+
+  // Cached free buffers
+  ShenandoahQueueBuffer* _free_list;
+  size_t                 _num_free_buffer;
+  const size_t           _max_free_buffer;
+
+  bool                   _cancel;
+
+  // statistics
+  size_t                 _total_buffers;
+
+private:
+  ~ShenandoahStrDedupQueue();
+
+public:
+  ShenandoahStrDedupQueue();
+
+  void wait_impl();
+  void cancel_wait_impl();
+
+  void push_impl(uint worker_id, oop string_oop);
+  oop  pop_impl();
+
+  void unlink_or_oops_do_impl(StringDedupUnlinkOrOopsDoClosure* cl, size_t queue);
+
+  void print_statistics_impl();
+  void verify_impl();
+
+protected:
+  size_t num_queues() const { return (_num_producer_queue + 2); }
+
+private:
+  ShenandoahQueueBuffer* new_buffer();
+
+  void release_buffers(ShenandoahQueueBuffer* list);
+
+  ShenandoahQueueBuffer* queue_at(size_t queue_id) const;
+
+  bool pop_candidate(oop& obj);
+
+  void set_producer_buffer(ShenandoahQueueBuffer* buf, size_t queue_id);
+
+  void verify(ShenandoahQueueBuffer* head);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahStrDedupQueue.hpp"
+
+template <uint buffer_size>
+ShenandoahOopBuffer<buffer_size>::ShenandoahOopBuffer() :
+  _index(0), _next(NULL) {
+}
+
+template <uint buffer_size>
+bool ShenandoahOopBuffer<buffer_size>::is_full() const {
+  return _index >= buffer_size;
+}
+
+template <uint buffer_size>
+bool ShenandoahOopBuffer<buffer_size>::is_empty() const {
+  return _index == 0;
+}
+
+template <uint buffer_size>
+uint ShenandoahOopBuffer<buffer_size>::size() const {
+  return _index;
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::push(oop obj) {
+  assert(!is_full(),  "Buffer is full");
+  _buf[_index ++] = obj;
+}
+
+template <uint buffer_size>
+oop ShenandoahOopBuffer<buffer_size>::pop() {
+  assert(!is_empty(), "Buffer is empty");
+  return _buf[--_index];
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::set_next(ShenandoahOopBuffer<buffer_size>* next) {
+  _next = next;
+}
+
+template <uint buffer_size>
+ShenandoahOopBuffer<buffer_size>* ShenandoahOopBuffer<buffer_size>::next() const {
+  return _next;
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::reset() {
+  _index = 0;
+  _next = NULL;
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {
+  for (uint index = 0; index < size(); index ++) {
+    oop* obj_addr = &_buf[index];
+    if (*obj_addr != NULL) {
+      if (cl->is_alive(*obj_addr)) {
+        cl->keep_alive(obj_addr);
+      } else {
+        *obj_addr = NULL;
+      }
+    }
+  }
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::oops_do(OopClosure* cl) {
+  for (uint index = 0; index < size(); index ++) {
+    cl->do_oop(&_buf[index]);
+  }
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shared/stringdedup/stringDedup.inline.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahStrDedupQueue.hpp"
+#include "gc/shenandoah/shenandoahTimingTracker.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "runtime/thread.hpp"
+
+void ShenandoahStringDedup::initialize() {
+  assert(UseShenandoahGC, "String deduplication is only available with Shenandoah GC");
+  StringDedup::initialize_impl<ShenandoahStrDedupQueue, StringDedupStat>();
+}
+
+// Enqueue candidates for deduplication.
+// This method should only be called by GC worker threads during marking phases.
+void ShenandoahStringDedup::enqueue_candidate(oop java_string) {
+  assert(Thread::current()->is_Worker_thread(),
+        "Only from a GC worker thread");
+
+  if (java_string->age() <= StringDeduplicationAgeThreshold) {
+    const markOop mark = java_string->mark();
+
+    // Header is displaced (or being inflated); too risky to deal with, skip.
+    if (mark == markOopDesc::INFLATING() || mark->has_displaced_mark_helper()) {
+      return;
+    }
+
+    // Increase string age and enqueue it when it reaches the age threshold
+    markOop new_mark = mark->incr_age();
+    if (mark == java_string->cas_set_mark(new_mark, mark)) {
+      if (mark->age() == StringDeduplicationAgeThreshold) {
+        StringDedupQueue::push(ShenandoahWorkerSession::worker_id(), java_string);
+      }
+    }
+  }
+}
+
+// Deduplicate a string.
+void ShenandoahStringDedup::deduplicate(oop java_string) {
+  assert(is_enabled(), "String deduplication not enabled");
+  StringDedupStat dummy; // Statistics from this path are never used
+  StringDedupTable::deduplicate(java_string, &dummy);
+}
+
+void ShenandoahStringDedup::parallel_oops_do(OopClosure* cl, uint worker_id) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+  assert(is_enabled(), "String deduplication not enabled");
+
+  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+
+  StringDedupUnlinkOrOopsDoClosure sd_cl(NULL, cl);
+
+  {
+    ShenandoahWorkerTimingsTracker x(worker_times, ShenandoahPhaseTimings::StringDedupQueueRoots, worker_id);
+    StringDedupQueue::unlink_or_oops_do(&sd_cl);
+  }
+  {
+    ShenandoahWorkerTimingsTracker x(worker_times, ShenandoahPhaseTimings::StringDedupTableRoots, worker_id);
+    StringDedupTable::unlink_or_oops_do(&sd_cl, worker_id);
+  }
+}
+
+void ShenandoahStringDedup::oops_do_slow(OopClosure* cl) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+  assert(is_enabled(), "String deduplication not enabled");
+  ShenandoahAlwaysTrueClosure always_true;
+  StringDedupUnlinkOrOopsDoClosure sd_cl(&always_true, cl);
+  StringDedupQueue::unlink_or_oops_do(&sd_cl);
+  StringDedupTable::unlink_or_oops_do(&sd_cl, 0);
+}
+
+class ShenandoahIsMarkedNextClosure : public BoolObjectClosure {
+private:
+  ShenandoahMarkingContext* const _mark_context;
+
+public:
+  ShenandoahIsMarkedNextClosure() : _mark_context(ShenandoahHeap::heap()->marking_context()) { }
+
+  bool do_object_b(oop obj) {
+    return _mark_context->is_marked(obj);
+  }
+};
+
+void ShenandoahStringDedup::parallel_cleanup() {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+  log_debug(gc, stringdedup)("String dedup cleanup");
+  ShenandoahIsMarkedNextClosure cl;
+
+  unlink_or_oops_do(&cl, NULL, true);
+}
+
+//
+// Task for parallel unlink_or_oops_do() operation on the deduplication queue
+// and table.
+//
+class ShenandoahStringDedupUnlinkOrOopsDoTask : public AbstractGangTask {
+private:
+  StringDedupUnlinkOrOopsDoClosure _cl;
+
+public:
+  ShenandoahStringDedupUnlinkOrOopsDoTask(BoolObjectClosure* is_alive,
+                                          OopClosure* keep_alive,
+                                          bool allow_resize_and_rehash) :
+    AbstractGangTask("StringDedupUnlinkOrOopsDoTask"),
+    _cl(is_alive, keep_alive) {
+    StringDedup::gc_prologue(allow_resize_and_rehash);
+  }
+
+  ~ShenandoahStringDedupUnlinkOrOopsDoTask() {
+    StringDedup::gc_epilogue();
+  }
+
+  virtual void work(uint worker_id) {
+    StringDedupQueue::unlink_or_oops_do(&_cl);
+    StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
+  }
+};
+
+void ShenandoahStringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive,
+                                              OopClosure* keep_alive,
+                                              bool allow_resize_and_rehash) {
+  assert(is_enabled(), "String deduplication not enabled");
+
+  ShenandoahStringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash);
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  heap->workers()->run_task(&task);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_HPP
+
+#include "classfile/javaClasses.inline.hpp"
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "memory/iterator.hpp"
+
+class ShenandoahStringDedup : public StringDedup {
+public:
+  // Initialize string deduplication.
+  static void initialize();
+
+  // Enqueue a string to the worker's local string dedup queue
+  static void enqueue_candidate(oop java_string);
+
+  // Deduplicate a string; the call is lock-free
+  static void deduplicate(oop java_string);
+
+  static void parallel_oops_do(OopClosure* cl, uint worker_id);
+  static void oops_do_slow(OopClosure* cl);
+
+  // Parallel cleanup of string dedup queues/table
+  static void parallel_cleanup();
+
+  static inline bool is_candidate(oop obj) {
+    return java_lang_String::is_instance_inlined(obj) &&
+           java_lang_String::value(obj) != NULL;
+  }
+private:
+  static void unlink_or_oops_do(BoolObjectClosure* is_alive,
+                                OopClosure* keep_alive,
+                                bool allow_resize_and_rehash);
+
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+
+void ShenandoahObjToScanQueueSet::clear() {
+  uint size = GenericTaskQueueSet<ShenandoahObjToScanQueue, mtGC>::size();
+  for (uint index = 0; index < size; index ++) {
+    ShenandoahObjToScanQueue* q = queue(index);
+    assert(q != NULL, "Sanity");
+    q->clear();
+  }
+}
+
+bool ShenandoahObjToScanQueueSet::is_empty() {
+  uint size = GenericTaskQueueSet<ShenandoahObjToScanQueue, mtGC>::size();
+  for (uint index = 0; index < size; index ++) {
+    ShenandoahObjToScanQueue* q = queue(index);
+    assert(q != NULL, "Sanity");
+    if (!q->is_empty()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+class ShenandoahOWSTTerminator: public OWSTTaskTerminator {
+public:
+  ShenandoahOWSTTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
+    OWSTTaskTerminator(n_threads, queue_set){ }
+
+protected:
+  bool exit_termination(size_t tasks, TerminatorTerminator* terminator);
+};
+
+bool ShenandoahOWSTTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
+  ShenandoahTerminatorTerminator* t = (ShenandoahTerminatorTerminator*)terminator;
+  bool force = (t != NULL) && t->should_force_termination();
+  if (force) {
+    // Forced termination: proceed with termination even if there are remaining tasks.
+    return false;
+  } else {
+    return OWSTTaskTerminator::exit_termination(tasks, terminator);
+  }
+}
+
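For context, the override above inverts the usual exit rule: OWSTTaskTerminator normally lets a worker exit the termination protocol while tasks remain, but a forcing terminator keeps everyone terminating regardless of outstanding work. A hedged standalone sketch of that rule (illustrative types, not the HotSpot API):

#include <atomic>
#include <cstddef>

// Standalone sketch of the force-termination override: normally a worker
// exits termination when tasks remain; a raised "force" flag suppresses
// that and lets all workers complete termination immediately.
struct ForcedTerminator {
  std::atomic<bool> force{false};

  bool exit_termination(std::size_t tasks) {
    if (force.load(std::memory_order_acquire)) {
      return false;              // keep terminating, ignore remaining tasks
    }
    return tasks > 0;            // base rule: pending tasks => exit and work
  }
};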
+ShenandoahTaskTerminator::ShenandoahTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
+  _terminator(new ShenandoahOWSTTerminator(n_threads, queue_set)) { }
+
+ShenandoahTaskTerminator::~ShenandoahTaskTerminator() {
+  assert(_terminator != NULL, "Invariant");
+  delete _terminator;
+}
+
+#if TASKQUEUE_STATS
+void ShenandoahObjToScanQueueSet::print_taskqueue_stats_hdr(outputStream* const st) {
+  st->print_raw_cr("GC Task Stats");
+  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
+  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
+}
+
+void ShenandoahObjToScanQueueSet::print_taskqueue_stats() const {
+  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
+    return;
+  }
+  Log(gc, task, stats) log;
+  ResourceMark rm;
+  LogStream ls(log.trace());
+  outputStream* st = &ls;
+  print_taskqueue_stats_hdr(st);
+
+  ShenandoahObjToScanQueueSet* queues = const_cast<ShenandoahObjToScanQueueSet*>(this);
+  TaskQueueStats totals;
+  const uint n = size();
+  for (uint i = 0; i < n; ++i) {
+    st->print(UINT32_FORMAT_W(3), i);
+    queues->queue(i)->stats.print(st);
+    st->cr();
+    totals += queues->queue(i)->stats;
+  }
+  st->print("tot "); totals.print(st); st->cr();
+  DEBUG_ONLY(totals.verify());
+
+}
+
+void ShenandoahObjToScanQueueSet::reset_taskqueue_stats() {
+  const uint n = size();
+  for (uint i = 0; i < n; ++i) {
+    queue(i)->stats.reset();
+  }
+}
+#endif // TASKQUEUE_STATS
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,338 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
+#include "gc/shared/owstTaskTerminator.hpp"
+#include "gc/shared/taskqueue.hpp"
+#include "gc/shared/taskqueue.inline.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/thread.hpp"
+
+template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
+class BufferedOverflowTaskQueue: public OverflowTaskQueue<E, F, N>
+{
+public:
+  typedef OverflowTaskQueue<E, F, N> taskqueue_t;
+
+  BufferedOverflowTaskQueue() : _buf_empty(true) {};
+
+  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)
+
+  // Push task t into the queue. Returns true on success.
+  inline bool push(E t);
+
+  // Attempt to pop from the queue. Returns true on success.
+  inline bool pop(E &t);
+
+  inline void clear()  {
+    _buf_empty = true;
+    taskqueue_t::set_empty();
+    taskqueue_t::overflow_stack()->clear();
+  }
+
+  inline bool is_empty()        const {
+    return _buf_empty && taskqueue_t::is_empty();
+  }
+
+private:
+  bool _buf_empty;
+  E _elem;
+};
+
+// ObjArrayChunkedTask
+//
+// Encodes both regular oops and array oops plus chunking data for parallel array processing.
+// The design goal is to make the regular oop ops very fast, because that would be the prevailing
+// case. On the other hand, it should not block parallel array processing from efficiently dividing
+// the array work.
+//
+// The idea is to steal the bits from the 64-bit oop to encode array data, if needed. For the
+// proper divide-and-conquer strategies, we want to encode the "blocking" data. It turns out that the
+// most efficient way to do this is to encode the array block as (chunk * 2^pow), where it is assumed
+// that the block has the size of 2^pow. This requires only 5 bits for pow (covering array lengths up
+// to 2^32) to encode all possible arrays.
+//
+//    |---------oop---------|-pow-|--chunk---|
+//    0                    49     54        64
+//
+// By definition, chunk == 0 means "no chunk", i.e. chunking starts from 1.
+//
+// This encoding gives a few interesting benefits:
+//
+// a) Encoding/decoding regular oops is very simple, because the upper bits are zero in that task:
+//
+//    |---------oop---------|00000|0000000000| // no chunk data
+//
+//    This helps the most ubiquitous path. The initialization amounts to putting the oop into the word
+//    with zero padding. Testing for "chunkedness" is testing for zero with chunk mask.
+//
+// b) Splitting tasks for divide-and-conquer is possible. Suppose we have chunk <C, P> that covers
+// interval [ (C-1)*2^P; C*2^P ). We can then split it into two chunks:
+//      <2*C - 1, P-1>, that covers interval [ (2*C - 2)*2^(P-1); (2*C - 1)*2^(P-1) )
+//      <2*C, P-1>,     that covers interval [ (2*C - 1)*2^(P-1);       2*C*2^(P-1) )
+//
+//    Observe that the union of these two intervals is:
+//      [ (2*C - 2)*2^(P-1); 2*C*2^(P-1) )
+//
+//    ...which is the original interval:
+//      [ (C-1)*2^P; C*2^P )
+//
+// c) The divide-and-conquer strategy could even start with chunk <1, round-log2-len(arr)>, and split
+//    down in the parallel threads, which alleviates the upfront (serial) splitting costs.
+//
+// Encoding limitations caused by current bitscales mean:
+//    10 bits for chunk: max 1024 blocks per array
+//     5 bits for power: max 2^32-element arrays
+//    49 bits for   oop: max 512 TB of addressable space
+//
+// Stealing bits from oop trims down the addressable space. Stealing too few bits for chunk ID limits
+// potential parallelism. Stealing too few bits for pow limits the maximum array size that can be handled.
+// In future, these might be rebalanced to favor one degree of freedom against another. For example,
+// if/when Arrays 2.0 bring 2^64-sized arrays, we might need to steal another bit for power. We could regain
+// some bits back if chunks are counted in ObjArrayMarkingStride units.
+//
+// There is also a fallback version that uses plain fields, when we don't have enough space to steal the
+// bits from the native pointer. It is useful to debug the _LP64 version.
+//
+
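To sanity-check the splitting rule in (b) and the 49/5/10 bit layout in the diagram above, here is a small standalone program; plain C++, independent of the HotSpot task class that follows:

#include <cassert>
#include <cstdint>

// Standalone check of the <chunk, pow> math described above. A task
// <c, p> covers the half-open interval [(c-1) << p, c << p).
struct Interval { uint64_t lo, hi; };

static Interval cover(uint64_t c, unsigned p) {
  return { (c - 1) << p, c << p };
}

int main() {
  // Splitting rule (b): <C, P> == <2C-1, P-1> union <2C, P-1>.
  const uint64_t C = 5; const unsigned P = 4;   // arbitrary example values
  Interval whole = cover(C, P);                 // [64, 80)
  Interval left  = cover(2*C - 1, P - 1);       // [64, 72)
  Interval right = cover(2*C,     P - 1);       // [72, 80)
  assert(left.lo == whole.lo && left.hi == right.lo && right.hi == whole.hi);

  // Bit layout: chunk in the top 10 bits, pow in the next 5, oop in the
  // low 49 -- matching oop_shift/pow_shift/chunk_shift below.
  const unsigned oop_bits = 49, pow_bits = 5, chunk_bits = 10;
  uint64_t oopv = 0x1234567890ULL, pw = 7, ch = 42;
  uint64_t task = (ch << (oop_bits + pow_bits)) | (pw << oop_bits) | oopv;
  assert(((task >> (oop_bits + pow_bits)) & ((1ULL << chunk_bits) - 1)) == ch);
  assert(((task >> oop_bits) & ((1ULL << pow_bits) - 1)) == pw);
  assert((task & ((1ULL << oop_bits) - 1)) == oopv);
  return 0;
}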
+#ifdef _MSC_VER
+#pragma warning(push)
+// warning C4522: multiple assignment operators specified
+#pragma warning( disable:4522 )
+#endif
+
+#ifdef _LP64
+class ObjArrayChunkedTask
+{
+public:
+  enum {
+    chunk_bits   = 10,
+    pow_bits     = 5,
+    oop_bits     = sizeof(uintptr_t)*8 - chunk_bits - pow_bits,
+  };
+  enum {
+    oop_shift    = 0,
+    pow_shift    = oop_shift + oop_bits,
+    chunk_shift  = pow_shift + pow_bits,
+  };
+
+public:
+  ObjArrayChunkedTask(oop o = NULL) {
+    _obj = ((uintptr_t)(void*) o) << oop_shift;
+  }
+  ObjArrayChunkedTask(oop o, int chunk, int pow) {
+    assert(0 <= chunk && chunk < nth_bit(chunk_bits), "chunk is sane: %d", chunk);
+    assert(0 <= pow && pow < nth_bit(pow_bits), "pow is sane: %d", pow);
+    uintptr_t t_b = ((uintptr_t) chunk) << chunk_shift;
+    uintptr_t t_m = ((uintptr_t) pow) << pow_shift;
+    uintptr_t obj = (uintptr_t)(void*)o;
+    assert(obj < nth_bit(oop_bits), "obj ref is sane: " PTR_FORMAT, obj);
+    uintptr_t t_o = obj << oop_shift;
+    _obj = t_o | t_m | t_b;
+  }
+  ObjArrayChunkedTask(const ObjArrayChunkedTask& t): _obj(t._obj) { }
+
+  ObjArrayChunkedTask& operator =(const ObjArrayChunkedTask& t) {
+    _obj = t._obj;
+    return *this;
+  }
+  volatile ObjArrayChunkedTask&
+  operator =(const volatile ObjArrayChunkedTask& t) volatile {
+    (void)const_cast<uintptr_t&>(_obj = t._obj);
+    return *this;
+  }
+
+  inline oop obj()   const { return (oop) reinterpret_cast<void*>((_obj >> oop_shift) & right_n_bits(oop_bits)); }
+  inline int chunk() const { return (int) (_obj >> chunk_shift) & right_n_bits(chunk_bits); }
+  inline int pow()   const { return (int) ((_obj >> pow_shift) & right_n_bits(pow_bits)); }
+  inline bool is_not_chunked() const { return (_obj & ~right_n_bits(oop_bits + pow_bits)) == 0; }
+
+  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.
+
+  static size_t max_addressable() {
+    return nth_bit(oop_bits);
+  }
+
+  static int chunk_size() {
+    return nth_bit(chunk_bits);
+  }
+
+private:
+  uintptr_t _obj;
+};
+#else
+class ObjArrayChunkedTask
+{
+public:
+  enum {
+    chunk_bits  = 10,
+    pow_bits    = 5,
+  };
+public:
+  ObjArrayChunkedTask(oop o = NULL, int chunk = 0, int pow = 0): _obj(o) {
+    assert(0 <= chunk && chunk < nth_bit(chunk_bits), "chunk is sane: %d", chunk);
+    assert(0 <= pow && pow < nth_bit(pow_bits), "pow is sane: %d", pow);
+    _chunk = chunk;
+    _pow = pow;
+  }
+  ObjArrayChunkedTask(const ObjArrayChunkedTask& t): _obj(t._obj), _chunk(t._chunk), _pow(t._pow) { }
+
+  ObjArrayChunkedTask& operator =(const ObjArrayChunkedTask& t) {
+    _obj = t._obj;
+    _chunk = t._chunk;
+    _pow = t._pow;
+    return *this;
+  }
+  volatile ObjArrayChunkedTask&
+  operator =(const volatile ObjArrayChunkedTask& t) volatile {
+    (void)const_cast<oop&>(_obj = t._obj);
+    _chunk = t._chunk;
+    _pow = t._pow;
+    return *this;
+  }
+
+  inline oop obj()   const { return _obj; }
+  inline int chunk() const { return _chunk; }
+  inline int pow()   const { return _pow; }
+
+  inline bool is_not_chunked() const { return _chunk == 0; }
+
+  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.
+
+  static size_t max_addressable() {
+    return sizeof(oop);
+  }
+
+  static int chunk_size() {
+    return nth_bit(chunk_bits);
+  }
+
+private:
+  oop _obj;
+  int _chunk;
+  int _pow;
+};
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+typedef ObjArrayChunkedTask ShenandoahMarkTask;
+typedef BufferedOverflowTaskQueue<ShenandoahMarkTask, mtGC> ShenandoahBufferedOverflowTaskQueue;
+typedef Padded<ShenandoahBufferedOverflowTaskQueue> ShenandoahObjToScanQueue;
+
+template <class T, MEMFLAGS F>
+class ParallelClaimableQueueSet: public GenericTaskQueueSet<T, F> {
+private:
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile jint));
+  volatile jint     _claimed_index;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+
+  debug_only(uint   _reserved;  )
+
+public:
+  using GenericTaskQueueSet<T, F>::size;
+
+public:
+  ParallelClaimableQueueSet(int n) : GenericTaskQueueSet<T, F>(n), _claimed_index(0) {
+    debug_only(_reserved = 0; )
+  }
+
+  void clear_claimed() { _claimed_index = 0; }
+  T*   claim_next();
+
+  // Reserve queues that are not subject to parallel claiming
+  void reserve(uint n) {
+    assert(n <= size(), "Sanity");
+    _claimed_index = (jint)n;
+    debug_only(_reserved = n;)
+  }
+
+  debug_only(uint get_reserved() const { return (uint)_reserved; })
+};
+
+template <class T, MEMFLAGS F>
+T* ParallelClaimableQueueSet<T, F>::claim_next() {
+  jint size = (jint)GenericTaskQueueSet<T, F>::size();
+
+  if (_claimed_index >= size) {
+    return NULL;
+  }
+
+  jint index = Atomic::add(1, &_claimed_index);
+
+  if (index <= size) {
+    return GenericTaskQueueSet<T, F>::queue((uint)index - 1);
+  } else {
+    return NULL;
+  }
+}
+
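claim_next() above is the whole hand-out protocol: one shared counter, one atomic increment per claim. A standalone analogue of the same pattern using std::atomic (illustrative, not the HotSpot API):

#include <atomic>
#include <cstddef>

// Standalone sketch of claim_next(): workers race on one atomic counter,
// and each increment hands out a distinct slot until all are claimed.
template <typename T>
T* claim_next(T** slots, std::size_t n, std::atomic<std::size_t>& claimed) {
  if (claimed.load(std::memory_order_relaxed) >= n) {
    return nullptr;  // fast path; also keeps the counter from growing forever
  }
  std::size_t idx = claimed.fetch_add(1);  // returns the pre-increment value
  return (idx < n) ? slots[idx] : nullptr;
}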
+class ShenandoahObjToScanQueueSet: public ParallelClaimableQueueSet<ShenandoahObjToScanQueue, mtGC> {
+public:
+  ShenandoahObjToScanQueueSet(int n) : ParallelClaimableQueueSet<ShenandoahObjToScanQueue, mtGC>(n) {}
+
+  bool is_empty();
+  void clear();
+
+#if TASKQUEUE_STATS
+  static void print_taskqueue_stats_hdr(outputStream* const st);
+  void print_taskqueue_stats() const;
+  void reset_taskqueue_stats();
+#endif // TASKQUEUE_STATS
+};
+
+class ShenandoahTerminatorTerminator : public TerminatorTerminator {
+public:
+  // Returning true terminates immediately, even if there is remaining work left
+  virtual bool should_force_termination() { return false; }
+};
+
+class ShenandoahCancelledTerminatorTerminator : public ShenandoahTerminatorTerminator {
+  virtual bool should_exit_termination() {
+    return false;
+  }
+  virtual bool should_force_termination() {
+    return true;
+  }
+};
+
+class ShenandoahTaskTerminator : public StackObj {
+private:
+  OWSTTaskTerminator* const   _terminator;
+public:
+  ShenandoahTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
+  ~ShenandoahTaskTerminator();
+
+  bool offer_termination(ShenandoahTerminatorTerminator* terminator) {
+    return _terminator->offer_termination(terminator);
+  }
+
+  void reset_for_reuse() { _terminator->reset_for_reuse(); }
+  bool offer_termination() { return offer_termination((ShenandoahTerminatorTerminator*)NULL); }
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+
+template <class E, MEMFLAGS F, unsigned int N>
+bool BufferedOverflowTaskQueue<E, F, N>::pop(E &t)
+{
+  if (!_buf_empty) {
+    t = _elem;
+    _buf_empty = true;
+    return true;
+  }
+
+  if (taskqueue_t::pop_local(t)) {
+    return true;
+  }
+
+  return taskqueue_t::pop_overflow(t);
+}
+
+template <class E, MEMFLAGS F, unsigned int N>
+inline bool BufferedOverflowTaskQueue<E, F, N>::push(E t)
+{
+  if (_buf_empty) {
+    _elem = t;
+    _buf_empty = false;
+  } else {
+    bool pushed = taskqueue_t::push(_elem);
+    assert(pushed, "overflow queue should always succeed pushing");
+    _elem = t;
+  }
+  return true;
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_INLINE_HPP
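The buffer semantics defined above keep the most recently pushed task in a private one-element slot, so the common push-then-pop sequence never touches the underlying queue. A hedged standalone illustration of the same discipline, with std::deque standing in for the real task queue:

#include <cassert>
#include <deque>

// Standalone sketch of BufferedOverflowTaskQueue's one-slot buffer:
// push() parks the newest element in _elem, spilling the previous one
// to the backing queue; pop() drains the buffer first. Illustrative only.
template <typename E>
class BufferedQueue {
  bool _buf_empty = true;
  E _elem{};
  std::deque<E> _q;                        // stands in for the real task queue
public:
  void push(E t) {
    if (!_buf_empty) _q.push_back(_elem);  // spill the previously buffered task
    _elem = t;
    _buf_empty = false;
  }
  bool pop(E& t) {
    if (!_buf_empty) { t = _elem; _buf_empty = true; return true; }
    if (_q.empty()) return false;
    t = _q.back(); _q.pop_back();
    return true;
  }
};

int main() {
  BufferedQueue<int> q;
  q.push(1); q.push(2);
  int t;
  assert(q.pop(t) && t == 2);  // newest task comes straight from the buffer
  assert(q.pop(t) && t == 1);
  assert(!q.pop(t));
  return 0;
}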
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
+
+#include "gc/shared/plab.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/sizes.hpp"
+
+class ShenandoahThreadLocalData {
+public:
+  static const uint INVALID_WORKER_ID = uint(-1);
+
+private:
+  char _gc_state;
+  char _oom_during_evac;
+  ShenandoahSATBMarkQueue _satb_mark_queue;
+  PLAB* _gclab;
+  size_t _gclab_size;
+  uint  _worker_id;
+  bool _force_satb_flush;
+
+  ShenandoahThreadLocalData() :
+    _gc_state(0),
+    _oom_during_evac(0),
+    _satb_mark_queue(&ShenandoahBarrierSet::satb_mark_queue_set()),
+    _gclab(NULL),
+    _gclab_size(0),
+    _worker_id(INVALID_WORKER_ID),
+    _force_satb_flush(false) {
+  }
+
+  ~ShenandoahThreadLocalData() {
+    if (_gclab != NULL) {
+      delete _gclab;
+    }
+  }
+
+  static ShenandoahThreadLocalData* data(Thread* thread) {
+    assert(UseShenandoahGC, "Sanity");
+    return thread->gc_data<ShenandoahThreadLocalData>();
+  }
+
+  static ByteSize satb_mark_queue_offset() {
+    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _satb_mark_queue);
+  }
+
+public:
+  static void create(Thread* thread) {
+    new (data(thread)) ShenandoahThreadLocalData();
+  }
+
+  static void destroy(Thread* thread) {
+    data(thread)->~ShenandoahThreadLocalData();
+  }
+
+  static SATBMarkQueue& satb_mark_queue(Thread* thread) {
+    return data(thread)->_satb_mark_queue;
+  }
+
+  static bool is_oom_during_evac(Thread* thread) {
+    return (data(thread)->_oom_during_evac & 1) == 1;
+  }
+
+  static void set_oom_during_evac(Thread* thread, bool oom) {
+    if (oom) {
+      data(thread)->_oom_during_evac |= 1;
+    } else {
+      data(thread)->_oom_during_evac &= ~1;
+    }
+  }
+
+  static void set_gc_state(Thread* thread, char gc_state) {
+    data(thread)->_gc_state = gc_state;
+  }
+
+  static char gc_state(Thread* thread) {
+    return data(thread)->_gc_state;
+  }
+
+  static void set_worker_id(Thread* thread, uint id) {
+    assert(thread->is_Worker_thread(), "Must be a worker thread");
+    data(thread)->_worker_id = id;
+  }
+
+  static uint worker_id(Thread* thread) {
+    assert(thread->is_Worker_thread(), "Must be a worker thread");
+    return data(thread)->_worker_id;
+  }
+
+  static void set_force_satb_flush(Thread* thread, bool v) {
+    data(thread)->_force_satb_flush = v;
+  }
+
+  static bool is_force_satb_flush(Thread* thread) {
+    return data(thread)->_force_satb_flush;
+  }
+
+  static void initialize_gclab(Thread* thread) {
+    assert (thread->is_Java_thread() || thread->is_Worker_thread(), "Only Java and GC worker threads are allowed to get GCLABs");
+    data(thread)->_gclab = new PLAB(PLAB::min_size());
+    data(thread)->_gclab_size = 0;
+  }
+
+  static PLAB* gclab(Thread* thread) {
+    return data(thread)->_gclab;
+  }
+
+  static size_t gclab_size(Thread* thread) {
+    return data(thread)->_gclab_size;
+  }
+
+  static void set_gclab_size(Thread* thread, size_t v) {
+    data(thread)->_gclab_size = v;
+  }
+
+#ifdef ASSERT
+  static void set_evac_allowed(Thread* thread, bool evac_allowed) {
+    if (evac_allowed) {
+      data(thread)->_oom_during_evac |= 2;
+    } else {
+      data(thread)->_oom_during_evac &= ~2;
+    }
+  }
+
+  static bool is_evac_allowed(Thread* thread) {
+    return (data(thread)->_oom_during_evac & 2) == 2;
+  }
+#endif
+
+  // Offsets
+  static ByteSize satb_mark_queue_active_offset() {
+    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active();
+  }
+
+  static ByteSize satb_mark_queue_index_offset() {
+    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index();
+  }
+
+  static ByteSize satb_mark_queue_buffer_offset() {
+    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf();
+  }
+
+  static ByteSize gc_state_offset() {
+    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _gc_state);
+  }
+
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTimingTracker.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahTimingTracker.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "runtime/os.hpp"
+
+
+ShenandoahPhaseTimings::Phase ShenandoahTerminationTracker::_current_termination_phase = ShenandoahPhaseTimings::_num_phases;
+
+ShenandoahWorkerTimingsTracker::ShenandoahWorkerTimingsTracker(ShenandoahWorkerTimings* worker_times,
+                                                              ShenandoahPhaseTimings::GCParPhases phase, uint worker_id) :
+  _phase(phase), _worker_times(worker_times), _worker_id(worker_id) {
+  if (_worker_times != NULL) {
+    _start_time = os::elapsedTime();
+  }
+}
+
+ShenandoahWorkerTimingsTracker::~ShenandoahWorkerTimingsTracker() {
+  if (_worker_times != NULL) {
+    _worker_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time);
+  }
+
+  if (ShenandoahGCPhase::is_root_work_phase()) {
+    ShenandoahPhaseTimings::Phase root_phase = ShenandoahGCPhase::current_phase();
+    ShenandoahPhaseTimings::Phase cur_phase = (ShenandoahPhaseTimings::Phase)((int)root_phase + (int)_phase + 1);
+    _event.commit(GCId::current(), _worker_id, ShenandoahPhaseTimings::phase_name(cur_phase));
+  }
+}
+
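These trackers are scoped (RAII) timers: the constructor samples the clock and the destructor records the elapsed interval into the phase timings. A standalone sketch of that shape with std::chrono (names and the printf sink are illustrative):

#include <chrono>
#include <cstdio>

// Standalone RAII-timer sketch mirroring ShenandoahWorkerTimingsTracker:
// sample the clock on entry, record elapsed time on scope exit.
class ScopedTimer {
  const char* _what;
  std::chrono::steady_clock::time_point _start;
public:
  explicit ScopedTimer(const char* what)
    : _what(what), _start(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    using namespace std::chrono;
    double secs = duration<double>(steady_clock::now() - _start).count();
    std::printf("%s: %.6f s\n", _what, secs);  // the real tracker records
  }                                            // into ShenandoahWorkerTimings
};

void do_roots() {
  ScopedTimer t("StringDedupQueueRoots");      // scoped exactly like the
  // ... root-scanning work ...                // tracker usages above
}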
+ShenandoahTerminationTimingsTracker::ShenandoahTerminationTimingsTracker(uint worker_id) :
+  _worker_id(worker_id)  {
+  if (ShenandoahTerminationTrace) {
+    _start_time = os::elapsedTime();
+  }
+}
+
+ShenandoahTerminationTimingsTracker::~ShenandoahTerminationTimingsTracker() {
+  if (ShenandoahTerminationTrace) {
+    ShenandoahHeap::heap()->phase_timings()->termination_times()->record_time_secs(_worker_id, os::elapsedTime() - _start_time);
+  }
+}
+
+ShenandoahTerminationTracker::ShenandoahTerminationTracker(ShenandoahPhaseTimings::Phase phase) : _phase(phase) {
+  assert(_current_termination_phase == ShenandoahPhaseTimings::_num_phases, "Should be invalid");
+  assert(phase == ShenandoahPhaseTimings::termination ||
+         phase == ShenandoahPhaseTimings::final_traversal_gc_termination ||
+         phase == ShenandoahPhaseTimings::full_gc_mark_termination ||
+         phase == ShenandoahPhaseTimings::conc_termination ||
+         phase == ShenandoahPhaseTimings::conc_traversal_termination ||
+         phase == ShenandoahPhaseTimings::weakrefs_termination ||
+         phase == ShenandoahPhaseTimings::full_gc_weakrefs_termination,
+         "Only these phases");
+
+  assert(Thread::current()->is_VM_thread() || Thread::current()->is_ConcurrentGC_thread(),
+    "Called from wrong thread");
+  _current_termination_phase = phase;
+  ShenandoahHeap::heap()->phase_timings()->termination_times()->reset();
+}
+
+ShenandoahTerminationTracker::~ShenandoahTerminationTracker() {
+  assert(_phase == _current_termination_phase, "Can not change phase");
+  ShenandoahPhaseTimings* phase_times = ShenandoahHeap::heap()->phase_timings();
+
+  double t = phase_times->termination_times()->average();
+  phase_times->record_phase_time(_phase, t);
+  debug_only(_current_termination_phase = ShenandoahPhaseTimings::_num_phases;)
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTimingTracker.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTIMINGTRACKER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTIMINGTRACKER_HPP
+
+#include "jfr/jfrEvents.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "memory/allocation.hpp"
+
+class ShenandoahWorkerTimingsTracker : public StackObj {
+private:
+  double _start_time;
+  ShenandoahPhaseTimings::GCParPhases _phase;
+  ShenandoahWorkerTimings* _worker_times;
+  uint _worker_id;
+
+  EventGCPhaseParallel _event;
+public:
+  ShenandoahWorkerTimingsTracker(ShenandoahWorkerTimings* worker_times, ShenandoahPhaseTimings::GCParPhases phase, uint worker_id);
+  ~ShenandoahWorkerTimingsTracker();
+};
+
+
+class ShenandoahTerminationTimingsTracker : public StackObj {
+private:
+  double _start_time;
+  uint   _worker_id;
+
+public:
+  ShenandoahTerminationTimingsTracker(uint worker_id);
+  ~ShenandoahTerminationTimingsTracker();
+};
+
+// Tracks termination time in a specific GC phase
+class ShenandoahTerminationTracker : public StackObj {
+private:
+  ShenandoahPhaseTimings::Phase _phase;
+
+  static ShenandoahPhaseTimings::Phase _current_termination_phase;
+public:
+  ShenandoahTerminationTracker(ShenandoahPhaseTimings::Phase phase);
+  ~ShenandoahTerminationTracker();
+
+  static ShenandoahPhaseTimings::Phase current_termination_phase() { return _current_termination_phase; }
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTIMINGTRACKER_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTracer.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRACER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRACER_HPP
+
+#include "gc/shared/gcTrace.hpp"
+
+class ShenandoahTracer : public GCTracer {
+public:
+  ShenandoahTracer() : GCTracer(Shenandoah) {}
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRACER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,1129 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/classLoaderData.hpp"
+#include "classfile/classLoaderDataGraph.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/referenceProcessorPhaseTimes.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "gc/shared/weakProcessor.hpp"
+#include "gc/shared/weakProcessor.inline.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahCodeRoots.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
+#include "gc/shenandoah/shenandoahTimingTracker.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVerifier.hpp"
+
+#include "memory/iterator.hpp"
+#include "memory/metaspace.hpp"
+#include "memory/resourceArea.hpp"
+
+/**
+ * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp; however, this is not an SATB algorithm.
+ * We are using the buffer as a generic oop buffer to enqueue new values in concurrent oop stores, i.e., the
+ * algorithm is incremental-update-based.
+ *
+ * NOTE on interaction with TAMS: we want to avoid traversing new objects for
+ * several reasons:
+ * - We will not reclaim them in this cycle anyway, because they are not in the
+ *   cset
+ * - They would otherwise make up the bulk of the work during final-pause
+ * - It also shortens the concurrent cycle because we don't need to
+ *   pointlessly traverse through newly allocated objects.
+ * - As a nice side-effect, it solves the I-U termination problem (mutators
+ *   cannot outrun the GC by allocating like crazy)
+ * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
+ *   target object of stores if it's new. Treating new objects as implicitly
+ *   live achieves the same, but without extra barriers. I think the effect of
+ *   shortened final-pause (mentioned above) is the main advantage of MWF. In
+ *   particular, we will not see the head of a completely new long linked list
+ *   in final-pause and end up traversing huge chunks of the heap there.
+ * - We don't need to see/update the fields of new objects either, because they
+ *   are either still null, or anything that's been stored into them has been
+ *   evacuated+enqueued before (and will thus be treated later).
+ *
+ * We achieve this by setting TAMS for each region, and everything allocated
+ * beyond TAMS will be 'implicitly marked'.
+ *
+ * Gotchas:
+ * - While we want new objects to be implicitly marked, we don't want to count
+ *   them alive. Otherwise the next cycle wouldn't pick them up and consider
+ *   them for cset. This means that we need to protect such regions from
+ *   getting accidentally trashed at the end of the traversal cycle. This is why I
+ *   keep track of alloc-regions and check is_alloc_region() in the trashing
+ *   code.
+ * - We *need* to traverse through evacuated objects. Those objects are
+ *   pre-existing, and any references in them point to interesting objects that
+ *   we need to see. We also want to count them as live, because we just
+ *   determined that they are alive :-) I achieve this by upping TAMS
+ *   concurrently for every gclab/gc-shared alloc before publishing the
+ *   evacuated object. This way, the GC threads will not consider such objects
+ *   implicitly marked, and will traverse through them as normal.
+ */
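The TAMS mechanism in the note boils down to one comparison on the liveness path: anything at or above the region's TAMS is implicitly live, and everything below it consults the mark bitmap. A hedged standalone sketch (illustrative types, not the HotSpot API):

#include <cstdint>

// Standalone sketch of the "implicitly marked above TAMS" rule from the
// note above. All types and names here are illustrative, not HotSpot's.
struct Region {
  uintptr_t bottom;
  uintptr_t tams;    // top-at-mark-start, captured when the cycle begins
};

// Objects allocated after the cycle started (addr >= tams) are implicitly
// live; pre-existing objects rely on their explicit mark bit.
bool is_marked(const Region& r, uintptr_t addr, bool bitmap_bit) {
  if (addr >= r.tams) return true;  // allocated during the cycle: implicit
  return bitmap_bit;                // pre-existing: explicit mark bit
}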
+class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  ShenandoahTraversalGC* _traversal_gc;
+  ShenandoahHeap* const _heap;
+
+public:
+  ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
+    _queue(q),
+    _heap(ShenandoahHeap::heap())
+ { }
+
+  void do_buffer(void** buffer, size_t size) {
+    for (size_t i = 0; i < size; ++i) {
+      oop* p = (oop*) &buffer[i];
+      oop obj = RawAccess<>::oop_load(p);
+      shenandoah_assert_not_forwarded(p, obj);
+      if (_heap->marking_context()->mark(obj)) {
+        _queue->push(ShenandoahMarkTask(obj));
+      }
+    }
+  }
+};
+
+class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
+private:
+  ShenandoahTraversalSATBBufferClosure* _satb_cl;
+
+public:
+  ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
+    _satb_cl(satb_cl) {}
+
+  void do_thread(Thread* thread) {
+    if (thread->is_Java_thread()) {
+      JavaThread* jt = (JavaThread*)thread;
+      ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
+    } else if (thread->is_VM_thread()) {
+      ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
+    }
+  }
+};
+
+// Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
+// and remark them later during final-traversal.
+class ShenandoahMarkCLDClosure : public CLDClosure {
+private:
+  OopClosure* _cl;
+public:
+  ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
+  void do_cld(ClassLoaderData* cld) {
+    cld->oops_do(_cl, true, true);
+  }
+};
+
+// Like CLDToOopClosure, but only process modified CLDs
+class ShenandoahRemarkCLDClosure : public CLDClosure {
+private:
+  OopClosure* _cl;
+public:
+  ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
+  void do_cld(ClassLoaderData* cld) {
+    if (cld->has_modified_oops()) {
+      cld->oops_do(_cl, true, true);
+    }
+  }
+};
+
+class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
+private:
+  ShenandoahRootProcessor* _rp;
+  ShenandoahHeap* _heap;
+  ShenandoahCsetCodeRootsIterator* _cset_coderoots;
+public:
+  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) :
+    AbstractGangTask("Shenandoah Init Traversal Collection"),
+    _rp(rp),
+    _heap(ShenandoahHeap::heap()),
+    _cset_coderoots(cset_coderoots) {}
+
+  void work(uint worker_id) {
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+
+    ShenandoahEvacOOMScope oom_evac_scope;
+    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
+    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
+
+    bool process_refs = _heap->process_references();
+    bool unload_classes = _heap->unload_classes();
+    ReferenceProcessor* rp = NULL;
+    if (process_refs) {
+      rp = _heap->ref_processor();
+    }
+
+    // Step 1: Process ordinary GC roots.
+    {
+      ShenandoahTraversalClosure roots_cl(q, rp);
+      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
+      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
+      if (unload_classes) {
+        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, NULL, NULL, worker_id);
+        // Need to pre-evac code roots here. Otherwise we might see from-space constants.
+        ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
+        ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
+        _cset_coderoots->possibly_parallel_blobs_do(&code_cl);
+      } else {
+        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
+      }
+    }
+  }
+};
+
+class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
+private:
+  ShenandoahTaskTerminator* _terminator;
+  ShenandoahHeap* _heap;
+public:
+  ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
+    AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
+    _terminator(terminator),
+    _heap(ShenandoahHeap::heap()) {}
+
+  void work(uint worker_id) {
+    ShenandoahConcurrentWorkerSession worker_session(worker_id);
+    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
+    ShenandoahEvacOOMScope oom_evac_scope;
+    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
+
+    // Drain all outstanding work in queues.
+    traversal_gc->main_loop(worker_id, _terminator, true);
+  }
+};
+
+class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
+private:
+  ShenandoahRootProcessor* _rp;
+  ShenandoahTaskTerminator* _terminator;
+  ShenandoahHeap* _heap;
+public:
+  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
+    AbstractGangTask("Shenandoah Final Traversal Collection"),
+    _rp(rp),
+    _terminator(terminator),
+    _heap(ShenandoahHeap::heap()) {}
+
+  void work(uint worker_id) {
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+
+    ShenandoahEvacOOMScope oom_evac_scope;
+    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
+
+    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
+    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
+
+    bool process_refs = _heap->process_references();
+    bool unload_classes = _heap->unload_classes();
+    ReferenceProcessor* rp = NULL;
+    if (process_refs) {
+      rp = _heap->ref_processor();
+    }
+
+    // Step 0: Drain outstanding SATB queues.
+    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
+    ShenandoahTraversalSATBBufferClosure satb_cl(q);
+    {
+      // Process remaining finished SATB buffers.
+      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
+      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
+      // Process remaining threads SATB buffers below.
+    }
+
+    // Step 1: Process GC roots.
+    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
+    // and the references to the oops are updated during init pause. New nmethods are handled
+    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
+    // roots here.
+    if (!_heap->is_degenerated_gc_in_progress()) {
+      ShenandoahTraversalClosure roots_cl(q, rp);
+      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
+      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
+      if (unload_classes) {
+        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
+        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, &tc, worker_id);
+      } else {
+        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &tc, worker_id);
+      }
+    } else {
+      ShenandoahTraversalDegenClosure roots_cl(q, rp);
+      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
+      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
+      if (unload_classes) {
+        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
+        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, &tc, worker_id);
+      } else {
+        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &tc, worker_id);
+      }
+    }
+
+    {
+      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
+      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);
+
+      // Step 2: Finally, drain all outstanding work in queues.
+      traversal_gc->main_loop(worker_id, _terminator, false);
+    }
+
+  }
+};
+
+ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
+  _heap(heap),
+  _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
+  _traversal_set(ShenandoahHeapRegionSet()) {
+
+  uint num_queues = heap->max_workers();
+  for (uint i = 0; i < num_queues; ++i) {
+    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
+    task_queue->initialize();
+    _task_queues->register_queue(i, task_queue);
+  }
+}
+
+ShenandoahTraversalGC::~ShenandoahTraversalGC() {
+}
+
+void ShenandoahTraversalGC::prepare_regions() {
+  size_t num_regions = _heap->num_regions();
+  ShenandoahMarkingContext* const ctx = _heap->marking_context();
+  for (size_t i = 0; i < num_regions; i++) {
+    ShenandoahHeapRegion* region = _heap->get_region(i);
+    if (_heap->is_bitmap_slice_committed(region)) {
+      if (_traversal_set.is_in(i)) {
+        ctx->capture_top_at_mark_start(region);
+        region->clear_live_data();
+        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
+      } else {
+        // Everything outside the traversal set is always considered live.
+        ctx->reset_top_at_mark_start(region);
+      }
+    } else {
+      // FreeSet may contain uncommitted empty regions; once they are recommitted,
+      // their TAMS may hold stale values, so reset them here.
+      ctx->reset_top_at_mark_start(region);
+    }
+  }
+}
+
+void ShenandoahTraversalGC::prepare() {
+  _heap->collection_set()->clear();
+  assert(_heap->collection_set()->count() == 0, "collection set not clear");
+
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
+    _heap->make_parsable(true);
+  }
+
+  if (UseTLAB) {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
+    _heap->resize_tlabs();
+  }
+
+  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
+  assert(!_heap->marking_context()->is_complete(), "should not be complete");
+
+  ShenandoahFreeSet* free_set = _heap->free_set();
+  ShenandoahCollectionSet* collection_set = _heap->collection_set();
+
+  // Find collection set
+  _heap->heuristics()->choose_collection_set(collection_set);
+  prepare_regions();
+
+  // Rebuild free set
+  free_set->rebuild();
+
+  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
+                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
+}
+
+void ShenandoahTraversalGC::init_traversal_collection() {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");
+
+  if (ShenandoahVerify) {
+    _heap->verifier()->verify_before_traversal();
+  }
+
+  if (VerifyBeforeGC) {
+    Universe::verify();
+  }
+
+  {
+    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
+    ShenandoahHeapLocker lock(_heap->lock());
+    prepare();
+  }
+
+  _heap->set_concurrent_traversal_in_progress(true);
+
+  bool process_refs = _heap->process_references();
+  if (process_refs) {
+    ReferenceProcessor* rp = _heap->ref_processor();
+    rp->enable_discovery(true /*verify_no_refs*/);
+    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
+  }
+
+  {
+    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
+    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
+    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
+
+#if defined(COMPILER2) || INCLUDE_JVMCI
+    DerivedPointerTable::clear();
+#endif
+
+    {
+      uint nworkers = _heap->workers()->active_workers();
+      task_queues()->reserve(nworkers);
+      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
+
+      ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator();
+
+      ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots);
+      _heap->workers()->run_task(&traversal_task);
+    }
+
+#if defined(COMPILER2) || INCLUDE_JVMCI
+    DerivedPointerTable::update_pointers();
+#endif
+  }
+
+  if (ShenandoahPacing) {
+    _heap->pacer()->setup_for_traversal();
+  }
+}
+
+void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
+  ShenandoahObjToScanQueue* q = task_queues()->queue(w);
+
+  // Initialize live data.
+  jushort* ld = _heap->get_liveness_cache(w);
+
+  ReferenceProcessor* rp = NULL;
+  if (_heap->process_references()) {
+    rp = _heap->ref_processor();
+  }
+  {
+    if (!_heap->is_degenerated_gc_in_progress()) {
+      if (_heap->unload_classes()) {
+        if (ShenandoahStringDedup::is_enabled()) {
+          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
+          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
+        } else {
+          ShenandoahTraversalMetadataClosure cl(q, rp);
+          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
+        }
+      } else {
+        if (ShenandoahStringDedup::is_enabled()) {
+          ShenandoahTraversalDedupClosure cl(q, rp);
+          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
+        } else {
+          ShenandoahTraversalClosure cl(q, rp);
+          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
+        }
+      }
+    } else {
+      if (_heap->unload_classes()) {
+        if (ShenandoahStringDedup::is_enabled()) {
+          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
+          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
+        } else {
+          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
+          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
+        }
+      } else {
+        if (ShenandoahStringDedup::is_enabled()) {
+          ShenandoahTraversalDedupDegenClosure cl(q, rp);
+          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
+        } else {
+          ShenandoahTraversalDegenClosure cl(q, rp);
+          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
+        }
+      }
+    }
+  }
+
+  _heap->flush_liveness_cache(w);
+}
+
+template <class T>
+void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
+  ShenandoahObjToScanQueueSet* queues = task_queues();
+  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
+  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();
+
+  uintx stride = ShenandoahMarkLoopStride;
+
+  ShenandoahMarkTask task;
+
+  // Process outstanding queues, if any.
+  q = queues->claim_next();
+  while (q != NULL) {
+    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
+      ShenandoahCancelledTerminatorTerminator tt;
+      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
+      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
+      while (!terminator->offer_termination(&tt));
+      return;
+    }
+
+    for (uint i = 0; i < stride; i++) {
+      if (q->pop(task)) {
+        conc_mark->do_task<T>(q, cl, live_data, &task);
+      } else {
+        assert(q->is_empty(), "Must be empty");
+        q = queues->claim_next();
+        break;
+      }
+    }
+  }
+
+  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;
+
+  // Normal loop.
+  q = queues->queue(worker_id);
+
+  ShenandoahTraversalSATBBufferClosure drain_satb(q);
+  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
+
+  while (true) {
+    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;
+
+    while (satb_mq_set.completed_buffers_num() > 0) {
+      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
+    }
+
+    uint work = 0;
+    for (uint i = 0; i < stride; i++) {
+      if (q->pop(task) ||
+          queues->steal(worker_id, task)) {
+        conc_mark->do_task<T>(q, cl, live_data, &task);
+        work++;
+      } else {
+        break;
+      }
+    }
+
+    if (work == 0) {
+      // No more work, try to terminate
+      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
+      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
+      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
+      if (terminator->offer_termination()) return;
+    }
+  }
+}
+
+bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
+  if (_heap->cancelled_gc()) {
+    ShenandoahCancelledTerminatorTerminator tt;
+    ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
+    ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
+    while (!terminator->offer_termination(&tt));
+    return true;
+  }
+  return false;
+}
+
+void ShenandoahTraversalGC::concurrent_traversal_collection() {
+  ClassLoaderDataGraph::clear_claimed_marks();
+
+  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
+  if (!_heap->cancelled_gc()) {
+    uint nworkers = _heap->workers()->active_workers();
+    task_queues()->reserve(nworkers);
+    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);
+
+    ShenandoahTaskTerminator terminator(nworkers, task_queues());
+    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
+    _heap->workers()->run_task(&task);
+  }
+
+  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
+    preclean_weak_refs();
+  }
+}
+
+void ShenandoahTraversalGC::final_traversal_collection() {
+  _heap->make_parsable(true);
+
+  if (!_heap->cancelled_gc()) {
+#if defined(COMPILER2) || INCLUDE_JVMCI
+    DerivedPointerTable::clear();
+#endif
+    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
+    uint nworkers = _heap->workers()->active_workers();
+    task_queues()->reserve(nworkers);
+
+    // Finish traversal
+    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
+    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);
+
+    ShenandoahTaskTerminator terminator(nworkers, task_queues());
+    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
+    _heap->workers()->run_task(&task);
+#if defined(COMPILER2) || INCLUDE_JVMCI
+    DerivedPointerTable::update_pointers();
+#endif
+  }
+
+  if (!_heap->cancelled_gc() && _heap->process_references()) {
+    weak_refs_work();
+  }
+
+  if (!_heap->cancelled_gc() && _heap->unload_classes()) {
+    _heap->unload_classes_and_cleanup_tables(false);
+    fixup_roots();
+  }
+
+  if (!_heap->cancelled_gc()) {
+    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
+    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
+    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
+
+    // No more marking expected
+    _heap->mark_complete_marking_context();
+
+    // Resize metaspace
+    MetaspaceGC::compute_new_size();
+
+    // Still good? We can now trash the cset, and make final verification
+    {
+      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
+      ShenandoahHeapLocker lock(_heap->lock());
+
+      // Trash everything
+      // Clear immediate garbage regions.
+      size_t num_regions = _heap->num_regions();
+
+      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
+      ShenandoahFreeSet* free_regions = _heap->free_set();
+      ShenandoahMarkingContext* const ctx = _heap->marking_context();
+      free_regions->clear();
+      for (size_t i = 0; i < num_regions; i++) {
+        ShenandoahHeapRegion* r = _heap->get_region(i);
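+        // A region with no live data is only immediate trash if nothing was
+        // allocated in it since mark start; objects above TAMS are implicitly live.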
+        bool not_allocated = ctx->top_at_mark_start(r) == r->top();
+
+        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
+        if (r->is_humongous_start() && candidate) {
+          // Trash humongous.
+          HeapWord* humongous_obj = r->bottom() + ShenandoahBrooksPointer::word_size();
+          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
+          r->make_trash_immediate();
+          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
+            i++;
+            r = _heap->get_region(i);
+            assert(r->is_humongous_continuation(), "must be humongous continuation");
+            r->make_trash_immediate();
+          }
+        } else if (!r->is_empty() && candidate) {
+          // Trash regular.
+          assert(!r->is_humongous(), "handled above");
+          assert(!r->is_trash(), "must not already be trashed");
+          r->make_trash_immediate();
+        }
+      }
+      _heap->collection_set()->clear();
+      _heap->free_set()->rebuild();
+      reset();
+    }
+
+    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
+    _heap->set_concurrent_traversal_in_progress(false);
+    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");
+
+    if (ShenandoahVerify) {
+      _heap->verifier()->verify_after_traversal();
+    }
+
+    if (VerifyAfterGC) {
+      Universe::verify();
+    }
+  }
+}
+
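+// Updates root slots that still point at from-space copies to reference their
+// forwardees instead.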
+class ShenandoahTraversalFixRootsClosure : public OopClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      if (!oopDesc::equals_raw(obj, forw)) {
+        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
+      }
+    }
+  }
+
+public:
+  inline void do_oop(oop* p) { do_oop_work(p); }
+  inline void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
+private:
+  ShenandoahRootProcessor* _rp;
+
+public:
+  ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
+    AbstractGangTask("Shenandoah traversal fix roots"),
+    _rp(rp) {}
+
+  void work(uint worker_id) {
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+    ShenandoahTraversalFixRootsClosure cl;
+    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
+    CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
+    _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
+  }
+};
+
+void ShenandoahTraversalGC::fixup_roots() {
+#if defined(COMPILER2) || INCLUDE_JVMCI
+  DerivedPointerTable::clear();
+#endif
+  ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
+  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
+  _heap->workers()->run_task(&update_roots_task);
+#if defined(COMPILER2) || INCLUDE_JVMCI
+  DerivedPointerTable::update_pointers();
+#endif
+}
+
+void ShenandoahTraversalGC::reset() {
+  _task_queues->clear();
+}
+
+ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
+  return _task_queues;
+}
+
+class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
+private:
+  ShenandoahHeap* const _heap;
+public:
+  ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
+  virtual bool should_return() { return _heap->cancelled_gc(); }
+};
+
+class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
+public:
+  void do_void() {
+    ShenandoahHeap* sh = ShenandoahHeap::heap();
+    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
+    assert(sh->process_references(), "why else would we be here?");
+    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
+    shenandoah_assert_rp_isalive_installed();
+    traversal_gc->main_loop((uint) 0, &terminator, true);
+  }
+};
+
+class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  Thread* _thread;
+  ShenandoahTraversalGC* _traversal_gc;
+  ShenandoahMarkingContext* const _mark_context;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
+  }
+
+public:
+  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
+    _queue(q), _thread(Thread::current()),
+    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
+    _mark_context(ShenandoahHeap::heap()->marking_context()) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahTraversalWeakUpdateClosure : public OopClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p) {
+    // Cannot call maybe_update_with_forwarded, because on traversal-degen
+    // path the collection set is already dropped. Instead, do the unguarded store.
+    // TODO: This can be fixed after degen-traversal stops dropping cset.
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      shenandoah_assert_marked(p, obj);
+      RawAccess<IS_NOT_NULL>::oop_store(p, obj);
+    }
+  }
+
+public:
+  ShenandoahTraversalWeakUpdateClosure() {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  Thread* _thread;
+  ShenandoahTraversalGC* _traversal_gc;
+  ShenandoahMarkingContext* const _mark_context;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
+  }
+
+public:
+  ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
+          _queue(q), _thread(Thread::current()),
+          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
+          _mark_context(ShenandoahHeap::heap()->marking_context()) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  Thread* _thread;
+  ShenandoahTraversalGC* _traversal_gc;
+  ShenandoahMarkingContext* const _mark_context;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    ShenandoahEvacOOMScope evac_scope;
+    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
+  }
+
+public:
+  ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
+          _queue(q), _thread(Thread::current()),
+          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
+          _mark_context(ShenandoahHeap::heap()->marking_context()) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
+private:
+  ShenandoahObjToScanQueue* _queue;
+  Thread* _thread;
+  ShenandoahTraversalGC* _traversal_gc;
+  ShenandoahMarkingContext* const _mark_context;
+
+  template <class T>
+  inline void do_oop_work(T* p) {
+    ShenandoahEvacOOMScope evac_scope;
+    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
+  }
+
+public:
+  ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
+          _queue(q), _thread(Thread::current()),
+          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
+          _mark_context(ShenandoahHeap::heap()->marking_context()) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
+private:
+  ReferenceProcessor* _rp;
+
+public:
+  ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
+          AbstractGangTask("Precleaning task"),
+          _rp(rp) {}
+
+  void work(uint worker_id) {
+    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
+    ShenandoahEvacOOMScope oom_evac_scope;
+
+    ShenandoahHeap* sh = ShenandoahHeap::heap();
+
+    ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
+
+    ShenandoahForwardedIsAliveClosure is_alive;
+    ShenandoahTraversalCancelledGCYieldClosure yield;
+    ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
+    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
+    ResourceMark rm;
+    _rp->preclean_discovered_references(&is_alive, &keep_alive,
+                                        &complete_gc, &yield,
+                                        NULL);
+  }
+};
+
+void ShenandoahTraversalGC::preclean_weak_refs() {
+  // Pre-cleaning weak references before diving into STW makes sense at the
+  // end of concurrent mark. This filters out the references whose referents
+  // are alive. Note that ReferenceProcessor already filters these out during
+  // reference discovery, so the bulk of the work is done there. This phase
+  // processes the leftovers that slipped past the initial filtering, i.e. those
+  // whose referent was marked alive after the reference had been discovered.
+
+  assert(_heap->process_references(), "sanity");
+  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");
+
+  // Shortcut if no references were discovered to avoid winding up threads.
+  ReferenceProcessor* rp = _heap->ref_processor();
+  if (!rp->has_discovered_references()) {
+    return;
+  }
+
+  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
+
+  shenandoah_assert_rp_isalive_not_installed();
+  ShenandoahForwardedIsAliveClosure is_alive;
+  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
+
+  assert(task_queues()->is_empty(), "Should be empty");
+
+  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
+  // queues and other goodies. When upstream ReferenceProcessor starts supporting
+  // parallel precleans, we can extend this to more threads.
+  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);
+
+  WorkGang* workers = _heap->workers();
+  uint nworkers = workers->active_workers();
+  assert(nworkers == 1, "This code uses only a single worker");
+  task_queues()->reserve(nworkers);
+
+  ShenandoahTraversalPrecleanTask task(rp);
+  workers->run_task(&task);
+
+  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
+}
+
+// Weak Reference Closures
+class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
+  uint _worker_id;
+  ShenandoahTaskTerminator* _terminator;
+  bool _reset_terminator;
+
+public:
+  ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
+    _worker_id(worker_id),
+    _terminator(t),
+    _reset_terminator(reset_terminator) {
+  }
+
+  void do_void() {
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+
+    ShenandoahHeap* sh = ShenandoahHeap::heap();
+    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
+    assert(sh->process_references(), "why else would we be here?");
+    shenandoah_assert_rp_isalive_installed();
+
+    traversal_gc->main_loop(_worker_id, _terminator, false);
+
+    if (_reset_terminator) {
+      _terminator->reset_for_reuse();
+    }
+  }
+};
+
+class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
+  uint _worker_id;
+  ShenandoahTaskTerminator* _terminator;
+  bool _reset_terminator;
+
+public:
+  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
+          _worker_id(worker_id),
+          _terminator(t),
+          _reset_terminator(reset_terminator) {
+  }
+
+  void do_void() {
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+
+    ShenandoahHeap* sh = ShenandoahHeap::heap();
+    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
+    assert(sh->process_references(), "why else would we be here?");
+    shenandoah_assert_rp_isalive_installed();
+
+    ShenandoahEvacOOMScope evac_scope;
+    traversal_gc->main_loop(_worker_id, _terminator, false);
+
+    if (_reset_terminator) {
+      _terminator->reset_for_reuse();
+    }
+  }
+};
+
+void ShenandoahTraversalGC::weak_refs_work() {
+  assert(_heap->process_references(), "sanity");
+
+  ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
+
+  ShenandoahGCPhase phase(phase_root);
+
+  ReferenceProcessor* rp = _heap->ref_processor();
+
+  // NOTE: We cannot shortcut on has_discovered_references() here, because
+  // we would then miss marking JNI weak refs; see the implementation in
+  // ReferenceProcessor::process_discovered_references.
+  weak_refs_work_doit();
+
+  rp->verify_no_references_recorded();
+  assert(!rp->discovery_enabled(), "Post condition");
+}
+
+class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
+private:
+  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
+  ShenandoahTaskTerminator* _terminator;
+
+public:
+  ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
+                                      ShenandoahTaskTerminator* t) :
+    AbstractGangTask("Process reference objects in parallel"),
+    _proc_task(proc_task),
+    _terminator(t) {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahEvacOOMScope oom_evac_scope;
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
+
+    ShenandoahForwardedIsAliveClosure is_alive;
+    if (!heap->is_degenerated_gc_in_progress()) {
+      ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
+      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
+    } else {
+      ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
+      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
+    }
+  }
+};
+
+class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
+private:
+  WorkGang* _workers;
+
+public:
+  ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
+
+  // Executes a task using worker threads.
+  void execute(ProcessTask& task, uint ergo_workers) {
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
+    ShenandoahPushWorkerQueuesScope scope(_workers,
+                                          traversal_gc->task_queues(),
+                                          ergo_workers,
+                                          /* do_check = */ false);
+    uint nworkers = _workers->active_workers();
+    traversal_gc->task_queues()->reserve(nworkers);
+    ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
+    ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
+    _workers->run_task(&proc_task_proxy);
+  }
+};
+
+void ShenandoahTraversalGC::weak_refs_work_doit() {
+  ReferenceProcessor* rp = _heap->ref_processor();
+
+  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
+
+  shenandoah_assert_rp_isalive_not_installed();
+  ShenandoahForwardedIsAliveClosure is_alive;
+  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
+
+  WorkGang* workers = _heap->workers();
+  uint nworkers = workers->active_workers();
+
+  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
+  rp->set_active_mt_degree(nworkers);
+
+  assert(task_queues()->is_empty(), "Should be empty");
+
+  // complete_gc and keep_alive closures instantiated here are only needed for
+  // the single-threaded path in RP. They share queue 0 for tracking work, which
+  // simplifies the implementation. Since RP may decide to call complete_gc
+  // several times, we need to be able to reuse the terminator.
+  uint serial_worker_id = 0;
+  ShenandoahTaskTerminator terminator(1, task_queues());
+  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
+  ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
+
+  ShenandoahTraversalRefProcTaskExecutor executor(workers);
+
+  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
+  if (!_heap->is_degenerated_gc_in_progress()) {
+    ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
+    rp->process_discovered_references(&is_alive, &keep_alive,
+                                      &complete_gc, &executor,
+                                      &pt);
+  } else {
+    ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
+    rp->process_discovered_references(&is_alive, &keep_alive,
+                                      &complete_gc, &executor,
+                                      &pt);
+  }
+
+  {
+    ShenandoahGCPhase phase(phase_process);
+    ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination);
+
+    // Process leftover weak oops (using parallel version)
+    ShenandoahTraversalWeakUpdateClosure cl;
+    WeakProcessor::weak_oops_do(workers, &is_alive, &cl, 1);
+
+    pt.print_all_references();
+
+    assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_HPP
+
+#include "memory/allocation.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "runtime/thread.hpp"
+
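+// Traversal GC processes the heap in a single concurrent pass: objects are
+// marked, evacuated, and their references updated as the object graph is
+// walked, instead of in separate mark/evac/update-refs phases.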
+class ShenandoahTraversalGC : public CHeapObj<mtGC> {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahObjToScanQueueSet* const _task_queues;
+  ShenandoahHeapRegionSet _traversal_set;
+
+public:
+  ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions);
+  ~ShenandoahTraversalGC();
+
+  ShenandoahHeapRegionSet* traversal_set() { return &_traversal_set; }
+
+  void reset();
+  void prepare();
+  void init_traversal_collection();
+  void concurrent_traversal_collection();
+  void final_traversal_collection();
+
+  template <class T, bool STRING_DEDUP, bool DEGEN>
+  inline void process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, ShenandoahMarkingContext* const mark_context);
+
+  bool check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield);
+
+  ShenandoahObjToScanQueueSet* task_queues();
+
+  void main_loop(uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield);
+
+private:
+  void prepare_regions();
+
+  template <class T>
+  void main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield);
+
+  void preclean_weak_refs();
+  void weak_refs_work();
+  void weak_refs_work_doit();
+
+  void fixup_roots();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+#include "gc/shenandoah/shenandoahStringDedup.hpp"
+#include "gc/shenandoah/shenandoahTraversalGC.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
+#include "oops/oop.inline.hpp"
+
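+// Process a single reference in one traversal pass: update the slot to the
+// forwardee (evacuating on the fly if the object sits in the collection set),
+// then mark the target and push it onto the scan queue. DEGEN is the
+// degenerated (stop-the-world) variant that may use unguarded stores;
+// STRING_DEDUP additionally enqueues candidate strings for deduplication.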
+template <class T, bool STRING_DEDUP, bool DEGEN>
+void ShenandoahTraversalGC::process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, ShenandoahMarkingContext* const mark_context) {
+  T o = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(o)) {
+    oop obj = CompressedOops::decode_not_null(o);
+    if (DEGEN) {
+      oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      if (!oopDesc::equals_raw(obj, forw)) {
+        // Update reference.
+        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
+      }
+      obj = forw;
+    } else if (_heap->in_collection_set(obj)) {
+      oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      if (oopDesc::equals_raw(obj, forw)) {
+        forw = _heap->evacuate_object(obj, thread);
+      }
+      shenandoah_assert_forwarded_except(p, obj, _heap->cancelled_gc());
+      // Update reference.
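+      // CAS rather than a plain store: a concurrent worker or mutator may have
+      // updated the slot already; only install 'forw' over the stale 'obj'.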
+      _heap->atomic_compare_exchange_oop(forw, p, obj);
+      obj = forw;
+    }
+
+    shenandoah_assert_not_forwarded(p, obj);
+    shenandoah_assert_not_in_cset_except(p, obj, _heap->cancelled_gc());
+
+    if (mark_context->mark(obj)) {
+      bool succeeded = queue->push(ShenandoahMarkTask(obj));
+      assert(succeeded, "must succeed to push to task queue");
+
+      if (STRING_DEDUP && ShenandoahStringDedup::is_candidate(obj) && !_heap->cancelled_gc()) {
+        assert(ShenandoahStringDedup::is_enabled(), "Must be enabled");
+        // Only deal with to-space strings, so that we can avoid the evac-OOM protocol, which is costly here.
+        shenandoah_assert_not_in_cset(p, obj);
+        ShenandoahStringDedup::enqueue_candidate(obj);
+      }
+    }
+  }
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "jfr/jfrEvents.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/gcWhen.hpp"
+#include "gc/shenandoah/shenandoahAllocTracker.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+
+ShenandoahPhaseTimings::Phase ShenandoahGCPhase::_current_phase = ShenandoahGCPhase::_invalid_phase;
+
+ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause) :
+  _heap(ShenandoahHeap::heap()),
+  _timer(_heap->gc_timer()),
+  _tracer(_heap->tracer()) {
+  assert(!ShenandoahGCPhase::is_valid_phase(ShenandoahGCPhase::current_phase()),
+    "No current GC phase");
+
+  _timer->register_gc_start();
+  _tracer->report_gc_start(cause, _timer->gc_start());
+  _heap->trace_heap(GCWhen::BeforeGC, _tracer);
+
+  _heap->shenandoah_policy()->record_cycle_start();
+  _heap->heuristics()->record_cycle_start();
+  _trace_cycle.initialize(_heap->cycle_memory_manager(), _heap->gc_cause(),
+          /* allMemoryPoolsAffected = */  true,
+          /* recordGCBeginTime = */       true,
+          /* recordPreGCUsage = */        true,
+          /* recordPeakUsage = */         true,
+          /* recordPostGCUsage = */       true,
+          /* recordAccumulatedGCTime = */ true,
+          /* recordGCEndTime = */         true,
+          /* countCollection = */         true
+  );
+}
+
+ShenandoahGCSession::~ShenandoahGCSession() {
+  _heap->heuristics()->record_cycle_end();
+  _timer->register_gc_end();
+  _heap->trace_heap(GCWhen::AfterGC, _tracer);
+  _tracer->report_gc_end(_timer->gc_end(), _timer->time_partitions());
+  assert(!ShenandoahGCPhase::is_valid_phase(ShenandoahGCPhase::current_phase()),
+    "No current GC phase");
+}
+
+ShenandoahGCPauseMark::ShenandoahGCPauseMark(uint gc_id, SvcGCMarker::reason_type type) :
+  _heap(ShenandoahHeap::heap()), _gc_id_mark(gc_id), _svc_gc_mark(type), _is_gc_active_mark() {
+
+  // FIXME: It seems that JMC throws away level 0 events, which are the Shenandoah
+  // pause events. Create this pseudo level 0 event to push real events to level 1.
+  _heap->gc_timer()->register_gc_phase_start("Shenandoah", Ticks::now());
+  _trace_pause.initialize(_heap->stw_memory_manager(), _heap->gc_cause(),
+          /* allMemoryPoolsAffected = */  true,
+          /* recordGCBeginTime = */       true,
+          /* recordPreGCUsage = */        false,
+          /* recordPeakUsage = */         false,
+          /* recordPostGCUsage = */       false,
+          /* recordAccumulatedGCTime = */ true,
+          /* recordGCEndTime = */         true,
+          /* countCollection = */         true
+  );
+
+  _heap->heuristics()->record_gc_start();
+}
+
+ShenandoahGCPauseMark::~ShenandoahGCPauseMark() {
+  _heap->gc_timer()->register_gc_phase_end(Ticks::now());
+  _heap->heuristics()->record_gc_end();
+}
+
+ShenandoahGCPhase::ShenandoahGCPhase(const ShenandoahPhaseTimings::Phase phase) :
+  _heap(ShenandoahHeap::heap()), _phase(phase) {
+  assert(Thread::current()->is_VM_thread() ||
+         Thread::current()->is_ConcurrentGC_thread(),
+        "Must be set by these threads");
+  _parent_phase = _current_phase;
+  _current_phase = phase;
+
+  _heap->phase_timings()->record_phase_start(_phase);
+}
+
+ShenandoahGCPhase::~ShenandoahGCPhase() {
+  _heap->phase_timings()->record_phase_end(_phase);
+  _current_phase = _parent_phase;
+}
+
+bool ShenandoahGCPhase::is_valid_phase(ShenandoahPhaseTimings::Phase phase) {
+  return phase >= 0 && phase < ShenandoahPhaseTimings::_num_phases;
+}
+
+bool ShenandoahGCPhase::is_root_work_phase() {
+  switch(current_phase()) {
+    case ShenandoahPhaseTimings::scan_roots:
+    case ShenandoahPhaseTimings::update_roots:
+    case ShenandoahPhaseTimings::init_evac:
+    case ShenandoahPhaseTimings::final_update_refs_roots:
+    case ShenandoahPhaseTimings::degen_gc_update_roots:
+    case ShenandoahPhaseTimings::init_traversal_gc_work:
+    case ShenandoahPhaseTimings::final_traversal_gc_work:
+    case ShenandoahPhaseTimings::final_traversal_update_roots:
+    case ShenandoahPhaseTimings::full_gc_roots:
+      return true;
+    default:
+      return false;
+  }
+}
+
+ShenandoahAllocTrace::ShenandoahAllocTrace(size_t words_size, ShenandoahAllocRequest::Type alloc_type) {
+  if (ShenandoahAllocationTrace) {
+    _start = os::elapsedTime();
+    _size = words_size;
+    _alloc_type = alloc_type;
+  } else {
+    _start = 0;
+    _size = 0;
+    _alloc_type = ShenandoahAllocRequest::Type(0);
+  }
+}
+
+ShenandoahAllocTrace::~ShenandoahAllocTrace() {
+  if (ShenandoahAllocationTrace) {
+    double stop = os::elapsedTime();
+    double duration_sec = stop - _start;
+    double duration_us = duration_sec * 1000000;
+    ShenandoahAllocTracker* tracker = ShenandoahHeap::heap()->alloc_tracker();
+    assert(tracker != NULL, "Must be");
+    tracker->record_alloc_latency(_size, _alloc_type, duration_us);
+    if (duration_us > ShenandoahAllocationStallThreshold) {
+      log_warning(gc)("Allocation stall: %.0f us (threshold: " INTX_FORMAT " us)",
+                      duration_us, ShenandoahAllocationStallThreshold);
+    }
+  }
+}
+
+ShenandoahWorkerSession::ShenandoahWorkerSession(uint worker_id) : _worker_id(worker_id) {
+  Thread* thr = Thread::current();
+  assert(ShenandoahThreadLocalData::worker_id(thr) == ShenandoahThreadLocalData::INVALID_WORKER_ID, "Already set");
+  ShenandoahThreadLocalData::set_worker_id(thr, worker_id);
+}
+
+ShenandoahConcurrentWorkerSession::~ShenandoahConcurrentWorkerSession() {
+  _event.commit(GCId::current(), ShenandoahPhaseTimings::phase_name(ShenandoahGCPhase::current_phase()));
+}
+
+ShenandoahParallelWorkerSession::~ShenandoahParallelWorkerSession() {
+  _event.commit(GCId::current(), _worker_id, ShenandoahPhaseTimings::phase_name(ShenandoahGCPhase::current_phase()));
+}
+
+ShenandoahWorkerSession::~ShenandoahWorkerSession() {
+#ifdef ASSERT
+  Thread* thr = Thread::current();
+  assert(ShenandoahThreadLocalData::worker_id(thr) != ShenandoahThreadLocalData::INVALID_WORKER_ID, "Must be set");
+  ShenandoahThreadLocalData::set_worker_id(thr, ShenandoahThreadLocalData::INVALID_WORKER_ID);
+#endif
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAHUTILS_HPP
+#define SHARE_VM_GC_SHENANDOAHUTILS_HPP
+
+#include "jfr/jfrEvents.hpp"
+
+#include "gc/shared/gcCause.hpp"
+#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/shared/gcVMOperations.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/vmThread.hpp"
+#include "runtime/vmOperations.hpp"
+#include "services/memoryService.hpp"
+
+class GCTimer;
+class GCTracer;
+
+class ShenandoahGCSession : public StackObj {
+private:
+  ShenandoahHeap* const _heap;
+  GCTimer*  const _timer;
+  GCTracer* const _tracer;
+
+  TraceMemoryManagerStats _trace_cycle;
+public:
+  ShenandoahGCSession(GCCause::Cause cause);
+  ~ShenandoahGCSession();
+};
+
+class ShenandoahGCPhase : public StackObj {
+private:
+  static const ShenandoahPhaseTimings::Phase _invalid_phase = ShenandoahPhaseTimings::_num_phases;
+  static ShenandoahPhaseTimings::Phase       _current_phase;
+
+  ShenandoahHeap* const _heap;
+  const ShenandoahPhaseTimings::Phase   _phase;
+  ShenandoahPhaseTimings::Phase         _parent_phase;
+public:
+  ShenandoahGCPhase(ShenandoahPhaseTimings::Phase phase);
+  ~ShenandoahGCPhase();
+
+  static ShenandoahPhaseTimings::Phase current_phase() { return _current_phase; }
+
+  static bool is_valid_phase(ShenandoahPhaseTimings::Phase phase);
+  static bool is_current_phase_valid() { return is_valid_phase(current_phase()); }
+  static bool is_root_work_phase();
+};
+
+// Aggregates all the things that should happen before/after the pause.
+class ShenandoahGCPauseMark : public StackObj {
+private:
+  ShenandoahHeap* const _heap;
+  const GCIdMark                _gc_id_mark;
+  const SvcGCMarker             _svc_gc_mark;
+  const IsGCActiveMark          _is_gc_active_mark;
+  TraceMemoryManagerStats       _trace_pause;
+
+public:
+  ShenandoahGCPauseMark(uint gc_id, SvcGCMarker::reason_type type);
+  ~ShenandoahGCPauseMark();
+};
+
+class ShenandoahAllocTrace : public StackObj {
+private:
+  double _start;
+  size_t _size;
+  ShenandoahAllocRequest::Type _alloc_type;
+public:
+  ShenandoahAllocTrace(size_t words_size, ShenandoahAllocRequest::Type alloc_type);
+  ~ShenandoahAllocTrace();
+};
+
+class ShenandoahSafepoint : public AllStatic {
+public:
+  // check if Shenandoah GC safepoint is in progress
+  static inline bool is_at_shenandoah_safepoint() {
+    if (!SafepointSynchronize::is_at_safepoint()) return false;
+
+    VM_Operation* vm_op = VMThread::vm_operation();
+    if (vm_op == NULL) return false;
+
+    VM_Operation::VMOp_Type type = vm_op->type();
+    return type == VM_Operation::VMOp_ShenandoahInitMark ||
+           type == VM_Operation::VMOp_ShenandoahFinalMarkStartEvac ||
+           type == VM_Operation::VMOp_ShenandoahFinalEvac ||
+           type == VM_Operation::VMOp_ShenandoahInitTraversalGC ||
+           type == VM_Operation::VMOp_ShenandoahFinalTraversalGC ||
+           type == VM_Operation::VMOp_ShenandoahInitUpdateRefs ||
+           type == VM_Operation::VMOp_ShenandoahFinalUpdateRefs ||
+           type == VM_Operation::VMOp_ShenandoahFullGC ||
+           type == VM_Operation::VMOp_ShenandoahDegeneratedGC;
+  }
+};
+
+class ShenandoahWorkerSession : public StackObj {
+protected:
+  uint _worker_id;
+
+  ShenandoahWorkerSession(uint worker_id);
+  ~ShenandoahWorkerSession();
+public:
+  static inline uint worker_id() {
+    Thread* thr = Thread::current();
+    uint id = ShenandoahThreadLocalData::worker_id(thr);
+    assert(id != ShenandoahThreadLocalData::INVALID_WORKER_ID, "Worker session has not been created");
+    return id;
+  }
+};
+
+class ShenandoahConcurrentWorkerSession : public ShenandoahWorkerSession {
+private:
+  EventGCPhaseConcurrent _event;
+
+public:
+  ShenandoahConcurrentWorkerSession(uint worker_id) : ShenandoahWorkerSession(worker_id) { }
+  ~ShenandoahConcurrentWorkerSession();
+};
+
+class ShenandoahParallelWorkerSession : public ShenandoahWorkerSession {
+private:
+  EventGCPhaseParallel _event;
+
+public:
+  ShenandoahParallelWorkerSession(uint worker_id) : ShenandoahWorkerSession(worker_id) { }
+  ~ShenandoahParallelWorkerSession();
+};
+
+class ShenandoahSuspendibleThreadSetJoiner {
+private:
+  SuspendibleThreadSetJoiner _joiner;
+public:
+  ShenandoahSuspendibleThreadSetJoiner(bool active = true) : _joiner(active) {
+    assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be joined before evac scope");
+  }
+  ~ShenandoahSuspendibleThreadSetJoiner() {
+    assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be left after evac scope");
+  }
+};
+
+class ShenandoahSuspendibleThreadSetLeaver {
+private:
+  SuspendibleThreadSetLeaver _leaver;
+public:
+  ShenandoahSuspendibleThreadSetLeaver(bool active = true) : _leaver(active) {
+    assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be left after evac scope");
+  }
+  ~ShenandoahSuspendibleThreadSetLeaver() {
+    assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be joined before evac scope");
+  }
+};
+
+#endif // SHARE_VM_GC_SHENANDOAHUTILS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVMOperations.hpp"
+
+bool VM_ShenandoahReferenceOperation::doit_prologue() {
+  Heap_lock->lock();
+  return true;
+}
+
+void VM_ShenandoahReferenceOperation::doit_epilogue() {
+  if (Universe::has_reference_pending_list()) {
+    Heap_lock->notify_all();
+  }
+  Heap_lock->unlock();
+}
+
+void VM_ShenandoahInitMark::doit() {
+  ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
+  ShenandoahHeap::heap()->entry_init_mark();
+}
+
+void VM_ShenandoahFinalMarkStartEvac::doit() {
+  ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
+  ShenandoahHeap::heap()->entry_final_mark();
+}
+
+void VM_ShenandoahFinalEvac::doit() {
+  ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
+  ShenandoahHeap::heap()->entry_final_evac();
+}
+
+void VM_ShenandoahFullGC::doit() {
+  ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::FULL);
+  ShenandoahHeap::heap()->entry_full(_gc_cause);
+}
+
+void VM_ShenandoahDegeneratedGC::doit() {
+  ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
+  ShenandoahHeap::heap()->entry_degenerated(_point);
+}
+
+void VM_ShenandoahInitTraversalGC::doit() {
+  ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
+  ShenandoahHeap::heap()->entry_init_traversal();
+}
+
+void VM_ShenandoahFinalTraversalGC::doit() {
+  ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
+  ShenandoahHeap::heap()->entry_final_traversal();
+}
+
+void VM_ShenandoahInitUpdateRefs::doit() {
+  ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
+  ShenandoahHeap::heap()->entry_init_updaterefs();
+}
+
+void VM_ShenandoahFinalUpdateRefs::doit() {
+  ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
+  ShenandoahHeap::heap()->entry_final_updaterefs();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP
+#define SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP
+
+#include "gc/shared/gcVMOperations.hpp"
+
+// VM_operations for the Shenandoah Collector.
+//
+// VM_ShenandoahOperation
+//   - VM_ShenandoahInitMark: initiate concurrent marking
+//   - VM_ShenandoahFinalEvac: finish concurrent evacuation
+//   - VM_ShenandoahInitTraversalGC: initiate traversal GC
+//   - VM_ShenandoahInitUpdateRefs: initiate update references
+//   - VM_ShenandoahFinalUpdateRefs: finish up update references
+//   - VM_ShenandoahReferenceOperation:
+//       - VM_ShenandoahFinalMarkStartEvac: finish up concurrent marking, and start evacuation
+//       - VM_ShenandoahDegeneratedGC: do degenerated GC
+//       - VM_ShenandoahFullGC: do full GC
+//       - VM_ShenandoahFinalTraversalGC: finish traversal GC
+
+class VM_ShenandoahOperation : public VM_Operation {
+protected:
+  uint         _gc_id;
+public:
+  VM_ShenandoahOperation() : _gc_id(GCId::current()) {};
+};
+
+class VM_ShenandoahReferenceOperation : public VM_ShenandoahOperation {
+public:
+  VM_ShenandoahReferenceOperation() : VM_ShenandoahOperation() {};
+  bool doit_prologue();
+  void doit_epilogue();
+};
+
+class VM_ShenandoahInitMark: public VM_ShenandoahOperation {
+public:
+  VM_ShenandoahInitMark() : VM_ShenandoahOperation() {};
+  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitMark; }
+  const char* name()             const { return "Shenandoah Init Marking"; }
+  virtual void doit();
+};
+
+class VM_ShenandoahFinalMarkStartEvac: public VM_ShenandoahReferenceOperation {
+public:
+  VM_ShenandoahFinalMarkStartEvac() : VM_ShenandoahReferenceOperation() {};
+  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalMarkStartEvac; }
+  const char* name()             const { return "Shenandoah Final Mark and Start Evacuation"; }
+  virtual  void doit();
+};
+
+class VM_ShenandoahFinalEvac: public VM_ShenandoahOperation {
+public:
+  VM_ShenandoahFinalEvac() : VM_ShenandoahOperation() {};
+  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalEvac; }
+  const char* name()             const { return "Shenandoah Final Evacuation"; }
+  virtual  void doit();
+};
+
+class VM_ShenandoahDegeneratedGC: public VM_ShenandoahReferenceOperation {
+private:
+  // Really the ShenandoahHeap::ShenandoahDegenerationPoint, but cast to int here
+  // to avoid a dependency on ShenandoahHeap
+  int _point;
+public:
+  VM_ShenandoahDegeneratedGC(int point) : VM_ShenandoahReferenceOperation(), _point(point) {};
+  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahDegeneratedGC; }
+  const char* name()             const { return "Shenandoah Degenerated GC"; }
+  virtual  void doit();
+};
+
+class VM_ShenandoahFullGC : public VM_ShenandoahReferenceOperation {
+private:
+  GCCause::Cause _gc_cause;
+public:
+  VM_ShenandoahFullGC(GCCause::Cause gc_cause) : VM_ShenandoahReferenceOperation(), _gc_cause(gc_cause) {};
+  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFullGC; }
+  const char* name()             const { return "Shenandoah Full GC"; }
+  virtual void doit();
+};
+
+class VM_ShenandoahInitTraversalGC: public VM_ShenandoahOperation {
+public:
+  VM_ShenandoahInitTraversalGC() : VM_ShenandoahOperation() {};
+  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitTraversalGC; }
+  const char* name()             const { return "Shenandoah Init Traversal Collection"; }
+  virtual void doit();
+};
+
+class VM_ShenandoahFinalTraversalGC: public VM_ShenandoahReferenceOperation {
+public:
+  VM_ShenandoahFinalTraversalGC() : VM_ShenandoahReferenceOperation() {};
+  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalTraversalGC; }
+  const char* name()             const { return "Shenandoah Final Traversal Collection"; }
+  virtual void doit();
+};
+
+class VM_ShenandoahInitUpdateRefs: public VM_ShenandoahOperation {
+public:
+  VM_ShenandoahInitUpdateRefs() : VM_ShenandoahOperation() {};
+  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitUpdateRefs; }
+  const char* name()             const { return "Shenandoah Init Update References"; }
+  virtual void doit();
+};
+
+class VM_ShenandoahFinalUpdateRefs: public VM_ShenandoahOperation {
+public:
+  VM_ShenandoahFinalUpdateRefs() : VM_ShenandoahOperation() {};
+  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalUpdateRefs; }
+  const char* name()             const { return "Shenandoah Final Update References"; }
+  virtual void doit();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,906 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVerifier.hpp"
+#include "memory/allocation.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/resourceArea.hpp"
+
+// Avoid name collision on verify_oop (defined in macroAssembler_arm.hpp)
+#ifdef verify_oop
+#undef verify_oop
+#endif
+
+class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {
+private:
+  const char* _phase;
+  ShenandoahVerifier::VerifyOptions _options;
+  ShenandoahVerifierStack* _stack;
+  ShenandoahHeap* _heap;
+  MarkBitMap* _map;
+  ShenandoahLivenessData* _ld;
+  void* _interior_loc;
+  oop _loc;
+
+public:
+  ShenandoahVerifyOopClosure(ShenandoahVerifierStack* stack, MarkBitMap* map, ShenandoahLivenessData* ld,
+                             const char* phase, ShenandoahVerifier::VerifyOptions options) :
+    _phase(phase),
+    _options(options),
+    _stack(stack),
+    _heap(ShenandoahHeap::heap()),
+    _map(map),
+    _ld(ld),
+    _interior_loc(NULL),
+    _loc(NULL) { }
+
+private:
+  void check(ShenandoahAsserts::SafeLevel level, oop obj, bool test, const char* label) {
+    if (!test) {
+      ShenandoahAsserts::print_failure(level, obj, _interior_loc, _loc, _phase, label, __FILE__, __LINE__);
+    }
+  }
+
+  template <class T>
+  void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+
+      // Single-threaded verification can use faster non-atomic stack and bitmap
+      // methods.
+      //
+      // For performance reasons, only fully verify non-marked field values.
+      // We are here when the host object for *p is already marked.
+
+      HeapWord* addr = (HeapWord*) obj;
+      if (_map->par_mark(addr)) {
+        verify_oop_at(p, obj);
+        _stack->push(ShenandoahVerifierTask(obj));
+      }
+    }
+  }
+
+  void verify_oop(oop obj) {
+    // Perform consistency checks with gradually decreasing safety level. This guarantees
+    // that failure report would not try to touch something that was not yet verified to be
+    // safe to process.
+
+    check(ShenandoahAsserts::_safe_unknown, obj, _heap->is_in(obj),
+              "oop must be in heap");
+    check(ShenandoahAsserts::_safe_unknown, obj, check_obj_alignment(obj),
+              "oop must be aligned");
+
+    ShenandoahHeapRegion *obj_reg = _heap->heap_region_containing(obj);
+    Klass* obj_klass = obj->klass_or_null();
+
+    // Verify that obj is not in dead space:
+    {
+      // Do this before touching obj->size()
+      check(ShenandoahAsserts::_safe_unknown, obj, obj_klass != NULL,
+             "Object klass pointer should not be NULL");
+      check(ShenandoahAsserts::_safe_unknown, obj, Metaspace::contains(obj_klass),
+             "Object klass pointer must go to metaspace");
+
+      HeapWord *obj_addr = (HeapWord *) obj;
+      check(ShenandoahAsserts::_safe_unknown, obj, obj_addr < obj_reg->top(),
+             "Object start should be within the region");
+
+      if (!obj_reg->is_humongous()) {
+        check(ShenandoahAsserts::_safe_unknown, obj, (obj_addr + obj->size()) <= obj_reg->top(),
+               "Object end should be within the region");
+      } else {
+        size_t humongous_start = obj_reg->region_number();
+        size_t humongous_end = humongous_start + (obj->size() >> ShenandoahHeapRegion::region_size_words_shift());
+        for (size_t idx = humongous_start + 1; idx < humongous_end; idx++) {
+          check(ShenandoahAsserts::_safe_unknown, obj, _heap->get_region(idx)->is_humongous_continuation(),
+                 "Humongous object is in continuation that fits it");
+        }
+      }
+
+      // ------------ obj is safe at this point --------------
+
+      check(ShenandoahAsserts::_safe_oop, obj, obj_reg->is_active(),
+            "Object should be in active region");
+
+      switch (_options._verify_liveness) {
+        case ShenandoahVerifier::_verify_liveness_disable:
+          // skip
+          break;
+        case ShenandoahVerifier::_verify_liveness_complete:
+          Atomic::add(obj->size() + ShenandoahBrooksPointer::word_size(), &_ld[obj_reg->region_number()]);
+          // fallthrough for fast failure for un-live regions:
+        case ShenandoahVerifier::_verify_liveness_conservative:
+          check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(),
+                   "Object must belong to region with live data");
+          break;
+        default:
+          assert(false, "Unhandled liveness verification");
+      }
+    }
+
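+    // Read the Brooks pointer raw, without barriers or asserts: the verifier
+    // itself is responsible for checking that the forwardee is sane.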
+    oop fwd = (oop) ShenandoahBrooksPointer::get_raw_unchecked(obj);
+
+    ShenandoahHeapRegion* fwd_reg = NULL;
+
+    if (!oopDesc::equals_raw(obj, fwd)) {
+      check(ShenandoahAsserts::_safe_oop, obj, _heap->is_in(fwd),
+             "Forwardee must be in heap");
+      check(ShenandoahAsserts::_safe_oop, obj, !CompressedOops::is_null(fwd),
+             "Forwardee is set");
+      check(ShenandoahAsserts::_safe_oop, obj, check_obj_alignment(fwd),
+             "Forwardee must be aligned");
+
+      // Do this before touching fwd->size()
+      Klass* fwd_klass = fwd->klass_or_null();
+      check(ShenandoahAsserts::_safe_oop, obj, fwd_klass != NULL,
+             "Forwardee klass pointer should not be NULL");
+      check(ShenandoahAsserts::_safe_oop, obj, Metaspace::contains(fwd_klass),
+             "Forwardee klass pointer must go to metaspace");
+      check(ShenandoahAsserts::_safe_oop, obj, obj_klass == fwd_klass,
+             "Forwardee klass pointer must go to metaspace");
+
+      fwd_reg = _heap->heap_region_containing(fwd);
+
+      // Verify that forwardee is not in the dead space:
+      check(ShenandoahAsserts::_safe_oop, obj, !fwd_reg->is_humongous(),
+             "Should have no humongous forwardees");
+
+      HeapWord *fwd_addr = (HeapWord *) fwd;
+      check(ShenandoahAsserts::_safe_oop, obj, fwd_addr < fwd_reg->top(),
+             "Forwardee start should be within the region");
+      check(ShenandoahAsserts::_safe_oop, obj, (fwd_addr + fwd->size()) <= fwd_reg->top(),
+             "Forwardee end should be within the region");
+
+      oop fwd2 = (oop) ShenandoahBrooksPointer::get_raw_unchecked(fwd);
+      check(ShenandoahAsserts::_safe_oop, obj, oopDesc::equals_raw(fwd, fwd2),
+             "Double forwarding");
+    } else {
+      fwd_reg = obj_reg;
+    }
+
+    // ------------ obj and fwd are safe at this point --------------
+
+    switch (_options._verify_marked) {
+      case ShenandoahVerifier::_verify_marked_disable:
+        // skip
+        break;
+      case ShenandoahVerifier::_verify_marked_incomplete:
+        check(ShenandoahAsserts::_safe_all, obj, _heap->marking_context()->is_marked(obj),
+               "Must be marked in incomplete bitmap");
+        break;
+      case ShenandoahVerifier::_verify_marked_complete:
+        check(ShenandoahAsserts::_safe_all, obj, _heap->complete_marking_context()->is_marked(obj),
+               "Must be marked in complete bitmap");
+        break;
+      default:
+        assert(false, "Unhandled mark verification");
+    }
+
+    switch (_options._verify_forwarded) {
+      case ShenandoahVerifier::_verify_forwarded_disable:
+        // skip
+        break;
+      case ShenandoahVerifier::_verify_forwarded_none: {
+        check(ShenandoahAsserts::_safe_all, obj, oopDesc::equals_raw(obj, fwd),
+               "Should not be forwarded");
+        break;
+      }
+      case ShenandoahVerifier::_verify_forwarded_allow: {
+        if (!oopDesc::equals_raw(obj, fwd)) {
+          check(ShenandoahAsserts::_safe_all, obj, obj_reg != fwd_reg,
+                 "Forwardee should be in another region");
+        }
+        break;
+      }
+      default:
+        assert(false, "Unhandled forwarding verification");
+    }
+
+    switch (_options._verify_cset) {
+      case ShenandoahVerifier::_verify_cset_disable:
+        // skip
+        break;
+      case ShenandoahVerifier::_verify_cset_none:
+        check(ShenandoahAsserts::_safe_all, obj, !_heap->in_collection_set(obj),
+               "Should not have references to collection set");
+        break;
+      case ShenandoahVerifier::_verify_cset_forwarded:
+        if (_heap->in_collection_set(obj)) {
+          check(ShenandoahAsserts::_safe_all, obj, !oopDesc::equals_raw(obj, fwd),
+                 "Object in collection set, should have forwardee");
+        }
+        break;
+      default:
+        assert(false, "Unhandled cset verification");
+    }
+
+  }
+
+public:
+  /**
+   * Verify object with known interior reference.
+   * @param p interior reference where the object is referenced from; can be off-heap
+   * @param obj verified object
+   */
+  template <class T>
+  void verify_oop_at(T* p, oop obj) {
+    _interior_loc = p;
+    verify_oop(obj);
+    _interior_loc = NULL;
+  }
+
+  /**
+   * Verify object without known interior reference.
+   * Useful when picking up the object at known offset in heap,
+   * but without knowing what objects reference it.
+   * @param obj verified object
+   */
+  void verify_oop_standalone(oop obj) {
+    _interior_loc = NULL;
+    verify_oop(obj);
+    _interior_loc = NULL;
+  }
+
+  /**
+   * Verify oop fields from this object.
+   * @param obj host object for verified fields
+   */
+  void verify_oops_from(oop obj) {
+    _loc = obj;
+    obj->oop_iterate(this);
+    _loc = NULL;
+  }
+
+  virtual void do_oop(oop* p) { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure {
+private:
+  size_t _used, _committed, _garbage;
+public:
+  ShenandoahCalculateRegionStatsClosure() : _used(0), _committed(0), _garbage(0) {};
+
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    _used += r->used();
+    _garbage += r->garbage();
+    _committed += r->is_committed() ? ShenandoahHeapRegion::region_size_bytes() : 0;
+  }
+
+  size_t used() { return _used; }
+  size_t committed() { return _committed; }
+  size_t garbage() { return _garbage; }
+};
+
+class ShenandoahVerifyHeapRegionClosure : public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahHeap* _heap;
+  const char* _phase;
+  ShenandoahVerifier::VerifyRegions _regions;
+public:
+  ShenandoahVerifyHeapRegionClosure(const char* phase, ShenandoahVerifier::VerifyRegions regions) :
+    _heap(ShenandoahHeap::heap()),
+    _phase(phase),
+    _regions(regions) {};
+
+  void print_failure(ShenandoahHeapRegion* r, const char* label) {
+    ResourceMark rm;
+
+    ShenandoahMessageBuffer msg("Shenandoah verification failed; %s: %s\n\n", _phase, label);
+
+    stringStream ss;
+    r->print_on(&ss);
+    msg.append("%s", ss.as_string());
+
+    report_vm_error(__FILE__, __LINE__, msg.buffer());
+  }
+
+  void verify(ShenandoahHeapRegion* r, bool test, const char* msg) {
+    if (!test) {
+      print_failure(r, msg);
+    }
+  }
+
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    switch (_regions) {
+      case ShenandoahVerifier::_verify_regions_disable:
+        break;
+      case ShenandoahVerifier::_verify_regions_notrash:
+        verify(r, !r->is_trash(),
+               "Should not have trash regions");
+        break;
+      case ShenandoahVerifier::_verify_regions_nocset:
+        verify(r, !r->is_cset(),
+               "Should not have cset regions");
+        break;
+      case ShenandoahVerifier::_verify_regions_notrash_nocset:
+        verify(r, !r->is_trash(),
+               "Should not have trash regions");
+        verify(r, !r->is_cset(),
+               "Should not have cset regions");
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+
+    verify(r, r->capacity() == ShenandoahHeapRegion::region_size_bytes(),
+           "Capacity should match region size");
+
+    verify(r, r->bottom() <= r->top(),
+           "Region top should not be less than bottom");
+
+    verify(r, r->bottom() <= _heap->marking_context()->top_at_mark_start(r),
+           "Region TAMS should not be less than bottom");
+
+    verify(r, _heap->marking_context()->top_at_mark_start(r) <= r->top(),
+           "Complete TAMS should not be larger than top");
+
+    verify(r, r->get_live_data_bytes() <= r->capacity(),
+           "Live data cannot be larger than capacity");
+
+    verify(r, r->garbage() <= r->capacity(),
+           "Garbage cannot be larger than capacity");
+
+    verify(r, r->used() <= r->capacity(),
+           "Used cannot be larger than capacity");
+
+    verify(r, r->get_shared_allocs() <= r->capacity(),
+           "Shared alloc count should not be larger than capacity");
+
+    verify(r, r->get_tlab_allocs() <= r->capacity(),
+           "TLAB alloc count should not be larger than capacity");
+
+    verify(r, r->get_gclab_allocs() <= r->capacity(),
+           "GCLAB alloc count should not be larger than capacity");
+
+    verify(r, r->get_shared_allocs() + r->get_tlab_allocs() + r->get_gclab_allocs() == r->used(),
+           "Accurate accounting: shared + TLAB + GCLAB = used");
+
+    verify(r, !r->is_empty() || !r->has_live(),
+           "Empty regions should not have live data");
+
+    verify(r, r->is_cset() == _heap->collection_set()->is_in(r),
+           "Transitional: region flags and collection set agree");
+
+    verify(r, r->is_empty() || r->seqnum_first_alloc() != 0,
+           "Non-empty regions should have first seqnum set");
+
+    verify(r, r->is_empty() || (r->seqnum_first_alloc_mutator() != 0 || r->seqnum_first_alloc_gc() != 0),
+           "Non-empty regions should have first seqnum set to either GC or mutator");
+
+    verify(r, r->is_empty() || r->seqnum_last_alloc() != 0,
+           "Non-empty regions should have last seqnum set");
+
+    verify(r, r->is_empty() || (r->seqnum_last_alloc_mutator() != 0 || r->seqnum_last_alloc_gc() != 0),
+           "Non-empty regions should have last seqnum set to either GC or mutator");
+
+    verify(r, r->seqnum_first_alloc() <= r->seqnum_last_alloc(),
+           "First seqnum should not be greater than last timestamp");
+
+    verify(r, r->seqnum_first_alloc_mutator() <= r->seqnum_last_alloc_mutator(),
+           "First mutator seqnum should not be greater than last seqnum");
+
+    verify(r, r->seqnum_first_alloc_gc() <= r->seqnum_last_alloc_gc(),
+           "First GC seqnum should not be greater than last seqnum");
+  }
+};
+
+class ShenandoahVerifierReachableTask : public AbstractGangTask {
+private:
+  const char* _label;
+  ShenandoahRootProcessor* _rp;
+  ShenandoahVerifier::VerifyOptions _options;
+  ShenandoahHeap* _heap;
+  ShenandoahLivenessData* _ld;
+  MarkBitMap* _bitmap;
+  volatile size_t _processed;
+
+public:
+  ShenandoahVerifierReachableTask(MarkBitMap* bitmap,
+                                  ShenandoahLivenessData* ld,
+                                  ShenandoahRootProcessor* rp,
+                                  const char* label,
+                                  ShenandoahVerifier::VerifyOptions options) :
+    AbstractGangTask("Shenandoah Parallel Verifier Reachable Task"),
+    _label(label),
+    _rp(rp),
+    _options(options),
+    _heap(ShenandoahHeap::heap()),
+    _ld(ld),
+    _bitmap(bitmap),
+    _processed(0) {};
+
+  size_t processed() {
+    return _processed;
+  }
+
+  virtual void work(uint worker_id) {
+    ResourceMark rm;
+    ShenandoahVerifierStack stack;
+
+    // On level 2, we only need to check the roots once.
+    // On level 3, we want to check the roots, and seed the local stack.
+    // Accepting multiple root scans at level 3 is the lesser evil, because the
+    // extra parallelism makes up for the repeated work.
+    if (((ShenandoahVerifyLevel == 2) && (worker_id == 0))
+        || (ShenandoahVerifyLevel >= 3)) {
+        ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld,
+                                      ShenandoahMessageBuffer("%s, Roots", _label),
+                                      _options);
+        _rp->process_all_roots_slow(&cl);
+    }
+
+    size_t processed = 0;
+
+    if (ShenandoahVerifyLevel >= 3) {
+      ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld,
+                                    ShenandoahMessageBuffer("%s, Reachable", _label),
+                                    _options);
+      while (!stack.is_empty()) {
+        processed++;
+        ShenandoahVerifierTask task = stack.pop();
+        cl.verify_oops_from(task.obj());
+      }
+    }
+
+    Atomic::add(processed, &_processed);
+  }
+};
+
+class ShenandoahVerifierMarkedRegionTask : public AbstractGangTask {
+private:
+  const char* _label;
+  ShenandoahVerifier::VerifyOptions _options;
+  ShenandoahHeap *_heap;
+  MarkBitMap* _bitmap;
+  ShenandoahLivenessData* _ld;
+  volatile size_t _claimed;
+  volatile size_t _processed;
+
+public:
+  ShenandoahVerifierMarkedRegionTask(MarkBitMap* bitmap,
+                                     ShenandoahLivenessData* ld,
+                                     const char* label,
+                                     ShenandoahVerifier::VerifyOptions options) :
+          AbstractGangTask("Shenandoah Parallel Verifier Marked Region"),
+          _label(label),
+          _options(options),
+          _heap(ShenandoahHeap::heap()),
+          _bitmap(bitmap),
+          _ld(ld),
+          _claimed(0),
+          _processed(0) {};
+
+  size_t processed() {
+    return _processed;
+  }
+
+  virtual void work(uint worker_id) {
+    ShenandoahVerifierStack stack;
+    ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld,
+                                  ShenandoahMessageBuffer("%s, Marked", _label),
+                                  _options);
+
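+    // Claim regions lock-free: each Atomic::add hands this worker the next
+    // unclaimed region index, until all regions are exhausted.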
+    while (true) {
+      size_t v = Atomic::add(1u, &_claimed) - 1;
+      if (v < _heap->num_regions()) {
+        ShenandoahHeapRegion* r = _heap->get_region(v);
+        if (!r->is_humongous() && !r->is_trash()) {
+          work_regular(r, stack, cl);
+        } else if (r->is_humongous_start()) {
+          work_humongous(r, stack, cl);
+        }
+      } else {
+        break;
+      }
+    }
+  }
+
+  virtual void work_humongous(ShenandoahHeapRegion *r, ShenandoahVerifierStack& stack, ShenandoahVerifyOopClosure& cl) {
+    size_t processed = 0;
+    HeapWord* obj = r->bottom() + ShenandoahBrooksPointer::word_size();
+    if (_heap->complete_marking_context()->is_marked((oop)obj)) {
+      verify_and_follow(obj, stack, cl, &processed);
+    }
+    Atomic::add(processed, &_processed);
+  }
+
+  virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) {
+    size_t processed = 0;
+    MarkBitMap* mark_bit_map = _heap->complete_marking_context()->mark_bit_map();
+    HeapWord* tams = _heap->complete_marking_context()->top_at_mark_start(r);
+
+    // Bitmaps, before TAMS
+    if (tams > r->bottom()) {
+      HeapWord* start = r->bottom() + ShenandoahBrooksPointer::word_size();
+      HeapWord* addr = mark_bit_map->get_next_marked_addr(start, tams);
+
+      while (addr < tams) {
+        verify_and_follow(addr, stack, cl, &processed);
+        addr += ShenandoahBrooksPointer::word_size();
+        if (addr < tams) {
+          addr = mark_bit_map->get_next_marked_addr(addr, tams);
+        }
+      }
+    }
+
+    // Size-based, after TAMS
+    {
+      HeapWord* limit = r->top();
+      HeapWord* addr = tams + ShenandoahBrooksPointer::word_size();
+
+      while (addr < limit) {
+        verify_and_follow(addr, stack, cl, &processed);
+        addr += oop(addr)->size() + ShenandoahBrooksPointer::word_size();
+      }
+    }
+
+    Atomic::add(processed, &_processed);
+  }
+
+  void verify_and_follow(HeapWord *addr, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl, size_t *processed) {
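+    // Skip objects that another worker has already claimed in the verification bitmap: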
+    if (!_bitmap->par_mark(addr)) return;
+
+    // Verify the object itself:
+    oop obj = oop(addr);
+    cl.verify_oop_standalone(obj);
+
+    // Verify everything reachable from that object too; ideally everything is
+    // already marked, and the walk terminates without going further:
+    cl.verify_oops_from(obj);
+    (*processed)++;
+
+    while (!stack.is_empty()) {
+      ShenandoahVerifierTask task = stack.pop();
+      cl.verify_oops_from(task.obj());
+      (*processed)++;
+    }
+  }
+};
+
+class VerifyThreadGCState : public ThreadClosure {
+private:
+  const char* _label;
+  char _expected;
+
+public:
+  VerifyThreadGCState(const char* label, char expected) : _label(label), _expected(expected) {}
+  void do_thread(Thread* t) {
+    char actual = ShenandoahThreadLocalData::gc_state(t);
+    if (actual != _expected) {
+      fatal("%s: Thread %s: expected gc-state %d, actual %d", _label, t->name(), _expected, actual);
+    }
+  }
+};
+
+void ShenandoahVerifier::verify_at_safepoint(const char *label,
+                                             VerifyForwarded forwarded, VerifyMarked marked,
+                                             VerifyCollectionSet cset,
+                                             VerifyLiveness liveness, VerifyRegions regions,
+                                             VerifyGCState gcstate) {
+  guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens");
+  guarantee(ShenandoahVerify, "only when enabled, and bitmap is initialized in ShenandoahHeap::initialize");
+
+  // Avoid side-effect of changing workers' active thread count, but bypass concurrent/parallel protocol check
+  ShenandoahPushWorkerScope verify_worker_scope(_heap->workers(), _heap->max_workers(), false /*bypass check*/);
+
+  log_info(gc,start)("Verify %s, Level " INTX_FORMAT, label, ShenandoahVerifyLevel);
+
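+  // The amount of checking below scales with ShenandoahVerifyLevel: level 1 verifies
+  // heap region invariants, level 2 also verifies roots, level 3 additionally walks
+  // the reachable heap, and level 4 additionally walks all marked objects.
+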
+  // GC state checks
+  {
+    char expected = -1;
+    bool enabled;
+    switch (gcstate) {
+      case _verify_gcstate_disable:
+        enabled = false;
+        break;
+      case _verify_gcstate_forwarded:
+        enabled = true;
+        expected = ShenandoahHeap::HAS_FORWARDED;
+        break;
+      case _verify_gcstate_stable:
+        enabled = true;
+        expected = ShenandoahHeap::STABLE;
+        break;
+      default:
+        enabled = false;
+        assert(false, "Unhandled gc-state verification");
+    }
+
+    if (enabled) {
+      char actual = _heap->gc_state();
+      if (actual != expected) {
+        fatal("%s: Global gc-state: expected %d, actual %d", label, expected, actual);
+      }
+
+      VerifyThreadGCState vtgcs(label, expected);
+      Threads::java_threads_do(&vtgcs);
+    }
+  }
+
+  // Heap size checks
+  {
+    ShenandoahHeapLocker lock(_heap->lock());
+
+    ShenandoahCalculateRegionStatsClosure cl;
+    _heap->heap_region_iterate(&cl);
+    size_t heap_used = _heap->used();
+    guarantee(cl.used() == heap_used,
+              "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "K, regions-used = " SIZE_FORMAT "K",
+              label, heap_used/K, cl.used()/K);
+
+    size_t heap_committed = _heap->committed();
+    guarantee(cl.committed() == heap_committed,
+              "%s: heap committed size must be consistent: heap-committed = " SIZE_FORMAT "K, regions-committed = " SIZE_FORMAT "K",
+              label, heap_committed/K, cl.committed()/K);
+  }
+
+  // Internal heap region checks
+  if (ShenandoahVerifyLevel >= 1) {
+    ShenandoahVerifyHeapRegionClosure cl(label, regions);
+    _heap->heap_region_iterate(&cl);
+  }
+
+  OrderAccess::fence();
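+  // Make the heap parsable (e.g. retire the labs), so that the object walks below
+  // can safely step over every allocation: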
+  _heap->make_parsable(false);
+
+  // Clear the temporary bitmap that stores the marking wavefront:
+  _verification_bit_map->clear();
+
+  // Allocate temporary array for storing liveness data
+  ShenandoahLivenessData* ld = NEW_C_HEAP_ARRAY(ShenandoahLivenessData, _heap->num_regions(), mtGC);
+  Copy::fill_to_bytes((void*)ld, _heap->num_regions()*sizeof(ShenandoahLivenessData), 0);
+
+  const VerifyOptions& options = ShenandoahVerifier::VerifyOptions(forwarded, marked, cset, liveness, regions, gcstate);
+
+  // Steps 1-2. Scan the root set to get the initial reachable set. Finish walking the reachable heap.
+  // This verifies what the application can see, since it only cares about reachable objects.
+  size_t count_reachable = 0;
+  if (ShenandoahVerifyLevel >= 2) {
+    ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(),
+                               ShenandoahPhaseTimings::_num_phases); // no need for stats
+
+    ShenandoahVerifierReachableTask task(_verification_bit_map, ld, &rp, label, options);
+    _heap->workers()->run_task(&task);
+    count_reachable = task.processed();
+  }
+
+  // Step 3. Walk marked objects. Marked objects might be unreachable. This verifies what the
+  // collector, not the application, can see during the region scans. There is no reason to process
+  // objects that were already verified, i.e. those marked in the verification bitmap. There is an
+  // interaction with TAMS: before TAMS, we verify the bitmaps, if available; after TAMS, we walk
+  // until top(). This mimics what marked_object_iterate is doing, without calling into that
+  // optimized (and possibly incorrect) version.
+
+  size_t count_marked = 0;
+  if (ShenandoahVerifyLevel >= 4 && marked == _verify_marked_complete) {
+    guarantee(_heap->marking_context()->is_complete(), "Marking context should be complete");
+    ShenandoahVerifierMarkedRegionTask task(_verification_bit_map, ld, label, options);
+    _heap->workers()->run_task(&task);
+    count_marked = task.processed();
+  } else {
+    guarantee(ShenandoahVerifyLevel < 4 || marked == _verify_marked_incomplete || marked == _verify_marked_disable, "Should be");
+  }
+
+  // Step 4. Verify accumulated liveness data, if needed. Only reliable if verification level includes
+  // marked objects.
+
+  if (ShenandoahVerifyLevel >= 4 && marked == _verify_marked_complete && liveness == _verify_liveness_complete) {
+    for (size_t i = 0; i < _heap->num_regions(); i++) {
+      ShenandoahHeapRegion* r = _heap->get_region(i);
+
+      juint verf_live = 0;
+      if (r->is_humongous()) {
+        // For humongous objects, test if start region is marked live, and if so,
+        // all humongous regions in that chain have live data equal to their "used".
+        juint start_live = OrderAccess::load_acquire(&ld[r->humongous_start_region()->region_number()]);
+        if (start_live > 0) {
+          verf_live = (juint)(r->used() / HeapWordSize);
+        }
+      } else {
+        verf_live = OrderAccess::load_acquire(&ld[r->region_number()]);
+      }
+
+      size_t reg_live = r->get_live_data_words();
+      if (reg_live != verf_live) {
+        ResourceMark rm;
+        stringStream ss;
+        r->print_on(&ss);
+        fatal("%s: Live data should match: region-live = " SIZE_FORMAT ", verifier-live = " UINT32_FORMAT "\n%s",
+              label, reg_live, verf_live, ss.as_string());
+      }
+    }
+  }
+
+  log_info(gc)("Verify %s, Level " INTX_FORMAT " (" SIZE_FORMAT " reachable, " SIZE_FORMAT " marked)",
+               label, ShenandoahVerifyLevel, count_reachable, count_marked);
+
+  FREE_C_HEAP_ARRAY(ShenandoahLivenessData, ld);
+}
+
+void ShenandoahVerifier::verify_generic(VerifyOption vo) {
+  verify_at_safepoint(
+          "Generic Verification",
+          _verify_forwarded_allow,     // conservatively allow forwarded
+          _verify_marked_disable,      // do not verify marked: lots of time wasted checking dead allocations
+          _verify_cset_disable,        // cset may be inconsistent
+          _verify_liveness_disable,    // no reliable liveness data
+          _verify_regions_disable,     // no reliable region data
+          _verify_gcstate_disable      // no data about gcstate
+  );
+}
+
+void ShenandoahVerifier::verify_before_concmark() {
+  if (_heap->has_forwarded_objects()) {
+    verify_at_safepoint(
+            "Before Mark",
+            _verify_forwarded_allow,     // may have forwarded references
+            _verify_marked_disable,      // do not verify marked: lots of time wasted checking dead allocations
+            _verify_cset_forwarded,      // allow forwarded references to cset
+            _verify_liveness_disable,    // no reliable liveness data
+            _verify_regions_notrash,     // no trash regions
+            _verify_gcstate_forwarded    // there are forwarded objects
+    );
+  } else {
+    verify_at_safepoint(
+            "Before Mark",
+            _verify_forwarded_none,      // UR should have fixed up
+            _verify_marked_disable,      // do not verify marked: lots of time wasted checking dead allocations
+            _verify_cset_none,           // UR should have fixed this
+            _verify_liveness_disable,    // no reliable liveness data
+            _verify_regions_notrash,     // no trash regions
+            _verify_gcstate_stable       // there are no forwarded objects
+    );
+  }
+}
+
+void ShenandoahVerifier::verify_after_concmark() {
+  verify_at_safepoint(
+          "After Mark",
+          _verify_forwarded_none,      // no forwarded references
+          _verify_marked_complete,     // bitmaps as precise as we can get
+          _verify_cset_none,           // no references to cset anymore
+          _verify_liveness_complete,   // liveness data must be complete here
+          _verify_regions_disable,     // trash regions not yet recycled
+          _verify_gcstate_stable       // mark should have stabilized the heap
+  );
+}
+
+void ShenandoahVerifier::verify_before_evacuation() {
+  verify_at_safepoint(
+          "Before Evacuation",
+          _verify_forwarded_none,    // no forwarded references
+          _verify_marked_complete,   // walk over marked objects too
+          _verify_cset_disable,      // non-forwarded references to cset expected
+          _verify_liveness_complete, // liveness data must be complete here
+          _verify_regions_disable,   // trash regions not yet recycled
+          _verify_gcstate_stable     // mark should have stabilized the heap
+  );
+}
+
+void ShenandoahVerifier::verify_after_evacuation() {
+  verify_at_safepoint(
+          "After Evacuation",
+          _verify_forwarded_allow,     // objects are still forwarded
+          _verify_marked_complete,     // bitmaps might be stale, but alloc-after-mark should be fine
+          _verify_cset_forwarded,      // all cset refs are fully forwarded
+          _verify_liveness_disable,    // no reliable liveness data anymore
+          _verify_regions_notrash,     // trash regions have been recycled already
+          _verify_gcstate_forwarded    // evacuation produced some forwarded objects
+  );
+}
+
+void ShenandoahVerifier::verify_before_updaterefs() {
+  verify_at_safepoint(
+          "Before Updating References",
+          _verify_forwarded_allow,     // forwarded references allowed
+          _verify_marked_complete,     // bitmaps might be stale, but alloc-after-mark should be fine
+          _verify_cset_forwarded,      // all cset refs are fully forwarded
+          _verify_liveness_disable,    // no reliable liveness data anymore
+          _verify_regions_notrash,     // trash regions have been recycled already
+          _verify_gcstate_forwarded    // evacuation should have produced some forwarded objects
+  );
+}
+
+void ShenandoahVerifier::verify_after_updaterefs() {
+  verify_at_safepoint(
+          "After Updating References",
+          _verify_forwarded_none,      // no forwarded references
+          _verify_marked_complete,     // bitmaps might be stale, but alloc-after-mark should be fine
+          _verify_cset_none,           // no cset references, all updated
+          _verify_liveness_disable,    // no reliable liveness data anymore
+          _verify_regions_nocset,      // no cset regions, trash regions have appeared
+          _verify_gcstate_stable       // update refs had cleaned up forwarded objects
+  );
+}
+
+void ShenandoahVerifier::verify_after_degenerated() {
+  verify_at_safepoint(
+          "After Degenerated GC",
+          _verify_forwarded_none,      // all objects are non-forwarded
+          _verify_marked_complete,     // all objects are marked in complete bitmap
+          _verify_cset_none,           // no cset references
+          _verify_liveness_disable,    // no reliable liveness data anymore
+          _verify_regions_notrash_nocset, // no trash, no cset
+          _verify_gcstate_stable       // degenerated refs had cleaned up forwarded objects
+  );
+}
+
+void ShenandoahVerifier::verify_before_traversal() {
+  verify_at_safepoint(
+          "Before Traversal",
+          _verify_forwarded_none,      // cannot have forwarded objects
+          _verify_marked_disable,      // bitmaps are not relevant before traversal
+          _verify_cset_none,           // no cset references before traversal
+          _verify_liveness_disable,    // no reliable liveness data anymore
+          _verify_regions_notrash_nocset, // no trash and no cset regions
+          _verify_gcstate_stable       // nothing forwarded before traversal
+  );
+}
+
+void ShenandoahVerifier::verify_after_traversal() {
+  verify_at_safepoint(
+          "After Traversal",
+          _verify_forwarded_none,      // cannot have forwarded objects
+          _verify_marked_complete,     // should have complete marking after traversal
+          _verify_cset_none,           // no cset references left after traversal
+          _verify_liveness_disable,    // liveness data is not collected for new allocations
+          _verify_regions_nocset,      // no cset regions, trash regions allowed
+          _verify_gcstate_stable       // nothing forwarded after traversal
+  );
+}
+
+void ShenandoahVerifier::verify_before_fullgc() {
+  verify_at_safepoint(
+          "Before Full GC",
+          _verify_forwarded_allow,     // can have forwarded objects
+          _verify_marked_disable,      // do not verify marked: lots of time wasted checking dead allocations
+          _verify_cset_disable,        // cset might be foobared
+          _verify_liveness_disable,    // no reliable liveness data anymore
+          _verify_regions_disable,     // no reliable region data here
+          _verify_gcstate_disable      // no reliable gcstate data
+  );
+}
+
+void ShenandoahVerifier::verify_after_fullgc() {
+  verify_at_safepoint(
+          "After Full GC",
+          _verify_forwarded_none,      // all objects are non-forwarded
+          _verify_marked_complete,     // all objects are marked in complete bitmap
+          _verify_cset_none,           // no cset references
+          _verify_liveness_disable,    // no reliable liveness data anymore
+          _verify_regions_notrash_nocset, // no trash, no cset
+          _verify_gcstate_stable       // full gc cleaned up everything
+  );
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP
+
+#include "gc/shared/markBitMap.hpp"
+#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/stack.hpp"
+
+class ShenandoahHeap;
+
+#ifdef _WINDOWS
+#pragma warning( disable : 4522 )
+#endif
+
+class ShenandoahVerifierTask {
+public:
+  ShenandoahVerifierTask(oop o = NULL, int idx = 0): _obj(o) { }
+  ShenandoahVerifierTask(oop o, size_t idx): _obj(o) { }
+  ShenandoahVerifierTask(const ShenandoahVerifierTask& t): _obj(t._obj) { }
+
+  ShenandoahVerifierTask& operator =(const ShenandoahVerifierTask& t) {
+    _obj = t._obj;
+    return *this;
+  }
+  volatile ShenandoahVerifierTask&
+  operator =(const volatile ShenandoahVerifierTask& t) volatile {
+    (void)const_cast<oop&>(_obj = t._obj);
+    return *this;
+  }
+
+  inline oop obj()  const { return _obj; }
+
+private:
+  oop _obj;
+};
+
+typedef Stack<ShenandoahVerifierTask, mtGC> ShenandoahVerifierStack;
+typedef volatile juint ShenandoahLivenessData;
+
+class ShenandoahVerifier : public CHeapObj<mtGC> {
+private:
+  ShenandoahHeap* _heap;
+  MarkBitMap* _verification_bit_map;
+public:
+  typedef enum {
+    // Disable marked objects verification.
+    _verify_marked_disable,
+
+    // Objects should be marked in "next" bitmap.
+    _verify_marked_incomplete,
+
+    // Objects should be marked in "complete" bitmap.
+    _verify_marked_complete,
+  } VerifyMarked;
+
+  typedef enum {
+    // Disable forwarded objects verification.
+    _verify_forwarded_disable,
+
+    // Objects should not have forwardees.
+    _verify_forwarded_none,
+
+    // Objects may have forwardees.
+    _verify_forwarded_allow,
+  } VerifyForwarded;
+
+  typedef enum {
+    // Disable collection set verification.
+    _verify_cset_disable,
+
+    // Should have no references to cset.
+    _verify_cset_none,
+
+    // May have references to cset, all should be forwarded.
+    // Note: Allowing non-forwarded references to cset is equivalent
+    // to _verify_cset_disable.
+    _verify_cset_forwarded,
+  } VerifyCollectionSet;
+
+  typedef enum {
+    // Disable liveness verification
+    _verify_liveness_disable,
+
+    // All objects should belong to live regions
+    _verify_liveness_conservative,
+
+    // All objects should belong to live regions,
+    // and liveness data should be accurate
+    _verify_liveness_complete,
+  } VerifyLiveness;
+
+  typedef enum {
+    // Disable region verification
+    _verify_regions_disable,
+
+    // No trash regions allowed
+    _verify_regions_notrash,
+
+    // No collection set regions allowed
+    _verify_regions_nocset,
+
+    // No trash and no cset regions allowed
+    _verify_regions_notrash_nocset,
+  } VerifyRegions;
+
+  typedef enum {
+    // Disable gc-state verification
+    _verify_gcstate_disable,
+
+    // Nothing is in progress, no forwarded objects
+    _verify_gcstate_stable,
+
+    // Nothing is in progress, some objects are forwarded
+    _verify_gcstate_forwarded,
+  } VerifyGCState;
+
+  struct VerifyOptions {
+    VerifyForwarded     _verify_forwarded;
+    VerifyMarked        _verify_marked;
+    VerifyCollectionSet _verify_cset;
+    VerifyLiveness      _verify_liveness;
+    VerifyRegions       _verify_regions;
+    VerifyGCState       _verify_gcstate;
+
+    VerifyOptions(VerifyForwarded verify_forwarded,
+                  VerifyMarked verify_marked,
+                  VerifyCollectionSet verify_collection_set,
+                  VerifyLiveness verify_liveness,
+                  VerifyRegions verify_regions,
+                  VerifyGCState verify_gcstate) :
+            _verify_forwarded(verify_forwarded), _verify_marked(verify_marked),
+            _verify_cset(verify_collection_set),
+            _verify_liveness(verify_liveness), _verify_regions(verify_regions),
+            _verify_gcstate(verify_gcstate) {}
+  };
+
+private:
+  void verify_at_safepoint(const char *label,
+                           VerifyForwarded forwarded,
+                           VerifyMarked marked,
+                           VerifyCollectionSet cset,
+                           VerifyLiveness liveness,
+                           VerifyRegions regions,
+                           VerifyGCState gcstate);
+
+public:
+  ShenandoahVerifier(ShenandoahHeap* heap, MarkBitMap* verification_bitmap) :
+          _heap(heap), _verification_bit_map(verification_bitmap) {};
+
+  void verify_before_concmark();
+  void verify_after_concmark();
+  void verify_before_evacuation();
+  void verify_after_evacuation();
+  void verify_before_updaterefs();
+  void verify_after_updaterefs();
+  void verify_before_fullgc();
+  void verify_after_fullgc();
+  void verify_before_traversal();
+  void verify_after_traversal();
+  void verify_after_degenerated();
+  void verify_generic(VerifyOption option);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "gc/shenandoah/shenandoahWorkGroup.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+
+#include "logging/log.hpp"
+
+ShenandoahWorkerScope::ShenandoahWorkerScope(WorkGang* workers, uint nworkers, const char* msg, bool check) :
+  _workers(workers) {
+  assert(msg != NULL, "Missing message");
+
+  _n_workers = _workers->update_active_workers(nworkers);
+  assert(_n_workers <= nworkers, "Must be");
+
+  log_info(gc, task)("Using %u of %u workers for %s",
+    _n_workers, ShenandoahHeap::heap()->max_workers(), msg);
+
+  if (check) {
+    ShenandoahHeap::heap()->assert_gc_workers(_n_workers);
+  }
+}
+
+ShenandoahWorkerScope::~ShenandoahWorkerScope() {
+  assert(_workers->active_workers() == _n_workers,
+    "Active workers can not be changed within this scope");
+}
+
+ShenandoahPushWorkerScope::ShenandoahPushWorkerScope(WorkGang* workers, uint nworkers, bool check) :
+  _old_workers(workers->active_workers()),
+  _workers(workers) {
+  _n_workers = _workers->update_active_workers(nworkers);
+  assert(_n_workers <= nworkers, "Must be");
+
+  // bypass concurrent/parallel protocol check for non-regular paths, e.g. verifier, etc.
+  if (check) {
+    ShenandoahHeap::heap()->assert_gc_workers(_n_workers);
+  }
+}
+
+ShenandoahPushWorkerScope::~ShenandoahPushWorkerScope() {
+  assert(_workers->active_workers() == _n_workers,
+    "Active workers can not be changed within this scope");
+  // Restore old worker value
+  uint nworkers = _workers->update_active_workers(_old_workers);
+  assert(nworkers == _old_workers, "Must be able to restore");
+}
+
+ShenandoahPushWorkerQueuesScope::ShenandoahPushWorkerQueuesScope(WorkGang* workers, ShenandoahObjToScanQueueSet* queues, uint nworkers, bool check) :
+  ShenandoahPushWorkerScope(workers, nworkers, check), _queues(queues) {
+  _queues->reserve(_n_workers);
+}
+
+ShenandoahPushWorkerQueuesScope::~ShenandoahPushWorkerQueuesScope() {
+  // Restore old worker value
+  _queues->reserve(_old_workers);
+}
+
+AbstractGangWorker* ShenandoahWorkGang::install_worker(uint which) {
+  AbstractGangWorker* worker = WorkGang::install_worker(which);
+  ShenandoahThreadLocalData::create(worker);
+  if (_initialize_gclab) {
+    ShenandoahThreadLocalData::initialize_gclab(worker);
+  }
+  return worker;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKGROUP_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKGROUP_HPP
+
+#include "gc/shared/workgroup.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "memory/allocation.hpp"
+
+class ShenandoahObjToScanQueueSet;
+
+class ShenandoahWorkerScope : public StackObj {
+private:
+  uint      _n_workers;
+  WorkGang* _workers;
+public:
+  ShenandoahWorkerScope(WorkGang* workers, uint nworkers, const char* msg, bool do_check = true);
+  ~ShenandoahWorkerScope();
+};
+
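+// A minimal usage sketch (illustrative, not part of this change): instantiate the
+// scope on the stack around a parallel phase, e.g.
+//   ShenandoahWorkerScope scope(heap->workers(), nworkers, "concurrent marking");
+//   heap->workers()->run_task(&task);
+// The destructor then asserts that the active worker count was not changed within
+// the scope.
+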
+class ShenandoahPushWorkerScope : StackObj {
+protected:
+  uint      _n_workers;
+  uint      _old_workers;
+  WorkGang* _workers;
+
+public:
+  ShenandoahPushWorkerScope(WorkGang* workers, uint nworkers, bool do_check = true);
+  ~ShenandoahPushWorkerScope();
+};
+
+class ShenandoahPushWorkerQueuesScope : public ShenandoahPushWorkerScope {
+private:
+  ShenandoahObjToScanQueueSet* _queues;
+
+public:
+  ShenandoahPushWorkerQueuesScope(WorkGang* workers, ShenandoahObjToScanQueueSet* queues, uint nworkers, bool do_check = true);
+  ~ShenandoahPushWorkerQueuesScope();
+};
+
+class ShenandoahWorkGang : public WorkGang {
+private:
+  bool     _initialize_gclab;
+public:
+  ShenandoahWorkGang(const char* name,
+           uint workers,
+           bool are_GC_task_threads,
+           bool are_ConcurrentGC_threads) :
+    WorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads), _initialize_gclab(false) {
+    }
+
+  // Create a GC worker and install it into the work gang.
+  // We need to initialize the gclab for dynamically allocated workers.
+  AbstractGangWorker* install_worker(uint which);
+
+  // We allow _active_workers < _total_workers when UseDynamicNumberOfGCThreads is off.
+  // We use the same WorkGang for concurrent and parallel processing, and honor
+  // ConcGCThreads and ParallelGCThreads settings
+  virtual uint active_workers() const {
+    assert(_active_workers > 0, "no active worker");
+    assert(_active_workers <= _total_workers,
+           "_active_workers: %u > _total_workers: %u", _active_workers, _total_workers);
+    return _active_workers;
+  }
+
+  void set_initialize_gclab() { assert(!_initialize_gclab, "Can only enable once"); _initialize_gclab = true; }
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKGROUP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/workerPolicy.hpp"
+#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
+#include "runtime/thread.hpp"
+
+uint ShenandoahWorkerPolicy::_prev_par_marking     = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_marking    = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_evac       = 0;
+uint ShenandoahWorkerPolicy::_prev_fullgc          = 0;
+uint ShenandoahWorkerPolicy::_prev_degengc         = 0;
+uint ShenandoahWorkerPolicy::_prev_stw_traversal   = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_traversal  = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_update_ref = 0;
+uint ShenandoahWorkerPolicy::_prev_par_update_ref  = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_cleanup    = 0;
+uint ShenandoahWorkerPolicy::_prev_conc_reset      = 0;
+
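+// Most calc_workers_for_* methods below follow the same pattern: seed with the
+// previous decision for that phase (or with the ParallelGCThreads/ConcGCThreads
+// default on first use), then let WorkerPolicy rescale that against the current
+// number of non-daemon threads.
+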
+uint ShenandoahWorkerPolicy::calc_workers_for_init_marking() {
+  uint active_workers = (_prev_par_marking == 0) ? ParallelGCThreads : _prev_par_marking;
+
+  _prev_par_marking =
+    WorkerPolicy::calc_active_workers(ParallelGCThreads,
+                                      active_workers,
+                                      Threads::number_of_non_daemon_threads());
+  return _prev_par_marking;
+}
+
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_marking() {
+  uint active_workers = (_prev_conc_marking == 0) ?  ConcGCThreads : _prev_conc_marking;
+  _prev_conc_marking =
+    WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
+                                           active_workers,
+                                           Threads::number_of_non_daemon_threads());
+  return _prev_conc_marking;
+}
+
+// Reuse the calculation result from init marking
+uint ShenandoahWorkerPolicy::calc_workers_for_final_marking() {
+  return _prev_par_marking;
+}
+
+// Calculate workers for concurrent evacuation (concurrent GC)
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_evac() {
+  uint active_workers = (_prev_conc_evac == 0) ? ConcGCThreads : _prev_conc_evac;
+  _prev_conc_evac =
+    WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
+                                           active_workers,
+                                           Threads::number_of_non_daemon_threads());
+  return _prev_conc_evac;
+}
+
+// Calculate workers for parallel fullgc
+uint ShenandoahWorkerPolicy::calc_workers_for_fullgc() {
+  uint active_workers = (_prev_fullgc == 0) ?  ParallelGCThreads : _prev_fullgc;
+  _prev_fullgc =
+    WorkerPolicy::calc_active_workers(ParallelGCThreads,
+                                      active_workers,
+                                      Threads::number_of_non_daemon_threads());
+  return _prev_fullgc;
+}
+
+// Calculate workers for parallel degenerated gc
+uint ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated() {
+  uint active_workers = (_prev_degengc == 0) ?  ParallelGCThreads : _prev_degengc;
+  _prev_degengc =
+    WorkerPolicy::calc_active_workers(ParallelGCThreads,
+                                      active_workers,
+                                      Threads::number_of_non_daemon_threads());
+  return _prev_degengc;
+}
+
+// Calculate workers for Stop-the-world traversal GC
+uint ShenandoahWorkerPolicy::calc_workers_for_stw_traversal() {
+  uint active_workers = (_prev_stw_traversal == 0) ? ParallelGCThreads : _prev_stw_traversal;
+  _prev_stw_traversal =
+    WorkerPolicy::calc_active_workers(ParallelGCThreads,
+                                      active_workers,
+                                      Threads::number_of_non_daemon_threads());
+  return _prev_stw_traversal;
+}
+
+// Calculate workers for concurrent traversal GC
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_traversal() {
+  uint active_workers = (_prev_conc_traversal == 0) ? ConcGCThreads : _prev_conc_traversal;
+  _prev_conc_traversal =
+    WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
+                                           active_workers,
+                                           Threads::number_of_non_daemon_threads());
+  return _prev_conc_traversal;
+}
+
+// Calculate workers for concurrent reference update
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref() {
+  uint active_workers = (_prev_conc_update_ref == 0) ? ConcGCThreads : _prev_conc_update_ref;
+  _prev_conc_update_ref =
+    WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
+                                           active_workers,
+                                           Threads::number_of_non_daemon_threads());
+  return _prev_conc_update_ref;
+}
+
+// Calculate workers for parallel reference update
+uint ShenandoahWorkerPolicy::calc_workers_for_final_update_ref() {
+  uint active_workers = (_prev_par_update_ref == 0) ? ParallelGCThreads : _prev_par_update_ref;
+  _prev_par_update_ref =
+    WorkerPolicy::calc_active_workers(ParallelGCThreads,
+                                      active_workers,
+                                      Threads::number_of_non_daemon_threads());
+  return _prev_par_update_ref;
+}
+
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_preclean() {
+  // Precleaning is single-threaded
+  return 1;
+}
+
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup() {
+  uint active_workers = (_prev_conc_cleanup == 0) ? ConcGCThreads : _prev_conc_cleanup;
+  _prev_conc_cleanup =
+          WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
+                                                 active_workers,
+                                                 Threads::number_of_non_daemon_threads());
+  return _prev_conc_cleanup;
+}
+
+uint ShenandoahWorkerPolicy::calc_workers_for_conc_reset() {
+  uint active_workers = (_prev_conc_reset == 0) ? ConcGCThreads : _prev_conc_reset;
+  _prev_conc_reset =
+          WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
+                                                 active_workers,
+                                                 Threads::number_of_non_daemon_threads());
+  return _prev_conc_reset;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKERPOLICY_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKERPOLICY_HPP
+
+#include "memory/allocation.hpp"
+
+class ShenandoahWorkerPolicy : AllStatic {
+private:
+  static uint _prev_par_marking;
+  static uint _prev_conc_marking;
+  static uint _prev_conc_evac;
+  static uint _prev_fullgc;
+  static uint _prev_degengc;
+  static uint _prev_stw_traversal;
+  static uint _prev_conc_traversal;
+  static uint _prev_conc_update_ref;
+  static uint _prev_par_update_ref;
+  static uint _prev_conc_cleanup;
+  static uint _prev_conc_reset;
+
+public:
+  // Calculate the number of workers for initial marking
+  static uint calc_workers_for_init_marking();
+
+  // Calculate the number of workers for concurrent marking
+  static uint calc_workers_for_conc_marking();
+
+  // Calculate the number of workers for final marking
+  static uint calc_workers_for_final_marking();
+
+  // Calculate workers for concurrent evacuation (concurrent GC)
+  static uint calc_workers_for_conc_evac();
+
+  // Calculate workers for parallel full gc
+  static uint calc_workers_for_fullgc();
+
+  // Calculate workers for parallel degenerated gc
+  static uint calc_workers_for_stw_degenerated();
+
+  // Calculate workers for Stop-the-world traversal GC
+  static uint calc_workers_for_stw_traversal();
+
+  // Calculate workers for concurrent traversal GC
+  static uint calc_workers_for_conc_traversal();
+
+  // Calculate workers for concurrent reference update
+  static uint calc_workers_for_conc_update_ref();
+
+  // Calculate workers for parallel/final reference update
+  static uint calc_workers_for_final_update_ref();
+
+  // Calculate workers for concurrent precleaning
+  static uint calc_workers_for_conc_preclean();
+
+  // Calculate workers for concurrent cleanup
+  static uint calc_workers_for_conc_cleanup();
+
+  // Calculate workers for concurrent reset
+  static uint calc_workers_for_conc_reset();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKERPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP
+
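+// Most flags below are experimental, and thus require -XX:+UnlockExperimentalVMOptions.
+// An illustrative invocation (application name is a placeholder):
+//   java -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
+//        -XX:ShenandoahGCHeuristics=compact MyApp
+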
+#define GC_SHENANDOAH_FLAGS(develop,                                        \
+                            develop_pd,                                     \
+                            product,                                        \
+                            product_pd,                                     \
+                            diagnostic,                                     \
+                            diagnostic_pd,                                  \
+                            experimental,                                   \
+                            notproduct,                                     \
+                            manageable,                                     \
+                            product_rw,                                     \
+                            lp64_product,                                   \
+                            range,                                          \
+                            constraint,                                     \
+                            writeable)                                      \
+                                                                            \
+  experimental(size_t, ShenandoahHeapRegionSize, 0,                         \
+          "Size of the Shenandoah regions. Set to zero to detect "          \
+          "automatically.")                                                 \
+                                                                            \
+  experimental(size_t, ShenandoahTargetNumRegions, 2048,                    \
+          "Target number of regions. We try to get around that many "       \
+          "regions, based on Shenandoah{Min,Max}RegionSize.")               \
+                                                                            \
+  experimental(size_t, ShenandoahMinRegionSize, 256 * K,                    \
+          "Minimum Shenandoah heap region size.")                           \
+                                                                            \
+  experimental(size_t, ShenandoahMaxRegionSize, 32 * M,                     \
+          "Maximum Shenandoah heap region size.")                           \
+                                                                            \
+  experimental(intx, ShenandoahHumongousThreshold, 100,                     \
+          "How large should the object be to get allocated in humongous "   \
+          "region, in percents of heap region size. This also caps the "    \
+          "maximum TLAB size.")                                             \
+          range(1, 100)                                                     \
+                                                                            \
+  experimental(ccstr, ShenandoahGCHeuristics, "adaptive",                   \
+          "The heuristics to use in Shenandoah GC. Possible values:"        \
+          " *) adaptive - adapt to maintain the given amount of free heap;" \
+          " *) static  -  start concurrent GC when static free heap "       \
+          "               threshold and static allocation threshold are "   \
+          "               tripped;"                                         \
+          " *) passive -  do not start concurrent GC, wait for Full GC; "   \
+          " *) aggressive - run concurrent GC continuously, evacuate "      \
+          "               everything;"                                      \
+          " *) compact - run GC with lower footprint target, may end up "   \
+          "               doing continuous GC, evacuate lots of live "      \
+          "               objects, uncommit heap aggressively;")            \
+                                                                            \
+  experimental(ccstr, ShenandoahUpdateRefsEarly, "adaptive",                \
+          "Run a separate concurrent reference updating phase after"        \
+          "concurrent evacuation. Possible values: 'on', 'off', 'adaptive'")\
+                                                                            \
+  experimental(uintx, ShenandoahRefProcFrequency, 5,                        \
+          "How often should (weak, soft, etc) references be processed. "    \
+          "References get processed at every Nth GC cycle. Set to zero "    \
+          "to disable reference processing.")                               \
+                                                                            \
+  experimental(uintx, ShenandoahUnloadClassesFrequency, 5,                  \
+          "How often should classes get unloaded. "                         \
+          "Class unloading is performed at every Nth GC cycle. "            \
+          "Set to zero to disable class unloading during concurrent GC.")   \
+                                                                            \
+  experimental(uintx, ShenandoahGarbageThreshold, 60,                       \
+          "Sets the percentage of garbage a region need to contain before " \
+          "it can be marked for collection. Does not apply to all "         \
+          "heuristics.")                                                    \
+          range(0,100)                                                      \
+                                                                            \
+  experimental(uintx, ShenandoahFreeThreshold, 10,                          \
+          "Set the percentage of free heap at which a GC cycle is started. "\
+          "Does not apply to all heuristics.")                              \
+          range(0,100)                                                      \
+                                                                            \
+  experimental(uintx, ShenandoahInitFreeThreshold, 70,                      \
+          "Initial remaining free heap threshold for learning steps in "    \
+          "heuristics. In percents of total heap size. Does not apply to "  \
+          "all heuristics.")                                                \
+          range(0,100)                                                      \
+                                                                            \
+  experimental(uintx, ShenandoahMinFreeThreshold, 10,                       \
+          "Minimum remaining free space threshold, after which collection " \
+          "definitely triggers. Does not apply to all heuristics.")         \
+          range(0,100)                                                      \
+                                                                            \
+  experimental(uintx, ShenandoahAllocationThreshold, 0,                     \
+          "Set percentage of memory allocated since last GC cycle before "  \
+          "a new GC cycle can be started. Set to zero to effectively "      \
+          "disable.")                                                       \
+          range(0,100)                                                      \
+                                                                            \
+  experimental(uintx, ShenandoahLearningSteps, 5,                           \
+          "Number of GC cycles to run in order to learn application "       \
+          "and GC performance for adaptive heuristics.")                    \
+          range(0,100)                                                      \
+                                                                            \
+  experimental(uintx, ShenandoahImmediateThreshold, 90,                     \
+          "If mark identifies more than this much immediate garbage "       \
+          "regions, it shall recycle them, and shall not continue the "     \
+          "rest of the GC cycle. The value is in percents of total "        \
+          "number of candidate regions for collection set. Setting this "   \
+          "threshold to 100% effectively disables this shortcut.")          \
+          range(0,100)                                                      \
+                                                                            \
+  experimental(uintx, ShenandoahMergeUpdateRefsMinGap, 100,                 \
+          "If GC is currently running in separate update-refs mode "        \
+          "this numbers gives the threshold when to switch to "             \
+          "merged update-refs mode. Number is percentage relative to"       \
+          "duration(marking)+duration(update-refs).")                       \
+                                                                            \
+  experimental(uintx, ShenandoahMergeUpdateRefsMaxGap, 200,                 \
+          "If GC is currently running in merged update-refs mode "          \
+          "this numbers gives the threshold when to switch to "             \
+          "separate update-refs mode. Number is percentage relative "       \
+          "to duration(marking)+duration(update-refs).")                    \
+                                                                            \
+  experimental(uintx, ShenandoahGuaranteedGCInterval, 5*60*1000,            \
+          "Adaptive and dynamic heuristics would guarantee a GC cycle "     \
+          "at least with this interval. This is useful when large idle"     \
+          " intervals are present, where GC can run without stealing "      \
+          "time from active application. Time is in milliseconds.")         \
+                                                                            \
+  experimental(bool, ShenandoahAlwaysClearSoftRefs, false,                  \
+          "Clear soft references always, instead of using any smart "       \
+          "cleanup policy. This minimizes footprint at expense of more "    \
+          "softref churn in applications.")                                 \
+                                                                            \
+  experimental(bool, ShenandoahUncommit, true,                              \
+          "Allow Shenandoah to uncommit unused memory.")                    \
+                                                                            \
+  experimental(uintx, ShenandoahUncommitDelay, 5*60*1000,                   \
+          "Shenandoah would start to uncommit memory for regions that were" \
+          " not used for more than this time. First use after that would "  \
+          "incur allocation stalls. Actively used regions would never be "  \
+          "uncommitted, because they never decay. Time is in milliseconds." \
+          "Setting this delay to 0 effectively makes Shenandoah to "        \
+          "uncommit the regions almost immediately.")                       \
+                                                                            \
+  experimental(bool, ShenandoahRegionSampling, false,                       \
+          "Turns on heap region sampling via JVMStat")                      \
+                                                                            \
+  experimental(int, ShenandoahRegionSamplingRate, 40,                       \
+          "Sampling rate for heap region sampling. "                        \
+          "Number of milliseconds between samples")                         \
+                                                                            \
+  experimental(uintx, ShenandoahControlIntervalMin, 1,                      \
+          "The minumum sleep interval for control loop that drives "        \
+          "the cycles. Lower values would increase GC responsiveness "      \
+          "to changing heap conditions, at the expense of higher perf "     \
+          "overhead. Time is in milliseconds.")                             \
+                                                                            \
+  experimental(uintx, ShenandoahControlIntervalMax, 10,                     \
+          "The maximum sleep interval for control loop that drives "        \
+          "the cycles. Lower values would increase GC responsiveness "      \
+          "to changing heap conditions, at the expense of higher perf "     \
+          "overhead. Time is in milliseconds.")                             \
+                                                                            \
+  experimental(uintx, ShenandoahControlIntervalAdjustPeriod, 1000,          \
+          "The time period for one step in control loop interval "          \
+          "adjustment. Lower values make adjustments faster, at the "       \
+          "expense of higher perf overhead. Time is in milliseconds.")      \
+                                                                            \
+  diagnostic(bool, ShenandoahVerify, false,                                 \
+          "Verify the Shenandoah garbage collector")                        \
+                                                                            \
+  diagnostic(intx, ShenandoahVerifyLevel, 4,                                \
+          "Shenandoah verification level: "                                 \
+          "0 = basic heap checks; "                                         \
+          "1 = previous level, plus basic region checks; "                  \
+          "2 = previous level, plus all roots; "                            \
+          "3 = previous level, plus all reachable objects; "                \
+          "4 = previous level, plus all marked objects")                    \
+                                                                            \
+  diagnostic(bool, ShenandoahElasticTLAB, true,                             \
+          "Use Elastic TLABs with Shenandoah")                              \
+                                                                            \
+  diagnostic(bool, ShenandoahAllowMixedAllocs, true,                        \
+          "Allow mixing mutator and collector allocations in a single "     \
+          "region")                                                         \
+                                                                            \
+  experimental(uintx, ShenandoahAllocSpikeFactor, 5,                        \
+          "The amount of heap space to reserve for absorbing the "          \
+          "allocation spikes. Larger value wastes more memory in "          \
+          "non-emergency cases, but provides more safety in emergency "     \
+          "cases. In percents of total heap size.")                         \
+          range(0,100)                                                      \
+                                                                            \
+  experimental(uintx, ShenandoahEvacReserve, 5,                             \
+          "Maximum amount of free space to reserve for evacuation. "        \
+          "Larger values make GC more aggressive, while leaving less "      \
+          "headroom for application to allocate in. "                       \
+          "In percents of total heap size.")                                \
+          range(1,100)                                                      \
+                                                                            \
+  experimental(double, ShenandoahEvacWaste, 1.2,                            \
+          "How much waste evacuations produce within the reserved "         \
+          "space. Larger values make evacuations more resilient "           \
+          "against allocation failures, at expense of smaller csets "       \
+          "on each cycle.")                                                 \
+          range(1.0,100.0)                                                  \
+                                                                            \
+  experimental(bool, ShenandoahEvacReserveOverflow, true,                   \
+          "Allow evacuations to overflow the reserved space. "              \
+          "Enabling it will make evacuations more resilient when "          \
+          "evacuation reserve/waste is incorrect, at the risk that "        \
+          "application allocations run out of memory too early.")           \
+                                                                            \
+  diagnostic(bool, ShenandoahAllocationTrace, false,                        \
+          "Trace allocation latencies and stalls. Can be expensive when "   \
+          "lots of allocations happen, and may introduce scalability "      \
+          "bottlenecks.")                                                   \
+                                                                            \
+  diagnostic(intx, ShenandoahAllocationStallThreshold, 10000,               \
+          "When allocation tracing is enabled, the allocation stalls "      \
+          "larger than this threshold would be reported as warnings. "      \
+          "Time is in microseconds.")                                       \
+                                                                            \
+  experimental(uintx, ShenandoahEvacAssist, 10,                             \
+          "How many objects to evacuate on WB assist path. "                \
+          "Use zero to disable.")                                           \
+                                                                            \
+  experimental(bool, ShenandoahPacing, true,                                \
+          "Pace application allocations to give GC chance to start "        \
+          "and complete before allocation failure is reached.")             \
+                                                                            \
+  experimental(uintx, ShenandoahPacingMaxDelay, 10,                         \
+          "Max delay for pacing application allocations. "                  \
+          "Time is in milliseconds.")                                       \
+                                                                            \
+  experimental(uintx, ShenandoahPacingIdleSlack, 2,                         \
+          "Percent of heap counted as non-taxable allocations during idle. "\
+          "Larger value makes the pacing milder during idle phases, "       \
+          "requiring less rendezvous with control thread. Lower value "     \
+          "makes the pacing control less responsive to out-of-cycle allocs.")\
+          range(0, 100)                                                     \
+                                                                            \
+  experimental(uintx, ShenandoahPacingCycleSlack, 10,                       \
+          "Percent of free space taken as non-taxable allocations during "  \
+          "the GC cycle. Larger value makes the pacing milder at the "      \
+          "beginning of the GC cycle. Lower value makes the pacing less "   \
+          "uniform during the cycle.")                                      \
+          range(0, 100)                                                     \
+                                                                            \
+  experimental(double, ShenandoahPacingSurcharge, 1.1,                      \
+          "Additional pacing tax surcharge to help unclutter the heap. "    \
+          "Larger values makes the pacing more aggressive. Lower values "   \
+          "risk GC cycles finish with less memory than were available at "  \
+          "the beginning of it.")                                           \
+          range(1.0, 100.0)                                                 \
+                                                                            \
+  experimental(uintx, ShenandoahCriticalFreeThreshold, 1,                   \
+          "Percent of heap that needs to be free after recovery cycles, "   \
+          "either Degenerated or Full GC. If this much space is not "       \
+          "available, next recovery step would triggered.")                 \
+          range(0, 100)                                                     \
+                                                                            \
+  diagnostic(bool, ShenandoahDegeneratedGC, true,                           \
+          "Use Degenerated GC as the graceful degradation step. Disabling " \
+          "this leads to degradation to Full GC")                           \
+                                                                            \
+  experimental(uintx, ShenandoahFullGCThreshold, 3,                         \
+          "How many back-to-back Degenerated GCs to do before triggering "  \
+          "a Full GC.")                                                     \
+                                                                            \
+  experimental(bool, ShenandoahImplicitGCInvokesConcurrent, false,          \
+          "Should internally-caused GCs invoke concurrent cycles, or go to" \
+          "stop-the-world (degenerated/full)?")                             \
+                                                                            \
+  experimental(bool, ShenandoahHumongousMoves, true,                        \
+          "Allow moving humongous regions. This makes GC more resistant "   \
+          "to external fragmentation that may otherwise fail other "        \
+          "humongous allocations, at the expense of higher GC copying "     \
+          "costs.")                                                         \
+                                                                            \
+  diagnostic(bool, ShenandoahOOMDuringEvacALot, false,                      \
+          "Simulate OOM during evacuation frequently.")                     \
+                                                                            \
+  diagnostic(bool, ShenandoahAllocFailureALot, false,                       \
+          "Make lots of artificial allocation failures.")                   \
+                                                                            \
+  diagnostic(bool, ShenandoahTerminationTrace, false,                       \
+          "Tracing task termination timings")                               \
+                                                                            \
+  develop(bool, ShenandoahVerifyObjectEquals, false,                        \
+          "Verify that == and != are not used on oops. Only in fastdebug")  \
+                                                                            \
+  diagnostic(bool, ShenandoahAlwaysPreTouch, false,                         \
+          "Pre-touch heap memory, overrides global AlwaysPreTouch")         \
+                                                                            \
+  experimental(intx, ShenandoahMarkScanPrefetch, 32,                        \
+          "How many objects to prefetch ahead when traversing mark bitmaps."\
+          "Set to 0 to disable prefetching.")                               \
+          range(0, 256)                                                     \
+                                                                            \
+  experimental(uintx, ShenandoahMarkLoopStride, 1000,                       \
+          "How many items are processed during one marking step")           \
+                                                                            \
+  experimental(uintx, ShenandoahParallelRegionStride, 1024,                 \
+          "How many regions are processed in one stride during parallel "   \
+          "iteration.")                                                     \
+                                                                            \
+  experimental(size_t, ShenandoahSATBBufferSize, 1 * K,                     \
+          "Number of entries in an SATB log buffer.")                       \
+          range(1, max_uintx)                                               \
+                                                                            \
+  experimental(uintx, ShenandoahSATBBufferFlushInterval, 100,               \
+          "Forcefully flush non-empty SATB buffers at this interval. "      \
+          "Time is in milliseconds.")                                       \
+                                                                            \
+  experimental(uint, ShenandoahParallelSafepointThreads, 4,                 \
+          "Number of parallel threads used for safepoint prolog/epilog")    \
+                                                                            \
+  experimental(bool, ShenandoahPreclean, true,                              \
+          "Do concurrent preclean phase before final mark: process "        \
+          "definitely alive references to avoid dealing with them during "  \
+          "pause.")                                                         \
+                                                                            \
+  experimental(bool, ShenandoahSuspendibleWorkers, false,                   \
+          "Suspend concurrent GC worker threads at safepoints")             \
+                                                                            \
+  diagnostic(bool, ShenandoahSATBBarrier, true,                             \
+          "Turn on/off SATB barriers in Shenandoah")                        \
+                                                                            \
+  diagnostic(bool, ShenandoahKeepAliveBarrier, true,                        \
+          "Turn on/off keep alive barriers in Shenandoah")                  \
+                                                                            \
+  diagnostic(bool, ShenandoahWriteBarrier, true,                            \
+          "Turn on/off write barriers in Shenandoah")                       \
+                                                                            \
+  diagnostic(bool, ShenandoahReadBarrier, true,                             \
+          "Turn on/off read barriers in Shenandoah")                        \
+                                                                            \
+  diagnostic(bool, ShenandoahStoreValEnqueueBarrier, false,                 \
+          "Turn on/off enqueuing of oops for storeval barriers")            \
+                                                                            \
+  diagnostic(bool, ShenandoahStoreValReadBarrier, true,                     \
+          "Turn on/off store val read barriers in Shenandoah")              \
+                                                                            \
+  diagnostic(bool, ShenandoahCASBarrier, true,                              \
+          "Turn on/off CAS barriers in Shenandoah")                         \
+                                                                            \
+  diagnostic(bool, ShenandoahAcmpBarrier, true,                             \
+          "Turn on/off acmp barriers in Shenandoah")                        \
+                                                                            \
+  diagnostic(bool, ShenandoahCloneBarrier, true,                            \
+          "Turn on/off clone barriers in Shenandoah")                       \
+                                                                            \
+  diagnostic(bool, ShenandoahStoreCheck, false,                             \
+          "Emit additional code that checks objects are written to only"    \
+          " in to-space")                                                   \
+                                                                            \
+  experimental(bool, ShenandoahConcurrentScanCodeRoots, true,               \
+          "Scan code roots concurrently, instead of during a pause")        \
+                                                                            \
+  experimental(uintx, ShenandoahCodeRootsStyle, 2,                          \
+          "Use this style to scan code cache:"                              \
+          " 0 - sequential iterator;"                                       \
+          " 1 - parallel iterator;"                                         \
+          " 2 - parallel iterator with cset filters;")                      \
+                                                                            \
+  experimental(bool, ShenandoahOptimizeStaticFinals, true,                  \
+          "Optimize barriers on static final fields. "                      \
+          "Turn it off for maximum compatibility with reflection or JNI "   \
+          "code that manipulates final fields.")                            \
+                                                                            \
+  experimental(bool, ShenandoahOptimizeInstanceFinals, false,               \
+          "Optimize barriers on final instance fields."                     \
+          "Turn it off for maximum compatibility with reflection or JNI "   \
+          "code that manipulates final fields.")                            \
+                                                                            \
+  experimental(bool, ShenandoahOptimizeStableFinals, false,                 \
+          "Optimize barriers on stable fields."                             \
+          "Turn it off for maximum compatibility with reflection or JNI "   \
+          "code that manipulates final fields.")                            \
+                                                                            \
+  diagnostic(bool, ShenandoahDecreaseRegisterPressure, false,               \
+          "Try to reuse after-barrier values to reduce register pressure")  \
+                                                                            \
+  experimental(bool, ShenandoahCommonGCStateLoads, false,                   \
+         "Enable commonming for GC state loads in generated code.")         \
+                                                                            \
+  develop(bool, ShenandoahVerifyOptoBarriers, false,                        \
+          "Verify no missing barriers in C2")                               \
+                                                                            \
+  experimental(bool, ShenandoahDontIncreaseWBFreq, true,                    \
+          "Common 2 WriteBarriers or WriteBarrier and a ReadBarrier only "  \
+          "if the resulting WriteBarrier isn't executed more frequently")   \
+                                                                            \
+  experimental(bool, ShenandoahLoopOptsAfterExpansion, true,                \
+          "Attempt more loop opts after write barrier expansion")           \
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP
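For orientation, GC_SHENANDOAH_FLAGS follows the usual HotSpot flag-table idiom: the table is expanded several times, with a macro bound to each flag-kind slot and the range()/constraint()/writeable() clauses routed through their own slots. A minimal sketch of one such expansion, using hypothetical SKETCH_* helpers rather than the real runtime flags machinery, and local stand-ins for HotSpot's flag typedefs:

#include <cstddef>
#include <cstdint>
typedef const char*  ccstr;  // stand-in for HotSpot's string-flag type
typedef uintptr_t    uintx;  // stand-in for HotSpot's word-sized unsigned
typedef intptr_t     intx;   // stand-in for HotSpot's word-sized signed
typedef unsigned int uint;

// Hypothetical declaration pass over the table above; illustrative only.
#define SKETCH_DECLARE(type, name, value, ...) extern type name;
#define SKETCH_DECLARE_PD(type, name, ...)     extern type name;
#define SKETCH_SKIP(...)                       /* range/constraint clauses */

GC_SHENANDOAH_FLAGS(SKETCH_DECLARE,    /* develop       */
                    SKETCH_DECLARE_PD, /* develop_pd    */
                    SKETCH_DECLARE,    /* product       */
                    SKETCH_DECLARE_PD, /* product_pd    */
                    SKETCH_DECLARE,    /* diagnostic    */
                    SKETCH_DECLARE_PD, /* diagnostic_pd */
                    SKETCH_DECLARE,    /* experimental  */
                    SKETCH_DECLARE,    /* notproduct    */
                    SKETCH_DECLARE,    /* manageable    */
                    SKETCH_DECLARE,    /* product_rw    */
                    SKETCH_DECLARE,    /* lp64_product  */
                    SKETCH_SKIP,       /* range         */
                    SKETCH_SKIP,       /* constraint    */
                    SKETCH_SKIP)       /* writeable     */

// This pass yields declarations such as:
//   extern uintx ShenandoahGarbageThreshold;
//   extern bool  ShenandoahPacing;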
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shenandoah/vmStructs_shenandoah.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP
+#define SHARE_VM_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP
+
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
+
+#define VM_STRUCTS_SHENANDOAH(nonstatic_field, volatile_nonstatic_field, static_field)  \
+  static_field(ShenandoahHeapRegion, RegionSizeBytes,        size_t)                    \
+  nonstatic_field(ShenandoahHeap, _num_regions,              size_t)                    \
+  volatile_nonstatic_field(ShenandoahHeap, _used,            size_t)                    \
+  volatile_nonstatic_field(ShenandoahHeap, _committed,       size_t)                    \
+
+#define VM_INT_CONSTANTS_SHENANDOAH(declare_constant, declare_constant_with_value)
+
+#define VM_TYPES_SHENANDOAH(declare_type,                                     \
+                            declare_toplevel_type,                            \
+                            declare_integer_type)                             \
+  declare_type(ShenandoahHeap, CollectedHeap)                                 \
+  declare_type(ShenandoahHeapRegion, ContiguousSpace)                         \
+  declare_toplevel_type(ShenandoahHeap*)                                      \
+  declare_toplevel_type(ShenandoahHeapRegion*)                                \
+
+#endif // SHARE_VM_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP
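These tuples are meant to be spliced into the main vmStructs tables, where generator macros turn each line into an entry the Serviceability Agent reads at runtime. A hedged sketch of that shape, with simplified local stand-ins (the real generators and entry layout live in runtime/vmStructs.*):

// Illustrative only: a cut-down entry type and generator macros.
struct SketchVMStructEntry {
  const char* typeName;    // e.g. "ShenandoahHeap"
  const char* fieldName;   // e.g. "_num_regions"
  const char* typeString;  // e.g. "size_t"
  int         isStatic;
};

#define SKETCH_NONSTATIC(typeName, fieldName, type) \
  { #typeName, #fieldName, #type, 0 },
#define SKETCH_STATIC(typeName, fieldName, type) \
  { #typeName, #fieldName, #type, 1 },

static SketchVMStructEntry shenandoah_entries[] = {
  VM_STRUCTS_SHENANDOAH(SKETCH_NONSTATIC,  // nonstatic_field
                        SKETCH_NONSTATIC,  // volatile_nonstatic_field
                        SKETCH_STATIC)     // static_field
  { NULL, NULL, NULL, 0 }                  // terminator
};

// yields, e.g., { "ShenandoahHeap", "_num_regions", "size_t", 0 }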
--- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -47,6 +47,11 @@
     return false;
   }
 
+  if (UseShenandoahGC) {
+    log_warning(jfr)("LeakProfiler is currently not supported in combination with Shenandoah GC");
+    return false;
+  }
+
   if (_object_sampler != NULL) {
     // already started
     return true;
--- a/src/hotspot/share/memory/metaspace.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/memory/metaspace.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -231,6 +231,7 @@
 class ClassLoaderMetaspace : public CHeapObj<mtClass> {
   friend class CollectedHeap; // For expand_and_allocate()
   friend class ZCollectedHeap; // For expand_and_allocate()
+  friend class ShenandoahHeap; // For expand_and_allocate()
   friend class Metaspace;
   friend class MetaspaceUtils;
   friend class metaspace::PrintCLDMetaspaceInfoClosure;
--- a/src/hotspot/share/opto/arraycopynode.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/arraycopynode.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -497,7 +497,7 @@
   } else {
     if (in(TypeFunc::Control) != ctl) {
       // we can't return new memory and control from Ideal at parse time
-      assert(!is_clonebasic(), "added control for clone?");
+      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
       phase->record_for_igvn(this);
       return false;
     }
--- a/src/hotspot/share/opto/cfgnode.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/cfgnode.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -304,7 +304,6 @@
 protected:
   ProjNode* range_check_trap_proj(int& flip, Node*& l, Node*& r);
   Node* Ideal_common(PhaseGVN *phase, bool can_reshape);
-  Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn);
   Node* search_identical(int dist);
 
 public:
@@ -392,6 +391,7 @@
   virtual const RegMask &out_RegMask() const;
   Node* fold_compares(PhaseIterGVN* phase);
   static Node* up_one_dom(Node* curr, bool linear_only = false);
+  Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn);
 
   // Takes the type of val and filters it through the test represented
   // by if_proj and returns a more refined type if one is produced.
--- a/src/hotspot/share/opto/classes.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/classes.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,9 @@
 #if INCLUDE_ZGC
 #include "gc/z/c2/zBarrierSetC2.hpp"
 #endif
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
 
 // ----------------------------------------------------------------------------
 // Build a table of virtual functions to map from Nodes to dense integer
--- a/src/hotspot/share/opto/classes.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/classes.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -264,6 +264,21 @@
 macro(RoundFloat)
 macro(SafePoint)
 macro(SafePointScalarObject)
+#if INCLUDE_SHENANDOAHGC
+#define shmacro(x) macro(x)
+#else
+#define shmacro(x) optionalmacro(x)
+#endif
+shmacro(ShenandoahCompareAndExchangeP)
+shmacro(ShenandoahCompareAndExchangeN)
+shmacro(ShenandoahCompareAndSwapN)
+shmacro(ShenandoahCompareAndSwapP)
+shmacro(ShenandoahWeakCompareAndSwapN)
+shmacro(ShenandoahWeakCompareAndSwapP)
+shmacro(ShenandoahEnqueueBarrier)
+shmacro(ShenandoahReadBarrier)
+shmacro(ShenandoahWriteBarrier)
+shmacro(ShenandoahWBMemProj)
 macro(SCMemProj)
 macro(SqrtD)
 macro(SqrtF)
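The shmacro indirection keeps one node list serving every consumer: when Shenandoah is built in, its nodes register through macro like any other node, otherwise they fall through to optionalmacro, which each including file defines to taste. A self-contained toy of the same conditional X-macro trick (all names hypothetical):

#define INCLUDE_TOYGC 1                  // flip to 0: ToyBarrier drops out

#if INCLUDE_TOYGC
#define toymacro(x) macro(x)
#else
#define toymacro(x) optionalmacro(x)
#endif

// Consumers define macro()/optionalmacro() before expanding the list.
#define macro(x)         Op_##x,
#define optionalmacro(x) /* compiled out of this expansion */
enum SketchOpcodes {
  Op_Node = 0,
  macro(Add)
  toymacro(ToyBarrier)                   // present only with INCLUDE_TOYGC
  macro(Mul)
  Op_SketchLast
};
#undef macro
#undef optionalmacro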
--- a/src/hotspot/share/opto/compile.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/compile.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -3061,7 +3061,7 @@
         Node *m = wq.at(next);
         for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
           Node* use = m->fast_out(i);
-          if (use->is_Mem() || use->is_EncodeNarrowPtr()) {
+          if (use->is_Mem() || use->is_EncodeNarrowPtr() || use->is_ShenandoahBarrier()) {
             use->ensure_control_or_add_prec(n->in(0));
           } else {
             switch(use->Opcode()) {
--- a/src/hotspot/share/opto/compile.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/compile.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -93,6 +93,8 @@
 enum LoopOptsMode {
   LoopOptsDefault,
   LoopOptsNone,
+  LoopOptsShenandoahExpand,
+  LoopOptsShenandoahPostExpand,
   LoopOptsSkipSplitIf,
   LoopOptsVerify,
   LoopOptsLastRound
--- a/src/hotspot/share/opto/lcm.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/lcm.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -178,6 +178,7 @@
     case Op_LoadRange:
     case Op_LoadD_unaligned:
     case Op_LoadL_unaligned:
+    case Op_ShenandoahReadBarrier:
       assert(mach->in(2) == val, "should be address");
       break;
     case Op_StoreB:
--- a/src/hotspot/share/opto/library_call.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/library_call.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -4464,7 +4464,7 @@
         for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
           Node* n = mms.memory();
           if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
-            assert(n->is_Store(), "what else?");
+            assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
             no_interfering_store = false;
             break;
           }
@@ -4473,7 +4473,7 @@
         for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
           Node* n = mms.memory();
           if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
-            assert(n->is_Store(), "what else?");
+            assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
             no_interfering_store = false;
             break;
           }
--- a/src/hotspot/share/opto/loopPredicate.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/loopPredicate.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -536,6 +536,9 @@
     if (_lpt->is_invariant(n)) { // known invariant
       _invariant.set(n->_idx);
     } else if (!n->is_CFG()) {
+      if (n->Opcode() == Op_ShenandoahWriteBarrier) {
+        return;
+      }
       Node *n_ctrl = _phase->ctrl_or_self(n);
       Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
       if (_phase->is_dominator(n_ctrl, u_ctrl)) {
--- a/src/hotspot/share/opto/loopTransform.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/loopTransform.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -2795,7 +2795,13 @@
              (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
              (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
              (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
-             (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
+             (bol->in(1)->Opcode() == Op_CompareAndSwapN ) ||
+             (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeP ) ||
+             (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeN ) ||
+             (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapP ) ||
+             (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapN ) ||
+             (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapP ) ||
+             (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapN )))
           return;               // Allocation loops RARELY take backedge
         // Find the OTHER exit path from the IF
         Node* ex = iff->proj_out(1-test_con);
--- a/src/hotspot/share/opto/loopnode.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/loopnode.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -3968,7 +3968,7 @@
     }
     while(worklist.size() != 0 && LCA != early) {
       Node* s = worklist.pop();
-      if (s->is_Load() || s->Opcode() == Op_SafePoint ||
+      if (s->is_Load() || s->is_ShenandoahBarrier() || s->Opcode() == Op_SafePoint ||
           (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
         continue;
       } else if (s->is_MergeMem()) {
@@ -4185,7 +4185,17 @@
 //------------------------------build_loop_late_post---------------------------
 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
 // Second pass finds latest legal placement, and ideal loop placement.
-void PhaseIdealLoop::build_loop_late_post( Node *n ) {
+void PhaseIdealLoop::build_loop_late_post(Node *n) {
+  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+
+  if (bs->build_loop_late_post(this, n)) {
+    return;
+  }
+
+  build_loop_late_post_work(n, true);
+}
+
+void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {
 
   if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) {
     _igvn._worklist.push(n);  // Maybe we'll normalize it, if no more loops.
@@ -4206,7 +4216,6 @@
     // _must_ be pinned (they have to observe their control edge of course).
     // Unlike Stores (which modify an unallocable resource, the memory
     // state), Mods/Loads can float around.  So free them up.
-    bool pinned = true;
     switch( n->Opcode() ) {
     case Op_DivI:
     case Op_DivF:
@@ -4503,6 +4512,7 @@
     }
   }
 }
+#endif
 
 // Collect a R-P-O for the whole CFG.
 // Result list is in post-order (scan backwards for RPO)
@@ -4525,7 +4535,6 @@
     }
   }
 }
-#endif
 
 
 //=============================================================================
--- a/src/hotspot/share/opto/loopnode.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/loopnode.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -38,6 +38,8 @@
 class LoopNode;
 class Node;
 class OuterStripMinedLoopEndNode;
+class ShenandoahBarrierNode;
+class ShenandoahWriteBarrierNode;
 class PathFrequency;
 class PhaseIdealLoop;
 class CountedLoopReserveKit;
@@ -636,6 +638,8 @@
   friend class IdealLoopTree;
   friend class SuperWord;
   friend class CountedLoopReserveKit;
+  friend class ShenandoahBarrierNode;
+  friend class ShenandoahWriteBarrierNode;
 
   // Pre-computed def-use info
   PhaseIterGVN &_igvn;
@@ -863,7 +867,8 @@
   // Place Data nodes in some loop nest
   void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
   void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
-  void build_loop_late_post ( Node* n );
+  void build_loop_late_post_work(Node* n, bool pinned);
+  void build_loop_late_post(Node* n);
   void verify_strip_mined_scheduling(Node *n, Node* least);
 
   // Array of immediate dominance info for each CFG node indexed by node idx
@@ -1309,7 +1314,6 @@
 #ifndef PRODUCT
   void dump( ) const;
   void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
-  void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
   void verify() const;          // Major slow  :-)
   void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const;
   IdealLoopTree *get_loop_idx(Node* n) const {
@@ -1321,6 +1325,7 @@
   static int _loop_invokes;     // Count of PhaseIdealLoop invokes
   static int _loop_work;        // Sum of PhaseIdealLoop x _unique
 #endif
+  void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
 };
 
 // This kit may be used for making of a reserved copy of a loop before this loop
--- a/src/hotspot/share/opto/loopopts.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/loopopts.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -1024,6 +1024,11 @@
         Node* m = n->fast_out(j);
         if (m->is_FastLock())
           return false;
+#if INCLUDE_SHENANDOAHGC
+        if (m->is_ShenandoahBarrier() && m->has_out_with(Op_FastLock)) {
+          return false;
+        }
+#endif
 #ifdef _LP64
         if (m->Opcode() == Op_ConvI2L)
           return false;
@@ -1310,6 +1315,7 @@
         // control, then the cloning of n is a pointless exercise, because
         // GVN will ensure that we end up where we started.
         if (!n->is_Load() || late_load_ctrl != n_ctrl) {
+          BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
           for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
             Node *u = n->last_out(j); // Clone private computation per use
             _igvn.rehash_node_delayed(u);
@@ -1340,6 +1346,10 @@
             // For inner loop uses get the preheader area.
             x_ctrl = place_near_use(x_ctrl);
 
+            if (bs->sink_node(this, n, x, x_ctrl, n_ctrl)) {
+              continue;
+            }
+
             if (n->is_Load()) {
               // For loads, add a control edge to a CFG node outside of the loop
               // to force them to not combine and return back inside the loop
@@ -3137,7 +3147,7 @@
 
           // if not pinned and not a load (which maybe anti-dependent on a store)
           // and not a CMove (Matcher expects only bool->cmove).
-          if ( n->in(0) == NULL && !n->is_Load() && !n->is_CMove() ) {
+          if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove() && n->Opcode() != Op_ShenandoahWBMemProj) {
             cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
             sink_list.push(n);
             peel     >>= n->_idx; // delete n from peel set.
--- a/src/hotspot/share/opto/macro.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/macro.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -47,9 +47,13 @@
 #include "opto/subnode.hpp"
 #include "opto/type.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
 #endif // INCLUDE_G1GC
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+#endif
 
 
 //
@@ -629,6 +633,7 @@
                                    k < kmax && can_eliminate; k++) {
           Node* n = use->fast_out(k);
           if (!n->is_Store() && n->Opcode() != Op_CastP2X &&
+              SHENANDOAHGC_ONLY((!UseShenandoahGC || !ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(n)) &&)
               !(n->is_ArrayCopy() &&
                 n->as_ArrayCopy()->is_clonebasic() &&
                 n->in(ArrayCopyNode::Dest) == use)) {
--- a/src/hotspot/share/opto/node.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/node.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -142,6 +142,7 @@
 class RootNode;
 class SafePointNode;
 class SafePointScalarObjectNode;
+class ShenandoahBarrierNode;
 class StartNode;
 class State;
 class StoreNode;
@@ -675,6 +676,7 @@
       DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
         DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
         DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
+      DEFINE_CLASS_ID(ShenandoahBarrier, Type, 7)
 
     DEFINE_CLASS_ID(Proj,  Node, 3)
       DEFINE_CLASS_ID(CatchProj, Proj, 0)
@@ -873,6 +875,7 @@
   DEFINE_CLASS_QUERY(Root)
   DEFINE_CLASS_QUERY(SafePoint)
   DEFINE_CLASS_QUERY(SafePointScalarObject)
+  DEFINE_CLASS_QUERY(ShenandoahBarrier)
   DEFINE_CLASS_QUERY(Start)
   DEFINE_CLASS_QUERY(Store)
   DEFINE_CLASS_QUERY(Sub)
--- a/src/hotspot/share/opto/type.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/type.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -3044,6 +3044,10 @@
   return this;
 }
 
+const TypeOopPtr *TypeOopPtr::cast_to_nonconst() const {
+  return this;
+}
+
 //-----------------------------cast_to_exactness-------------------------------
 const Type *TypeOopPtr::cast_to_exactness(bool klass_is_exact) const {
   // There is no such thing as an exact general oop.
@@ -3546,6 +3550,11 @@
   return make(_ptr, klass(), _klass_is_exact, const_oop(), _offset, instance_id, _speculative, _inline_depth);
 }
 
+const TypeOopPtr *TypeInstPtr::cast_to_nonconst() const {
+  if (const_oop() == NULL) return this;
+  return make(NotNull, klass(), _klass_is_exact, NULL, _offset, _instance_id, _speculative, _inline_depth);
+}
+
 //------------------------------xmeet_unloaded---------------------------------
 // Compute the MEET of two InstPtrs when at least one is unloaded.
 // Assume classes are different since called after check for same name/class-loader
@@ -4073,6 +4082,12 @@
   return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id, _speculative, _inline_depth);
 }
 
+const TypeOopPtr *TypeAryPtr::cast_to_nonconst() const {
+  if (const_oop() == NULL) return this;
+  return make(NotNull, NULL, _ary, klass(), _klass_is_exact, _offset, _instance_id, _speculative, _inline_depth);
+}
+
+
 //-----------------------------narrow_size_type-------------------------------
 // Local cache for arrayOopDesc::max_array_length(etype),
 // which is kind of slow (and cached elsewhere by other users).
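cast_to_nonconst strips the constant oop identity from an oop pointer type while keeping the klass and offset information; presumably this lets Shenandoah's barrier code retype a reference that can no longer be assumed to be the compile-time constant. An illustrative use (variable names hypothetical):

// t is some const TypeOopPtr* obtained from the GVN; the result keeps the
// klass but carries no const_oop(), and is NotNull for Inst/Ary pointers.
const TypeOopPtr* t_nonconst = t->cast_to_nonconst();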
--- a/src/hotspot/share/opto/type.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/opto/type.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1028,6 +1028,8 @@
 
   virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;
 
+  virtual const TypeOopPtr *cast_to_nonconst() const;
+
   // corresponding pointer to klass, for a given instance
   const TypeKlassPtr* as_klass_type() const;
 
@@ -1110,6 +1112,8 @@
 
   virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;
 
+  virtual const TypeOopPtr *cast_to_nonconst() const;
+
   virtual const TypePtr *add_offset( intptr_t offset ) const;
 
   // Speculative type helper methods.
@@ -1193,6 +1197,8 @@
 
   virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;
 
+  virtual const TypeOopPtr *cast_to_nonconst() const;
+
   virtual const TypeAryPtr* cast_to_size(const TypeInt* size) const;
   virtual const TypeInt* narrow_size_type(const TypeInt* size) const;
 
@@ -1770,6 +1776,8 @@
 // UseOptoBiasInlining
 #define XorXNode     XorLNode
 #define StoreXConditionalNode StoreLConditionalNode
+#define LoadXNode    LoadLNode
+#define StoreXNode   StoreLNode
 // Opcodes
 #define Op_LShiftX   Op_LShiftL
 #define Op_AndX      Op_AndL
@@ -1815,6 +1823,8 @@
 // UseOptoBiasInlining
 #define XorXNode     XorINode
 #define StoreXConditionalNode StoreIConditionalNode
+#define LoadXNode    LoadINode
+#define StoreXNode   StoreINode
 // Opcodes
 #define Op_LShiftX   Op_LShiftI
 #define Op_AndX      Op_AndI
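The LoadXNode/StoreXNode defines extend the existing word-size aliasing block: under _LP64 the X names resolve to the 64-bit node classes, otherwise to the 32-bit ones, so shared code names the machine-word variant once. A standalone toy of the idiom (not the HotSpot classes themselves):

#include <cstdio>

struct LoadLNode { static const char* name() { return "LoadL (64-bit)"; } };
struct LoadINode { static const char* name() { return "LoadI (32-bit)"; } };

#ifdef _LP64
#define LoadXNode LoadLNode   // X == machine word == 64 bits
#else
#define LoadXNode LoadINode   // X == machine word == 32 bits
#endif

int main() {
  // Shared code mentions only LoadXNode; the build selects the width.
  printf("word-sized load node: %s\n", LoadXNode::name());
  return 0;
}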
--- a/src/hotspot/share/runtime/fieldDescriptor.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/runtime/fieldDescriptor.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -91,6 +91,7 @@
 
   bool is_static()                const    { return access_flags().is_static(); }
   bool is_final()                 const    { return access_flags().is_final(); }
+  bool is_stable()                const    { return access_flags().is_stable(); }
   bool is_volatile()              const    { return access_flags().is_volatile(); }
   bool is_transient()             const    { return access_flags().is_transient(); }
 
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Mon Dec 10 15:47:44 2018 +0100
@@ -234,6 +234,14 @@
 
     def(MonitoringSupport_lock     , PaddedMutex  , native   ,   true,  Monitor::_safepoint_check_never);      // used for serviceability monitoring support
   }
+  if (UseShenandoahGC) {
+    def(SATB_Q_FL_lock             , PaddedMutex  , access,      true,  Monitor::_safepoint_check_never);
+    def(SATB_Q_CBL_mon             , PaddedMonitor, access,      true,  Monitor::_safepoint_check_never);
+    def(Shared_SATB_Q_lock         , PaddedMutex  , access + 1,  true,  Monitor::_safepoint_check_never);
+
+    def(StringDedupQueue_lock      , PaddedMonitor, leaf,        true,  Monitor::_safepoint_check_never);
+    def(StringDedupTable_lock      , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
+  }
   def(ParGCRareEvent_lock          , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_sometimes);
   def(DerivedPointerTableGC_lock   , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
   def(CGCPhaseManager_lock         , PaddedMonitor, leaf,        false, Monitor::_safepoint_check_sometimes);
--- a/src/hotspot/share/runtime/vmOperations.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/runtime/vmOperations.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -98,6 +98,15 @@
   template(HeapIterateOperation)                  \
   template(ReportJavaOutOfMemory)                 \
   template(JFRCheckpoint)                         \
+  template(ShenandoahFullGC)                      \
+  template(ShenandoahInitMark)                    \
+  template(ShenandoahFinalMarkStartEvac)          \
+  template(ShenandoahFinalEvac)                   \
+  template(ShenandoahInitTraversalGC)             \
+  template(ShenandoahFinalTraversalGC)            \
+  template(ShenandoahInitUpdateRefs)              \
+  template(ShenandoahFinalUpdateRefs)             \
+  template(ShenandoahDegeneratedGC)               \
   template(Exit)                                  \
   template(LinuxDllLoad)                          \
   template(RotateGCLog)                           \
--- a/src/hotspot/share/utilities/globalDefinitions.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -69,6 +69,7 @@
 #define UINT64_FORMAT_X        "%" PRIx64
 #define INT64_FORMAT_W(width)  "%" #width PRId64
 #define UINT64_FORMAT_W(width) "%" #width PRIu64
+#define UINT64_FORMAT_X_W(width) "%" #width PRIx64
 
 #define PTR64_FORMAT           "0x%016" PRIx64
 
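UINT64_FORMAT_X_W complements the existing width-parameterized formats with a hex variant: the width token is stringized and pasted between "%" and PRIx64. A small standalone usage sketch:

#include <cinttypes>
#include <cstdio>

#define UINT64_FORMAT_X_W(width) "%" #width PRIx64

int main() {
  uint64_t bits = 0xdeadbeefULL;
  // UINT64_FORMAT_X_W(016) expands to "%016" PRIx64: zero-padded hex.
  printf("bits: " UINT64_FORMAT_X_W(016) "\n", bits);
  return 0;
}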
--- a/src/hotspot/share/utilities/macros.hpp	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/hotspot/share/utilities/macros.hpp	Mon Dec 10 15:47:44 2018 +0100
@@ -221,6 +221,24 @@
 #define NOT_SERIALGC_RETURN_(code) { return code; }
 #endif // INCLUDE_SERIALGC
 
+#ifndef INCLUDE_SHENANDOAHGC
+#define INCLUDE_SHENANDOAHGC 1
+#endif // INCLUDE_SHENANDOAHGC
+
+#if INCLUDE_SHENANDOAHGC
+#define SHENANDOAHGC_ONLY(x) x
+#define SHENANDOAHGC_ONLY_ARG(arg) arg,
+#define NOT_SHENANDOAHGC(x)
+#define NOT_SHENANDOAHGC_RETURN        /* next token must be ; */
+#define NOT_SHENANDOAHGC_RETURN_(code) /* next token must be ; */
+#else
+#define SHENANDOAHGC_ONLY(x)
+#define SHENANDOAHGC_ONLY_ARG(arg)
+#define NOT_SHENANDOAHGC(x) x
+#define NOT_SHENANDOAHGC_RETURN        {}
+#define NOT_SHENANDOAHGC_RETURN_(code) { return code; }
+#endif // INCLUDE_SHENANDOAHGC
+
 #ifndef INCLUDE_ZGC
 #define INCLUDE_ZGC 1
 #endif // INCLUDE_ZGC
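The SHENANDOAHGC_ONLY family mirrors the per-GC macro sets that precede it. A hypothetical call site showing how the variants read (illustrative, not HotSpot code):

#include <cstdio>

// The signature gains a leading parameter only when Shenandoah is built
// in; the body conditionally keeps or drops the Shenandoah branch.
void print_heap_summary(SHENANDOAHGC_ONLY_ARG(size_t regions)
                        size_t used_bytes) {
  SHENANDOAHGC_ONLY(printf("regions: %zu\n", regions);)
  NOT_SHENANDOAHGC(printf("(Shenandoah support compiled out)\n");)
  printf("used: %zu bytes\n", used_bytes);
}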
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java	Mon Dec 10 15:47:44 2018 +0100
@@ -36,6 +36,7 @@
 import sun.jvm.hotspot.gc.epsilon.*;
 import sun.jvm.hotspot.gc.parallel.*;
 import sun.jvm.hotspot.gc.shared.*;
+import sun.jvm.hotspot.gc.shenandoah.*;
 import sun.jvm.hotspot.gc.g1.*;
 import sun.jvm.hotspot.gc.z.*;
 import sun.jvm.hotspot.interpreter.*;
@@ -1113,6 +1114,10 @@
                         } else if (collHeap instanceof EpsilonHeap) {
                           anno = "Epsilon ";
                           bad = false;
+                        } else if (collHeap instanceof ShenandoahHeap) {
+                          ShenandoahHeap heap = (ShenandoahHeap) collHeap;
+                          anno = "ShenandoahHeap ";
+                          bad = false;
                         } else if (collHeap instanceof ZCollectedHeap) {
                           ZCollectedHeap heap = (ZCollectedHeap) collHeap;
                           anno = "ZHeap ";
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java	Mon Dec 10 15:47:44 2018 +0100
@@ -37,6 +37,7 @@
   public static final CollectedHeapName G1 = new CollectedHeapName("G1");
   public static final CollectedHeapName EPSILON = new CollectedHeapName("Epsilon");
   public static final CollectedHeapName Z = new CollectedHeapName("Z");
+  public static final CollectedHeapName SHENANDOAH = new CollectedHeapName("Shenandoah");
 
   public String toString() {
     return name;
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java	Mon Dec 10 15:47:44 2018 +0100
@@ -67,6 +67,12 @@
   _z_allocation_stall ("Allocation Stall"),
   _z_proactive ("Proactive"),
 
+  _shenandoah_allocation_failure_evac ("Allocation Failure During Evacuation"),
+  _shenandoah_stop_vm ("Stopping VM"),
+  _shenandoah_concurrent_gc ("Concurrent GC"),
+  _shenandoah_traversal_gc ("Traversal GC"),
+  _shenandoah_upgrade_to_full_gc ("Upgrade To Full GC"),
+
   _last_gc_cause ("ILLEGAL VALUE - last gc cause - ILLEGAL VALUE");
 
   private final String value;
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java	Mon Dec 10 15:47:44 2018 +0100
@@ -38,6 +38,7 @@
   G1Old ("G1Old"),
   G1Full ("G1Full"),
   Z ("Z"),
+  Shenandoah ("Shenandoah"),
   NA ("N/A"),
   GCNameEndSentinel ("GCNameEndSentinel");
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahHeap.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.shenandoah;
+
+import sun.jvm.hotspot.gc.shared.CollectedHeap;
+import sun.jvm.hotspot.gc.shared.CollectedHeapName;
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.memory.MemRegion;
+import sun.jvm.hotspot.types.CIntegerField;
+import java.io.PrintStream;
+import java.util.Observable;
+import java.util.Observer;
+
+public class ShenandoahHeap extends CollectedHeap {
+    static private CIntegerField numRegions;
+    static private CIntegerField used;
+    static private CIntegerField committed;
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+            public void update(Observable o, Object data) {
+                initialize(VM.getVM().getTypeDataBase());
+            }
+        });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ShenandoahHeap");
+        numRegions = type.getCIntegerField("_num_regions");
+        used = type.getCIntegerField("_used");
+        committed = type.getCIntegerField("_committed");
+    }
+
+    @Override
+    public CollectedHeapName kind() {
+        return CollectedHeapName.SHENANDOAH;
+    }
+
+    public long numOfRegions() {
+        return numRegions.getValue(addr);
+    }
+
+    @Override
+    public long used() {
+        return used.getValue(addr);
+    }
+
+    public long committed() {
+        return committed.getValue(addr);
+    }
+
+    @Override
+    public void printOn(PrintStream tty) {
+        MemRegion mr = reservedRegion();
+        tty.print("Shenandoah heap");
+        tty.print(" [" + mr.start() + ", " + mr.end() + "]");
+        tty.println(" region size " + ShenandoahHeapRegion.regionSizeBytes() / 1024 + " K");
+    }
+
+    public ShenandoahHeap(Address addr) {
+        super(addr);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shenandoah/ShenandoahHeapRegion.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.shenandoah;
+
+import sun.jvm.hotspot.gc.shared.ContiguousSpace;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.debugger.Address;
+
+import java.util.Observable;
+import java.util.Observer;
+
+
+public class ShenandoahHeapRegion extends ContiguousSpace {
+    private static CIntegerField RegionSizeBytes;
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+            public void update(Observable o, Object data) {
+                initialize(VM.getVM().getTypeDataBase());
+            }
+        });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ShenandoahHeapRegion");
+        RegionSizeBytes = type.getCIntegerField("RegionSizeBytes");
+    }
+
+    public static long regionSizeBytes() { return RegionSizeBytes.getValue(); }
+
+    public ShenandoahHeapRegion(Address addr) {
+        super(addr);
+    }
+}
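
The two SA classes above resolve the _num_regions, _used and _committed fields plus the RegionSizeBytes static through the SA type database. A minimal sketch of how a debugger client could use them, assuming an already-running debuggee whose pid is passed on the command line (the ShenandoahHeapProbe driver below is illustrative only and not part of this changeset):

    import sun.jvm.hotspot.HotSpotAgent;
    import sun.jvm.hotspot.gc.shared.CollectedHeap;
    import sun.jvm.hotspot.gc.shenandoah.ShenandoahHeap;
    import sun.jvm.hotspot.gc.shenandoah.ShenandoahHeapRegion;
    import sun.jvm.hotspot.runtime.VM;

    public class ShenandoahHeapProbe {
        public static void main(String[] args) {
            HotSpotAgent agent = new HotSpotAgent();
            agent.attach(Integer.parseInt(args[0])); // pid of the debuggee
            try {
                CollectedHeap heap = VM.getVM().getUniverse().heap();
                if (heap instanceof ShenandoahHeap) {
                    ShenandoahHeap sh = (ShenandoahHeap) heap;
                    // Capacity is derived the same way HeapSummary does below:
                    // region count times the static region size.
                    long capacity = sh.numOfRegions() * ShenandoahHeapRegion.regionSizeBytes();
                    System.out.println("regions   = " + sh.numOfRegions());
                    System.out.println("capacity  = " + capacity + " bytes");
                    System.out.println("used      = " + sh.used() + " bytes");
                    System.out.println("committed = " + sh.committed() + " bytes");
                }
            } finally {
                agent.detach();
            }
        }
    }
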
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java	Mon Dec 10 15:47:44 2018 +0100
@@ -36,6 +36,7 @@
 import sun.jvm.hotspot.gc.parallel.ParallelScavengeHeap;
 import sun.jvm.hotspot.gc.serial.SerialHeap;
 import sun.jvm.hotspot.gc.shared.CollectedHeap;
+import sun.jvm.hotspot.gc.shenandoah.ShenandoahHeap;
 import sun.jvm.hotspot.gc.z.ZCollectedHeap;
 import sun.jvm.hotspot.oops.Oop;
 import sun.jvm.hotspot.runtime.BasicType;
@@ -100,6 +101,7 @@
     addHeapTypeIfInDB(db, G1CollectedHeap.class);
     addHeapTypeIfInDB(db, EpsilonHeap.class);
     addHeapTypeIfInDB(db, ZCollectedHeap.class);
+    addHeapTypeIfInDB(db, ShenandoahHeap.class);
 
     mainThreadGroupField   = type.getOopField("_main_thread_group");
     systemThreadGroupField = type.getOopField("_system_thread_group");
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Mon Dec 10 15:47:44 2018 +0100
@@ -36,6 +36,7 @@
 import sun.jvm.hotspot.gc.shared.*;
 import sun.jvm.hotspot.gc.epsilon.*;
 import sun.jvm.hotspot.gc.g1.*;
+import sun.jvm.hotspot.gc.shenandoah.*;
 import sun.jvm.hotspot.gc.parallel.*;
 import sun.jvm.hotspot.gc.z.*;
 import sun.jvm.hotspot.memory.*;
@@ -367,6 +368,10 @@
     } else if (heap instanceof G1CollectedHeap) {
         G1CollectedHeap g1h = (G1CollectedHeap) heap;
         g1h.heapRegionIterate(lrc);
+    } else if (heap instanceof ShenandoahHeap) {
+       // Operation (currently) not supported with Shenandoah GC. Print
+       // a warning and leave the list of live regions empty.
+       System.err.println("Warning: Operation not supported with Shenandoah GC");
     } else if (heap instanceof ZCollectedHeap) {
        // Operation (currently) not supported with ZGC. Print
        // a warning and leave the list of live regions empty.
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMOps.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMOps.java	Mon Dec 10 15:47:44 2018 +0100
@@ -54,6 +54,7 @@
   CMS_Final_Remark,
   G1CollectFull,
   ZOperation,
+  ShenandoahOperation,
   G1CollectForAllocation,
   G1IncCollectionPause,
   G1Concurrent,
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Mon Dec 10 15:47:44 2018 +0100
@@ -29,6 +29,7 @@
 import sun.jvm.hotspot.gc.g1.*;
 import sun.jvm.hotspot.gc.parallel.*;
 import sun.jvm.hotspot.gc.serial.*;
+import sun.jvm.hotspot.gc.shenandoah.*;
 import sun.jvm.hotspot.gc.shared.*;
 import sun.jvm.hotspot.gc.z.*;
 import sun.jvm.hotspot.debugger.JVMDebugger;
@@ -83,7 +84,11 @@
       printValMB("MetaspaceSize            = ", getFlagValue("MetaspaceSize", flagMap));
       printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap));
       printValMB("MaxMetaspaceSize         = ", getFlagValue("MaxMetaspaceSize", flagMap));
-      printValMB("G1HeapRegionSize         = ", HeapRegion.grainBytes());
+      if (heap instanceof ShenandoahHeap) {
+         printValMB("ShenandoahRegionSize     = ", ShenandoahHeapRegion.regionSizeBytes());
+      } else {
+         printValMB("G1HeapRegionSize         = ", HeapRegion.grainBytes());
+      }
 
       System.out.println();
       System.out.println("Heap Usage:");
@@ -126,6 +131,14 @@
          printValMB("used     = ", oldGen.used());
          printValMB("free     = ", oldFree);
          System.out.println(alignment + (double)oldGen.used() * 100.0 / oldGen.capacity() + "% used");
+      } else if (heap instanceof ShenandoahHeap) {
+         ShenandoahHeap sh = (ShenandoahHeap) heap;
+         long num_regions = sh.numOfRegions();
+         System.out.println("Shenandoah Heap:");
+         System.out.println("   regions   = " + num_regions);
+         printValMB("capacity  = ", num_regions * ShenandoahHeapRegion.regionSizeBytes());
+         printValMB("used      = ", sh.used());
+         printValMB("committed = ", sh.committed());
       } else if (heap instanceof EpsilonHeap) {
          EpsilonHeap eh = (EpsilonHeap) heap;
          printSpace(eh.space());
@@ -183,6 +196,14 @@
            return;
        }
 
+       l = getFlagValue("UseShenandoahGC", flagMap);
+       if (l == 1L) {
+           System.out.print("Shenandoah GC ");
+           l = getFlagValue("ParallelGCThreads", flagMap);
+           System.out.println("with " + l + " thread(s)");
+           return;
+       }
+
        System.out.println("Mark Sweep Compact GC");
    }
 
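
The HeapSummary changes above make the standard SA heap report Shenandoah-aware. A minimal way to drive it, assuming a debuggee pid on the command line (the wrapper class is illustrative; HeapSummary itself already provides the main entry point used by the SA tooling):

    import sun.jvm.hotspot.tools.HeapSummary;

    public class RunShenandoahHeapSummary {
        public static void main(String[] args) {
            // Attaches to the pid given in args[0] and prints the heap
            // configuration and usage sections, including the new
            // Shenandoah branches added above.
            HeapSummary.main(args); // e.g. args = { "1234" }
        }
    }
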
--- a/test/hotspot/jtreg/TEST.ROOT	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/TEST.ROOT	Mon Dec 10 15:47:44 2018 +0100
@@ -46,6 +46,7 @@
     vm.gc.Serial \
     vm.gc.Parallel \
     vm.gc.ConcMarkSweep \
+    vm.gc.Shenandoah \
     vm.gc.Epsilon \
     vm.gc.Z \
     vm.jvmci \
--- a/test/hotspot/jtreg/TEST.groups	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/TEST.groups	Mon Dec 10 15:47:44 2018 +0100
@@ -168,27 +168,34 @@
   :tier1_gc_1 \
   :tier1_gc_2 \
   :tier1_gc_gcold \
-  :tier1_gc_gcbasher
+  :tier1_gc_gcbasher \
+  :tier1_gc_shenandoah
 
 hotspot_not_fast_gc = \
   :hotspot_gc \
   -:tier1_gc
 
 tier1_gc_1 = \
-  gc/epsilon/ \
+  :gc_epsilon \
   gc/g1/ \
   -gc/g1/ihop/TestIHOPErgo.java \
   -gc/g1/TestTimelyCompaction.java
 
 tier1_gc_2 = \
   gc/ \
-  -gc/epsilon/ \
+  -:gc_epsilon \
   -gc/g1/ \
   -gc/logging/TestUnifiedLoggingSwitchStress.java \
   -gc/stress \
   -gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java \
   -gc/cms/TestMBeanCMS.java \
-  -gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java
+  -gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
+  -gc/shenandoah
+
+gc_epsilon = \
+  gc/epsilon/ \
+  gc/CriticalNativeArgs.java \
+  gc/stress/CriticalNativeStress.java
 
 tier1_gc_gcold = \
   gc/stress/gcold/TestGCOldWithG1.java \
@@ -201,6 +208,51 @@
   gc/stress/gcbasher/TestGCBasherWithCMS.java \
   gc/stress/gcbasher/TestGCBasherWithSerial.java \
   gc/stress/gcbasher/TestGCBasherWithParallel.java
+
+tier1_gc_shenandoah = \
+  gc/shenandoah/options/ \
+  gc/shenandoah/compiler/ \
+  gc/shenandoah/mxbeans/ \
+  gc/shenandoah/TestSmallHeap.java \
+  gc/shenandoah/oom/ \
+  gc/CriticalNativeArgs.java
+
+tier2_gc_shenandoah = \
+  runtime/MemberName/MemberNameLeak.java \
+  runtime/CompressedOops/UseCompressedOops.java \
+  gc/TestHumongousReferenceObject.java \
+  gc/TestSystemGC.java \
+  gc/arguments/TestDisableDefaultGC.java \
+  gc/arguments/TestUseCompressedOopsErgo.java \
+  gc/arguments/TestAlignmentToUseLargePages.java \
+  gc/class_unloading/TestClassUnloadingDisabled.java \
+  gc/ergonomics/TestInitialGCThreadLogging.java \
+  gc/ergonomics/TestDynamicNumberOfGCThreads.java \
+  gc/logging/TestGCId.java \
+  gc/metaspace/TestMetaspacePerfCounters.java \
+  gc/startup_warnings/TestShenandoah.java \
+  gc/TestFullGCALot.java \
+  gc/logging/TestUnifiedLoggingSwitchStress.java \
+  runtime/Metaspace/DefineClass.java \
+  gc/shenandoah/ \
+  serviceability/sa/TestHeapDumpForInvokeDynamic.java \
+  -gc/shenandoah/TestStringDedupStress.java \
+  -gc/stress/CriticalNativeStress.java \
+  -:tier1_gc_shenandoah
+
+tier3_gc_shenandoah = \
+  gc/stress/gcold/TestGCOldWithShenandoah.java \
+  gc/stress/gcbasher/TestGCBasherWithShenandoah.java \
+  gc/stress/gclocker/TestGCLockerWithShenandoah.java \
+  gc/stress/systemgc/TestSystemGCWithShenandoah.java \
+  gc/shenandoah/TestStringDedupStress.java \
+  gc/stress/CriticalNativeStress.java \
+  -:tier2_gc_shenandoah
+
+hotspot_gc_shenandoah = \
+  :tier1_gc_shenandoah \
+  :tier2_gc_shenandoah \
+  :tier3_gc_shenandoah
 
 tier1_runtime = \
   runtime/ \
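
With these groups in place, the Shenandoah-specific tiers can be run on their own. Assuming a configured build tree, an invocation along the lines of the run-test framework's usual syntax would be (exact spelling may vary by release; see doc/testing for the authoritative form):

    make run-test TEST=hotspot_gc_shenandoah
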
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java	Mon Dec 10 15:47:44 2018 +0100
@@ -39,7 +39,9 @@
  *                    CMS,
  *                    CMSCondMark,
  *                    Serial,
- *                    Parallel}
+ *                    Parallel,
+ *                    Shenandoah,
+ *                    ShenandoahTraversal}
  */
 
 
@@ -100,6 +102,19 @@
             procArgs[argcount - 3] = "-XX:+UseConcMarkSweepGC";
             procArgs[argcount - 2] = "-XX:+UseCondCardMark";
             break;
+        case "Shenandoah":
+            argcount = 10;
+            procArgs = new String[argcount];
+            procArgs[argcount - 3] = "-XX:+UnlockExperimentalVMOptions";
+            procArgs[argcount - 2] = "-XX:+UseShenandoahGC";
+            break;
+        case "ShenandoahTraversal":
+            argcount = 11;
+            procArgs = new String[argcount];
+            procArgs[argcount - 4] = "-XX:+UnlockExperimentalVMOptions";
+            procArgs[argcount - 3] = "-XX:+UseShenandoahGC";
+            procArgs[argcount - 2] = "-XX:ShenandoahGCHeuristics=traversal";
+            break;
         default:
             throw new RuntimeException("unexpected test type " + testType);
         }
@@ -355,6 +370,17 @@
                     "ret"
                 };
                 break;
+            case "Shenandoah":
+            case "ShenandoahTraversal":
+                // Shenandoah generates normal object graphs for
+                // volatile stores
+                matches = new String[] {
+                    "membar_release \\(elided\\)",
+                    useCompressedOops ? "stlrw?" : "stlr",
+                    "membar_volatile \\(elided\\)",
+                    "ret"
+                };
+                break;
             }
         } else {
             switch (testType) {
@@ -418,6 +444,20 @@
                     "ret"
                 };
                 break;
+
+            case "Shenandoah":
+            case "ShenandoahTraversal":
+                // Shenandoah generates normal object graphs for
+                // volatile stores
+                matches = new String[] {
+                    "membar_release",
+                    "dmb ish",
+                    useCompressedOops ? "strw?" : "str",
+                    "membar_volatile",
+                    "dmb ish",
+                    "ret"
+                };
+                break;
             }
         }
 
@@ -520,6 +560,17 @@
                     "ret"
                 };
                 break;
+            case "Shenandoah":
+            case "ShenandoahTraversal":
+                // For volatile CAS, Shenandoah generates normal
+                // graphs with a shenandoah-specific cmpxchg
+                matches = new String[] {
+                    "membar_release \\(elided\\)",
+                    useCompressedOops ? "cmpxchgw?_acq_shenandoah" : "cmpxchg_acq_shenandoah",
+                    "membar_acquire \\(elided\\)",
+                    "ret"
+                };
+                break;
             }
         } else {
             switch (testType) {
@@ -583,6 +634,19 @@
                     "ret"
                 };
                 break;
+            case "Shenandoah":
+            case "ShenandoahTraversal":
+                // For volatile CAS, Shenandoah generates normal
+                // graphs with a shenandoah-specific cmpxchg
+                matches = new String[] {
+                    "membar_release",
+                    "dmb ish",
+                    useCompressedOops ? "cmpxchgw?_shenandoah" : "cmpxchg_shenandoah",
+                    "membar_acquire",
+                    "dmb ish",
+                    "ret"
+                };
+                break;
             }
         }
 
@@ -701,6 +765,17 @@
                     "ret"
                 };
                 break;
+            case "Shenandoah":
+            case "ShenandoahTraversal":
+                // For volatile CAS, Shenandoah generates normal
+                // graphs with a shenandoah-specific cmpxchg
+                matches = new String[] {
+                    "membar_release \\(elided\\)",
+                    useCompressedOops ? "cmpxchgw?_acq_shenandoah" : "cmpxchg_acq_shenandoah",
+                    "membar_acquire \\(elided\\)",
+                    "ret"
+                };
+                break;
             }
         } else {
             switch (testType) {
@@ -764,6 +839,19 @@
                     "ret"
                 };
                 break;
+            case "Shenandoah":
+            case "ShenandoahTraversal":
+                // For volatile CAS, Shenandoah generates normal
+                // graphs with a shenandoah-specific cmpxchg
+                matches = new String[] {
+                    "membar_release",
+                    "dmb ish",
+                    useCompressedOops ? "cmpxchgw?_shenandoah" : "cmpxchg_shenandoah",
+                    "membar_acquire",
+                    "dmb ish",
+                    "ret"
+                };
+                break;
             }
         }
 
@@ -862,6 +950,15 @@
                     "ret"
                 };
                 break;
+            case "Shenandoah":
+            case "ShenandoahTraversal":
+                matches = new String[] {
+                    "membar_release \\(elided\\)",
+                    useCompressedOops ? "atomic_xchgw?_acq" : "atomic_xchg_acq",
+                    "membar_acquire \\(elided\\)",
+                    "ret"
+                };
+                break;
             }
         } else {
             switch (testType) {
@@ -925,6 +1022,17 @@
                     "ret"
                 };
                 break;
+            case "Shenandoah":
+            case "ShenandoahTraversal":
+                matches = new String[] {
+                    "membar_release",
+                    "dmb ish",
+                    useCompressedOops ? "atomic_xchgw? " : "atomic_xchg ",
+                    "membar_acquire",
+                    "dmb ish",
+                    "ret"
+                };
+                break;
             }
         }
 
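
All of the matches arrays above are checked, in order, against the compiler's -XX:+PrintOptoAssembly output for the compiled test method. A self-contained sketch of that kind of ordered pattern matching (checkOrdered and the sample data are illustrative, not the test's actual helper):

    import java.util.Arrays;
    import java.util.List;
    import java.util.regex.Pattern;

    public class OrderedMatchSketch {
        // Each expected pattern must appear somewhere in the output,
        // and in the same order as listed.
        static void checkOrdered(List<String> output, String[] matches) {
            int from = 0;
            for (String m : matches) {
                Pattern p = Pattern.compile(m);
                boolean found = false;
                while (from < output.size()) {
                    if (p.matcher(output.get(from++)).find()) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    throw new RuntimeException("missing pattern: " + m);
                }
            }
        }

        public static void main(String[] args) {
            List<String> output = Arrays.asList(
                "membar_release (elided)",
                "stlrw",
                "membar_volatile (elided)",
                "ret");
            checkOrdered(output, new String[] {
                "membar_release \\(elided\\)",
                "stlrw?",
                "membar_volatile \\(elided\\)",
                "ret"});
            System.out.println("all patterns matched in order");
        }
    }
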
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesShenandoah.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary C2 should use ldar, stlr and ldaxr+stlxr insns for volatile operations
+ * @library /test/lib /
+ *
+ * @modules java.base/jdk.internal.misc
+ *
+ * @requires os.arch=="aarch64" & vm.debug == true &
+ *           vm.flavor == "server" & !vm.graal.enabled &
+ *           vm.gc.Shenandoah
+ *
+ * @build compiler.c2.aarch64.TestVolatiles
+ *        compiler.c2.aarch64.TestVolatileLoad
+ *        compiler.c2.aarch64.TestUnsafeVolatileLoad
+ *        compiler.c2.aarch64.TestVolatileStore
+ *        compiler.c2.aarch64.TestUnsafeVolatileStore
+ *        compiler.c2.aarch64.TestUnsafeVolatileCAS
+ *        compiler.c2.aarch64.TestUnsafeVolatileWeakCAS
+ *        compiler.c2.aarch64.TestUnsafeVolatileCAE
+ *        compiler.c2.aarch64.TestUnsafeVolatileGAS
+ *        compiler.c2.aarch64.TestUnsafeVolatileGAA
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestVolatileLoad Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestVolatileStore Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileLoad Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileStore Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileCAS Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileWeakCAS Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileCAE Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileGAS Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileGAA Shenandoah
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestVolatileLoad ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestVolatileStore ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileLoad ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileStore ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileCAS ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileWeakCAS ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileCAE ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileGAS ShenandoahTraversal
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesShenandoah
+ *      TestUnsafeVolatileGAA ShenandoahTraversal
+ *
+ */
+
+package compiler.c2.aarch64;
+
+public class TestVolatilesShenandoah {
+    public static void main(String args[]) throws Throwable
+    {
+        // delegate work to shared code
+        new TestVolatiles().runtest(args[0], args[1]);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/CriticalNativeArgs.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+/*
+ * @test CriticalNativeArgsEpsilon
+ * @key gc
+ * @bug 8199868
+ * @requires (os.arch =="x86_64" | os.arch == "amd64") & vm.gc.Epsilon & !vm.graal.enabled
+ * @summary test argument unpacking by the nmethod wrapper of a critical native method
+ * @run main/othervm/native -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xcomp -Xmx256M -XX:+CriticalJNINatives CriticalNativeArgs
+ */
+
+/*
+ * @test CriticalNativeArgsShenandoah
+ * @key gc
+ * @bug 8199868
+ * @requires (os.arch =="x86_64" | os.arch == "amd64") & vm.gc.Shenandoah & !vm.graal.enabled
+ * @summary test argument unpacking by the nmethod wrapper of a critical native method
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive    -XX:+ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeArgs
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive    -XX:-ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeArgs
+ *
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeArgs
+ *
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC                                                                        -Xcomp -Xmx256M -XX:+CriticalJNINatives CriticalNativeArgs
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeArgs
+ */
+public class CriticalNativeArgs {
+    static {
+        System.loadLibrary("CriticalNative");
+    }
+
+    static native boolean isNull(int[] a);
+
+    public static void main(String[] args) {
+        int[] arr = new int[2];
+
+        if (isNull(arr)) {
+            throw new RuntimeException("Should not be null");
+        }
+
+        if (!isNull(null)) {
+            throw new RuntimeException("Should be null");
+        }
+    }
+}
--- a/test/hotspot/jtreg/gc/TestFullGCCount.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/TestFullGCCount.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
  * @test TestFullGCCount.java
  * @bug 7072527
  * @summary CMS: JMM GC counters overcount in some cases
- * @requires !(vm.gc.ConcMarkSweep & vm.opt.ExplicitGCInvokesConcurrent == true)
+ * @requires !(vm.gc == "ConcMarkSweep" & vm.opt.ExplicitGCInvokesConcurrent == true)
+ * @comment Shenandoah has "ExplicitGCInvokesConcurrent" on by default
+ * @requires !(vm.gc == "Shenandoah"    & vm.opt.ExplicitGCInvokesConcurrent != false)
  * @modules java.management
  * @run main/othervm -Xlog:gc TestFullGCCount
  */
--- a/test/hotspot/jtreg/gc/TestHumongousReferenceObject.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/TestHumongousReferenceObject.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,16 @@
  * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx128m -XX:+UseG1GC -XX:G1HeapRegionSize=4M -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject
  * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx128m -XX:+UseG1GC -XX:G1HeapRegionSize=8M -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject
  */
+
+/*
+ * @test TestHumongousReferenceObjectShenandoah
+ * @summary Verify that iteration over large, plain Java objects that potentially cross region boundaries and contain references works.
+ * @requires vm.gc.Shenandoah
+ * @bug 8151499 8153734
+ * @modules java.base/jdk.internal.vm.annotation
+ * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahHeapRegionSize=8M -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject
+ * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahHeapRegionSize=8M -XX:ContendedPaddingWidth=8192 -XX:+UnlockDiagnosticVMOptions -XX:+ShenandoahVerify TestHumongousReferenceObject
+ */
 public class TestHumongousReferenceObject {
 
     /*
--- a/test/hotspot/jtreg/gc/TestSystemGC.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/TestSystemGC.java	Mon Dec 10 15:47:44 2018 +0100
@@ -45,6 +45,14 @@
  * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
  */
 
+/*
+ * @test TestSystemGCShenandoah
+ * @key gc
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @summary Runs System.gc() with different flags.
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestSystemGC
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
+ */
 public class TestSystemGC {
   public static void main(String args[]) throws Exception {
     System.gc();
--- a/test/hotspot/jtreg/gc/arguments/TestAlignmentToUseLargePages.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/arguments/TestAlignmentToUseLargePages.java	Mon Dec 10 15:47:44 2018 +0100
@@ -48,6 +48,16 @@
  * @run main/othervm -Xms71M -Xmx91M -XX:+UseConcMarkSweepGC -XX:-UseLargePages TestAlignmentToUseLargePages
  */
 
+/**
+ * @test TestAlignmentToUseLargePagesShenandoah
+ * @key gc
+ * @bug 8024396
+ * @comment Graal does not support Shenandoah
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @run main/othervm -Xms71M -Xmx91M -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms71M -Xmx91M -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ */
+
 public class TestAlignmentToUseLargePages {
   public static void main(String args[]) throws Exception {
     // nothing to do
--- a/test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
                                                                   "-XX:-UseG1GC",
                                                                   "-XX:-UseConcMarkSweepGC",
                                                                   "-XX:+UnlockExperimentalVMOptions",
+                                                                  "-XX:-UseShenandoahGC",
                                                                   "-XX:-UseZGC",
                                                                   "-version");
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
--- a/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
  * @test TestMaxMinHeapFreeRatioFlags
  * @key gc
  * @summary Verify that heap size changes according to max and min heap free ratios.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8025166
  * @summary Verify that heap is divided among generations according to NewRatio
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8025166
  * @summary Verify that young gen size conforms values specified by NewSize, MaxNewSize and Xmn options
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestShrinkHeapInSteps.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/arguments/TestShrinkHeapInSteps.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
  * @test TestShrinkHeapInSteps
  * @key gc
  * @summary Verify that -XX:-ShrinkHeapInSteps works properly.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
  * @test TestSurvivorRatioFlag
  * @key gc
  * @summary Verify that actual survivor ratio is equal to specified SurvivorRatio value
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java	Mon Dec 10 15:47:44 2018 +0100
@@ -27,7 +27,7 @@
  * @summary Verify that option TargetSurvivorRatio affects survivor space occupancy after minor GC.
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
  * @requires vm.opt.UseJVMCICompiler != true
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestUseCompressedOopsErgo.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/arguments/TestUseCompressedOopsErgo.java	Mon Dec 10 15:47:44 2018 +0100
@@ -54,6 +54,21 @@
  * @run main/othervm TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC
  */
 
+/*
+ * @test TestUseCompressedOopsErgoShenandoah
+ * @key gc
+ * @bug 8010722
+ * @comment Graal does not support Shenandoah
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management/sun.management
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
+ */
+
 public class TestUseCompressedOopsErgo {
 
   public static void main(String args[]) throws Exception {
--- a/test/hotspot/jtreg/gc/arguments/TestVerifyBeforeAndAfterGCFlags.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/arguments/TestVerifyBeforeAndAfterGCFlags.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
  * @summary Runs a simple application (GarbageProducer) with various
          combinations of -XX:{+|-}Verify{After|Before}GC flags and checks that
          output contains or doesn't contain expected patterns
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @modules java.base/jdk.internal.misc
  * @modules java.management
  * @library /test/lib
--- a/test/hotspot/jtreg/gc/class_unloading/TestClassUnloadingDisabled.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/class_unloading/TestClassUnloadingDisabled.java	Mon Dec 10 15:47:44 2018 +0100
@@ -64,6 +64,24 @@
  *                   -XX:-ClassUnloading -XX:+UseConcMarkSweepGC TestClassUnloadingDisabled
  */
 
+/*
+ * @test TestClassUnloadingDisabledShenandoah
+ * @key gc
+ * @bug 8114823
+ * @comment Graal does not support Shenandoah
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @requires vm.opt.ExplicitGCInvokesConcurrent != true
+ * @requires vm.opt.ClassUnloading != true
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *                   -XX:-ClassUnloading -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestClassUnloadingDisabled
+ */
+
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
--- a/test/hotspot/jtreg/gc/epsilon/CriticalNativeArgs.java	Mon Dec 10 17:34:49 2018 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-
-/*
- * @test CriticalNativeStress
- * @key gc
- * @bug 8199868
- * @requires (os.arch =="x86_64" | os.arch == "amd64") & (vm.bits == "64") & vm.gc.Epsilon & !vm.graal.enabled
- * @summary test argument unpacking nmethod wrapper of critical native method
- * @run main/othervm/native -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xcomp -Xmx256M -XX:+CriticalJNINatives CriticalNativeArgs
- */
-public class CriticalNativeArgs {
-  static {
-    System.loadLibrary("CriticalNative");
-  }
-
-  static native boolean isNull(int[] a);
-
-  public static void main(String[] args) {
-    int[] arr = new int[2];
-
-    if (isNull(arr)) {
-      throw new RuntimeException("Should not be null");
-    }
-
-    if (!isNull(null)) {
-      throw new RuntimeException("Should be null");
-    }
-  }
-}
-
--- a/test/hotspot/jtreg/gc/epsilon/CriticalNativeStress.java	Mon Dec 10 17:34:49 2018 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,197 +0,0 @@
-/*
- * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-import java.util.Random;
-
-/*
- * @test CriticalNativeStress
- * @key gc
- * @bug 8199868
- * @requires (os.arch =="x86_64" | os.arch == "amd64") & (vm.bits == "64") & vm.gc.Epsilon & !vm.graal.enabled
- * @summary test argument pinning by nmethod wrapper of critical native method
- * @run main/othervm/native -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xcomp -Xmx1G -XX:+CriticalJNINatives CriticalNativeStress
- */
-public class CriticalNativeStress {
-  private static Random rand = new Random();
-  static {
-    System.loadLibrary("CriticalNative");
-  }
-
-  // CYCLES and THREAD_PER_CASE are used to tune the tests for different GC settings,
-  // so that they can execrise enough GC cycles and not OOM
-  private static int CYCLES = Integer.getInteger("cycles", 3);
-  private static int THREAD_PER_CASE = Integer.getInteger("threadPerCase", 1);
-
-  static native long sum1(long[] a);
-
-  // More than 6 parameters
-  static native long sum2(long a1, int[] a2, int[] a3, long[] a4, int[] a5);
-
-  static long sum(long[] a) {
-    long sum = 0;
-    for (int index = 0; index < a.length; index ++) {
-      sum += a[index];
-    }
-    return sum;
-  }
-
-  static long sum(int[] a) {
-    long sum = 0;
-    for (int index = 0; index < a.length; index ++) {
-      sum += a[index];
-    }
-    return sum;
-  }
-
-  private static volatile String garbage_array[];
-
-  // GC potentially moves arrays passed to critical native methods
-  // if they are not pinned correctly.
-  // Create enough garbages to exercise GC cycles, verify
-  // the arrays are pinned correctly.
-  static void create_garbage(int len) {
-    len = Math.max(len, 1024);
-    String array[] = new String[len];
-    for (int index = 0; index < len; index ++) {
-      array[index] = "String " + index;
-    }
-    garbage_array = array;
-  }
-
-  // Two test cases with different method signatures:
-  // Tests generate arbitrary length of arrays with
-  // arbitrary values, then calcuate sum of the array
-  // elements with critical native JNI methods and java
-  // methods, and compare the results for correctness.
-  static void run_test_case1() {
-    // Create testing arary with arbitrary length and
-    // values
-    int length = rand.nextInt(50) + 1;
-    long[] arr = new long[length];
-    for (int index = 0; index < length; index ++) {
-      arr[index] = rand.nextLong() % 1002;
-    }
-
-    // Generate garbages to trigger GCs
-    for (int index = 0; index < length; index ++) {
-      create_garbage(index);
-    }
-
-    // Compare results for correctness.
-    long native_sum = sum1(arr);
-    long java_sum = sum(arr);
-    if (native_sum != java_sum) {
-      StringBuffer sb = new StringBuffer("Sums do not match: native = ")
-        .append(native_sum).append(" java = ").append(java_sum);
-
-      throw new RuntimeException(sb.toString());
-    }
-  }
-
-  static void run_test_case2() {
-    // Create testing arary with arbitrary length and
-    // values
-    int index;
-    long a1 = rand.nextLong() % 1025;
-
-    int a2_length = rand.nextInt(50) + 1;
-    int[] a2 = new int[a2_length];
-    for (index = 0; index < a2_length; index ++) {
-      a2[index] = rand.nextInt(106);
-    }
-
-    int a3_length = rand.nextInt(150) + 1;
-    int[] a3 = new int[a3_length];
-    for (index = 0; index < a3_length; index ++) {
-      a3[index] = rand.nextInt(3333);
-    }
-
-    int a4_length = rand.nextInt(200) + 1;
-    long[] a4 = new long[a4_length];
-    for (index = 0; index < a4_length; index ++) {
-      a4[index] = rand.nextLong() % 122;
-    }
-
-    int a5_length = rand.nextInt(350) + 1;
-    int[] a5 = new int[a5_length];
-    for (index = 0; index < a5_length; index ++) {
-      a5[index] = rand.nextInt(333);
-    }
-
-    // Generate garbages to trigger GCs
-    for (index = 0; index < a1; index ++) {
-      create_garbage(index);
-    }
-
-    // Compare results for correctness.
-    long native_sum = sum2(a1, a2, a3, a4, a5);
-    long java_sum = a1 + sum(a2) + sum(a3) + sum(a4) + sum(a5);
-    if (native_sum != java_sum) {
-      StringBuffer sb = new StringBuffer("Sums do not match: native = ")
-        .append(native_sum).append(" java = ").append(java_sum);
-
-      throw new RuntimeException(sb.toString());
-    }
-  }
-
-  static class Case1Runner extends Thread {
-    public Case1Runner() {
-      start();
-    }
-
-    public void run() {
-      for (int index = 0; index < CYCLES; index ++) {
-        run_test_case1();
-      }
-    }
-  }
-
-  static class Case2Runner extends Thread {
-    public Case2Runner() {
-      start();
-    }
-
-    public void run() {
-      for (int index = 0; index < CYCLES; index ++) {
-        run_test_case2();
-      }
-    }
-  }
-
-  public static void main(String[] args) {
-    Thread[] thrs = new Thread[THREAD_PER_CASE * 2];
-    for (int index = 0; index < thrs.length; index = index + 2) {
-      thrs[index] = new Case1Runner();
-      thrs[index + 1] = new Case2Runner();
-    }
-
-    for (int index = 0; index < thrs.length; index ++) {
-      try {
-        thrs[index].join();
-      } catch (Exception e) {
-        e.printStackTrace();
-      }
-    }
-  }
-}
-
--- a/test/hotspot/jtreg/gc/epsilon/libCriticalNative.c	Mon Dec 10 17:34:49 2018 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "jni.h"
-
-JNIEXPORT jlong JNICALL JavaCritical_CriticalNativeStress_sum1
-  (jint length, jlong* a) {
-  jlong sum = 0;
-  jint index;
-  for (index = 0; index < length; index ++) {
-    sum += a[index];
-  }
-
-  return sum;
-}
-
-JNIEXPORT jlong JNICALL  JavaCritical_CriticalNativeStress_sum2
-  (jlong a1, jint a2_length, jint* a2, jint a4_length, jint* a4, jint a6_length, jlong* a6, jint a8_length, jint* a8) {
-  jlong sum = a1;
-  jint index;
-  for (index = 0; index < a2_length; index ++) {
-    sum += a2[index];
-  }
-
-  for (index = 0; index < a4_length; index ++) {
-    sum += a4[index];
-  }
-
-  for (index = 0; index < a6_length; index ++) {
-    sum += a6[index];
-  }
-
-  for (index = 0; index < a8_length; index ++) {
-    sum += a8[index];
-  }
-  return sum;
-}
-
-JNIEXPORT jlong JNICALL Java_CriticalNativeStress_sum1
-  (JNIEnv *env, jclass jclazz, jlongArray a) {
-  jlong sum = 0;
-  jsize len = (*env)->GetArrayLength(env, a);
-  jsize index;
-  jlong* arr = (jlong*)(*env)->GetPrimitiveArrayCritical(env, a, 0);
-  for (index = 0; index < len; index ++) {
-    sum += arr[index];
-  }
-
-  (*env)->ReleasePrimitiveArrayCritical(env, a, arr, 0);
-  return sum;
-}
-
-JNIEXPORT jlong JNICALL Java_CriticalNativeStress_sum2
-  (JNIEnv *env, jclass jclazz, jlong a1, jintArray a2, jintArray a3, jlongArray a4, jintArray a5) {
-  jlong sum = a1;
-  jsize index;
-  jsize len;
-  jint* a2_arr;
-  jint* a3_arr;
-  jlong* a4_arr;
-  jint* a5_arr;
-
-  len = (*env)->GetArrayLength(env, a2);
-  a2_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a2, 0);
-  for (index = 0; index < len; index ++) {
-    sum += a2_arr[index];
-  }
-  (*env)->ReleasePrimitiveArrayCritical(env, a2, a2_arr, 0);
-
-  len = (*env)->GetArrayLength(env, a3);
-  a3_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a3, 0);
-  for (index = 0; index < len; index ++) {
-    sum += a3_arr[index];
-  }
-  (*env)->ReleasePrimitiveArrayCritical(env, a3, a3_arr, 0);
-
-  len = (*env)->GetArrayLength(env, a4);
-  a4_arr = (jlong*)(*env)->GetPrimitiveArrayCritical(env, a4, 0);
-  for (index = 0; index < len; index ++) {
-    sum += a4_arr[index];
-  }
-  (*env)->ReleasePrimitiveArrayCritical(env, a4, a4_arr, 0);
-
-  len = (*env)->GetArrayLength(env, a5);
-  a5_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a5, 0);
-  for (index = 0; index < len; index ++) {
-    sum += a5_arr[index];
-  }
-  (*env)->ReleasePrimitiveArrayCritical(env, a5, a5_arr, 0);
-
-  return sum;
-}
-
-
-JNIEXPORT jboolean JNICALL JavaCritical_CriticalNativeArgs_isNull
-  (jint length, jint* a) {
-  return (a == NULL) && (length == 0);
-}
-
-JNIEXPORT jboolean JNICALL Java_CriticalNativeArgs_isNull
-  (JNIEnv *env, jclass jclazz, jintArray a) {
-  jboolean is_null;
-  jsize len = (*env)->GetArrayLength(env, a);
-  jint* arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a, 0);
-  is_null = (arr == NULL) && (len == 0);
-  (*env)->ReleasePrimitiveArrayCritical(env, a, arr, 0);
-  return is_null;
-}
-
--- a/test/hotspot/jtreg/gc/ergonomics/TestDynamicNumberOfGCThreads.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/ergonomics/TestDynamicNumberOfGCThreads.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,10 +29,14 @@
  * @key gc
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestDynamicNumberOfGCThreads
  */
 
 import jdk.test.lib.process.OutputAnalyzer;
 import jdk.test.lib.process.ProcessTools;
+import sun.hotspot.gc.GC;
 
 public class TestDynamicNumberOfGCThreads {
   public static void main(String[] args) throws Exception {
@@ -42,6 +46,10 @@
     testDynamicNumberOfGCThreads("UseG1GC");
 
     testDynamicNumberOfGCThreads("UseParallelGC");
+
+    if (GC.Shenandoah.isSupported()) {
+        testDynamicNumberOfGCThreads("UseShenandoahGC");
+    }
   }
 
   private static void verifyDynamicNumberOfGCThreads(OutputAnalyzer output) {
@@ -51,7 +59,7 @@
 
   private static void testDynamicNumberOfGCThreads(String gcFlag) throws Exception {
     // UseDynamicNumberOfGCThreads and TraceDynamicGCThreads enabled
-    String[] baseArgs = {"-XX:+" + gcFlag, "-Xmx10M", "-XX:+UseDynamicNumberOfGCThreads", "-Xlog:gc+task=trace", GCTest.class.getName()};
+    String[] baseArgs = {"-XX:+UnlockExperimentalVMOptions", "-XX:+" + gcFlag, "-Xmx10M", "-XX:+UseDynamicNumberOfGCThreads", "-Xlog:gc+task=trace", GCTest.class.getName()};
 
     // Base test with gc and +UseDynamicNumberOfGCThreads:
     ProcessBuilder pb_enabled = ProcessTools.createJavaProcessBuilder(baseArgs);
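
This test (and TestInitialGCThreadLogging below) now guards the Shenandoah case behind the WhiteBox-backed sun.hotspot.gc.GC enum, which is why the @run lines gain -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI. A minimal sketch of the same capability check (the wrapper class is illustrative and needs that same WhiteBox setup):

    import sun.hotspot.gc.GC;

    public class ShenandoahSupportCheck {
        public static void main(String[] args) {
            // isSupported() reports whether this JVM build includes
            // Shenandoah; actually selecting it at runtime still requires
            // -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC.
            System.out.println("Shenandoah supported: " + GC.Shenandoah.isSupported());
        }
    }
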
--- a/test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,10 +29,14 @@
  * @key gc
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestInitialGCThreadLogging
  */
 
 import jdk.test.lib.process.ProcessTools;
 import jdk.test.lib.process.OutputAnalyzer;
+import sun.hotspot.gc.GC;
 
 public class TestInitialGCThreadLogging {
   public static void main(String[] args) throws Exception {
@@ -42,6 +46,10 @@
     testInitialGCThreadLogging("UseG1GC", "GC Thread");
 
     testInitialGCThreadLogging("UseParallelGC", "ParGC Thread");
+
+    if (GC.Shenandoah.isSupported()) {
+        testInitialGCThreadLogging("UseShenandoahGC", "Shenandoah GC Thread");
+    }
   }
 
   private static void verifyDynamicNumberOfGCThreads(OutputAnalyzer output, String threadName) {
@@ -51,7 +59,7 @@
 
   private static void testInitialGCThreadLogging(String gcFlag, String threadName) throws Exception {
     // UseDynamicNumberOfGCThreads and TraceDynamicGCThreads enabled
-    String[] baseArgs = {"-XX:+" + gcFlag, "-Xmx10M", "-XX:+UseDynamicNumberOfGCThreads", "-Xlog:gc+task=trace", "-version"};
+    String[] baseArgs = {"-XX:+UnlockExperimentalVMOptions", "-XX:+" + gcFlag, "-Xmx10M", "-XX:+UseDynamicNumberOfGCThreads", "-Xlog:gc+task=trace", "-version"};
 
     // Base test with gc and +UseDynamicNumberOfGCThreads:
     ProcessBuilder pb_enabled = ProcessTools.createJavaProcessBuilder(baseArgs);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/libCriticalNative.c	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "jni.h"
+
+JNIEXPORT jlong JNICALL JavaCritical_CriticalNativeStress_sum1
+  (jint length, jlong* a) {
+  jlong sum = 0;
+  jint index;
+  for (index = 0; index < length; index ++) {
+    sum += a[index];
+  }
+
+  return sum;
+}
+
+JNIEXPORT jlong JNICALL JavaCritical_CriticalNativeStress_sum2
+  (jlong a1, jint a2_length, jint* a2, jint a4_length, jint* a4, jint a6_length, jlong* a6, jint a8_length, jint* a8) {
+  jlong sum = a1;
+  jint index;
+  for (index = 0; index < a2_length; index ++) {
+    sum += a2[index];
+  }
+
+  for (index = 0; index < a4_length; index ++) {
+    sum += a4[index];
+  }
+
+  for (index = 0; index < a6_length; index ++) {
+    sum += a6[index];
+  }
+
+  for (index = 0; index < a8_length; index ++) {
+    sum += a8[index];
+  }
+  return sum;
+}
+
+JNIEXPORT jlong JNICALL Java_CriticalNativeStress_sum1
+  (JNIEnv *env, jclass jclazz, jlongArray a) {
+  jlong sum = 0;
+  jsize len = (*env)->GetArrayLength(env, a);
+  jsize index;
+  jlong* arr = (jlong*)(*env)->GetPrimitiveArrayCritical(env, a, 0);
+  for (index = 0; index < len; index ++) {
+    sum += arr[index];
+  }
+
+  (*env)->ReleasePrimitiveArrayCritical(env, a, arr, 0);
+  return sum;
+}
+
+JNIEXPORT jlong JNICALL Java_CriticalNativeStress_sum2
+  (JNIEnv *env, jclass jclazz, jlong a1, jintArray a2, jintArray a3, jlongArray a4, jintArray a5) {
+  jlong sum = a1;
+  jsize index;
+  jsize len;
+  jint* a2_arr;
+  jint* a3_arr;
+  jlong* a4_arr;
+  jint* a5_arr;
+
+  len = (*env)->GetArrayLength(env, a2);
+  a2_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a2, 0);
+  for (index = 0; index < len; index ++) {
+    sum += a2_arr[index];
+  }
+  (*env)->ReleasePrimitiveArrayCritical(env, a2, a2_arr, 0);
+
+  len = (*env)->GetArrayLength(env, a3);
+  a3_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a3, 0);
+  for (index = 0; index < len; index ++) {
+    sum += a3_arr[index];
+  }
+  (*env)->ReleasePrimitiveArrayCritical(env, a3, a3_arr, 0);
+
+  len = (*env)->GetArrayLength(env, a4);
+  a4_arr = (jlong*)(*env)->GetPrimitiveArrayCritical(env, a4, 0);
+  for (index = 0; index < len; index ++) {
+    sum += a4_arr[index];
+  }
+  (*env)->ReleasePrimitiveArrayCritical(env, a4, a4_arr, 0);
+
+  len = (*env)->GetArrayLength(env, a5);
+  a5_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a5, 0);
+  for (index = 0; index < len; index ++) {
+    sum += a5_arr[index];
+  }
+  (*env)->ReleasePrimitiveArrayCritical(env, a5, a5_arr, 0);
+
+  return sum;
+}
+
+
+JNIEXPORT jboolean JNICALL JavaCritical_CriticalNativeArgs_isNull
+  (jint length, jint* a) {
+  return (a == NULL) && (length == 0);
+}
+
+JNIEXPORT jboolean JNICALL Java_CriticalNativeArgs_isNull
+  (JNIEnv *env, jclass jclazz, jintArray a) {
+  jboolean is_null;
+  jsize len = (*env)->GetArrayLength(env, a);
+  jint* arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a, 0);
+  is_null = (arr == NULL) && (len == 0);
+  (*env)->ReleasePrimitiveArrayCritical(env, a, arr, 0);
+  return is_null;
+}
+
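Note: the JavaCritical_ entry points above take raw array lengths and pointers instead of a JNIEnv and array handles; HotSpot of this era substitutes them for the regular Java_ entry points when critical natives are selected (via the experimental -XX:+CriticalJNINatives flag). Below is a minimal sketch of the Java side these symbols bind to, with names inferred from the JNI symbols and the library name assumed; the actual test classes are added elsewhere in this changeset:

    public class CriticalNativeArgs {
        static {
            System.loadLibrary("CriticalNative"); // assumed library name for libCriticalNative.c
        }

        // Resolves to Java_CriticalNativeArgs_isNull, or to the JavaCritical_
        // variant when the VM selects the critical calling convention.
        static native boolean isNull(int[] array);

        public static void main(String[] args) {
            if (isNull(new int[1])) {
                throw new RuntimeException("non-null array misreported as null");
            }
        }
    }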
--- a/test/hotspot/jtreg/gc/logging/TestGCId.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/logging/TestGCId.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,10 +30,14 @@
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestGCId
  */
 
 import jdk.test.lib.process.OutputAnalyzer;
 import jdk.test.lib.process.ProcessTools;
+import sun.hotspot.gc.GC;
 
 public class TestGCId {
   public static void main(String[] args) throws Exception {
@@ -41,6 +45,9 @@
     testGCId("UseG1GC");
     testGCId("UseConcMarkSweepGC");
     testGCId("UseSerialGC");
+    if (GC.Shenandoah.isSupported()) {
+        testGCId("UseShenandoahGC");
+    }
   }
 
   private static void verifyContainsGCIDs(OutputAnalyzer output) {
@@ -51,7 +58,7 @@
 
   private static void testGCId(String gcFlag) throws Exception {
     ProcessBuilder pb_default =
-      ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-Xlog:gc", "-Xmx10M", GCTest.class.getName());
+      ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", "-XX:+" + gcFlag, "-Xlog:gc", "-Xmx10M", GCTest.class.getName());
     verifyContainsGCIDs(new OutputAnalyzer(pb_default.start()));
   }
 
--- a/test/hotspot/jtreg/gc/metaspace/TestMetaspacePerfCounters.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/metaspace/TestMetaspacePerfCounters.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,20 @@
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  */
+
+/* @test TestMetaspacePerfCountersShenandoah
+ * @bug 8014659
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib /
+ * @summary Tests that performance counters for metaspace and compressed class
+ *          space exist and work.
+ * @modules java.base/jdk.internal.misc
+ *          java.compiler
+ *          java.management/sun.management
+ *          jdk.internal.jvmstat/sun.jvmstat.monitor
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestMetaspacePerfCounters
+ */
 public class TestMetaspacePerfCounters {
     public static Class fooClass = null;
     private static final String[] counterNames = {"minCapacity", "maxCapacity", "capacity", "used"};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestAllocHumongousFragment.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestAllocHumongousFragment
+ * @summary Make sure Shenandoah can recover from humongous allocation fragmentation
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=passive    -XX:-ShenandoahDegeneratedGC     -XX:+ShenandoahVerify TestAllocHumongousFragment
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=passive    -XX:+ShenandoahDegeneratedGC     -XX:+ShenandoahVerify TestAllocHumongousFragment
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=passive    -XX:-ShenandoahDegeneratedGC                           TestAllocHumongousFragment
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=passive    -XX:+ShenandoahDegeneratedGC                           TestAllocHumongousFragment
+ *
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=aggressive -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify TestAllocHumongousFragment
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=aggressive -XX:+ShenandoahAllocFailureALot  -XX:+ShenandoahVerify TestAllocHumongousFragment
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=aggressive -XX:+ShenandoahOOMDuringEvacALot                       TestAllocHumongousFragment
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=aggressive -XX:+ShenandoahAllocFailureALot                        TestAllocHumongousFragment
+ *
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=adaptive     -XX:+ShenandoahVerify TestAllocHumongousFragment
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=traversal    -XX:+ShenandoahVerify TestAllocHumongousFragment
+ *
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=adaptive     TestAllocHumongousFragment
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=static       TestAllocHumongousFragment
+ * @run main/othervm -Xlog:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 -XX:ShenandoahGCHeuristics=traversal    TestAllocHumongousFragment
+ */
+
+import java.util.*;
+import java.util.concurrent.*;
+
+public class TestAllocHumongousFragment {
+
+    static final long TARGET_MB = Long.getLong("target", 30_000); // 30 GB allocations
+    static final long LIVE_MB   = Long.getLong("occupancy", 700); // 700 MB alive
+
+    static volatile Object sink;
+
+    static List<int[]> objects;
+
+    public static void main(String[] args) throws Exception {
+        final int min = 128 * 1024;
+        final int max = 16 * 1024 * 1024;
+        final long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2));
+
+        objects = new ArrayList<>();
+        long current = 0;
+
+        Random r = new Random();
+        for (long c = 0; c < count; c++) {
+            while (current > LIVE_MB * 1024 * 1024) {
+                int idx = ThreadLocalRandom.current().nextInt(objects.size());
+                int[] remove = objects.remove(idx);
+                current -= remove.length * 4 + 16;
+            }
+
+            int[] newObj = new int[min + r.nextInt(max - min)];
+            current += newObj.length * 4 + 16;
+            objects.add(newObj);
+            sink = new Object();
+
+            System.out.println("Allocated: " + (current / 1024 / 1024) + " Mb");
+        }
+    }
+
+}
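The count formula in main() sizes the workload from the average array footprint, assuming a 16-byte array header plus 4 bytes per int element (the same constants the expression itself uses). A rough standalone check of what the defaults work out to:

    public class HumongousSizingNote {
        public static void main(String[] args) {
            long min = 128 * 1024;                    // smallest array, in ints
            long max = 16 * 1024 * 1024;              // largest array, in ints
            long avgLen  = min + (max - min) / 2;     // average length, ~8.45M ints
            long avgSize = 16 + 4 * avgLen;           // average footprint, ~32 MB
            long count   = 30_000L * 1024 * 1024 / avgSize;
            System.out.println(count);                // ~930 humongous arrays
        }
    }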
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestAllocIntArrays
+ * @summary Acceptance tests: collector can withstand allocation
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC     -XX:+ShenandoahVerify TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC     -XX:+ShenandoahVerify TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC                           TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC                           TestAllocIntArrays
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahAllocFailureALot  -XX:+ShenandoahVerify TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahOOMDuringEvacALot                       TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahAllocFailureALot                        TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive                                                          TestAllocIntArrays
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive     -XX:+ShenandoahVerify TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal    -XX:+ShenandoahVerify TestAllocIntArrays
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive                           TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal                          TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=static                             TestAllocIntArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=compact                            TestAllocIntArrays
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:-UseTLAB                            -XX:+ShenandoahVerify TestAllocIntArrays
+ */
+
+import java.util.Random;
+
+public class TestAllocIntArrays {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // 10 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        final int min = 0;
+        final int max = 384 * 1024;
+        long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2));
+
+        Random r = new Random();
+        for (long c = 0; c < count; c++) {
+            sink = new int[min + r.nextInt(max - min)];
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestAllocObjectArrays.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestAllocObjectArrays
+ * @summary Acceptance tests: collector can withstand allocation
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC     -XX:+ShenandoahVerify TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC     -XX:+ShenandoahVerify TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC                           TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC                           TestAllocObjectArrays
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahAllocFailureALot  -XX:+ShenandoahVerify TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahOOMDuringEvacALot                       TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahAllocFailureALot                        TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive                                                          TestAllocObjectArrays
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive     -XX:+ShenandoahVerify TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal    -XX:+ShenandoahVerify TestAllocObjectArrays
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive     TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=static       TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=compact      TestAllocObjectArrays
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal    TestAllocObjectArrays
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:-UseTLAB                            -XX:+ShenandoahVerify TestAllocObjectArrays
+ */
+
+import java.util.Random;
+
+public class TestAllocObjectArrays {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // 10 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        final int min = 0;
+        final int max = 384 * 1024;
+        long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2));
+
+        Random r = new Random();
+        for (long c = 0; c < count; c++) {
+            sink = new Object[min + r.nextInt(max - min)];
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestAllocObjects.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestAllocObjects
+ * @summary Acceptance tests: collector can withstand allocation
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC     -XX:+ShenandoahVerify TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC     -XX:+ShenandoahVerify TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC                           TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC                           TestAllocObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahAllocFailureALot  -XX:+ShenandoahVerify TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahOOMDuringEvacALot                       TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahAllocFailureALot                        TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive                                                          TestAllocObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive     -XX:+ShenandoahVerify TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal    -XX:+ShenandoahVerify TestAllocObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive     TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=static       TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=compact      TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal    TestAllocObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive   -XX:+ShenandoahSuspendibleWorkers TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=static     -XX:+ShenandoahSuspendibleWorkers TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=compact    -XX:+ShenandoahSuspendibleWorkers TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal  -XX:+ShenandoahSuspendibleWorkers TestAllocObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive -XX:+ShenandoahSuspendibleWorkers TestAllocObjects
+ */
+
+import java.util.Random;
+
+public class TestAllocObjects {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // 10 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        long count = TARGET_MB * 1024 * 1024 / 16;
+        for (long c = 0; c < count; c++) {
+            sink = new Object();
+        }
+    }
+
+}
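The divisor of 16 in main() corresponds to the footprint of a bare java.lang.Object on 64-bit HotSpot (a 12-byte header rounded up to 8-byte alignment when compressed class pointers are enabled), so the default target translates to roughly 655 million allocations:

    public class ObjectSizingNote {
        public static void main(String[] args) {
            long targetMb = 10_000;                   // matches the test default
            long count = targetMb * 1024 * 1024 / 16; // objects needed to hit the target
            System.out.println(count);                // 655360000
        }
    }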
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyCheckCast.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestArrayCopyCheckCast
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:TieredStopAtLevel=0 -Xmx16m TestArrayCopyCheckCast
+ */
+public class TestArrayCopyCheckCast {
+
+    static class Foo {}
+    static class Bar {}
+
+    public static void main(String[] args) throws Exception {
+        try {
+            Object[] array1 = new Object[1];
+            array1[0] = new Bar();
+            Foo[] array2 = new Foo[1];
+            System.arraycopy(array1, 0, array2, 0, 1);
+            throw new RuntimeException();
+        } catch (ArrayStoreException ex) {
+            // expected
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestArrayCopyStress.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.util.concurrent.*;
+
+/*
+ * @test TestArrayCopyStress
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:TieredStopAtLevel=0 -Xmx16m TestArrayCopyStress
+ */
+public class TestArrayCopyStress {
+
+    private static final int ARRAY_SIZE = 1000;
+    private static final int ITERATIONS = 10000;
+
+    static class Foo {
+        int num;
+
+        Foo(int num) {
+            this.num = num;
+        }
+    }
+
+    static class Bar {}
+
+    public static void main(String[] args) throws Exception {
+        for (int i = 0; i < ITERATIONS; i++) {
+            testConjoint();
+        }
+    }
+
+    private static void testConjoint() {
+        Foo[] array = new Foo[ARRAY_SIZE];
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            array[i] = new Foo(i);
+        }
+
+        int src_idx = ThreadLocalRandom.current().nextInt(0, ARRAY_SIZE);
+        int dst_idx = ThreadLocalRandom.current().nextInt(0, ARRAY_SIZE);
+        int len = ThreadLocalRandom.current().nextInt(0, Math.min(ARRAY_SIZE - src_idx, ARRAY_SIZE - dst_idx));
+        System.arraycopy(array, src_idx, array, dst_idx, len);
+
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            if (i >= dst_idx && i < dst_idx + len) {
+                assertEquals(array[i].num, i - (dst_idx - src_idx));
+            } else {
+                assertEquals(array[i].num, i);
+            }
+        }
+    }
+
+    private static void assertEquals(int a, int b) {
+        if (a != b) throw new RuntimeException("assert failed");
+    }
+
+}
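The verification loop above encodes the conjoint-copy invariant: after copying a window of len elements from src_idx to dst_idx within the same array, every slot in the destination window holds the value that originally sat (dst_idx - src_idx) positions earlier. A tiny standalone instance of the same check, with hypothetical indices:

    public class ArrayCopyInvariantNote {
        public static void main(String[] args) {
            int[] a = {0, 1, 2, 3, 4};
            int src = 1, dst = 3, len = 2;
            System.arraycopy(a, src, a, dst, len);    // a is now [0, 1, 2, 1, 2]
            for (int i = dst; i < dst + len; i++) {
                if (a[i] != i - (dst - src)) {
                    throw new RuntimeException("invariant broken at index " + i);
                }
            }
        }
    }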
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestElasticTLAB.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestElasticTLAB
+ * @summary Test that Shenandoah is able to work with elastic TLABs
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:-ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:-ShenandoahElasticTLAB                       TestElasticTLAB
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:+ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:+ShenandoahElasticTLAB                       TestElasticTLAB
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:-ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:-ShenandoahElasticTLAB                       TestElasticTLAB
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:+ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:+ShenandoahElasticTLAB                       TestElasticTLAB
+ */
+
+import java.util.Random;
+
+public class TestElasticTLAB {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // 10 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        final int min = 0;
+        final int max = 384 * 1024;
+        long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2));
+
+        Random r = new Random();
+        for (long c = 0; c < count; c++) {
+            sink = new int[min + r.nextInt(max - min)];
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestEvilSyncBug.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestEvilSyncBug
+ * @summary Tests for crash/assert when attaching init thread during shutdown
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run driver/timeout=480 TestEvilSyncBug
+ */
+
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.locks.*;
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestEvilSyncBug {
+
+    private static final int NUM_RUNS = 100;
+
+    static Thread[] hooks = new MyHook[10000];
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            test();
+        } else {
+            ExecutorService pool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
+
+            Future<?>[] fs = new Future<?>[NUM_RUNS];
+
+            for (int c = 0; c < NUM_RUNS; c++) {
+                Callable<Void> task = () -> {
+                    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xms128m",
+                            "-Xmx128m",
+                            "-XX:+UnlockExperimentalVMOptions",
+                            "-XX:+UnlockDiagnosticVMOptions",
+                            "-XX:+UseShenandoahGC",
+                            "-XX:ShenandoahGCHeuristics=aggressive",
+                            "-XX:+ShenandoahStoreCheck",
+                            "TestEvilSyncBug", "test");
+                    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+                    output.shouldHaveExitValue(0);
+                    return null;
+                };
+                fs[c] = pool.submit(task);
+            }
+
+            for (Future<?> f : fs) {
+                f.get();
+            }
+
+            pool.shutdown();
+            pool.awaitTermination(1, TimeUnit.HOURS);
+        }
+    }
+
+    private static void test() throws Exception {
+
+        for (int t = 0; t < hooks.length; t++) {
+            hooks[t] = new MyHook();
+        }
+
+        ExecutorService service = Executors.newFixedThreadPool(
+                2,
+                r -> {
+                    Thread t = new Thread(r);
+                    t.setDaemon(true);
+                    return t;
+                }
+        );
+
+        List<Future<?>> futures = new ArrayList<>();
+        for (int c = 0; c < 100; c++) {
+            Runtime.getRuntime().addShutdownHook(hooks[c]);
+            final Test[] tests = new Test[1000];
+            for (int t = 0; t < tests.length; t++) {
+                tests[t] = new Test();
+            }
+
+            Future<?> f1 = service.submit(() -> {
+                Runtime.getRuntime().addShutdownHook(new MyHook());
+                IntResult2 r = new IntResult2();
+                for (Test test : tests) {
+                    test.RL_Us(r);
+                }
+            });
+            Future<?> f2 = service.submit(() -> {
+                Runtime.getRuntime().addShutdownHook(new MyHook());
+                for (Test test : tests) {
+                    test.WLI_Us();
+                }
+            });
+
+            futures.add(f1);
+            futures.add(f2);
+        }
+
+        for (Future<?> f : futures) {
+            f.get();
+        }
+    }
+
+    public static class IntResult2 {
+        int r1, r2;
+    }
+
+    public static class Test {
+        final StampedLock lock = new StampedLock();
+
+        int x, y;
+
+        public void RL_Us(IntResult2 r) {
+            StampedLock lock = this.lock;
+            long stamp = lock.readLock();
+            r.r1 = x;
+            r.r2 = y;
+            lock.unlock(stamp);
+        }
+
+        public void WLI_Us() {
+            try {
+                StampedLock lock = this.lock;
+                long stamp = lock.writeLockInterruptibly();
+                x = 1;
+                y = 2;
+                lock.unlock(stamp);
+            } catch (InterruptedException e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    private static class MyHook extends Thread {
+        @Override
+        public void run() {
+            try {
+                Thread.sleep(10);
+            } catch (Exception e) {}
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestGCThreadGroups.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestGCThreadGroups
+ * @summary Test Shenandoah GC uses concurrent/parallel threads correctly
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 -Xmx16m                                         -Dtarget=1000 TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-UseDynamicNumberOfGCThreads            -Xmx16m                                         -Dtarget=1000 TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ForceDynamicNumberOfGCThreads -Xmx16m                   -Dtarget=1000 TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 -Xmx16m -XX:ShenandoahGCHeuristics=passive      -Dtarget=1000 TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 -Xmx16m -XX:ShenandoahGCHeuristics=adaptive     -Dtarget=1000 TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 -Xmx16m -XX:ShenandoahGCHeuristics=static       -Dtarget=1000 TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 -Xmx16m -XX:ShenandoahGCHeuristics=compact      -Dtarget=100  TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 -Xmx16m -XX:ShenandoahGCHeuristics=aggressive   -Dtarget=100  TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 -Xmx16m -XX:ShenandoahGCHeuristics=traversal    -Dtarget=1000 TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2 -Xmx16m -XX:ShenandoahGCHeuristics=passive      -Dtarget=1000 TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2 -Xmx16m -XX:ShenandoahGCHeuristics=adaptive     -Dtarget=1000 TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2 -Xmx16m -XX:ShenandoahGCHeuristics=static       -Dtarget=1000 TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2 -Xmx16m -XX:ShenandoahGCHeuristics=compact      -Dtarget=100  TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2 -Xmx16m -XX:ShenandoahGCHeuristics=aggressive   -Dtarget=100  TestGCThreadGroups
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ConcGCThreads=4 -XX:ParallelGCThreads=2 -Xmx16m -XX:ShenandoahGCHeuristics=traversal    -Dtarget=1000 TestGCThreadGroups
+ */
+
+public class TestGCThreadGroups {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // 10 GB allocation, around 1K cycles to handle
+    static final long STRIDE = 100_000;
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        long count = TARGET_MB * 1024 * 1024 / 16;
+        for (long c = 0; c < count; c += STRIDE) {
+            for (long s = 0; s < STRIDE; s++) {
+                sink = new Object();
+            }
+            Thread.sleep(1);
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestHeapUncommit.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestHeapUncommit
+ * @summary Acceptance tests: collector can withstand allocation with heap uncommit enabled
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=passive                 -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=passive                 -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=passive                 -XX:+ShenandoahDegeneratedGC                       TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=passive                 -XX:-ShenandoahDegeneratedGC                       TestHeapUncommit
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0                                                    -XX:+ShenandoahVerify TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:+UseLargePages                                 -XX:+ShenandoahVerify TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=adaptive                 -XX:+ShenandoahVerify TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=static                   -XX:+ShenandoahVerify TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=traversal                -XX:+ShenandoahVerify TestHeapUncommit
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:-UseTLAB                                                    -XX:+ShenandoahVerify TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:-UseTLAB                                 -XX:+UseLargePages -XX:+ShenandoahVerify TestHeapUncommit
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0                                                    TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0                            -XX:+UseLargePages      TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=adaptive                TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=static                  TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=compact                 TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=aggressive              TestHeapUncommit
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:ShenandoahGCHeuristics=traversal               TestHeapUncommit
+ */
+
+import java.util.Random;
+
+public class TestHeapUncommit {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // 10 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        final int min = 0;
+        final int max = 384 * 1024;
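+        // Rough sizing: an int[] costs about 16 bytes of header plus 4 bytes per
+        // element, and the average array length is (min + max) / 2, so count
+        // arrays add up to roughly TARGET_MB of allocation.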
+        long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2));
+
+        Random r = new Random();
+        for (long c = 0; c < count; c++) {
+            sink = new int[min + r.nextInt(max - min)];
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestHumongousThreshold
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g                                      -XX:+ShenandoahVerify TestHumongousThreshold
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:ShenandoahHumongousThreshold=50  -XX:+ShenandoahVerify TestHumongousThreshold
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:ShenandoahHumongousThreshold=90  -XX:+ShenandoahVerify TestHumongousThreshold
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:ShenandoahHumongousThreshold=99  -XX:+ShenandoahVerify TestHumongousThreshold
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:ShenandoahHumongousThreshold=100 -XX:+ShenandoahVerify TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:ShenandoahHumongousThreshold=90  -XX:ShenandoahGCHeuristics=aggressive TestHumongousThreshold
+ */
+
+import java.util.Random;
+
+public class TestHumongousThreshold {
+
+    static final long TARGET_MB = Long.getLong("target", 20_000); // 20 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        final int min = 0;
+        final int max = 384 * 1024;
+        long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2));
+
+        Random r = new Random();
+        for (long c = 0; c < count; c++) {
+            sink = new int[min + r.nextInt(max - min)];
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestLargeObjectAlignment.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestLargeObjectAlignment
+ * @summary Shenandoah crashes with -XX:ObjectAlignmentInBytes=16
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xint                   TestLargeObjectAlignment
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -XX:-TieredCompilation  TestLargeObjectAlignment
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -XX:TieredStopAtLevel=1 TestLargeObjectAlignment
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -XX:TieredStopAtLevel=4 TestLargeObjectAlignment
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+public class TestLargeObjectAlignment {
+
+    static final int SLABS_COUNT = Integer.getInteger("slabs", 10000);
+    static final int NODE_COUNT = Integer.getInteger("nodes", 10000);
+    static final long TIME_NS = 1000L * 1000L * Integer.getInteger("timeMs", 5000);
+
+    static Object[] objects;
+
+    public static void main(String[] args) throws Exception {
+        objects = new Object[SLABS_COUNT];
+
+        long start = System.nanoTime();
+        while (System.nanoTime() - start < TIME_NS) {
+            objects[ThreadLocalRandom.current().nextInt(SLABS_COUNT)] = createSome();
+        }
+    }
+
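+    // Churns many small allocations (boxed Integers plus the list's growing
+    // backing arrays) under the non-default 16-byte object alignment.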
+    public static Object createSome() {
+        List<Integer> result = new ArrayList<Integer>();
+        for (int c = 0; c < NODE_COUNT; c++) {
+            result.add(new Integer(c));
+        }
+        return result;
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestLotsOfCycles.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestLotsOfCycles
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm/timeout=480 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m -XX:ShenandoahGCHeuristics=passive      -Dtarget=10000 -XX:+ShenandoahDegeneratedGC     TestLotsOfCycles
+ * @run main/othervm/timeout=480 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m -XX:ShenandoahGCHeuristics=passive      -Dtarget=10000 -XX:-ShenandoahDegeneratedGC     TestLotsOfCycles
+ *
+ * @run main/othervm/timeout=480 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m -XX:ShenandoahGCHeuristics=aggressive   -Dtarget=1000  -XX:+ShenandoahOOMDuringEvacALot TestLotsOfCycles
+ * @run main/othervm/timeout=480 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m -XX:ShenandoahGCHeuristics=aggressive   -Dtarget=1000  -XX:+ShenandoahAllocFailureALot  TestLotsOfCycles
+ * @run main/othervm/timeout=480 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m -XX:ShenandoahGCHeuristics=aggressive   -Dtarget=1000                                   TestLotsOfCycles
+ *
+ * @run main/othervm/timeout=480 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m -XX:ShenandoahGCHeuristics=adaptive     -Dtarget=10000 TestLotsOfCycles
+ * @run main/othervm/timeout=480 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m -XX:ShenandoahGCHeuristics=traversal    -Dtarget=10000 TestLotsOfCycles
+ *
+ * @run main/othervm/timeout=480 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m -XX:ShenandoahGCHeuristics=static       -Dtarget=10000 TestLotsOfCycles
+ * @run main/othervm/timeout=480 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m -XX:ShenandoahGCHeuristics=compact      -Dtarget=1000  TestLotsOfCycles
+ */
+
+public class TestLotsOfCycles {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // 10 GB allocation, roughly 1K GC cycles to handle
+    static final long STRIDE = 100_000;
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        long count = TARGET_MB * 1024 * 1024 / 16;
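+        // Allocate in strides with a short sleep in between, so the tiny heap
+        // drives many back-to-back GC cycles rather than one allocation burst.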
+        for (long c = 0; c < count; c += STRIDE) {
+            for (long s = 0; s < STRIDE; s++) {
+                sink = new Object();
+            }
+            Thread.sleep(1);
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestParallelRefprocSanity.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestParallelRefprocSanity
+ * @summary Test that reference processing works with both parallel and non-parallel variants.
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g                              TestParallelRefprocSanity
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g  -XX:-ParallelRefProcEnabled TestParallelRefprocSanity
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g  -XX:+ParallelRefProcEnabled TestParallelRefprocSanity
+ */
+
+import java.lang.ref.*;
+
+public class TestParallelRefprocSanity {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // 10 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
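+        // Each iteration allocates a WeakReference plus its referent, together
+        // roughly 32 bytes, hence the division when sizing the loop.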
+        long count = TARGET_MB * 1024 * 1024 / 32;
+        for (long c = 0; c < count; c++) {
+            sink = new WeakReference<Object>(new Object());
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestPeriodicGC.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestPeriodicGC
+ * @summary Test that periodic GC is working
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run driver TestPeriodicGC
+ */
+
+import java.util.*;
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestPeriodicGC {
+
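+    // Forks a child JVM with the given flags, runs this class in its idle
+    // "test" mode, and checks the GC log for the periodic trigger message.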
+    public static void testWith(String msg, boolean periodic, String... args) throws Exception {
+        String[] cmds = Arrays.copyOf(args, args.length + 2);
+        cmds[args.length] = TestPeriodicGC.class.getName();
+        cmds[args.length + 1] = "test";
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(cmds);
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        if (periodic && !output.getOutput().contains("Trigger: Time since last GC")) {
+            throw new AssertionError(msg + ": Should have periodic GC in logs");
+        }
+        if (!periodic && output.getOutput().contains("Trigger: Time since last GC")) {
+            throw new AssertionError(msg + ": Should not have periodic GC in logs");
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0 && args[0].equals("test")) {
+            Thread.sleep(5000); // stay idle
+            return;
+        }
+
+        String[] enabled = new String[] {
+                "adaptive",
+                "compact",
+                "static",
+                "traversal",
+        };
+
+        String[] disabled = new String[] {
+                "aggressive",
+                "passive",
+        };
+
+        for (String h : enabled) {
+            testWith("Short period with " + h,
+                    true,
+                    "-Xlog:gc",
+                    "-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=" + h,
+                    "-XX:ShenandoahGuaranteedGCInterval=1000"
+            );
+
+            testWith("Long period with " + h,
+                    false,
+                    "-Xlog:gc",
+                    "-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=" + h,
+                    "-XX:ShenandoahGuaranteedGCInterval=100000" // deliberately too long
+            );
+        }
+
+        for (String h : disabled) {
+            testWith("Short period with " + h,
+                    false,
+                    "-Xlog:gc",
+                    "-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=" + h,
+                    "-XX:ShenandoahGuaranteedGCInterval=1000"
+            );
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestRefprocSanity.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestRefprocSanity
+ * @summary Test that null references/referents work fine
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g                                                             TestRefprocSanity
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:+ShenandoahVerify                                       TestRefprocSanity
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g                       -XX:ShenandoahGCHeuristics=aggressive TestRefprocSanity
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:+ShenandoahVerify -XX:ShenandoahGCHeuristics=traversal  TestRefprocSanity
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g                       -XX:ShenandoahGCHeuristics=traversal  TestRefprocSanity
+ */
+
+import java.lang.ref.*;
+
+public class TestRefprocSanity {
+
+    static final long TARGET_MB = Long.getLong("target", 10_000); // 10 GB allocation
+    static final int WINDOW = 10_000;
+
+    static final Reference<MyObject>[] refs = new Reference[WINDOW];
+
+    public static void main(String[] args) throws Exception {
+        long count = TARGET_MB * 1024 * 1024 / 32;
+        int rIdx = 0;
+
+        ReferenceQueue<MyObject> rq = new ReferenceQueue<>();
+
+        for (int c = 0; c < WINDOW; c++) {
+            refs[c] = select(c, new MyObject(c), rq);
+        }
+
+        for (int c = 0; c < count; c++) {
+            verifyRefAt(rIdx);
+            refs[rIdx] = select(c, new MyObject(rIdx), rq);
+
+            rIdx++;
+            if (rIdx >= WINDOW) {
+                rIdx = 0;
+            }
+            while (rq.poll() != null); // drain
+        }
+    }
+
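+    // Rotates through soft, weak, and phantom references, with and without a
+    // reference queue, and with both null and live referents, keyed on v % 10.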
+    static Reference<MyObject> select(int v, MyObject ext, ReferenceQueue<MyObject> rq) {
+        switch (v % 10) {
+            case 0:  return new SoftReference<MyObject>(null);
+            case 1:  return new SoftReference<MyObject>(null, rq);
+            case 2:  return new SoftReference<MyObject>(ext);
+            case 3:  return new SoftReference<MyObject>(ext, rq);
+            case 4:  return new WeakReference<MyObject>(null);
+            case 5:  return new WeakReference<MyObject>(null, rq);
+            case 6:  return new WeakReference<MyObject>(ext);
+            case 7:  return new WeakReference<MyObject>(ext, rq);
+            case 8:  return new PhantomReference<MyObject>(null, rq);
+            case 9:  return new PhantomReference<MyObject>(ext, rq);
+            default: throw new IllegalStateException();
+        }
+    }
+
+    static void verifyRefAt(int idx) {
+        Reference<MyObject> ref = refs[idx];
+        MyObject mo = ref.get();
+        if (mo != null && mo.x != idx) {
+            throw new IllegalStateException("Referent tag is incorrect: " + mo.x + ", should be " + idx);
+        }
+    }
+
+    static class MyObject {
+        final int x;
+
+        public MyObject(int x) {
+            this.x = x;
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestRegionSampling.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestRegionSampling
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g                                         -XX:+ShenandoahRegionSampling TestRegionSampling
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahRegionSampling TestRegionSampling
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive     -XX:+ShenandoahRegionSampling TestRegionSampling
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=static       -XX:+ShenandoahRegionSampling TestRegionSampling
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=compact      -XX:+ShenandoahRegionSampling TestRegionSampling
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahRegionSampling TestRegionSampling
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal    -XX:+ShenandoahRegionSampling TestRegionSampling
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahRegionSampling TestRegionSampling
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahRegionSampling TestRegionSampling
+ */
+
+public class TestRegionSampling {
+
+    static final long TARGET_MB = Long.getLong("target", 2_000); // 2 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        long count = TARGET_MB * 1024 * 1024 / 16;
+        for (long c = 0; c < count; c++) {
+            sink = new Object();
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestRetainObjects.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestRetainObjects
+ * @summary Acceptance tests: collector can deal with retained objects
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify TestRetainObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify TestRetainObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC                       TestRetainObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC                       TestRetainObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahOOMDuringEvacALot TestRetainObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahAllocFailureALot  TestRetainObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive                                    TestRetainObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive     -XX:+ShenandoahVerify TestRetainObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal    -XX:+ShenandoahVerify TestRetainObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive     TestRetainObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=static       TestRetainObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=compact      TestRetainObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal    TestRetainObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:-UseTLAB                            -XX:+ShenandoahVerify TestRetainObjects
+ */
+
+public class TestRetainObjects {
+
+    static final int COUNT = 10_000_000;
+    static final int WINDOW = 10_000;
+
+    static final String[] reachable = new String[WINDOW];
+
+    public static void main(String[] args) throws Exception {
+        int rIdx = 0;
+        for (int c = 0; c < COUNT; c++) {
+            reachable[rIdx] = ("LargeString" + c);
+            rIdx++;
+            if (rIdx >= WINDOW) {
+                rIdx = 0;
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestSieveObjects
+ * @summary Acceptance tests: collector can deal with objects retained and released in a sieve-like pattern
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify TestSieveObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify TestSieveObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC                       TestSieveObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC                       TestSieveObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahOOMDuringEvacALot TestSieveObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive   -XX:+ShenandoahAllocFailureALot  TestSieveObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=aggressive                                    TestSieveObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive     -XX:+ShenandoahVerify TestSieveObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal    -XX:+ShenandoahVerify TestSieveObjects
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=adaptive     TestSieveObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=static       TestSieveObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=compact      TestSieveObjects
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:ShenandoahGCHeuristics=traversal    TestSieveObjects
+ *
+ * @run main/othervm/timeout=240 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:-UseTLAB                -XX:+ShenandoahVerify TestSieveObjects
+ */
+
+import java.util.concurrent.ThreadLocalRandom;
+
+public class TestSieveObjects {
+
+    static final int COUNT = 100_000_000;
+    static final int WINDOW = 1_000_000;
+    static final int PAYLOAD = 100;
+
+    static final MyObject[] arr = new MyObject[WINDOW];
+
+    public static void main(String[] args) throws Exception {
+        int rIdx = 0;
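+        // Sieve a sliding window: live slots are cleared with ~90% probability,
+        // empty slots repopulated with ~50% probability, so the collector keeps
+        // seeing a mix of dying and newly retained objects.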
+        for (int c = 0; c < COUNT; c++) {
+            MyObject v = arr[rIdx];
+            if (v != null) {
+                if (v.x != rIdx) {
+                    throw new IllegalStateException("Illegal value at index " + rIdx + ": " + v.x);
+                }
+                if (ThreadLocalRandom.current().nextInt(1000) > 100) {
+                    arr[rIdx] = null;
+                }
+            } else {
+                if (ThreadLocalRandom.current().nextInt(1000) > 500) {
+                    arr[rIdx] = new MyObject(rIdx);
+                }
+            }
+            rIdx++;
+            if (rIdx >= WINDOW) {
+                rIdx = 0;
+            }
+        }
+    }
+
+    public static class MyObject {
+        public int x;
+        public byte[] payload;
+
+        public MyObject(int x) {
+            this.x = x;
+            this.payload = new byte[PAYLOAD];
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestSmallHeap.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestSmallHeap
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC         TestSmallHeap
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx64m TestSmallHeap
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx32m TestSmallHeap
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m TestSmallHeap
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx8m  TestSmallHeap
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx4m  TestSmallHeap
+ */
+
+public class TestSmallHeap {
+
+    public static void main(String[] args) throws Exception {
+        System.out.println("Hello World!");
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestStringDedup.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestStringDedup
+ * @summary Test Shenandoah string deduplication implementation
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc:open
+ * @modules java.base/java.lang:open
+ *          java.management
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC -XX:+UseStringDeduplication -Xmx256M -Xlog:gc+stats TestStringDedup
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC -XX:+UseStringDeduplication -Xmx256M -Xlog:gc+stats TestStringDedup
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive   -XX:+UseStringDeduplication -Xmx256M -Xlog:gc+stats TestStringDedup
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC                                         -XX:+UseStringDeduplication -Xmx256M -Xlog:gc+stats TestStringDedup
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal    -XX:+UseStringDeduplication -Xmx256M -Xlog:gc+stats TestStringDedup
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact      -XX:+UseStringDeduplication -Xmx256M -Xlog:gc+stats TestStringDedup
+ */
+
+import java.lang.reflect.*;
+import java.util.*;
+
+import sun.misc.*;
+
+public class TestStringDedup {
+    private static Field valueField;
+    private static Unsafe unsafe;
+
+    private static final int UNIQUE_STRINGS = 20;
+
+    static {
+        try {
+            Field field = Unsafe.class.getDeclaredField("theUnsafe");
+            field.setAccessible(true);
+            unsafe = (Unsafe) field.get(null);
+
+            valueField = String.class.getDeclaredField("value");
+            valueField.setAccessible(true);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static Object getValue(String string) {
+        try {
+            return valueField.get(string);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    static class StringAndId {
+        private String str;
+        private int id;
+
+        public StringAndId(String str, int id) {
+            this.str = str;
+            this.id = id;
+        }
+
+        public String str() {
+            return str;
+        }
+
+        public int id() {
+            return id;
+        }
+    }
+
+    private static void generateStrings(ArrayList<StringAndId> strs, int uniqueStrings) {
+        Random rn = new Random();
+        for (int u = 0; u < uniqueStrings; u++) {
+            // Add between 2 and 9 copies of each unique string.
+            int n = Math.max(2, rn.nextInt(10));
+            for (int index = 0; index < n; index++) {
+                strs.add(new StringAndId("Unique String " + u, u));
+            }
+        }
+    }
+
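+    // Deduplicated strings share the same backing value array: group strings by
+    // the identity of that array and fail if two different logical strings ever
+    // end up sharing one. Returns the number of distinct arrays seen.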
+    private static int verifyDedupString(ArrayList<StringAndId> strs) {
+        HashMap<Object, StringAndId> seen = new HashMap<>();
+        int total = 0;
+        int dedup = 0;
+
+        for (StringAndId item : strs) {
+            total++;
+            StringAndId existingItem = seen.get(getValue(item.str()));
+            if (existingItem == null) {
+                seen.put(getValue(item.str()), item);
+            } else {
+                if (item.id() != existingItem.id() ||
+                        !item.str().equals(existingItem.str())) {
+                    System.out.println("StringDedup error:");
+                    System.out.println("String: " + item.str() + " != " + existing_item.str());
+                    throw new RuntimeException("StringDedup Test failed");
+                } else {
+                    dedup++;
+                }
+            }
+        }
+        System.out.println("Dedup: " + dedup + "/" + total + " unique: " + (total - dedup));
+        return (total - dedup);
+    }
+
+    public static void main(String[] args) {
+        ArrayList<StringAndId> astrs = new ArrayList<>();
+        generateStrings(astrs, UNIQUE_STRINGS);
+        System.gc();
+        System.gc();
+        System.gc();
+        System.gc();
+        System.gc();
+
+        if (verifyDedupString(astrs) != UNIQUE_STRINGS) {
+            // Cannot guarantee that all strings are deduplicated; there can
+            // still be pending items in the queues.
+            System.out.println("Not all strings are deduplicated");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestStringDedupStress.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestStringDedupStress
+ * @summary Test Shenandoah string deduplication implementation
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc:open
+ * @modules java.base/java.lang:open
+ *          java.management
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -DtargetStrings=3000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=aggressive -DtargetStrings=2000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=aggressive -XX:+ShenandoahOOMDuringEvacALot -DtargetStrings=2000000
+ *                    TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=static -DtargetStrings=4000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=compact
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=passive -XX:+ShenandoahDegeneratedGC -DtargetOverwrites=40000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=passive -XX:-ShenandoahDegeneratedGC -DtargetOverwrites=40000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=traversal
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahUpdateRefsEarly=off -DtargetStrings=3000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=compact -XX:ShenandoahUpdateRefsEarly=off -DtargetStrings=2000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=aggressive -XX:ShenandoahUpdateRefsEarly=off -DtargetStrings=2000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=static -XX:ShenandoahUpdateRefsEarly=off -DtargetOverwrites=4000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=aggressive -XX:ShenandoahUpdateRefsEarly=off -XX:+ShenandoahOOMDuringEvacALot -DtargetStrings=2000000
+ *                   TestStringDedupStress
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseStringDeduplication -Xmx512M -Xlog:gc+stats
+ *                   -XX:ShenandoahGCHeuristics=traversal -XX:+ShenandoahOOMDuringEvacALot -DtargetStrings=2000000
+ *                   TestStringDedupStress
+ */
+
+import java.lang.management.*;
+import java.lang.reflect.*;
+import java.util.*;
+
+import sun.misc.*;
+
+public class TestStringDedupStress {
+    private static Field valueField;
+    private static Unsafe unsafe;
+
+    private static long TARGET_STRINGS = Long.getLong("targetStrings", 2_500_000);
+    private static long TARGET_OVERWRITES = Long.getLong("targetOverwrites", 600_000);
+    private static final long MAX_REWRITE_GC_CYCLES = 6;
+
+    private static final int UNIQUE_STRINGS = 20;
+
+    static {
+        try {
+            Field field = Unsafe.class.getDeclaredField("theUnsafe");
+            field.setAccessible(true);
+            unsafe = (Unsafe) field.get(null);
+
+            valueField = String.class.getDeclaredField("value");
+            valueField.setAccessible(true);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static Object getValue(String string) {
+        try {
+            return valueField.get(string);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    static class StringAndId {
+        private String str;
+        private int id;
+
+        public StringAndId(String str, int id) {
+            this.str = str;
+            this.id = id;
+        }
+
+        public String str() {
+            return str;
+        }
+
+        public int id() {
+            return id;
+        }
+    }
+
+    // Add uniqueStrings strings, each drawn at random from a pool of
+    // uniqueStrings distinct values.
+    private static void generateStrings(ArrayList<StringAndId> strs, int uniqueStrings) {
+        Random rn = new Random();
+        for (int u = 0; u < uniqueStrings; u++) {
+            int n = rn.nextInt(uniqueStrings);
+            strs.add(new StringAndId("Unique String " + n, n));
+        }
+    }
+
+    private static int verifyDedupString(ArrayList<StringAndId> strs) {
+        HashMap<Object, StringAndId> seen = new HashMap<>();
+        int total = 0;
+        int dedup = 0;
+
+        for (StringAndId item : strs) {
+            total++;
+            StringAndId existingItem = seen.get(getValue(item.str()));
+            if (existingItem == null) {
+                seen.put(getValue(item.str()), item);
+            } else {
+                if (item.id() != existingItem.id() ||
+                        !item.str().equals(existingItem.str())) {
+                    System.out.println("StringDedup error:");
+                    System.out.println("id: " + item.id() + " != " + existingItem.id());
+                    System.out.println("or String: " + item.str() + " != " + existingItem.str());
+                    throw new RuntimeException("StringDedup Test failed");
+                } else {
+                    dedup++;
+                }
+            }
+        }
+        System.out.println("Dedup: " + dedup + "/" + total + " unique: " + (total - dedup));
+        return (total - dedup);
+    }
+
+    static volatile ArrayList<StringAndId> astrs = new ArrayList<>();
+    static GarbageCollectorMXBean gcCycleMBean;
+
+    public static void main(String[] args) {
+        Random rn = new Random();
+
+        for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
+            if ("Shenandoah Cycles".equals(bean.getName())) {
+                gcCycleMBean = bean;
+                break;
+            }
+        }
+
+        if (gcCycleMBean == null) {
+            throw new RuntimeException("Can not find Shenandoah GC cycle mbean");
+        }
+
+        // Generate roughly TARGET_STRINGS strings, of which only UNIQUE_STRINGS are unique.
+        long genIters = TARGET_STRINGS / UNIQUE_STRINGS;
+        for (long index = 0; index < genIters; index++) {
+            generateStrings(astrs, UNIQUE_STRINGS);
+        }
+
+        long cycleBeforeRewrite = gcCycleMBean.getCollectionCount();
+
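+        // Keep rewriting random entries until enough GC cycles have elapsed for
+        // the deduplication thread to revisit the mutated strings.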
+        for (long loop = 1; loop < TARGET_OVERWRITES; loop++) {
+            int arrSize = astrs.size();
+            int index = rn.nextInt(arrSize);
+            StringAndId item = astrs.get(index);
+            int n = rn.nextInt(UNIQUE_STRINGS);
+            item.str = "Unique String " + n;
+            item.id = n;
+
+            if (loop % 1000 == 0) {
+                // enough GC cycles for rewritten strings to be deduplicated
+                if (gcCycleMBean.getCollectionCount() - cycleBeforeRewrite >= MAX_REWRITE_GC_CYCLES) {
+                    break;
+                }
+            }
+        }
+        verifyDedupString(astrs);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestStringInternCleanup.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestStringInternCleanup
+ * @summary Check that Shenandoah cleans up interned strings
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify TestStringInternCleanup
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify TestStringInternCleanup
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC                       TestStringInternCleanup
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC                       TestStringInternCleanup
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=aggressive                         TestStringInternCleanup
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=adaptive     -XX:+ShenandoahVerify TestStringInternCleanup
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=traversal    -XX:+ShenandoahVerify TestStringInternCleanup
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=adaptive                           TestStringInternCleanup
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=static                             TestStringInternCleanup
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=compact                            TestStringInternCleanup
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ClassUnloadingWithConcurrentMark -Xmx64m -XX:ShenandoahGCHeuristics=traversal                          TestStringInternCleanup
+ *
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-ClassUnloadingWithConcurrentMark -Xmx64m                                                               TestStringInternCleanup
+ */
+
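+// The test interns COUNT unique strings while keeping only a sliding window
+// of WINDOW of them strongly reachable. With -Xmx64m the run can only
+// survive if the interned strings that drop out of the window are cleaned
+// up, under every heuristics/degeneration combination exercised above.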
+public class TestStringInternCleanup {
+
+    static final int COUNT = 1_000_000;
+    static final int WINDOW = 1_000;
+
+    static final String[] reachable = new String[WINDOW];
+
+    public static void main(String[] args) throws Exception {
+        int rIdx = 0;
+        for (int c = 0; c < COUNT; c++) {
+            reachable[rIdx] = ("LargeInternedString" + c).intern();
+            rIdx++;
+            if (rIdx >= WINDOW) {
+                rIdx = 0;
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestVerifyJCStress.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestVerifyJCStress
+ * @summary Tests that we pass at least one jcstress-like test with all verification turned on
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run main/othervm  -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions
+ *                    -XX:+UseShenandoahGC -Xmx1g -Xms1g
+ *                    -XX:+ShenandoahStoreCheck -XX:+ShenandoahVerify -XX:+VerifyObjectEquals
+ *                    -XX:ShenandoahGCHeuristics=passive -XX:+ShenandoahDegeneratedGC
+ *                    TestVerifyJCStress
+ *
+ * @run main/othervm  -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions
+ *                    -XX:+UseShenandoahGC -Xmx1g -Xms1g
+ *                    -XX:+ShenandoahStoreCheck -XX:+ShenandoahVerify -XX:+VerifyObjectEquals
+ *                    -XX:ShenandoahGCHeuristics=passive -XX:-ShenandoahDegeneratedGC
+ *                    TestVerifyJCStress
+ *
+ * @run main/othervm  -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions
+ *                    -XX:+UseShenandoahGC -Xmx1g -Xms1g
+ *                    -XX:+ShenandoahStoreCheck -XX:+ShenandoahVerify -XX:+VerifyObjectEquals -XX:+ShenandoahVerifyOptoBarriers
+ *                    -XX:ShenandoahGCHeuristics=adaptive
+ *                    TestVerifyJCStress
+ *
+ * @run main/othervm  -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions
+ *                    -XX:+UseShenandoahGC -Xmx1g -Xms1g
+ *                    -XX:+ShenandoahStoreCheck -XX:+ShenandoahVerify -XX:+VerifyObjectEquals -XX:+ShenandoahVerifyOptoBarriers
+ *                    -XX:ShenandoahGCHeuristics=static
+ *                    TestVerifyJCStress
+ *
+ * @run main/othervm  -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions
+ *                    -XX:+UseShenandoahGC -Xmx1g -Xms1g
+ *                    -XX:+ShenandoahStoreCheck -XX:+ShenandoahVerify -XX:+VerifyObjectEquals -XX:+ShenandoahVerifyOptoBarriers
+ *                    -XX:ShenandoahGCHeuristics=traversal
+ *                    TestVerifyJCStress
+ */
+
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.locks.*;
+
+public class TestVerifyJCStress {
+
+    public static void main(String[] args) throws Exception {
+        ExecutorService service = Executors.newFixedThreadPool(
+                2,
+                r -> {
+                    Thread t = new Thread(r);
+                    t.setDaemon(true);
+                    return t;
+                }
+        );
+
+        for (int c = 0; c < 10000; c++) {
+            final Test[] tests = new Test[10000];
+            for (int t = 0; t < tests.length; t++) {
+                tests[t] = new Test();
+            }
+
+            Future<?> f1 = service.submit(() -> {
+                IntResult2 r = new IntResult2();
+                for (Test test : tests) {
+                    test.RL_Us(r);
+                }
+            });
+            Future<?> f2 = service.submit(() -> {
+                for (Test test : tests) {
+                    test.WLI_Us();
+                }
+            });
+
+            f1.get();
+            f2.get();
+        }
+    }
+
+    public static class IntResult2 {
+        int r1, r2;
+    }
+
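+    // jcstress-style actor pair: RL_Us reads x and y under the read lock,
+    // while WLI_Us writes them under the interruptible write lock. The
+    // constant lock traffic and field accesses from two threads stress the
+    // GC barriers while all verification options are enabled.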
+    public static class Test {
+        final StampedLock lock = new StampedLock();
+
+        int x, y;
+
+        public void RL_Us(IntResult2 r) {
+            StampedLock lock = this.lock;
+            long stamp = lock.readLock();
+            r.r1 = x;
+            r.r2 = y;
+            lock.unlock(stamp);
+        }
+
+        public void WLI_Us() {
+            try {
+                StampedLock lock = this.lock;
+                long stamp = lock.writeLockInterruptibly();
+                x = 1;
+                y = 2;
+                lock.unlock(stamp);
+            } catch (InterruptedException e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestVerifyLevels.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestVerifyLevels
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=0 TestVerifyLevels
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=1 TestVerifyLevels
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=2 TestVerifyLevels
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=3 TestVerifyLevels
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=4 TestVerifyLevels
+ */
+
+public class TestVerifyLevels {
+
+    static final long TARGET_MB = Long.getLong("target", 1_000); // ~1 GB of allocations by default
+
+    static Object sink;
+
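+    // Allocate ~TARGET_MB megabytes of short-lived objects into a 128m heap,
+    // forcing frequent collections; every cycle runs the Shenandoah verifier
+    // at the level chosen on the command line.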
+    public static void main(String[] args) throws Exception {
+        long count = TARGET_MB * 1024 * 1024 / 16;
+        for (long c = 0; c < count; c++) {
+            sink = new Object();
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestWithLogLevel.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestWithLogLevel
+ * @summary Test Shenandoah with different log levels
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=error   TestWithLogLevel
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=warning TestWithLogLevel
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=info    TestWithLogLevel
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=debug   TestWithLogLevel
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -Xlog:gc*=trace   TestWithLogLevel
+ */
+
+import java.util.*;
+
+public class TestWithLogLevel {
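+    // Allocates ~300 MB of objects and keeps them alive in the list, so the
+    // heap has to grow from -Xms256M and GC log output is produced at each
+    // configured level.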
+    public static void main(String[] args) {
+        ArrayList<Object> list = new ArrayList<>();
+        long count = 300 * 1024 * 1024 / 16; // 300MB allocation
+        for (long index = 0; index < count; index++) {
+            Object sink = new Object();
+            list.add(sink);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/TestWrongArrayMember.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestWrongArrayMember
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC                                      TestWrongArrayMember
+ * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal TestWrongArrayMember
+ */
+
+public class TestWrongArrayMember {
+    public static void main(String... args) throws Exception {
+        Object[] src = new Object[3];
+        src[0] = new Integer(0);
+        src[1] = new Object();
+        src[2] = new Object();
+        Object[] dst = new Integer[3];
+        dst[0] = new Integer(1);
+        dst[1] = new Integer(2);
+        dst[2] = new Integer(3);
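+        // System.arraycopy copies element by element and throws
+        // ArrayStoreException at the first incompatible element: src[0]
+        // (an Integer) must land in dst[0], and the copy must stop at
+        // src[1], leaving dst[1] and dst[2] untouched.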
+        try {
+            System.arraycopy(src, 0, dst, 0, 3);
+            throw new RuntimeException("Expected ArrayStoreException");
+        } catch (ArrayStoreException e) {
+            if (src[0] != dst[0]) {
+                throw new RuntimeException("First element not copied");
+            } else if (src[1] == dst[1] || src[2] == dst[2]) {
+                throw new RuntimeException("Second and third elements are affected");
+            } else {
+                return; // Passed!
+            }
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestC1ArrayCopyNPE.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* @test TestC1ArrayCopyNPE
+ * @summary test C1 arraycopy intrinsic
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @run main/othervm -XX:TieredStopAtLevel=1 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive TestC1ArrayCopyNPE
+ */
+
+public class TestC1ArrayCopyNPE {
+
+    private static final int NUM_RUNS = 10000;
+    private static final int ARRAY_SIZE = 10000;
+    private static int[] a;
+    private static int[] b;
+
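+    // Run the copy NUM_RUNS times with a null source, then NUM_RUNS times
+    // with a null destination, so the C1 arraycopy intrinsic repeatedly
+    // takes the NullPointerException path for either argument.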
+    public static void main(String[] args) {
+        a = null;
+        b = new int[ARRAY_SIZE];
+        for (int i = 0; i < NUM_RUNS; i++) {
+            test();
+        }
+        a = new int[ARRAY_SIZE];
+        b = null;
+        for (int i = 0; i < NUM_RUNS; i++) {
+            test();
+        }
+    }
+
+    private static void test() {
+        try {
+            System.arraycopy(a, 0, b, 0, ARRAY_SIZE);
+            throw new RuntimeException("test failed");
+        } catch (NullPointerException ex) {
+            // Ok
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestC1VectorizedMismatch.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* @test TestC1VectorizedMismatch
+ * @summary test C1 vectorized mismatch intrinsic
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @run main/othervm -XX:TieredStopAtLevel=1 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive TestC1VectorizedMismatch
+ */
+
+import java.util.Arrays;
+
+public class TestC1VectorizedMismatch {
+
+    private static final int NUM_RUNS = 10000;
+    private static final int ARRAY_SIZE = 10000;
+    private static int[] a;
+    private static int[] b;
+
+    public static void main(String[] args) {
+        a = new int[ARRAY_SIZE];
+        b = new int[ARRAY_SIZE];
+        for (int i = 0; i < NUM_RUNS; i++) {
+            test();
+        }
+    }
+
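+    // Arrays.equals(int[], int[]) goes through ArraysSupport.mismatch, which
+    // can compile down to the vectorized mismatch intrinsic; copying a into b
+    // first means any reported difference points at a miscompiled comparison.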
+    private static void test() {
+        int[] a1 = new int[ARRAY_SIZE]; // unused garbage arrays, kept only
+        int[] b1 = new int[ARRAY_SIZE]; // to add allocation pressure
+        fillArray(a);
+        System.arraycopy(a, 0, b, 0, ARRAY_SIZE);
+        if (!Arrays.equals(a, b)) {
+            throw new RuntimeException("arrays not equal");
+        }
+    }
+
+    private static void fillArray(int[] array) {
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            int val = (int) (Math.random() * Integer.MAX_VALUE);
+            array[i] = val;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestCommonGCLoads.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestCommonGCLoads
+ * @summary Test that GC state load commoning works
+ * @key gc
+ * @requires vm.flavor == "server"
+ * @requires vm.gc.Shenandoah
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation
+ *                   -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
+ *                   -XX:-ShenandoahCommonGCStateLoads
+ *                   TestCommonGCLoads
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation
+ *                   -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
+ *                   -XX:+ShenandoahCommonGCStateLoads
+ *                   TestCommonGCLoads
+ */
+
+public class TestCommonGCLoads {
+
+    static Object d = new Object();
+
+    static Target t1 = new Target();
+    static Target t2 = new Target();
+    static Target t3 = new Target();
+    static Target t4 = new Target();
+    static Target t5 = new Target();
+
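+    // Each of the five stores needs the GC state for its write barrier.
+    // With -XX:+ShenandoahCommonGCStateLoads the compiler is expected to
+    // common the five GC state loads into one; the test checks that both
+    // settings compile and run this shape correctly.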
+    static void test() {
+        t1.field = d;
+        t2.field = d;
+        t3.field = d;
+        t4.field = d;
+        t5.field = d;
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 100_000; i++) {
+            test();
+        }
+    }
+
+    static class Target {
+        Object field;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestExpandedWBLostNullCheckDep.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestExpandedWBLostNullCheckDep
+ * @summary Logic that moves a null check in the expanded barrier may cause a memory access that doesn't depend on the barrier to bypass the null check
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @requires vm.flavor == "server"
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation
+ *                   -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
+ *                   -XX:+StressGCM -XX:+StressLCM TestExpandedWBLostNullCheckDep
+ */
+
+public class TestExpandedWBLostNullCheckDep {
+
+    static void test(int i, int[] arr) {
+        // arr.length depends on a null check for arr
+        if (i < 0 || i >= arr.length) {
+        }
+        // The write barrier here also depends on the null check. The
+        // null check is moved into the barrier to enable implicit null
+        // checks. It must not be moved above the access to arr.length.
+        arr[i] = 0x42;
+    }
+
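+    // Compile test() with a non-null array, then call it with null: if the
+    // expanded barrier logic lost the null check dependency, the memory
+    // access may bypass the null check and crash the VM instead of throwing
+    // NullPointerException.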
+    public static void main(String[] args) {
+        int[] int_arr = new int[10];
+        for (int i = 0; i < 20000; i++) {
+            test(0, int_arr);
+        }
+        try {
+            test(0, null);
+        } catch (NullPointerException npe) {}
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestMaybeNullUnsafeAccess.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestMaybeNullUnsafeAccess
+ * @summary a cast before an unsafe access, moved into the null path of a dominating null check, must not cause a crash
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @modules java.base/jdk.internal.misc:+open
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:-TieredCompilation TestMaybeNullUnsafeAccess
+ *
+ */
+
+import jdk.internal.misc.Unsafe;
+
+import java.lang.reflect.Field;
+
+public class TestMaybeNullUnsafeAccess {
+
+    static final Unsafe UNSAFE = Unsafe.getUnsafe();
+    static final long F_OFFSET;
+
+    static class A {
+        int f;
+    }
+
+    static {
+        try {
+            Field fField = A.class.getDeclaredField("f");
+            F_OFFSET = UNSAFE.objectFieldOffset(fField);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    static A test_helper(Object o) {
+        return (A) o;
+    }
+
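+    // main calls test_helper with both null and non-null arguments, so its
+    // cast keeps a live null path; the unsafe access below must stay guarded
+    // even when the cast is moved around the dominating null check.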
+    static int test(Object o) {
+        int f = 0;
+        for (int i = 0; i < 100; i++) {
+            A a = test_helper(o);
+            f = UNSAFE.getInt(a, F_OFFSET);
+        }
+        return f;
+    }
+
+    public static void main(String[] args) {
+        A a = new A();
+        for (int i = 0; i < 20000; i++) {
+            test_helper(null);
+            test_helper(a);
+            test(a);
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestNullCheck.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestNullCheck
+ * @summary implicit null check on Brooks pointer must not cause crash
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation
+ *                   -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
+ *                   -Xmx4G -XX:HeapBaseMinAddress=0x800000000 TestNullCheck
+ */
+
+// HeapBaseMinAddress above forces compressed oops with a base
+
+public class TestNullCheck {
+
+    int f;
+
+    static int test1(TestNullCheck o) {
+        return o.f;
+    }
+
+    static TestNullCheck static_obj = new TestNullCheck();
+
+    static int test2() {
+        return static_obj.f;
+    }
+
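+    // Compile both accessors with non-null receivers, then feed them null:
+    // the implicit null check folded into the Brooks pointer load must turn
+    // into a NullPointerException rather than a crash.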
+    public static void main(String[] args) {
+        TestNullCheck o = new TestNullCheck();
+        for (int i = 0; i < 20000; i++) {
+            test1(o);
+            test2();
+        }
+        try {
+            test1(null);
+        } catch (NullPointerException npe) {}
+        static_obj = null;
+        try {
+            test2();
+        } catch (NullPointerException npe) {}
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestReferenceCAS.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * Run standalone with: --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-opens java.base/jdk.internal.misc=ALL-UNNAMED
+ */
+
+/*
+ * @test TestReferenceCAS
+ * @summary Shenandoah reference CAS test
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @modules java.base/jdk.internal.misc:+open
+ *
+ * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC                                                 TestReferenceCAS
+ * @run main/othervm -Diters=100   -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -Xint                                           TestReferenceCAS
+ * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-TieredCompilation                          TestReferenceCAS
+ * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:TieredStopAtLevel=1                         TestReferenceCAS
+ * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:TieredStopAtLevel=4                         TestReferenceCAS
+ *
+ * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops                          TestReferenceCAS
+ * @run main/othervm -Diters=100   -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -Xint                    TestReferenceCAS
+ * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -XX:-TieredCompilation   TestReferenceCAS
+ * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -XX:TieredStopAtLevel=1  TestReferenceCAS
+ * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -XX:TieredStopAtLevel=4  TestReferenceCAS
+ */
+
+import java.lang.reflect.Field;
+
+public class TestReferenceCAS {
+
+    static final int ITERS = Integer.getInteger("iters", 1);
+    static final int WEAK_ATTEMPTS = Integer.getInteger("weakAttempts", 10);
+
+    static final jdk.internal.misc.Unsafe UNSAFE;
+    static final long V_OFFSET;
+
+    static {
+        try {
+            Field f = jdk.internal.misc.Unsafe.class.getDeclaredField("theUnsafe");
+            f.setAccessible(true);
+            UNSAFE = (jdk.internal.misc.Unsafe) f.get(null);
+        } catch (Exception e) {
+            throw new RuntimeException("Unable to get Unsafe instance.", e);
+        }
+
+        try {
+            Field vField = TestReferenceCAS.class.getDeclaredField("v");
+            V_OFFSET = UNSAFE.objectFieldOffset(vField);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    Object v;
+
+    private static void assertEquals(boolean a, boolean b, String msg) {
+        if (a != b) {
+            throw new RuntimeException("a (" + a + ") != b (" + b + "): " + msg);
+        }
+    }
+
+    private static void assertEquals(Object a, Object b, String msg) {
+        if (!a.equals(b)) {
+            throw new RuntimeException("a (" + a.toString() + ") != b (" + b.toString() + "): " + msg);
+        }
+    }
+
+    public static void main(String[] args) {
+        TestReferenceCAS t = new TestReferenceCAS();
+        for (int c = 0; c < ITERS; c++) {
+            testAccess(t, V_OFFSET);
+        }
+    }
+
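+    // Runs CAS and compare-and-exchange patterns while the aggressive
+    // heuristics keep evacuating objects; the GC's CAS barrier has to
+    // resolve forwarded references so a compare against an evacuated
+    // expected value still gives the right answer.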
+    static void testAccess(Object base, long offset) {
+        String foo = new String("foo");
+        String bar = new String("bar");
+        String baz = new String("baz");
+        UNSAFE.putReference(base, offset, "foo");
+        {
+            String newval = bar;
+            boolean r = UNSAFE.compareAndSetReference(base, offset, "foo", newval);
+            assertEquals(r, true, "success compareAndSet Object");
+            assertEquals(newval, "bar", "must not destroy newval");
+            Object x = UNSAFE.getReference(base, offset);
+            assertEquals(x, "bar", "success compareAndSet Object value");
+        }
+
+        {
+            String newval = baz;
+            boolean r = UNSAFE.compareAndSetReference(base, offset, "foo", newval);
+            assertEquals(r, false, "failing compareAndSet Object");
+            assertEquals(newval, "baz", "must not destroy newval");
+            Object x = UNSAFE.getReference(base, offset);
+            assertEquals(x, "bar", "failing compareAndSet Object value");
+        }
+
+        UNSAFE.putReference(base, offset, "bar");
+        {
+            String newval = foo;
+            Object r = UNSAFE.compareAndExchangeReference(base, offset, "bar", newval);
+            assertEquals(r, "bar", "success compareAndExchange Object");
+            assertEquals(newval, "foo", "must not destroy newval");
+            Object x = UNSAFE.getReference(base, offset);
+            assertEquals(x, "foo", "success compareAndExchange Object value");
+        }
+
+        {
+            String newval = baz;
+            Object r = UNSAFE.compareAndExchangeReference(base, offset, "bar", newval);
+            assertEquals(r, "foo", "failing compareAndExchange Object");
+            assertEquals(newval, "baz", "must not destroy newval");
+            Object x = UNSAFE.getReference(base, offset);
+            assertEquals(x, "foo", "failing compareAndExchange Object value");
+        }
+
+        UNSAFE.putReference(base, offset, "bar");
+        {
+            String newval = foo;
+            boolean success = false;
+            for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) {
+                success = UNSAFE.weakCompareAndSetReference(base, offset, "bar", newval);
+                assertEquals(newval, "foo", "must not destroy newval");
+            }
+            assertEquals(success, true, "weakCompareAndSet Object");
+            Object x = UNSAFE.getReference(base, offset);
+            assertEquals(x, "foo", "weakCompareAndSet Object");
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/compiler/TestWriteBarrierClearControl.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestWriteBarrierClearControl
+ * @summary Clearing control during final graph reshape causes memory barrier to lose dependency on null check
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation
+ *                   -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC
+ *                   -XX:+UnlockDiagnosticVMOptions -XX:+StressLCM -XX:+StressGCM
+ *                   TestWriteBarrierClearControl
+ *
+ */
+public class TestWriteBarrierClearControl {
+
+    int f;
+
+    static void test1(TestWriteBarrierClearControl o) {
+        o.f = 0x42;
+    }
+
+    static TestWriteBarrierClearControl fo = new TestWriteBarrierClearControl();
+
+    static void test2() {
+        TestWriteBarrierClearControl o = fo;
+        o.f = 0x42;
+    }
+
+    public static void main(String[] args) {
+        TestWriteBarrierClearControl o = new TestWriteBarrierClearControl();
+        for (int i = 0; i < 20000; i++) {
+            test1(o);
+            test2();
+        }
+        try {
+            test1(null);
+        } catch (NullPointerException npe) {}
+        fo = null;
+        try {
+            test2();
+        } catch (NullPointerException npe) {}
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/jni/TestJNICritical.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* @test TestJNICritical
+ * @summary test JNI critical arrays support in Shenandoah
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahVerify                 TestJNICritical
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive TestJNICritical
+ */
+
+import java.util.Arrays;
+
+public class TestJNICritical {
+    static {
+        System.loadLibrary("TestJNICritical");
+    }
+
+    private static final int NUM_RUNS   = 10000;
+    private static final int ARRAY_SIZE = 10000;
+    private static int[] a;
+    private static int[] b;
+
+    private static native void copyAtoB(int[] a, int[] b);
+
+    public static void main(String[] args) {
+        a = new int[ARRAY_SIZE];
+        b = new int[ARRAY_SIZE];
+        for (int i = 0; i < NUM_RUNS; i++) {
+            test();
+        }
+    }
+
+    private static void test() {
+        int[] a1 = new int[ARRAY_SIZE];
+        int[] b1 = new int[ARRAY_SIZE];
+        fillArray(a);
+        copyAtoB(a, b);
+        copyAtoB(a1, b1); // Don't optimize out garbage arrays.
+        if (!Arrays.equals(a, b)) {
+            throw new RuntimeException("arrays not equal");
+        }
+    }
+
+    private static void fillArray(int[] array) {
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            int val = (int) (Math.random() * Integer.MAX_VALUE);
+            array[i] = val;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/jni/TestJNIGlobalRefs.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* @test TestJNIGlobalRefs
+ * @summary Test JNI Global Refs with Shenandoah
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xlog:gc -XX:ShenandoahGCHeuristics=aggressive -XX:+ShenandoahVerify TestJNIGlobalRefs
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xlog:gc -XX:ShenandoahGCHeuristics=aggressive                       TestJNIGlobalRefs
+ */
+
+import java.util.Arrays;
+import java.util.Random;
+
+public class TestJNIGlobalRefs {
+    static {
+        System.loadLibrary("TestJNIGlobalRefs");
+    }
+
+    private static final int TIME_MSEC = 120000;
+    private static final int ARRAY_SIZE = 10000;
+
+    private static native void makeGlobalRef(Object o);
+    private static native void makeWeakGlobalRef(Object o);
+    private static native Object readGlobalRef();
+    private static native Object readWeakGlobalRef();
+
+    public static void main(String[] args) throws Throwable {
+        seedGlobalRef();
+        seedWeakGlobalRef();
+        long start = System.currentTimeMillis();
+        long current = start;
+        while (current - start < TIME_MSEC) {
+            testGlobal();
+            testWeakGlobal();
+            Thread.sleep(1);
+            current = System.currentTimeMillis();
+        }
+    }
+
+    private static void seedGlobalRef() {
+        int[] a = new int[ARRAY_SIZE];
+        fillArray(a, 1337);
+        makeGlobalRef(a);
+    }
+
+    private static void seedWeakGlobalRef() {
+        int[] a = new int[ARRAY_SIZE];
+        fillArray(a, 8080);
+        makeWeakGlobalRef(a);
+    }
+
+    private static void testGlobal() {
+        int[] a = (int[]) readGlobalRef();
+        checkArray(a, 1337);
+    }
+
+    private static void testWeakGlobal() {
+        int[] a = (int[]) readWeakGlobalRef();
+        if (a != null) {
+            checkArray(a, 8080);
+        } else {
+            // weak reference is cleaned, recreate:
+            seedWeakGlobalRef();
+        }
+    }
+
+    private static void fillArray(int[] array, int seed) {
+        Random r = new Random(seed);
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            array[i] = r.nextInt();
+        }
+    }
+
+    private static void checkArray(int[] array, int seed) {
+        Random r = new Random(seed);
+        if (array.length != ARRAY_SIZE) {
+            throw new IllegalStateException("Illegal array length: " + array.length + ", but expected " + ARRAY_SIZE);
+        }
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            int actual = array[i];
+            int expected = r.nextInt();
+            if (actual != expected) {
+                throw new IllegalStateException("Incorrect array data: " + actual + ", but expected " + expected);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/jni/TestPinnedGarbage.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* @test TestPinnedGarbage
+ * @summary Test that garbage in the pinned region does not crash VM
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx512m -XX:+ShenandoahVerify -XX:ShenandoahGCHeuristics=passive    -XX:+ShenandoahDegeneratedGC TestPinnedGarbage
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx512m -XX:+ShenandoahVerify -XX:ShenandoahGCHeuristics=passive    -XX:-ShenandoahDegeneratedGC TestPinnedGarbage
+ *
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx512m                       -XX:ShenandoahGCHeuristics=aggressive TestPinnedGarbage
+ *
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx512m -XX:+ShenandoahVerify                                       TestPinnedGarbage
+ */
+
+import java.util.Arrays;
+import java.util.concurrent.*;
+
+public class TestPinnedGarbage {
+    static {
+        System.loadLibrary("TestPinnedGarbage");
+    }
+
+    private static final int NUM_RUNS      = 1_000;
+    private static final int OBJS_COUNT    = 1_000;
+    private static final int GARBAGE_COUNT = 1_000_000;
+
+    private static native void pin(int[] a);
+    private static native void unpin(int[] a);
+
+    public static void main(String[] args) {
+        for (int i = 0; i < NUM_RUNS; i++) {
+            test();
+        }
+    }
+
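+    // Pin one array via a JNI critical section, churn garbage around it so
+    // collections happen while its region is pinned, then unpin. The
+    // collector must tolerate the garbage in the pinned region.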
+    private static void test() {
+        Object[] objs = new Object[OBJS_COUNT];
+        for (int i = 0; i < OBJS_COUNT; i++) {
+            objs[i] = new MyClass();
+        }
+
+        int[] cog = new int[10];
+        int cogIdx = ThreadLocalRandom.current().nextInt(OBJS_COUNT);
+        objs[cogIdx] = cog;
+        pin(cog);
+
+        for (int i = 0; i < GARBAGE_COUNT; i++) {
+            int rIdx = ThreadLocalRandom.current().nextInt(OBJS_COUNT);
+            if (rIdx != cogIdx) {
+                objs[rIdx] = new MyClass();
+            }
+        }
+
+        unpin(cog);
+    }
+
+    public static class MyClass {
+        public Object ref = new Object();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/jni/libTestJNICritical.c	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <jni.h>
+#include <string.h>
+
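+/*
+ * Copies a into b while both arrays are held via GetPrimitiveArrayCritical,
+ * so the GC must either pin the arrays in place or otherwise cope with the
+ * critical section.
+ */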
+JNIEXPORT void JNICALL
+Java_TestJNICritical_copyAtoB(JNIEnv *env, jclass unused, jintArray a, jintArray b) {
+  jint len = (*env)->GetArrayLength(env, a);
+  jint* aa = (*env)->GetPrimitiveArrayCritical(env, a, 0);
+  jint* bb = (*env)->GetPrimitiveArrayCritical(env, b, 0);
+  memcpy(bb, aa, len * sizeof(jint));
+  (*env)->ReleasePrimitiveArrayCritical(env, b, bb, 0);
+  (*env)->ReleasePrimitiveArrayCritical(env, a, aa, 0);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/jni/libTestJNIGlobalRefs.c	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <jni.h>
+#include <string.h>
+
+jobject global_ref = NULL;
+jobject weak_global_ref = NULL;
+
+JNIEXPORT void JNICALL
+Java_TestJNIGlobalRefs_makeGlobalRef(JNIEnv *env, jclass unused, jobject o) {
+  global_ref = (*env)->NewGlobalRef(env, o);
+}
+
+JNIEXPORT void JNICALL
+Java_TestJNIGlobalRefs_makeWeakGlobalRef(JNIEnv *env, jclass unused, jobject o) {
+  weak_global_ref = (*env)->NewWeakGlobalRef(env, o);
+}
+
+JNIEXPORT jobject JNICALL
+Java_TestJNIGlobalRefs_readGlobalRef(JNIEnv *env, jclass unused) {
+  return global_ref;
+}
+
+JNIEXPORT jobject JNICALL
+Java_TestJNIGlobalRefs_readWeakGlobalRef(JNIEnv *env, jclass unused) {
+  return weak_global_ref;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/jni/libTestPinnedGarbage.c	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <jni.h>
+#include <string.h>
+
+static jint* pinned;
+
+JNIEXPORT void JNICALL
+Java_TestPinnedGarbage_pin(JNIEnv *env, jclass unused, jintArray a) {
+  pinned = (*env)->GetPrimitiveArrayCritical(env, a, 0);
+}
+
+JNIEXPORT void JNICALL
+Java_TestPinnedGarbage_unpin(JNIEnv *env, jclass unused, jintArray a) {
+  (*env)->ReleasePrimitiveArrayCritical(env, a, pinned, 0);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/jvmti/TestHeapDump.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestHeapDump
+ * @summary Tests JVMTI heap dumps
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @compile TestHeapDump.java
+ * @run main/othervm/native/timeout=300 -agentlib:TestHeapDump -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx128m -XX:ShenandoahGCHeuristics=aggressive -XX:+UseCompressedOops TestHeapDump
+ * @run main/othervm/native/timeout=300 -agentlib:TestHeapDump -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx128m -XX:ShenandoahGCHeuristics=aggressive -XX:-UseCompressedOops TestHeapDump
+ */
+
+public class TestHeapDump {
+
+    private static final int NUM_ITER = 10000;
+
+    private static final int ARRAY_SIZE = 1000;
+
+    private static final int EXPECTED_OBJECTS =
+            ARRAY_SIZE + // array elements reachable from the instance field
+            1 +          // static field root
+            1;           // local variable root
+
+    static {
+        try {
+            System.loadLibrary("TestHeapDump");
+        } catch (UnsatisfiedLinkError ule) {
+            System.err.println("Could not load TestHeapDump library");
+            System.err.println("java.library.path: "
+                    + System.getProperty("java.library.path"));
+            throw ule;
+        }
+    }
+
+    native static int heapdump(Class<?> filterClass);
+
+    public static void main(String args[]) {
+        new TestHeapDump().run();
+    }
+
+    // This root needs to be discovered
+    static Object root = new TestObject();
+
+    // This field needs to be discovered
+    TestObject[] array;
+
+    public void run() {
+        array = new TestObject[ARRAY_SIZE];
+        for (int i = 0; i < ARRAY_SIZE; i++) {
+            array[i] = new TestObject();
+        }
+        TestObject localRoot = new TestObject();
+        for (int i = 0; i < NUM_ITER; i++) {
+            int numObjs = heapdump(TestObject.class);
+            if (numObjs != EXPECTED_OBJECTS) {
+                throw new RuntimeException("Expected " + EXPECTED_OBJECTS + " objects, but got " + numObjs);
+            }
+        }
+    }
+
+    // We look for the instances of this class during the heap scan
+    public static class TestObject {}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/jvmti/libTestHeapDump.c	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include "jvmti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef JNI_ENV_ARG
+
+#ifdef __cplusplus
+#define JNI_ENV_ARG(x, y) y
+#define JNI_ENV_PTR(x) x
+#else
+#define JNI_ENV_ARG(x,y) x, y
+#define JNI_ENV_PTR(x) (*x)
+#endif
+
+#endif
+
+#define TranslateError(err) "JVMTI error"
+
+#define PASSED 0
+#define FAILED 2
+
+static const char *EXC_CNAME = "java/lang/Exception";
+
+static jvmtiEnv *jvmti = NULL;
+static jint result = PASSED;
+
+static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved);
+
+JNIEXPORT
+jint JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
+    return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) {
+    return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
+    return JNI_VERSION_1_8;
+}
+
+static
+jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) {
+    jvmtiCapabilities capabilities;
+    jint res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti),
+                                        JVMTI_VERSION_9);
+    if (res != JNI_OK || jvmti == NULL) {
+        printf("    Error: wrong result of a valid call to GetEnv!\n");
+        return JNI_ERR;
+    }
+
+    (void)memset(&capabilities, 0, sizeof(capabilities));
+    capabilities.can_tag_objects = 1;
+    capabilities.can_generate_garbage_collection_events = 1;
+    (*jvmti)->AddCapabilities(jvmti, &capabilities);
+
+    return JNI_OK;
+}
+
+static
+void throw_exc(JNIEnv *env, char *msg) {
+    jclass exc_class = JNI_ENV_PTR(env)->FindClass(JNI_ENV_ARG(env, EXC_CNAME));
+    jint rt = JNI_OK;
+
+    if (exc_class == NULL) {
+        printf("throw_exc: Error in FindClass(env, %s)\n", EXC_CNAME);
+        return;
+    }
+    rt = JNI_ENV_PTR(env)->ThrowNew(JNI_ENV_ARG(env, exc_class), msg);
+    if (rt == JNI_ERR) {
+        printf("throw_exc: Error in JNI ThrowNew(env, %s)\n", msg);
+    }
+}
+
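+/* Counts each visited object that passes the class filter; user_data points to the running total. */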
+static jint JNICALL heap_iter_callback(jlong class_tag,
+                               jlong size,
+                               jlong* tag_ptr,
+                               jint length,
+                               void* user_data) {
+  (*((jint*)(user_data)))++;
+  return JVMTI_VISIT_OBJECTS;
+}
+
+JNIEXPORT jint JNICALL
+Java_TestHeapDump_heapdump(JNIEnv *env, jclass cls, jclass filter_cls) {
+    jvmtiHeapCallbacks callbacks;
+    jint totalCount = 0;
+    if (jvmti == NULL) {
+        throw_exc(env, "JVMTI client was not properly loaded!\n");
+        return 0;
+    }
+
+    (void)memset(&callbacks, 0, sizeof(callbacks));
+    callbacks.heap_iteration_callback = &heap_iter_callback;
+    (*jvmti)->IterateThroughHeap(jvmti, 0, filter_cls, &callbacks, (const void *)&totalCount);
+    return totalCount;
+}
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestChurnNotifications
+ * @summary Check that MX notifications are reported for all cycles
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahDegeneratedGC -Dprecise=true  TestChurnNotifications
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive      -XX:-ShenandoahDegeneratedGC -Dprecise=true  TestChurnNotifications
+ *
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive   -Dprecise=false TestChurnNotifications
+ *
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive     -Dprecise=false TestChurnNotifications
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static       -Dprecise=false TestChurnNotifications
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact      -Dprecise=false TestChurnNotifications
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal    -Dprecise=false TestChurnNotifications
+ */
+
+import java.util.*;
+import java.util.concurrent.atomic.*;
+import javax.management.*;
+import java.lang.management.*;
+import javax.management.openmbean.*;
+
+import com.sun.management.GarbageCollectionNotificationInfo;
+
+public class TestChurnNotifications {
+
+    static final long HEAP_MB = 128;                           // adjust for test configuration above
+    static final long TARGET_MB = Long.getLong("target", 8_000); // ~8 GB of total allocation
+
+    // Should we track the churn precisely?
+    // Precise tracking is only reliable when the GC is fully stop-the-world. Otherwise,
+    // heap usage before/after a cycle does not tell us how much the GC actually reclaimed.
+    static final boolean PRECISE = Boolean.getBoolean("precise");
+
+    static final long M = 1024 * 1024;
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        final AtomicLong churnBytes = new AtomicLong();
+
+        NotificationListener listener = new NotificationListener() {
+            @Override
+            public void handleNotification(Notification n, Object o) {
+                if (n.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) {
+                    GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from((CompositeData) n.getUserData());
+                    Map<String, MemoryUsage> mapBefore = info.getGcInfo().getMemoryUsageBeforeGc();
+                    Map<String, MemoryUsage> mapAfter = info.getGcInfo().getMemoryUsageAfterGc();
+
+                    MemoryUsage before = mapBefore.get("Shenandoah");
+                    MemoryUsage after = mapAfter.get("Shenandoah");
+
+                    if ((before != null) && (after != null)) {
+                        long diff = before.getUsed() - after.getUsed();
+                        if (diff > 0) {
+                            churnBytes.addAndGet(diff);
+                        }
+                    }
+                }
+            }
+        };
+
+        for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
+            ((NotificationEmitter) bean).addNotificationListener(listener, null, null);
+        }
+
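+        // Each int[size] takes roughly 16 (header) + 4 * size bytes; "count" is chosen so
+        // the total allocated comes to about TARGET_MB megabytes.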
+        final int size = 100_000;
+        long count = TARGET_MB * 1024 * 1024 / (16 + 4 * size);
+
+        long mem = count * (16 + 4 * size);
+
+        for (int c = 0; c < count; c++) {
+            sink = new int[size];
+        }
+
+        System.gc();
+
+        Thread.sleep(1000);
+
+        long actual = churnBytes.get();
+
+        long minExpected = PRECISE ? (mem - HEAP_MB * 1024 * 1024) : 1;
+        long maxExpected = mem + HEAP_MB * 1024 * 1024;
+
+        String msg = "Expected = [" + minExpected / M + "; " + maxExpected / M + "] (" + mem / M + "), actual = " + actual / M;
+        if (minExpected < actual && actual < maxExpected) {
+            System.out.println(msg);
+        } else {
+            throw new IllegalStateException(msg);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryMXBeans.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestMemoryMXBeans
+ * @summary Test JMX memory beans
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC          -Xmx1g TestMemoryMXBeans   -1 1024
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms1g   -Xmx1g TestMemoryMXBeans 1024 1024
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms128m -Xmx1g TestMemoryMXBeans  128 1024
+ */
+
+import java.lang.management.*;
+import java.util.*;
+
+public class TestMemoryMXBeans {
+
+    public static void main(String[] args) throws Exception {
+        if (args.length < 2) {
+            throw new IllegalStateException("Should provide expected heap sizes");
+        }
+
+        long initSize = 1L * Integer.parseInt(args[0]) * 1024 * 1024;
+        long maxSize  = 1L * Integer.parseInt(args[1]) * 1024 * 1024;
+
+        testMemoryBean(initSize, maxSize);
+    }
+
+    public static void testMemoryBean(long initSize, long maxSize) {
+        MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
+        long heapInit = memoryMXBean.getHeapMemoryUsage().getInit();
+        long heapMax = memoryMXBean.getHeapMemoryUsage().getMax();
+        long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit();
+        long nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax();
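+        // Non-heap usage is queried only to exercise the API; the assertions below cover heap sizes only.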
+
+        if (initSize > 0 && heapInit != initSize) {
+            throw new IllegalStateException("Init heap size is wrong: " + heapInit + " vs " + initSize);
+        }
+        if (maxSize > 0 && heapMax != maxSize) {
+            throw new IllegalStateException("Max heap size is wrong: " + heapMax + " vs " + maxSize);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestMemoryPools.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestMemoryPools
+ * @summary Test JMX memory pools
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g TestMemoryPools
+ */
+
+import java.lang.management.*;
+import java.util.*;
+
+public class TestMemoryPools {
+
+    public static void main(String[] args) throws Exception {
+        List<MemoryManagerMXBean> mms = ManagementFactory.getMemoryManagerMXBeans();
+        if (mms == null) {
+            throw new RuntimeException("getMemoryManagerMXBeans is null");
+        }
+        if (mms.isEmpty()) {
+            throw new RuntimeException("getMemoryManagerMXBeans is empty");
+        }
+        for (MemoryManagerMXBean mmBean : mms) {
+            String[] names = mmBean.getMemoryPoolNames();
+            if (names == null) {
+                throw new RuntimeException("getMemoryPoolNames() is null");
+            }
+            if (names.length == 0) {
+                throw new RuntimeException("getMemoryPoolNames() is empty");
+            }
+            for (String name : names) {
+                if (name == null) {
+                    throw new RuntimeException("pool name is null");
+                }
+                if (name.length() == 0) {
+                    throw new RuntimeException("pool name is empty");
+                }
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestPauseNotifications.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestPauseNotifications
+ * @summary Check that MX notifications are reported for all cycles
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive       -XX:+ShenandoahDegeneratedGC TestPauseNotifications
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive       -XX:-ShenandoahDegeneratedGC TestPauseNotifications
+ *
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive    TestPauseNotifications
+ *
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive      TestPauseNotifications
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static        TestPauseNotifications
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact       TestPauseNotifications
+ * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal     TestPauseNotifications
+ */
+
+import java.util.*;
+import java.util.concurrent.atomic.*;
+import javax.management.*;
+import java.lang.management.*;
+import javax.management.openmbean.*;
+
+import com.sun.management.GarbageCollectionNotificationInfo;
+
+public class TestPauseNotifications {
+
+    static final long HEAP_MB = 128;                           // adjust for test configuration above
+    static final long TARGET_MB = Long.getLong("target", 8_000); // ~8 GB of total allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
+        final AtomicLong pausesDuration = new AtomicLong();
+        final AtomicLong cyclesDuration = new AtomicLong();
+
+        NotificationListener listener = new NotificationListener() {
+            @Override
+            public void handleNotification(Notification n, Object o) {
+                if (n.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) {
+                    GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from((CompositeData) n.getUserData());
+
+                    System.out.println(info.getGcInfo().toString());
+                    System.out.println(info.getGcName());
+                    System.out.println();
+
+                    long d = info.getGcInfo().getDuration();
+
+                    String name = info.getGcName();
+                    if (name.contains("Shenandoah")) {
+                        if (name.equals("Shenandoah Pauses")) {
+                            pausesDuration.addAndGet(d);
+                        } else if (name.equals("Shenandoah Cycles")) {
+                            cyclesDuration.addAndGet(d);
+                        } else {
+                            throw new IllegalStateException("Unknown name: " + name);
+                        }
+                    }
+                }
+            }
+        };
+
+        for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
+            ((NotificationEmitter) bean).addNotificationListener(listener, null, null);
+        }
+
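+        // Same sizing arithmetic as TestChurnNotifications: allocate roughly TARGET_MB
+        // megabytes of int arrays to drive several GC cycles.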
+        final int size = 100_000;
+        long count = TARGET_MB * 1024 * 1024 / (16 + 4 * size);
+
+        for (int c = 0; c < count; c++) {
+            sink = new int[size];
+        }
+
+        Thread.sleep(1000);
+
+        long pausesActual = pausesDuration.get();
+        long cyclesActual = cyclesDuration.get();
+
+        long minExpected = 1;
+        long maxExpected = Long.MAX_VALUE;
+
+        {
+            String msg = "Pauses expected = [" + minExpected + "; " + maxExpected + "], actual = " + pausesActual;
+            if (minExpected < pausesActual && pausesActual < maxExpected) {
+                System.out.println(msg);
+            } else {
+                throw new IllegalStateException(msg);
+            }
+        }
+
+        {
+            String msg = "Cycles expected = [" + minExpected + "; " + maxExpected + "], actual = " + cyclesActual;
+            if (minExpected < cyclesActual && cyclesActual < maxExpected) {
+                System.out.println(msg);
+            } else {
+                throw new IllegalStateException(msg);
+            }
+        }
+
+        {
+            String msg = "Cycle duration (" + cyclesActual + "), pause duration (" + pausesActual + ")";
+            if (pausesActual < cyclesActual) {
+                System.out.println(msg);
+            } else {
+                throw new IllegalStateException(msg);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargeObj.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestAllocLargeObj
+ * @summary Test that allocation of a large object results in OOM, but does not crash the JVM
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run main TestAllocLargeObj
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestAllocLargeObj {
+
+    static final int SIZE = 1 * 1024 * 1024;
+    static final int COUNT = 16;
+
+    static volatile Object sink;
+
+    public static void work() throws Exception {
+        Object[] root = new Object[COUNT];
+        sink = root;
+        for (int c = 0; c < COUNT; c++) {
+            root[c] = new Object[SIZE];
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            work();
+            return;
+        }
+
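+        // 16 x 1M-element Object[] needs far more than -Xmx16m: expect OOME with exit code 1.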
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx16m",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocLargeObj.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(1);
+            analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx1g",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocLargeObj.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocLargerThanHeap.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestAllocLargerThanHeap
+ * @summary Test that allocation of an object larger than the heap fails predictably
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run main TestAllocLargerThanHeap
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestAllocLargerThanHeap {
+
+    static final int SIZE = 16 * 1024 * 1024;
+
+    static volatile Object sink;
+
+    public static void work() throws Exception {
+        sink = new Object[SIZE];
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            work();
+            return;
+        }
+
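+        // A single 16M-element Object[] cannot fit into -Xmx16m, so the allocation must fail cleanly.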
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx16m",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocLargerThanHeap.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(1);
+            analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx1g",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocLargerThanHeap.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/oom/TestAllocSmallObj.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestAllocSmallObj
+ * @summary Test that allocation of small objects results in OOM, but does not crash the JVM
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run main TestAllocSmallObj
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestAllocSmallObj {
+
+    static final int COUNT = 16 * 1024 * 1024;
+
+    static volatile Object sink;
+
+    public static void work() throws Exception {
+        Object[] root = new Object[COUNT];
+        sink = root;
+        for (int c = 0; c < COUNT; c++) {
+            root[c] = new Object();
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            work();
+            return;
+        }
+
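+        // ~16M small objects plus the 16M-element root array need far more than -Xmx16m: expect OOME.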
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx16m",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocSmallObj.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(1);
+            analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx1g",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestAllocSmallObj.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/oom/TestClassLoaderLeak.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestClassLoaderLeak
+ * @summary Test OOME due to classloader leak
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run main TestClassLoaderLeak
+ */
+
+import java.util.*;
+import java.io.*;
+import java.nio.*;
+import java.nio.file.*;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestClassLoaderLeak {
+
+    static final int SIZE = 1 * 1024 * 1024;
+    static final int COUNT = 128;
+
+    static volatile Object sink;
+
+    static class Dummy {
+        static final int[] PAYLOAD = new int[SIZE];
+    }
+
+    static class MyClassLoader extends ClassLoader {
+        final String path;
+
+        MyClassLoader(String path) {
+            this.path = path;
+        }
+
+        public Class<?> loadClass(String name) throws ClassNotFoundException {
+            try {
+                File f = new File(path, name + ".class");
+                if (!f.exists()) {
+                    return super.loadClass(name);
+                }
+
+                Path path = Paths.get(f.getAbsolutePath());
+                byte[] cls = Files.readAllBytes(path);
+                return defineClass(name, cls, 0, cls.length, null);
+            } catch (IOException e) {
+                throw new ClassNotFoundException(name);
+            }
+        }
+    }
+
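+    // Each loader defines its own copy of Dummy, whose static PAYLOAD pins ~4 MB; 128
+    // loaders retain ~512 MB, far above -Xmx128m, unless class unloading reclaims them.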
+    static void load(String path) throws Exception {
+        ClassLoader cl = new MyClassLoader(path);
+        Class<Dummy> c = (Class<Dummy>) Class.forName("TestClassLoaderLeak$Dummy", true, cl);
+        if (c.getClassLoader() != cl) {
+            throw new IllegalStateException("Class should have been loaded by the target loader");
+        }
+        sink = c;
+    }
+
+    public static void passWith(String... args) throws Exception {
+        testWith(true, args);
+    }
+
+    public static void failWith(String... args) throws Exception {
+        testWith(false, args);
+    }
+
+    public static void testWith(boolean shouldPass, String... args) throws Exception {
+        List<String> pbArgs = new ArrayList<>();
+        pbArgs.add("-Xmx128m");
+        pbArgs.add("-XX:+UnlockExperimentalVMOptions");
+        pbArgs.add("-XX:+UnlockDiagnosticVMOptions");
+        pbArgs.add("-XX:+UseShenandoahGC");
+        pbArgs.addAll(Arrays.asList(args));
+        pbArgs.add(TestClassLoaderLeak.class.getName());
+        pbArgs.add("test");
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(pbArgs.toArray(new String[0]));
+
+        OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+
+        if (shouldPass) {
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldNotContain("java.lang.OutOfMemoryError");
+            analyzer.shouldContain("All good");
+        } else {
+            analyzer.shouldHaveExitValue(1);
+            analyzer.shouldContain("java.lang.OutOfMemoryError");
+            analyzer.shouldNotContain("All good");
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            String classDir = TestClassLoaderLeak.class.getProtectionDomain().getCodeSource().getLocation().getPath();
+            for (int c = 0; c < COUNT; c++) {
+                load(classDir);
+            }
+            System.out.println("All good");
+            return;
+        }
+
+        String[] heuristics = new String[] {
+                "adaptive",
+                "compact",
+                "static",
+                "traversal",
+                "aggressive",
+                "passive",
+        };
+
+        for (String h : heuristics) {
+            // Forceful enabling should work
+            passWith("-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading");
+            passWith("-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloadingWithConcurrentMark");
+
+            // Even when concurrent unloading is disabled, Full GC has to recover
+            passWith("-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading", "-XX:-ClassUnloadingWithConcurrentMark");
+            passWith("-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading", "-XX:-ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=0");
+            passWith("-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading", "-XX:+ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=0");
+
+            // Should OOME when unloading forcefully disabled, even if local flags try to enable it back
+            failWith("-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading");
+            failWith("-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading", "-XX:+ClassUnloadingWithConcurrentMark");
+            failWith("-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading", "-XX:+ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=1");
+            failWith("-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading", "-XX:-ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=1");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test TestThreadFailure
+ * @summary Test OOME in separate thread is recoverable
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run main TestThreadFailure
+ */
+
+import java.util.*;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestThreadFailure {
+
+    static final int SIZE = 1024;
+    static final int COUNT = 16;
+
+    static class NastyThread extends Thread {
+        @Override
+        public void run() {
+            List<Object> root = new ArrayList<Object>();
+            while (true) {
+                root.add(new Object[SIZE]);
+            }
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            for (int t = 0; t < COUNT; t++) {
+                Thread thread = new NastyThread();
+                thread.start();
+                thread.join();
+            }
+            System.out.println("All good");
+            return;
+        }
+
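+        // An OOME in a child thread kills only that thread; the VM recovers and exits 0 with "All good".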
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx16m",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestThreadFailure.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldContain("java.lang.OutOfMemoryError");
+            analyzer.shouldContain("All good");
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-Xmx128m",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    TestThreadFailure.class.getName(),
+                    "test");
+
+            OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldContain("java.lang.OutOfMemoryError");
+            analyzer.shouldContain("All good");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestAlwaysPreTouch.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestAlwaysPreTouch
+ * @summary Check that Shenandoah's AlwaysPreTouch does not fire asserts
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch                                  -Xmx1g TestAlwaysPreTouch
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -XX:ConcGCThreads=2              -Xmx1g TestAlwaysPreTouch
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -XX:ParallelGCThreads=2          -Xmx1g TestAlwaysPreTouch
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch                         -Xms128m -Xmx1g TestAlwaysPreTouch
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch                           -Xms1g -Xmx1g TestAlwaysPreTouch
+ */
+
+public class TestAlwaysPreTouch {
+
+    public static void main(String[] args) throws Exception {
+        // The pre-touch work happens during heap initialization, before main() is entered.
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestArgumentRanges.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestArgumentRanges
+ * @summary Test that Shenandoah arguments are checked for ranges where applicable
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run driver TestArgumentRanges
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestArgumentRanges {
+    public static void main(String[] args) throws Exception {
+        testRange("ShenandoahGarbageThreshold", 0, 100);
+        testRange("ShenandoahFreeThreshold", 0, 100);
+        testRange("ShenandoahAllocationThreshold", 0, 100);
+        testHeuristics();
+    }
+
+    private static void testHeuristics() throws Exception {
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=aggressive",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=static",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=fluff",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Unknown -XX:ShenandoahGCHeuristics option");
+            output.shouldHaveExitValue(1);
+        }
+    }
+
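+    // Values outside [min, max] must fail VM startup (exit 1); the boundary values themselves must pass.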
+    private static void testRange(String option, int min, int max) throws Exception {
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:" + option + "=" + (max + 1),
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(1);
+        }
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:" + option + "=" + max,
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:" + option + "=" + (min - 1),
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(1);
+        }
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:" + option + "=" + min,
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestClassUnloadingArguments.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestClassUnloadingArguments
+ * @summary Test that class unloading arguments are sane
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run driver TestClassUnloadingArguments
+ */
+
+import java.util.*;
+
+import jdk.test.lib.Asserts;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestClassUnloadingArguments {
+
+    public static void testWith(String msg, boolean cu, boolean cuConc, String... args) throws Exception {
+        String[] cmds = Arrays.copyOf(args, args.length + 2);
+        cmds[args.length] = "-XX:+PrintFlagsFinal";
+        cmds[args.length + 1] = "-version";
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(cmds);
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        output.shouldContain("ClassUnloading");
+        output.shouldContain("ClassUnloadingWithConcurrentMark");
+
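+        // Pull the effective flag values out of the -XX:+PrintFlagsFinal table.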
+        Asserts.assertEQ(output.firstMatch("(.+?) ClassUnloading.+?= (.+?) (.+?)", 2),
+                Boolean.toString(cu),
+                msg + ", but got wrong ClassUnloading");
+        Asserts.assertEQ(output.firstMatch("(.+?) ClassUnloadingWithConcurrentMark.+?= (.+?) (.+?)", 2),
+                Boolean.toString(cuConc),
+                msg + ", but got wrong ClassUnloadingWithConcurrentMark");
+    }
+
+    public static void main(String[] args) throws Exception {
+        testDefaultGC();
+        testShenandoah();
+    }
+
+    public static void testDefaultGC() throws Exception {
+        testWith("Default GC should have class unloading enabled",
+                true, true);
+
+        testWith("Default GC should disable everything",
+                false, false,
+                "-XX:-ClassUnloading");
+
+        testWith("Default GC should disable conc unload",
+                true, false,
+                "-XX:-ClassUnloadingWithConcurrentMark");
+
+        testWith("Default GC should not let conc unload to be enabled separately",
+                false, false,
+                "-XX:-ClassUnloading",
+                "-XX:+ClassUnloadingWithConcurrentMark");
+    }
+
+    public static void testShenandoah() throws Exception {
+        testWith("Shenandoah GC should have class unloading enabled",
+                true, false,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC");
+
+        testWith("Shenandoah GC should disable everything",
+                false, false,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC",
+                "-XX:-ClassUnloading");
+
+        testWith("Shenandoah GC should enable conc unload",
+                true, true,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC",
+                "-XX:+ClassUnloadingWithConcurrentMark");
+
+        testWith("Shenandoah GC should not let conc unload to be enabled separately",
+                false, false,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC",
+                "-XX:-ClassUnloading",
+                "-XX:+ClassUnloadingWithConcurrentMark");
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestCodeCacheRootStyles.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* @test TestCodeCacheRootStyles
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahCodeRootsStyle=0 TestCodeCacheRootStyles
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahCodeRootsStyle=1 TestCodeCacheRootStyles
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahCodeRootsStyle=2 TestCodeCacheRootStyles
+ */
+
+public class TestCodeCacheRootStyles {
+    public static void main(String[] args) {
+        // A buggy VM would have crashed during startup, before reaching this point.
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestEnabled.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
+
+/*
+ * @test TestEnabled
+ * @key gc
+ * @requires vm.gc.Shenandoah & vm.gc == "null"
+ * @run main/othervm -Dexpected=false -Xmx64m                                                       TestEnabled
+ * @run main/othervm -Dexpected=true  -Xmx64m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestEnabled
+ */
+
+/*
+ * @test TestEnabledAlready
+ * @key gc
+ * @requires vm.gc.Shenandoah & vm.gc == "Shenandoah"
+ * @run main/othervm -Dexpected=true -Xmx64m                                                        TestEnabled
+ */
+public class TestEnabled {
+
+    public static void main(String... args) {
+        boolean expected = Boolean.getBoolean("expected");
+        boolean actual = isEnabled();
+        if (expected != actual) {
+            throw new IllegalStateException("Error: expected = " + expected + ", actual = " + actual);
+        }
+    }
+
+    public static boolean isEnabled() {
+        for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
+            if (bean.getName().contains("Shenandoah")) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestExplicitGC.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestExplicitGC
+ * @summary Test that Shenandoah reacts to explicit GC flags appropriately
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run driver TestExplicitGC
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestExplicitGC {
+
+    enum Mode {
+        PRODUCT,
+        DIAGNOSTIC,
+        EXPERIMENTAL,
+    }
+
+    public static void main(String[] args) throws Exception {
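+        // When invoked with an argument, this class acts as the workload: call System.gc() and exit.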
+        if (args.length > 0) {
+            System.out.println("Calling System.gc()");
+            System.gc();
+            return;
+        }
+
+        String[] full = new String[] {
+                "Pause Full"
+        };
+
+        String[] concNormal = new String[] {
+                "Pause Init Mark",
+                "Pause Final Mark",
+        };
+
+        String[] concTraversal = new String[] {
+                "Pause Init Traversal",
+                "Pause Final Traversal",
+        };
+
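+        // Default: System.gc() should trigger a concurrent normal cycle, not a full GC pause.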
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xlog:gc",
+                    TestExplicitGC.class.getName(),
+                    "test");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            for (String p : full) {
+                output.shouldNotContain(p);
+            }
+            for (String p : concNormal) {
+                output.shouldContain(p);
+            }
+            for (String p : concTraversal) {
+                output.shouldNotContain(p);
+            }
+        }
+
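+        // +DisableExplicitGC: System.gc() is ignored, so no GC pauses should show up at all.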
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xlog:gc",
+                    "-XX:+DisableExplicitGC",
+                    TestExplicitGC.class.getName(),
+                    "test");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            for (String p : full) {
+                output.shouldNotContain(p);
+            }
+            for (String p : concNormal) {
+                output.shouldNotContain(p);
+            }
+            for (String p : concTraversal) {
+                output.shouldNotContain(p);
+            }
+        }
+
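+        // +ExplicitGCInvokesConcurrent: explicitly requests the concurrent normal cycle.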
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xlog:gc",
+                    "-XX:+ExplicitGCInvokesConcurrent",
+                    TestExplicitGC.class.getName(),
+                    "test");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            for (String p : full) {
+                output.shouldNotContain(p);
+            }
+            for (String p : concNormal) {
+                output.shouldContain(p);
+            }
+            for (String p : concTraversal) {
+                output.shouldNotContain(p);
+            }
+        }
+
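+        // With traversal heuristics, the explicit concurrent cycle is a traversal cycle instead.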
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xlog:gc",
+                    "-XX:+ExplicitGCInvokesConcurrent",
+                    "-XX:ShenandoahGCHeuristics=traversal",
+                    TestExplicitGC.class.getName(),
+                    "test");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            for (String p : full) {
+                output.shouldNotContain(p);
+            }
+            for (String p : concNormal) {
+                output.shouldNotContain(p);
+            }
+            for (String p : concTraversal) {
+                output.shouldContain(p);
+            }
+        }
+
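+        // -ExplicitGCInvokesConcurrent: System.gc() falls back to a full GC pause.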
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xlog:gc",
+                    "-XX:-ExplicitGCInvokesConcurrent",
+                    TestExplicitGC.class.getName(),
+                    "test");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            for (String p : full) {
+                output.shouldContain(p);
+            }
+            for (String p : concNormal) {
+                output.shouldNotContain(p);
+            }
+            for (String p : concTraversal) {
+                output.shouldNotContain(p);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestExplicitGCNoConcurrent.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestExplicitGCNoConcurrent
+ * @summary Test that Shenandoah reacts to explicit GC flags appropriately
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run driver TestExplicitGCNoConcurrent
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestExplicitGCNoConcurrent {
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0) {
+            System.out.println("Calling System.gc()");
+            System.gc();
+            return;
+        }
+
+        String[] concurrent = new String[] {
+                "Pause Init Mark",
+                "Pause Final Mark",
+                "Pause Init Update Refs",
+                "Pause Final Update Refs",
+                "Pause Init Traversal",
+                "Pause Final Traversal",
+        };
+
+        String[] opts = new String[] {
+                "",
+                "-XX:-ExplicitGCInvokesConcurrent",
+                "-XX:+ExplicitGCInvokesConcurrent"
+        };
+
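+        // Passive heuristics never run concurrent cycles, so no concurrent pause messages
+        // should appear regardless of the ExplicitGCInvokesConcurrent setting.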
+        for (String opt : opts) {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xlog:gc",
+                    "-XX:+UnlockDiagnosticVMOptions",
+                    opt,
+                    "-XX:ShenandoahGCHeuristics=passive",
+                    TestExplicitGCNoConcurrent.class.getName(),
+                    "test");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            for (String p : concurrent) {
+                output.shouldNotContain(p);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestHeuristicsUnlock.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestHeuristicsUnlock
+ * @summary Test that Shenandoah heuristics are unlocked properly
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run driver TestHeuristicsUnlock
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestHeuristicsUnlock {
+
+    enum Mode {
+        PRODUCT,
+        DIAGNOSTIC,
+        EXPERIMENTAL,
+    }
+
+    public static void main(String[] args) throws Exception {
+        testWith("adaptive", Mode.PRODUCT);
+        testWith("static", Mode.PRODUCT);
+        testWith("compact", Mode.PRODUCT);
+
+        testWith("traversal", Mode.EXPERIMENTAL);
+
+        testWith("aggressive", Mode.DIAGNOSTIC);
+        testWith("passive", Mode.DIAGNOSTIC);
+    }
+
+    private static void testWith(String h, Mode mode) throws Exception {
+        if (false) { // While ShenandoahGC is an experimental flag, this case makes no sense to test
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:-UnlockDiagnosticVMOptions",
+                    "-XX:-UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=" + h,
+                    "-version"
+            );
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            switch (mode) {
+                case PRODUCT:
+                    output.shouldHaveExitValue(0);
+                    break;
+                case DIAGNOSTIC:
+                case EXPERIMENTAL:
+                    output.shouldNotHaveExitValue(0);
+                    break;
+            }
+        }
+
+        if (false) { // While ShenandoahGC is an experimental flag, this case makes no sense to test
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:-UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=" + h,
+                    "-version"
+            );
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            switch (mode) {
+                case PRODUCT:
+                case DIAGNOSTIC:
+                    output.shouldHaveExitValue(0);
+                    break;
+                case EXPERIMENTAL:
+                    output.shouldNotHaveExitValue(0);
+                    break;
+            }
+        }
+
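+        // Experimental options unlocked: product and experimental heuristics pass, diagnostic ones must still fail.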
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:-UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=" + h,
+                    "-version"
+            );
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            switch (mode) {
+                case PRODUCT:
+                case EXPERIMENTAL:
+                    output.shouldHaveExitValue(0);
+                    break;
+                case DIAGNOSTIC:
+                    output.shouldNotHaveExitValue(0);
+                    break;
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestHumongousThresholdArgs.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestHumongousThresholdArgs
+ * @summary Test that Shenandoah humongous threshold args are checked
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run driver TestHumongousThresholdArgs
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestHumongousThresholdArgs {
+    public static void main(String[] args) throws Exception {
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+
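+        // ShenandoahHumongousThreshold is a percentage, so only values in [1, 100] are accepted.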
+        int[] valid = new int[] {1, 10, 50, 90, 100};
+        int[] invalid = new int[] {-100, -1, 0, 101, 1000};
+
+        for (int v : valid) {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahHumongousThreshold=" + v,
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+
+        for (int v : invalid) {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahHumongousThreshold=" + v,
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(1);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestLoopMiningArguments.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestLoopMiningArguments
+ * @summary Test that loop mining arguments are sane
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run driver TestLoopMiningArguments
+ */
+
+import java.util.*;
+
+import jdk.test.lib.Asserts;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestLoopMiningArguments {
+
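+    // Runs a new VM with the given args plus -XX:+PrintFlagsFinal and asserts the final
+    // values of UseCountedLoopSafepoints (CLS) and LoopStripMiningIter (LSM) from the flag dump.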
+    public static void testWith(String msg, boolean cls, int iters, String... args) throws Exception {
+        String[] cmds = Arrays.copyOf(args, args.length + 2);
+        cmds[args.length] = "-XX:+PrintFlagsFinal";
+        cmds[args.length + 1] = "-version";
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(cmds);
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        output.shouldContain("UseCountedLoopSafepoints");
+        output.shouldContain("LoopStripMiningIter");
+
+        Asserts.assertEQ(output.firstMatch("(.+?) UseCountedLoopSafepoints.+?= (.+?) (.+?)", 2), Boolean.toString(cls), msg + ", but got wrong CLS");
+        Asserts.assertEQ(output.firstMatch("(.+?) LoopStripMiningIter.+?= (.+?) (.+?)", 2), String.valueOf(iters), msg + ", but got wrong LSM");
+    }
+
+    public static void main(String[] args) throws Exception {
+        testDefaultGC();
+        testShenandoah();
+    }
+
+    public static void testDefaultGC() throws Exception {
+        testWith("Default GC should have CLS enabled, LSM = 1000",
+                true, 1000);
+
+        testWith("Default GC with +CLS should set LSM = 1",
+                true, 1,
+                "-XX:+UseCountedLoopSafepoints"
+        );
+
+        testWith("Default GC with +CLS should not override LSM>1",
+                true, 10,
+                "-XX:LoopStripMiningIter=10",
+                "-XX:+UseCountedLoopSafepoints"
+        );
+
+        testWith("Default GC with +CLS should not override LSM=1",
+                true, 1,
+                "-XX:LoopStripMiningIter=1",
+                "-XX:+UseCountedLoopSafepoints"
+        );
+
+        testWith("Default GC with +CLS should override LSM=0 to 1",
+                true, 1,
+                "-XX:LoopStripMiningIter=0",
+                "-XX:+UseCountedLoopSafepoints"
+        );
+
+        testWith("Default GC with -CLS should set LSM = 0",
+                false, 0,
+                "-XX:-UseCountedLoopSafepoints"
+        );
+
+        testWith("Default GC with -CLS should override LSM to 0",
+                false, 0,
+                "-XX:LoopStripMiningIter=10",
+                "-XX:-UseCountedLoopSafepoints"
+        );
+    }
+
+    public static void testShenandoah() throws Exception {
+        testWith("Shenandoah should have CLS and LSM enabled",
+                true, 1000,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC"
+        );
+
+        testWith("Shenandoah with +CLS should set LSM = 1",
+                true, 1,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC",
+                "-XX:+UseCountedLoopSafepoints"
+        );
+
+        testWith("Shenandoah GC with +CLS should not override LSM>1",
+                true, 10,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC",
+                "-XX:LoopStripMiningIter=10",
+                "-XX:+UseCountedLoopSafepoints"
+        );
+
+        testWith("Shenandoah GC with +CLS should not override LSM=1",
+                true, 1,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC",
+                "-XX:LoopStripMiningIter=1",
+                "-XX:+UseCountedLoopSafepoints"
+        );
+
+        testWith("Shenandoah GC with +CLS should override LSM=0 to 1",
+                true, 1,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC",
+                "-XX:LoopStripMiningIter=0",
+                "-XX:+UseCountedLoopSafepoints"
+        );
+
+        testWith("Shenandoah GC with -CLS should set LSM = 0",
+                false, 0,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC",
+                "-XX:-UseCountedLoopSafepoints"
+        );
+
+        testWith("Shenandoah GC with -CLS should override LSM to 0",
+                false, 0,
+                "-XX:+UnlockExperimentalVMOptions",
+                "-XX:+UseShenandoahGC",
+                "-XX:LoopStripMiningIter=10",
+                "-XX:-UseCountedLoopSafepoints"
+        );
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestObjectAlignment.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestObjectAlignment
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16          TestObjectAlignment
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx16m  TestObjectAlignment
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx32m  TestObjectAlignment
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx64m  TestObjectAlignment
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx128m TestObjectAlignment
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx256m TestObjectAlignment
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx512m TestObjectAlignment
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx1g   TestObjectAlignment
+ */
+
+public class TestObjectAlignment {
+
+    public static void main(String[] args) throws Exception {
+        // Only exercises the argument-checking code on startup; nothing to do here.
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestPacing.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestPacing
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-ShenandoahPacing -Xmx128m TestPacing
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahPacing -Xmx128m TestPacing
+ */
+
+public class TestPacing {
+    static final long TARGET_MB = Long.getLong("target", 1000); // ~1 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
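+        // A plain Object is assumed to occupy about 16 bytes, so this allocates roughly TARGET_MB megabytes.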
+        long count = TARGET_MB * 1024 * 1024 / 16;
+        for (long c = 0; c < count; c++) {
+            sink = new Object();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestParallelRegionStride.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestParallelRegionStride
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ *
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=1    -Xmx128m TestParallelRegionStride
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=10   -Xmx128m TestParallelRegionStride
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=100  -Xmx128m TestParallelRegionStride
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=1024 -Xmx128m TestParallelRegionStride
+ */
+
+public class TestParallelRegionStride {
+    static final long TARGET_MB = Long.getLong("target", 1000); // ~1 GB allocation
+
+    static volatile Object sink;
+
+    public static void main(String[] args) throws Exception {
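+        // A plain Object is assumed to occupy about 16 bytes, so this allocates roughly TARGET_MB megabytes.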
+        long count = TARGET_MB * 1024 * 1024 / 16;
+        for (long c = 0; c < count; c++) {
+            sink = new Object();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestRegionSizeArgs.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestRegionSizeArgs
+ * @summary Test that Shenandoah region size args are checked
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run driver TestRegionSizeArgs
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestRegionSizeArgs {
+    public static void main(String[] args) throws Exception {
+        testInvalidRegionSizes();
+        testMinRegionSize();
+        testMaxRegionSize();
+    }
+
+    private static void testInvalidRegionSizes() throws Exception {
+
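+        // Region sizes must respect the min/max region size bounds and still fit enough
+        // regions into the initial heap; out-of-range settings must be rejected at startup.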
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms2m",
+                    "-Xmx1g",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Initial heap size");
+            output.shouldHaveExitValue(1);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms4m",
+                    "-Xmx1g",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms8m",
+                    "-Xmx1g",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahHeapRegionSize=200m",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahHeapRegionSize option");
+            output.shouldHaveExitValue(1);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahHeapRegionSize=11m",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahHeapRegionSize option");
+            output.shouldHaveExitValue(1);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahHeapRegionSize=9m",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahHeapRegionSize=255K",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahHeapRegionSize option");
+            output.shouldHaveExitValue(1);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahHeapRegionSize=260K",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms1g",
+                    "-Xmx1g",
+                    "-XX:ShenandoahHeapRegionSize=32M",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms1g",
+                    "-Xmx1g",
+                    "-XX:ShenandoahHeapRegionSize=64M",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahHeapRegionSize option");
+            output.shouldHaveExitValue(1);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms1g",
+                    "-Xmx1g",
+                    "-XX:ShenandoahHeapRegionSize=256K",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms1g",
+                    "-Xmx1g",
+                    "-XX:ShenandoahHeapRegionSize=128K",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahHeapRegionSize option");
+            output.shouldHaveExitValue(1);
+        }
+    }
+
+    private static void testMinRegionSize() throws Exception {
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahMinRegionSize=255K",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize option");
+            output.shouldHaveExitValue(1);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahMinRegionSize=1M",
+                    "-XX:ShenandoahMaxRegionSize=260K",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize");
+            output.shouldHaveExitValue(1);
+        }
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahMinRegionSize=200m",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize option");
+            output.shouldHaveExitValue(1);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahMinRegionSize=11m",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize option");
+            output.shouldHaveExitValue(1);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahMinRegionSize=9m",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+
+    }
+
+    private static void testMaxRegionSize() throws Exception {
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahMaxRegionSize=255K",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahMaxRegionSize option");
+            output.shouldHaveExitValue(1);
+        }
+
+        {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-Xms100m",
+                    "-Xmx1g",
+                    "-XX:ShenandoahMinRegionSize=1M",
+                    "-XX:ShenandoahMaxRegionSize=260K",
+                    "-version");
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize");
+            output.shouldHaveExitValue(1);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestSelectiveBarrierFlags.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* @test TestSelectiveBarrierFlags
+ * @summary Test that selective barrier enabling works by aggressively compiling HelloWorld with combinations
+ *          of barrier flags
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run main/othervm TestSelectiveBarrierFlags -Xint
+ * @run main/othervm TestSelectiveBarrierFlags -Xbatch -XX:CompileThreshold=100 -XX:TieredStopAtLevel=1
+ * @run main/othervm TestSelectiveBarrierFlags -Xbatch -XX:CompileThreshold=100 -XX:-TieredCompilation -XX:+IgnoreUnrecognizedVMOptions -XX:+ShenandoahVerifyOptoBarriers
+ */
+
+import java.util.*;
+import java.util.concurrent.*;
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestSelectiveBarrierFlags {
+
+    public static void main(String[] args) throws Exception {
+        String[][] opts = {
+                new String[] { "ShenandoahKeepAliveBarrier" },
+                new String[] { "ShenandoahWriteBarrier" },
+                new String[] { "ShenandoahReadBarrier" },
+                // StoreValRead+SATB are actually compatible, but we need to protect against
+                // StoreValEnqueue+SATB. TODO: Make it better.
+                new String[] { "ShenandoahSATBBarrier", "ShenandoahStoreValReadBarrier", "ShenandoahStoreValEnqueueBarrier" },
+                new String[] { "ShenandoahCASBarrier" },
+                new String[] { "ShenandoahAcmpBarrier" },
+                new String[] { "ShenandoahCloneBarrier" },
+        };
+
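+        // Enumerate all combinations by treating each flag group as one digit of a
+        // mixed-radix counter, where digit zero selects no flag from that group.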
+        int size = 1;
+        for (String[] l : opts) {
+            size *= (l.length + 1);
+        }
+
+        ExecutorService pool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
+
+        for (int c = 0; c < size; c++) {
+            int t = c;
+
+            List<String> conf = new ArrayList<>();
+            conf.addAll(Arrays.asList(args));
+            conf.add("-Xmx128m");
+            conf.add("-XX:+UnlockDiagnosticVMOptions");
+            conf.add("-XX:+UnlockExperimentalVMOptions");
+            conf.add("-XX:+UseShenandoahGC");
+            conf.add("-XX:ShenandoahGCHeuristics=passive");
+
+            StringBuilder sb = new StringBuilder();
+            for (String[] l : opts) {
+                // Make a choice which flag to select from the group.
+                // Zero means no flag is selected from the group.
+                int choice = t % (l.length + 1);
+                for (int e = 0; e < l.length; e++) {
+                    conf.add("-XX:" + ((choice == (e + 1)) ? "+" : "-") + l[e]);
+                }
+                t = t / (l.length + 1);
+            }
+
+            conf.add("TestSelectiveBarrierFlags$Test");
+
+            pool.submit(() -> {
+                try {
+                    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(conf.toArray(new String[0]));
+                    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+                    output.shouldHaveExitValue(0);
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    System.exit(1);
+                }
+            });
+        }
+
+        pool.shutdown();
+        pool.awaitTermination(1, TimeUnit.HOURS);
+    }
+
+    public static class Test {
+        public static void main(String... args) {
+            System.out.println("HelloWorld");
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestSingleThreaded.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* @test TestSingleThreaded
+ * @summary Test Shenandoah with a single worker thread
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ *                   -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive
+ *                   -XX:ParallelGCThreads=1 -XX:ConcGCThreads=1 TestSingleThreaded
+ */
+
+public class TestSingleThreaded {
+
+    public static void main(String[] args) {
+        // If the bug exists, the VM should have crashed before we get here.
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/shenandoah/options/TestWrongBarrierDisable.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* @test TestWrongBarrierDisable
+ * @summary Test that disabling wrong barriers fails early
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @library /test/lib
+ * @run main/othervm TestWrongBarrierDisable
+ */
+
+import java.util.*;
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestWrongBarrierDisable {
+
+    public static void main(String[] args) throws Exception {
+        String[] concurrent = {
+                "ShenandoahReadBarrier",
+                "ShenandoahWriteBarrier",
+                "ShenandoahCASBarrier",
+                "ShenandoahAcmpBarrier",
+                "ShenandoahCloneBarrier",
+                "ShenandoahSATBBarrier",
+                "ShenandoahKeepAliveBarrier",
+                "ShenandoahStoreValReadBarrier",
+        };
+
+        String[] traversal = {
+                "ShenandoahReadBarrier",
+                "ShenandoahWriteBarrier",
+                "ShenandoahCASBarrier",
+                "ShenandoahAcmpBarrier",
+                "ShenandoahCloneBarrier",
+        };
+
+        shouldFailAll("adaptive",   concurrent);
+        shouldFailAll("static",     concurrent);
+        shouldFailAll("compact",    concurrent);
+        shouldFailAll("aggressive", concurrent);
+        shouldFailAll("traversal",  traversal);
+        shouldPassAll("passive",    concurrent);
+        shouldPassAll("passive",    traversal);
+    }
+
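+    // Every heuristics mode except passive relies on its barriers: disabling any of
+    // them should fail VM initialization with a descriptive error message.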
+    private static void shouldFailAll(String h, String[] barriers) throws Exception {
+        for (String b : barriers) {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=" + h,
+                    "-XX:-" + b,
+                    "-version"
+            );
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldNotHaveExitValue(0);
+            output.shouldContain("Heuristics needs ");
+            output.shouldContain("to work correctly");
+        }
+    }
+
+    private static void shouldPassAll(String h, String[] barriers) throws Exception {
+        for (String b : barriers) {
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:+UnlockExperimentalVMOptions",
+                    "-XX:+UseShenandoahGC",
+                    "-XX:ShenandoahGCHeuristics=" + h,
+                    "-XX:-" + b,
+                    "-version"
+            );
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/startup_warnings/TestShenandoah.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+* @test TestShenandoah
+* @key gc
+* @requires vm.gc.Shenandoah
+* @bug 8006398
+* @summary Test that the Shenandoah collector does not print a warning message
+* @library /test/lib
+* @modules java.base/jdk.internal.misc
+*          java.management
+*/
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class TestShenandoah {
+
+  public static void main(String args[]) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", "-XX:+UseShenandoahGC", "-version");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldNotContain("deprecated");
+    output.shouldNotContain("error");
+    output.shouldHaveExitValue(0);
+  }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/stress/CriticalNativeStress.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.util.Random;
+
+/*
+ * @test CriticalNativeStressEpsilon
+ * @key gc
+ * @bug 8199868
+ * @requires (os.arch =="x86_64" | os.arch == "amd64") & vm.gc.Epsilon & !vm.graal.enabled
+ * @summary test argument pinning by nmethod wrapper of critical native method
+ * @run main/othervm/native -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xcomp -Xmx1G -XX:+CriticalJNINatives CriticalNativeStress
+ */
+
+/*
+ * @test CriticalNativeStressShenandoah
+ * @key gc
+ * @bug 8199868
+ * @requires (os.arch =="x86_64" | os.arch == "amd64") & vm.gc.Shenandoah & !vm.graal.enabled
+ * @summary test argument pinning by nmethod wrapper of critical native method
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive    -XX:-ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeStress
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive    -XX:+ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeStress
+ *
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeStress
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC                                       -Xcomp -Xmx256M -XX:+CriticalJNINatives CriticalNativeStress
+ * @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal  -Xcomp -Xmx512M -XX:+CriticalJNINatives CriticalNativeStress
+ */
+public class CriticalNativeStress {
+    private static Random rand = new Random();
+    static {
+        System.loadLibrary("CriticalNative");
+    }
+
+    // CYCLES and THREAD_PER_CASE are used to tune the tests for different GC settings,
+    // so that they can exercise enough GC cycles without running out of memory.
+    private static int CYCLES = Integer.getInteger("cycles", 3);
+    private static int THREAD_PER_CASE = Integer.getInteger("threadPerCase", 1);
+
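+    // Critical natives receive raw array contents without JNI handle indirection,
+    // so the GC must keep these arrays stable (pinned) for the duration of the call.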
+    static native long sum1(long[] a);
+
+    // More than 6 parameters
+    static native long sum2(long a1, int[] a2, int[] a3, long[] a4, int[] a5);
+
+    static long sum(long[] a) {
+        long sum = 0;
+        for (int index = 0; index < a.length; index ++) {
+            sum += a[index];
+        }
+        return sum;
+    }
+
+    static long sum(int[] a) {
+        long sum = 0;
+        for (int index = 0; index < a.length; index ++) {
+            sum += a[index];
+        }
+        return sum;
+    }
+
+    private static volatile String[] garbage_array;
+
+    // The GC can move arrays passed to critical native methods
+    // if they are not pinned correctly.
+    // Create enough garbage to exercise GC cycles and verify
+    // that the arrays are pinned correctly.
+    static void create_garbage(int len) {
+        len = Math.max(len, 1024);
+        String[] array = new String[len];
+        for (int index = 0; index < len; index ++) {
+            array[index] = "String " + index;
+        }
+        garbage_array = array;
+    }
+
+    // Two test cases with different method signatures:
+    // the tests generate arrays of arbitrary length with
+    // arbitrary values, then calculate the sum of the array
+    // elements with both critical native JNI methods and Java
+    // methods, and compare the results for correctness.
+    static void run_test_case1() {
+        // Create a test array with arbitrary length and
+        // values
+        int length = rand.nextInt(50) + 1;
+        long[] arr = new long[length];
+        for (int index = 0; index < length; index ++) {
+            arr[index] = rand.nextLong() % 1002;
+        }
+
+        // Generate garbage to trigger GCs
+        for (int index = 0; index < length; index ++) {
+            create_garbage(index);
+        }
+
+        // Compare results for correctness.
+        long native_sum = sum1(arr);
+        long java_sum = sum(arr);
+        if (native_sum != java_sum) {
+            StringBuilder sb = new StringBuilder("Sums do not match: native = ")
+                .append(native_sum).append(" java = ").append(java_sum);
+
+            throw new RuntimeException(sb.toString());
+        }
+    }
+
+    static void run_test_case2() {
+        // Create test arrays with arbitrary lengths and
+        // values
+        int index;
+        long a1 = rand.nextLong() % 1025;
+
+        int a2_length = rand.nextInt(50) + 1;
+        int[] a2 = new int[a2_length];
+        for (index = 0; index < a2_length; index ++) {
+            a2[index] = rand.nextInt(106);
+        }
+
+        int a3_length = rand.nextInt(150) + 1;
+        int[] a3 = new int[a3_length];
+        for (index = 0; index < a3_length; index ++) {
+            a3[index] = rand.nextInt(3333);
+        }
+
+        int a4_length = rand.nextInt(200) + 1;
+        long[] a4 = new long[a4_length];
+        for (index = 0; index < a4_length; index ++) {
+            a4[index] = rand.nextLong() % 122;
+        }
+
+        int a5_length = rand.nextInt(350) + 1;
+        int[] a5 = new int[a5_length];
+        for (index = 0; index < a5_length; index ++) {
+            a5[index] = rand.nextInt(333);
+        }
+
+        // Generate garbage to trigger GCs
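+        // Note: a1 = rand.nextLong() % 1025 can be negative, in which case
+        // this loop body never runs and this iteration relies on garbage
+        // created by other threads.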
+        for (index = 0; index < a1; index ++) {
+            create_garbage(index);
+        }
+
+        // Compare results for correctness.
+        long native_sum = sum2(a1, a2, a3, a4, a5);
+        long java_sum = a1 + sum(a2) + sum(a3) + sum(a4) + sum(a5);
+        if (native_sum != java_sum) {
+            StringBuilder sb = new StringBuilder("Sums do not match: native = ")
+                .append(native_sum).append(" java = ").append(java_sum);
+
+            throw new RuntimeException(sb.toString());
+        }
+    }
+
+    static class Case1Runner extends Thread {
+        public Case1Runner() {
+            start();
+        }
+
+        public void run() {
+            for (int index = 0; index < CYCLES; index ++) {
+                run_test_case1();
+            }
+        }
+    }
+
+    static class Case2Runner extends Thread {
+        public Case2Runner() {
+            start();
+        }
+
+        public void run() {
+            for (int index = 0; index < CYCLES; index ++) {
+                run_test_case2();
+            }
+        }
+    }
+
+    public static void main(String[] args) {
+        Thread[] thrs = new Thread[THREAD_PER_CASE * 2];
+        for (int index = 0; index < thrs.length; index = index + 2) {
+            thrs[index] = new Case1Runner();
+            thrs[index + 1] = new Case2Runner();
+        }
+
+        for (int index = 0; index < thrs.length; index ++) {
+            try {
+                thrs[index].join();
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithShenandoah.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.io.IOException;
+
+/*
+ * @test TestGCBasherWithShenandoah
+ * @key gc
+ * @key stress
+ * @requires vm.gc.Shenandoah
+ * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled
+ * @summary Stress the Shenandoah GC by trying to make old objects more likely to be garbage than young objects.
+ *
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahVerify -XX:+ShenandoahDegeneratedGC TestGCBasherWithShenandoah 120000
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive      -XX:+ShenandoahVerify -XX:-ShenandoahDegeneratedGC TestGCBasherWithShenandoah 120000
+ *
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive                         -XX:+ShenandoahOOMDuringEvacALot TestGCBasherWithShenandoah 120000
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive                         -XX:+ShenandoahAllocFailureALot  TestGCBasherWithShenandoah 120000
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive                                                          TestGCBasherWithShenandoah 120000
+ *
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive     -XX:+ShenandoahVerify TestGCBasherWithShenandoah 120000
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal    -XX:+ShenandoahVerify TestGCBasherWithShenandoah 120000
+ *
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive                           TestGCBasherWithShenandoah 120000
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal                          TestGCBasherWithShenandoah 120000
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact                            TestGCBasherWithShenandoah 120000
+ */
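+// The @run matrix above covers the passive heuristics (verified, with and
+// without degenerated GC), the aggressive heuristics (with injected
+// evacuation/allocation failures, and plain), the adaptive and traversal
+// heuristics (with and without verification), and the compact heuristics.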
+public class TestGCBasherWithShenandoah {
+    public static void main(String[] args) throws IOException {
+        TestGCBasher.main(args);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/stress/gclocker/TestGCLockerWithShenandoah.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestGCLockerWithShenandoah
+ * @key gc
+ * @requires vm.gc.Shenandoah
+ * @summary Stress Shenandoah's JNI handling by calling GetPrimitiveArrayCritical while concurrently filling up old gen.
+ * @run main/native/othervm/timeout=200 -Xlog:gc*=info -Xms1500m -Xmx1500m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+ShenandoahVerify -XX:+UseShenandoahGC TestGCLockerWithShenandoah
+ * @run main/native/othervm/timeout=200 -Xlog:gc*=info -Xms1500m -Xmx1500m -XX:+UnlockExperimentalVMOptions                                                      -XX:+UseShenandoahGC TestGCLockerWithShenandoah
+ * @run main/native/othervm/timeout=200 -Xlog:gc*=info -Xms1500m -Xmx1500m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions                       -XX:+UseShenandoahGC -XX:+ShenandoahOOMDuringEvacALot -XX:ShenandoahGCHeuristics=aggressive TestGCLockerWithShenandoah
+ * @run main/native/othervm/timeout=200 -Xlog:gc*=info -Xms1500m -Xmx1500m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions                       -XX:+UseShenandoahGC -XX:+ShenandoahAllocFailureALot  -XX:ShenandoahGCHeuristics=aggressive TestGCLockerWithShenandoah
+ */
+public class TestGCLockerWithShenandoah {
+    public static void main(String[] args) {
+        String[] testArgs = {"2", "Shenandoah heap"};
+        TestGCLocker.main(testArgs);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithShenandoah.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,54 @@
+/*
+* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestGCOldWithShenandoah
+ * @key gc
+ * @key stress
+ * @requires vm.gc.Shenandoah & !vm.graal.enabled
+ * @summary Stress the GC by trying to make old objects more likely to be garbage than young objects.
+ *
+ * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive       -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify TestGCOld 50 1 20 10 10000
+ * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive       -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify TestGCOld 50 1 20 10 10000
+ * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive       -XX:+ShenandoahDegeneratedGC                       TestGCOld 50 1 20 10 10000
+ * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=passive       -XX:-ShenandoahDegeneratedGC                       TestGCOld 50 1 20 10 10000
+ *
+ * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive    -XX:+ShenandoahOOMDuringEvacALot TestGCOld 50 1 20 10 10000
+ * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive    -XX:+ShenandoahAllocFailureALot  TestGCOld 50 1 20 10 10000
+ * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive                                     TestGCOld 50 1 20 10 10000
+ *
+ * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive     -XX:+ShenandoahVerify TestGCOld 50 1 20 10 10000
+ * @run main/othervm/timeout=600 -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal    -XX:+ShenandoahVerify TestGCOld 50 1 20 10 10000
+ *
+ * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive      TestGCOld 50 1 20 10 10000
+ * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static        TestGCOld 50 1 20 10 10000
+ * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact       TestGCOld 50 1 20 10 10000
+ * @run main/othervm -Xmx384M -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal     TestGCOld 50 1 20 10 10000
+ */
+
+public class TestGCOldWithShenandoah {
+
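+    // The arguments forwarded on the @run lines above are TestGCOld's
+    // positional parameters; per its own usage description they are,
+    // approximately: megabytes of live data, mutator work units,
+    // short-/long-lived allocation ratio, pointer mutations per step,
+    // and step count.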
+    public static void main(String[] args) {
+        TestGCOld.main(args);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithShenandoah.java	Mon Dec 10 15:47:44 2018 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestSystemGCWithShenandoah
+ * @key gc
+ * @key stress
+ * @requires vm.gc.Shenandoah
+ * @summary Stress the Shenandoah full GC by allocating objects of different lifetimes concurrently with System.gc().
+ * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+ShenandoahVerify -XX:+UseShenandoahGC TestSystemGCWithShenandoah 270
+ * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+ShenandoahVerify -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=traversal TestSystemGCWithShenandoah 270
+ * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UnlockExperimentalVMOptions                                                      -XX:+UseShenandoahGC TestSystemGCWithShenandoah 270
+ */
+public class TestSystemGCWithShenandoah {
+    public static void main(String[] args) throws Exception {
+        TestSystemGC.main(args);
+    }
+}
--- a/test/hotspot/jtreg/gc/survivorAlignment/TestAllocationInEden.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/survivorAlignment/TestAllocationInEden.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
  * @bug 8031323
  * @summary Verify that object's alignment in eden space is not affected by
  *          SurvivorAlignmentInBytes option.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromEdenToTenured.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromEdenToTenured.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
  * @bug 8031323
  * @summary Verify that objects promoted from eden space to tenured space during
  *          full GC are not aligned to SurvivorAlignmentInBytes value.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
  * @bug 8031323
  * @summary Verify that objects promoted from survivor space to tenured space
  *          during full GC are not aligned to SurvivorAlignmentInBytes value.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
  * @summary Verify that objects promoted from survivor space to tenured space
  *          when their age exceeded tenuring threshold are not aligned to
  *          SurvivorAlignmentInBytes value.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionToSurvivor.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionToSurvivor.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
  * @bug 8031323
  * @summary Verify that objects promoted from eden space to survivor space after
  *          minor GC are aligned to SurvivorAlignmentInBytes.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/whitebox/TestWBGC.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/gc/whitebox/TestWBGC.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
  * @test TestWBGC
  * @bug 8055098
 * @summary Test verifies that the WB methods isObjectInOldGen and youngGC work correctly.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java	Mon Dec 10 15:47:44 2018 +0100
@@ -37,6 +37,7 @@
 import jdk.test.lib.Platform;
 import jdk.test.lib.process.ProcessTools;
 import jdk.test.lib.process.OutputAnalyzer;
+import sun.hotspot.gc.GC;
 
 import sun.hotspot.code.Compiler;
 
@@ -61,6 +62,9 @@
         testCompressedOopsModes(args, "-XX:+UseSerialGC");
         testCompressedOopsModes(args, "-XX:+UseParallelGC");
         testCompressedOopsModes(args, "-XX:+UseParallelOldGC");
+        if (GC.Shenandoah.isSupported()) {
+            testCompressedOopsModes(args, "-XX:+UseShenandoahGC");
+        }
     }
 
     public static void testCompressedOopsModes(ArrayList<String> flags1, String... flags2) throws Exception {
--- a/test/hotspot/jtreg/runtime/MemberName/MemberNameLeak.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/runtime/MemberName/MemberNameLeak.java	Mon Dec 10 15:47:44 2018 +0100
@@ -37,6 +37,7 @@
 import jdk.test.lib.process.ProcessTools;
 import sun.hotspot.WhiteBox;
 import sun.hotspot.code.Compiler;
+import sun.hotspot.gc.GC;
 
 public class MemberNameLeak {
     static class Leak {
@@ -76,6 +77,7 @@
         // Run this Leak class with logging
         ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                                       "-Xlog:membername+table=trace",
+                                      "-XX:+UnlockExperimentalVMOptions",
                                       "-XX:+UnlockDiagnosticVMOptions",
                                       "-XX:+WhiteBoxAPI",
                                       "-Xbootclasspath/a:.",
@@ -99,6 +101,10 @@
         if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
             test("-XX:+UseConcMarkSweepGC", false);
             test("-XX:+UseConcMarkSweepGC", true);
+            if (GC.Shenandoah.isSupported()) {
+                test("-XX:+UseShenandoahGC", true);
+                test("-XX:+UseShenandoahGC", false);
+            }
         }
     }
 }
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbJhisto.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbJhisto.java	Mon Dec 10 15:47:44 2018 +0100
@@ -34,6 +34,7 @@
  * @bug 8191658
  * @summary Test clhsdb jhisto command
  * @requires vm.hasSA
+ * @requires vm.gc != "Shenandoah"
  * @requires vm.gc != "Z"
  * @library /test/lib
  * @run main/othervm ClhsdbJhisto
--- a/test/hotspot/jtreg/serviceability/sa/TestHeapDumpForLargeArray.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/serviceability/sa/TestHeapDumpForLargeArray.java	Mon Dec 10 15:47:44 2018 +0100
@@ -47,6 +47,7 @@
  * @library /test/lib
  * @bug 8171084
  * @requires vm.hasSAandCanAttach & (vm.bits == "64" & os.maxMemory > 8g)
+ * @requires vm.gc != "Shenandoah"
  * @requires vm.gc != "Z"
  * @modules java.base/jdk.internal.misc
  *          jdk.hotspot.agent/sun.jvm.hotspot
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCapacityTest.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCapacityTest.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
  * @test
  * @summary Test checks the consistency of the output
  * displayed with jstat -gccapacity.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest01.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest01.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
  * @library /test/lib
  * @library ../share
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @run main/othervm -XX:+UsePerfData -Xmx128M GcCauseTest01
  */
 import utils.*;
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest02.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest02.java	Mon Dec 10 15:47:44 2018 +0100
@@ -28,7 +28,7 @@
 *          test forces the debuggee application to eat ~70% of the heap and runs jstat.
  *          jstat should show actual usage of old gen (OC/OU ~= old gen usage).
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest03.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest03.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
  *          Test scenario:
  *          test forces debuggee application call System.gc(), runs jstat and checks that
  *          cause of last garbage collection displayed by jstat (LGCC) is 'System.gc()'.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcNewTest.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcNewTest.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 *          test repeatedly provokes garbage collection in the debuggee application and after each garbage
 *          collection runs jstat. jstat should show that after garbage collection the number of GC events and the
 *          garbage collection time increase.
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest01.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest01.java	Mon Dec 10 15:47:44 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
  * @library /test/lib
  * @library ../share
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @run main/othervm -XX:+UsePerfData -Xmx128M GcTest01
  */
 import utils.*;
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest02.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest02.java	Mon Dec 10 15:47:44 2018 +0100
@@ -28,7 +28,7 @@
 *          test forces the debuggee application to eat ~70% of the heap and runs jstat.
  *          jstat should show actual usage of old gen (OC/OU ~= old gen usage).
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
- * @requires vm.gc != "Z"
+ * @requires vm.gc != "Z" & vm.gc != "Shenandoah"
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java	Mon Dec 10 15:47:44 2018 +0100
@@ -72,15 +72,16 @@
         Boolean isUseG1GCon = wb.getBooleanVMFlag("UseG1GC");
         Boolean isUseConcMarkSweepGCon = wb.getBooleanVMFlag("UseConcMarkSweepGC");
         Boolean isUseZGCon = wb.getBooleanVMFlag("UseZGC");
+        Boolean isUseShenandoahGCon = wb.getBooleanVMFlag("UseShenandoahGC");
         Boolean isUseEpsilonGCon = wb.getBooleanVMFlag("UseEpsilonGC");
 
         if (Compiler.isGraalEnabled() &&
-            (isUseConcMarkSweepGCon || isUseZGCon || isUseEpsilonGCon)) {
+            (isUseConcMarkSweepGCon || isUseZGCon || isUseEpsilonGCon || isUseShenandoahGCon)) {
             return; // Graal does not support these GCs
         }
 
         String keyPhrase;
-        if ((isExplicitGCInvokesConcurrentOn && (isUseG1GCon || isUseConcMarkSweepGCon)) || isUseZGCon) {
+        if ((isExplicitGCInvokesConcurrentOn && (isUseG1GCon || isUseConcMarkSweepGCon)) || isUseZGCon || isUseShenandoahGCon) {
             keyPhrase = "GC";
         } else {
             keyPhrase = "Pause Full";
--- a/test/lib/sun/hotspot/gc/GC.java	Mon Dec 10 17:34:49 2018 +0300
+++ b/test/lib/sun/hotspot/gc/GC.java	Mon Dec 10 15:47:44 2018 +0100
@@ -38,7 +38,8 @@
     ConcMarkSweep(3),
     G1(4),
     Epsilon(5),
-    Z(6);
+    Z(6),
+    Shenandoah(7);
 
     private static final WhiteBox WB = WhiteBox.getWhiteBox();