8204210: Implementation: JEP 333: ZGC: A Scalable Low-Latency Garbage Collector (Experimental)
authorpliden
Tue, 12 Jun 2018 17:40:28 +0200
changeset 50525 767cdb97f103
parent 50524 04f4e983c2f7
child 50526 3a5aafb12ae6
8204210: Implementation: JEP 333: ZGC: A Scalable Low-Latency Garbage Collector (Experimental) Reviewed-by: pliden, stefank, eosterlund, ehelin, sjohanss, rbackman, coleenp, ihse, jgeorge, lmesnik, rkennke Contributed-by: per.liden@oracle.com, stefan.karlsson@oracle.com, erik.osterlund@oracle.com, mikael.gerdin@oracle.com, kim.barrett@oracle.com, nils.eliasson@oracle.com, rickard.backman@oracle.com, rwestrel@redhat.com, coleen.phillimore@oracle.com, robbin.ehn@oracle.com, gerard.ziemski@oracle.com, hugh.wilkinson@intel.com, sandhya.viswanathan@intel.com, bill.npo.wheeler@intel.com, vinay.k.awasthi@intel.com, yasuenag@gmail.com
make/autoconf/hotspot.m4
make/conf/jib-profiles.js
make/hotspot/lib/JvmFeatures.gmk
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp
src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
src/hotspot/cpu/x86/x86.ad
src/hotspot/cpu/x86/x86_64.ad
src/hotspot/os_cpu/linux_x86/gc/z/zAddress_linux_x86.inline.hpp
src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp
src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.hpp
src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.cpp
src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.hpp
src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.cpp
src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp
src/hotspot/os_cpu/linux_x86/gc/z/zLargePages_linux_x86.cpp
src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp
src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp
src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp
src/hotspot/os_cpu/linux_x86/gc/z/zVirtualMemory_linux_x86.cpp
src/hotspot/share/adlc/formssel.cpp
src/hotspot/share/classfile/vmSymbols.cpp
src/hotspot/share/compiler/compilerDirectives.hpp
src/hotspot/share/compiler/oopMap.cpp
src/hotspot/share/gc/shared/barrierSetConfig.hpp
src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
src/hotspot/share/gc/shared/collectedHeap.hpp
src/hotspot/share/gc/shared/gcCause.cpp
src/hotspot/share/gc/shared/gcCause.hpp
src/hotspot/share/gc/shared/gcConfig.cpp
src/hotspot/share/gc/shared/gcConfiguration.cpp
src/hotspot/share/gc/shared/gcName.hpp
src/hotspot/share/gc/shared/gcThreadLocalData.hpp
src/hotspot/share/gc/shared/gc_globals.hpp
src/hotspot/share/gc/shared/specialized_oop_closures.hpp
src/hotspot/share/gc/shared/vmStructs_gc.hpp
src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp
src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp
src/hotspot/share/gc/z/vmStructs_z.cpp
src/hotspot/share/gc/z/vmStructs_z.hpp
src/hotspot/share/gc/z/zAddress.cpp
src/hotspot/share/gc/z/zAddress.hpp
src/hotspot/share/gc/z/zAddress.inline.hpp
src/hotspot/share/gc/z/zAddressRangeMap.hpp
src/hotspot/share/gc/z/zAddressRangeMap.inline.hpp
src/hotspot/share/gc/z/zAllocationFlags.hpp
src/hotspot/share/gc/z/zArguments.cpp
src/hotspot/share/gc/z/zArguments.hpp
src/hotspot/share/gc/z/zArray.hpp
src/hotspot/share/gc/z/zArray.inline.hpp
src/hotspot/share/gc/z/zBarrier.cpp
src/hotspot/share/gc/z/zBarrier.hpp
src/hotspot/share/gc/z/zBarrier.inline.hpp
src/hotspot/share/gc/z/zBarrierSet.cpp
src/hotspot/share/gc/z/zBarrierSet.hpp
src/hotspot/share/gc/z/zBarrierSet.inline.hpp
src/hotspot/share/gc/z/zBarrierSetAssembler.cpp
src/hotspot/share/gc/z/zBarrierSetAssembler.hpp
src/hotspot/share/gc/z/zBarrierSetRuntime.cpp
src/hotspot/share/gc/z/zBarrierSetRuntime.hpp
src/hotspot/share/gc/z/zBitField.hpp
src/hotspot/share/gc/z/zBitMap.hpp
src/hotspot/share/gc/z/zBitMap.inline.hpp
src/hotspot/share/gc/z/zCPU.cpp
src/hotspot/share/gc/z/zCPU.hpp
src/hotspot/share/gc/z/zCollectedHeap.cpp
src/hotspot/share/gc/z/zCollectedHeap.hpp
src/hotspot/share/gc/z/zCollectorPolicy.cpp
src/hotspot/share/gc/z/zCollectorPolicy.hpp
src/hotspot/share/gc/z/zDebug.gdb
src/hotspot/share/gc/z/zDirector.cpp
src/hotspot/share/gc/z/zDirector.hpp
src/hotspot/share/gc/z/zDriver.cpp
src/hotspot/share/gc/z/zDriver.hpp
src/hotspot/share/gc/z/zErrno.cpp
src/hotspot/share/gc/z/zErrno.hpp
src/hotspot/share/gc/z/zForwardingTable.cpp
src/hotspot/share/gc/z/zForwardingTable.hpp
src/hotspot/share/gc/z/zForwardingTable.inline.hpp
src/hotspot/share/gc/z/zForwardingTableEntry.hpp
src/hotspot/share/gc/z/zFuture.hpp
src/hotspot/share/gc/z/zFuture.inline.hpp
src/hotspot/share/gc/z/zGlobals.cpp
src/hotspot/share/gc/z/zGlobals.hpp
src/hotspot/share/gc/z/zHash.hpp
src/hotspot/share/gc/z/zHash.inline.hpp
src/hotspot/share/gc/z/zHeap.cpp
src/hotspot/share/gc/z/zHeap.hpp
src/hotspot/share/gc/z/zHeap.inline.hpp
src/hotspot/share/gc/z/zHeapIterator.cpp
src/hotspot/share/gc/z/zHeapIterator.hpp
src/hotspot/share/gc/z/zInitialize.cpp
src/hotspot/share/gc/z/zInitialize.hpp
src/hotspot/share/gc/z/zLargePages.cpp
src/hotspot/share/gc/z/zLargePages.hpp
src/hotspot/share/gc/z/zLargePages.inline.hpp
src/hotspot/share/gc/z/zList.hpp
src/hotspot/share/gc/z/zList.inline.hpp
src/hotspot/share/gc/z/zLiveMap.cpp
src/hotspot/share/gc/z/zLiveMap.hpp
src/hotspot/share/gc/z/zLiveMap.inline.hpp
src/hotspot/share/gc/z/zLock.hpp
src/hotspot/share/gc/z/zLock.inline.hpp
src/hotspot/share/gc/z/zMark.cpp
src/hotspot/share/gc/z/zMark.hpp
src/hotspot/share/gc/z/zMark.inline.hpp
src/hotspot/share/gc/z/zMarkCache.cpp
src/hotspot/share/gc/z/zMarkCache.hpp
src/hotspot/share/gc/z/zMarkCache.inline.hpp
src/hotspot/share/gc/z/zMarkStack.cpp
src/hotspot/share/gc/z/zMarkStack.hpp
src/hotspot/share/gc/z/zMarkStack.inline.hpp
src/hotspot/share/gc/z/zMarkStackEntry.hpp
src/hotspot/share/gc/z/zMarkTerminate.hpp
src/hotspot/share/gc/z/zMarkTerminate.inline.hpp
src/hotspot/share/gc/z/zMemory.cpp
src/hotspot/share/gc/z/zMemory.hpp
src/hotspot/share/gc/z/zMemory.inline.hpp
src/hotspot/share/gc/z/zMessagePort.hpp
src/hotspot/share/gc/z/zMessagePort.inline.hpp
src/hotspot/share/gc/z/zMetronome.cpp
src/hotspot/share/gc/z/zMetronome.hpp
src/hotspot/share/gc/z/zNMethodTable.cpp
src/hotspot/share/gc/z/zNMethodTable.hpp
src/hotspot/share/gc/z/zNMethodTableEntry.hpp
src/hotspot/share/gc/z/zNUMA.cpp
src/hotspot/share/gc/z/zNUMA.hpp
src/hotspot/share/gc/z/zObjectAllocator.cpp
src/hotspot/share/gc/z/zObjectAllocator.hpp
src/hotspot/share/gc/z/zOop.hpp
src/hotspot/share/gc/z/zOop.inline.hpp
src/hotspot/share/gc/z/zOopClosures.cpp
src/hotspot/share/gc/z/zOopClosures.hpp
src/hotspot/share/gc/z/zOopClosures.inline.hpp
src/hotspot/share/gc/z/zPage.cpp
src/hotspot/share/gc/z/zPage.hpp
src/hotspot/share/gc/z/zPage.inline.hpp
src/hotspot/share/gc/z/zPageAllocator.cpp
src/hotspot/share/gc/z/zPageAllocator.hpp
src/hotspot/share/gc/z/zPageCache.cpp
src/hotspot/share/gc/z/zPageCache.hpp
src/hotspot/share/gc/z/zPageCache.inline.hpp
src/hotspot/share/gc/z/zPageTable.cpp
src/hotspot/share/gc/z/zPageTable.hpp
src/hotspot/share/gc/z/zPageTable.inline.hpp
src/hotspot/share/gc/z/zPageTableEntry.hpp
src/hotspot/share/gc/z/zPhysicalMemory.cpp
src/hotspot/share/gc/z/zPhysicalMemory.hpp
src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp
src/hotspot/share/gc/z/zPreMappedMemory.cpp
src/hotspot/share/gc/z/zPreMappedMemory.hpp
src/hotspot/share/gc/z/zPreMappedMemory.inline.hpp
src/hotspot/share/gc/z/zReferenceProcessor.cpp
src/hotspot/share/gc/z/zReferenceProcessor.hpp
src/hotspot/share/gc/z/zRelocate.cpp
src/hotspot/share/gc/z/zRelocate.hpp
src/hotspot/share/gc/z/zRelocationSet.cpp
src/hotspot/share/gc/z/zRelocationSet.hpp
src/hotspot/share/gc/z/zRelocationSet.inline.hpp
src/hotspot/share/gc/z/zRelocationSetSelector.cpp
src/hotspot/share/gc/z/zRelocationSetSelector.hpp
src/hotspot/share/gc/z/zResurrection.cpp
src/hotspot/share/gc/z/zResurrection.hpp
src/hotspot/share/gc/z/zResurrection.inline.hpp
src/hotspot/share/gc/z/zRootsIterator.cpp
src/hotspot/share/gc/z/zRootsIterator.hpp
src/hotspot/share/gc/z/zRuntimeWorkers.cpp
src/hotspot/share/gc/z/zRuntimeWorkers.hpp
src/hotspot/share/gc/z/zServiceability.cpp
src/hotspot/share/gc/z/zServiceability.hpp
src/hotspot/share/gc/z/zStat.cpp
src/hotspot/share/gc/z/zStat.hpp
src/hotspot/share/gc/z/zTask.cpp
src/hotspot/share/gc/z/zTask.hpp
src/hotspot/share/gc/z/zThread.cpp
src/hotspot/share/gc/z/zThread.hpp
src/hotspot/share/gc/z/zThreadLocalData.hpp
src/hotspot/share/gc/z/zTracer.cpp
src/hotspot/share/gc/z/zTracer.hpp
src/hotspot/share/gc/z/zTracer.inline.hpp
src/hotspot/share/gc/z/zUtils.cpp
src/hotspot/share/gc/z/zUtils.hpp
src/hotspot/share/gc/z/zUtils.inline.hpp
src/hotspot/share/gc/z/zValue.hpp
src/hotspot/share/gc/z/zVirtualMemory.cpp
src/hotspot/share/gc/z/zVirtualMemory.hpp
src/hotspot/share/gc/z/zVirtualMemory.inline.hpp
src/hotspot/share/gc/z/zWeakRootsProcessor.cpp
src/hotspot/share/gc/z/zWeakRootsProcessor.hpp
src/hotspot/share/gc/z/zWorkers.cpp
src/hotspot/share/gc/z/zWorkers.hpp
src/hotspot/share/gc/z/zWorkers.inline.hpp
src/hotspot/share/gc/z/z_globals.hpp
src/hotspot/share/gc/z/z_specialized_oop_closures.hpp
src/hotspot/share/jfr/metadata/metadata.xml
src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp
src/hotspot/share/logging/logPrefix.hpp
src/hotspot/share/logging/logTag.hpp
src/hotspot/share/memory/metaspace.hpp
src/hotspot/share/opto/classes.cpp
src/hotspot/share/opto/classes.hpp
src/hotspot/share/opto/compile.cpp
src/hotspot/share/opto/compile.hpp
src/hotspot/share/opto/escape.cpp
src/hotspot/share/opto/idealKit.cpp
src/hotspot/share/opto/idealKit.hpp
src/hotspot/share/opto/lcm.cpp
src/hotspot/share/opto/loopnode.cpp
src/hotspot/share/opto/loopnode.hpp
src/hotspot/share/opto/loopopts.cpp
src/hotspot/share/opto/macro.cpp
src/hotspot/share/opto/matcher.cpp
src/hotspot/share/opto/memnode.cpp
src/hotspot/share/opto/node.cpp
src/hotspot/share/opto/node.hpp
src/hotspot/share/opto/opcodes.cpp
src/hotspot/share/opto/opcodes.hpp
src/hotspot/share/opto/phasetype.hpp
src/hotspot/share/opto/vectornode.cpp
src/hotspot/share/prims/jvmtiTagMap.cpp
src/hotspot/share/prims/whitebox.cpp
src/hotspot/share/runtime/jniHandles.cpp
src/hotspot/share/runtime/stackValue.cpp
src/hotspot/share/runtime/vmStructs.cpp
src/hotspot/share/runtime/vm_operations.hpp
src/hotspot/share/utilities/macros.hpp
src/java.base/share/legal/c-libutl.md
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeap.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddress.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddressRangeMapForPageTable.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZBarrier.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZCollectedHeap.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZForwardingTable.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZForwardingTableCursor.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZForwardingTableEntry.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobals.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobalsForVMStructs.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZHash.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZHeap.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZOop.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPage.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPageAllocator.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPageTable.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPageTableEntry.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPhysicalMemoryManager.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZVirtualMemory.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/OopField.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMOps.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java
src/jdk.jfr/share/conf/jfr/default.jfc
src/jdk.jfr/share/conf/jfr/profile.jfc
test/hotspot/gtest/gc/z/test_zAddress.cpp
test/hotspot/gtest/gc/z/test_zArray.cpp
test/hotspot/gtest/gc/z/test_zBitField.cpp
test/hotspot/gtest/gc/z/test_zBitMap.cpp
test/hotspot/gtest/gc/z/test_zForwardingTable.cpp
test/hotspot/gtest/gc/z/test_zList.cpp
test/hotspot/gtest/gc/z/test_zLiveMap.cpp
test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp
test/hotspot/gtest/gc/z/test_zUtils.cpp
test/hotspot/gtest/gc/z/test_zVirtualMemory.cpp
test/hotspot/jtreg/TEST.ROOT
test/hotspot/jtreg/compiler/gcbarriers/UnsafeIntrinsicsTest.java
test/hotspot/jtreg/gc/TestAllocateHeapAt.java
test/hotspot/jtreg/gc/TestAllocateHeapAtError.java
test/hotspot/jtreg/gc/TestAllocateHeapAtMultiple.java
test/hotspot/jtreg/gc/TestSoftReferencesBehaviorOnOOME.java
test/hotspot/jtreg/gc/TestVerifyDuringStartup.java
test/hotspot/jtreg/gc/TestVerifySilently.java
test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java
test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java
test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java
test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java
test/hotspot/jtreg/gc/arguments/TestShrinkHeapInSteps.java
test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java
test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java
test/hotspot/jtreg/gc/arguments/TestVerifyBeforeAndAfterGCFlags.java
test/hotspot/jtreg/gc/logging/TestUnifiedLoggingSwitchStress.java
test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithZ.java
test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithZ.java
test/hotspot/jtreg/gc/survivorAlignment/TestAllocationInEden.java
test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromEdenToTenured.java
test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java
test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java
test/hotspot/jtreg/gc/survivorAlignment/TestPromotionToSurvivor.java
test/hotspot/jtreg/gc/whitebox/TestWBGC.java
test/hotspot/jtreg/runtime/Metaspace/PrintMetaspaceDcmd.java
test/hotspot/jtreg/runtime/memory/LargePages/TestLargePagesFlags.java
test/hotspot/jtreg/serviceability/dcmd/gc/RunGCTest.java
test/hotspot/jtreg/serviceability/sa/TestUniverse.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcCapacityTest.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest01.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest02.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest03.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcNewTest.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest01.java
test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest02.java
test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/CompressedClassSpaceSize/TestDescription.java
test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowTest/ShrinkGrowTest.java
test/hotspot/jtreg/vmTestbase/nsk/jdi/ObjectReference/referringObjects/referringObjects001/referringObjects001.java
test/hotspot/jtreg/vmTestbase/nsk/jdi/ReferenceType/instances/instances003/instances003.java
test/hotspot/jtreg/vmTestbase/nsk/jdwp/ReferenceType/Instances/instances001/instances001.java
test/hotspot/jtreg/vmTestbase/nsk/jdwp/VirtualMachine/InstanceCounts/instanceCounts001/instanceCounts001.java
test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java
test/jdk/TEST.ROOT
test/jdk/com/sun/jdi/OomDebugTest.java
test/jdk/com/sun/management/OperatingSystemMXBean/GetCommittedVirtualMemorySize.java
test/jdk/java/lang/management/ManagementFactory/MXBeanException.java
test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java
test/jdk/java/lang/management/MemoryMXBean/MemoryTestZGC.sh
test/lib/sun/hotspot/gc/GC.java
--- a/make/autoconf/hotspot.m4	Tue Jun 12 07:52:30 2018 -0700
+++ b/make/autoconf/hotspot.m4	Tue Jun 12 17:40:28 2018 +0200
@@ -25,7 +25,7 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-    graal vm-structs jni-check services management cmsgc g1gc parallelgc serialgc epsilongc nmt cds \
+    graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc zgc nmt cds \
     static-build link-time-opt aot jfr"
 
 # Deprecated JVM features (these are ignored, but with a warning)
@@ -328,6 +328,19 @@
     fi
   fi
 
+  # Only enable ZGC on Linux x86_64
+  AC_MSG_CHECKING([if zgc should be built])
+  if HOTSPOT_CHECK_JVM_FEATURE(zgc); then
+    if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
+      AC_MSG_RESULT([yes])
+    else
+      DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
+      AC_MSG_RESULT([no, platform not supported])
+    fi
+  else
+    AC_MSG_RESULT([no])
+  fi
+
   # Turn on additional features based on other parts of configure
   if test "x$INCLUDE_DTRACE" = "xtrue"; then
     JVM_FEATURES="$JVM_FEATURES dtrace"
--- a/make/conf/jib-profiles.js	Tue Jun 12 07:52:30 2018 -0700
+++ b/make/conf/jib-profiles.js	Tue Jun 12 17:40:28 2018 +0200
@@ -694,6 +694,14 @@
                        profiles[openName].artifacts["jdk"].remote));
     });
 
+    // Enable ZGC in linux-x64-open builds
+    [ "linux-x64-open" ].forEach(function (name) {
+        var configureArgs = { configure_args: [ "--with-jvm-features=zgc" ] };
+        var debugName = name + common.debug_suffix;
+        profiles[name] = concatObjects(profiles[name], configureArgs);
+        profiles[debugName] = concatObjects(profiles[debugName], configureArgs);
+    });
+
     // Profiles used to run tests. Used in JPRT and Mach 5.
     var testOnlyProfiles = {
         "run-test-jprt": {
--- a/make/hotspot/lib/JvmFeatures.gmk	Tue Jun 12 07:52:30 2018 -0700
+++ b/make/hotspot/lib/JvmFeatures.gmk	Tue Jun 12 17:40:28 2018 +0200
@@ -160,6 +160,11 @@
   JVM_EXCLUDE_PATTERNS += gc/epsilon
 endif
 
+ifneq ($(call check-jvm-feature, zgc), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0
+  JVM_EXCLUDE_PATTERNS += gc/z
+endif
+
 ifneq ($(call check-jvm-feature, jfr), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_JFR=0
   JVM_EXCLUDE_PATTERNS += jfr
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -1346,7 +1346,11 @@
       __ decode_heap_oop(dest->as_register());
     }
 #endif
-    __ verify_oop(dest->as_register());
+
+    // Load barrier has not yet been applied, so ZGC can't verify the oop here
+    if (!UseZGC) {
+      __ verify_oop(dest->as_register());
+    }
   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 #ifdef _LP64
     if (UseCompressedClassPointers) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#endif // COMPILER1
+
+#undef __
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+static void call_vm(MacroAssembler* masm,
+                    address entry_point,
+                    Register arg0,
+                    Register arg1) {
+  // Setup arguments
+  if (arg1 == c_rarg0) {
+    if (arg0 == c_rarg1) {
+      __ xchgptr(c_rarg1, c_rarg0);
+    } else {
+      __ movptr(c_rarg1, arg1);
+      __ movptr(c_rarg0, arg0);
+    }
+  } else {
+    if (arg0 != c_rarg0) {
+      __ movptr(c_rarg0, arg0);
+    }
+    if (arg1 != c_rarg1) {
+      __ movptr(c_rarg1, arg1);
+    }
+  }
+
+  // Call VM
+  __ MacroAssembler::call_VM_leaf_base(entry_point, 2);
+}
+
+void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
+                                   DecoratorSet decorators,
+                                   BasicType type,
+                                   Register dst,
+                                   Address src,
+                                   Register tmp1,
+                                   Register tmp_thread) {
+  if (!ZBarrierSet::barrier_needed(decorators, type)) {
+    // Barrier not needed
+    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+    return;
+  }
+
+  BLOCK_COMMENT("ZBarrierSetAssembler::load_at {");
+
+  // Allocate scratch register
+  Register scratch = tmp1;
+  if (tmp1 == noreg) {
+    scratch = r12;
+    __ push(scratch);
+  }
+
+  assert_different_registers(dst, scratch);
+
+  Label done;
+
+  //
+  // Fast Path
+  //
+
+  // Load address
+  __ lea(scratch, src);
+
+  // Load oop at address
+  __ movptr(dst, Address(scratch, 0));
+
+  // Test address bad mask
+  __ testptr(dst, address_bad_mask_from_thread(r15_thread));
+  __ jcc(Assembler::zero, done);
+
+  //
+  // Slow path
+  //
+
+  // Save registers
+  __ push(rax);
+  __ push(rcx);
+  __ push(rdx);
+  __ push(rdi);
+  __ push(rsi);
+  __ push(r8);
+  __ push(r9);
+  __ push(r10);
+  __ push(r11);
+
+  // We may end up here from generate_native_wrapper, then the method may have
+  // floats as arguments, and we must spill them before calling the VM runtime
+  // leaf. From the interpreter all floats are passed on the stack.
+  assert(Argument::n_float_register_parameters_j == 8, "Assumption");
+  const int xmm_size = wordSize * 2;
+  const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
+  __ subptr(rsp, xmm_spill_size);
+  __ movdqu(Address(rsp, xmm_size * 7), xmm7);
+  __ movdqu(Address(rsp, xmm_size * 6), xmm6);
+  __ movdqu(Address(rsp, xmm_size * 5), xmm5);
+  __ movdqu(Address(rsp, xmm_size * 4), xmm4);
+  __ movdqu(Address(rsp, xmm_size * 3), xmm3);
+  __ movdqu(Address(rsp, xmm_size * 2), xmm2);
+  __ movdqu(Address(rsp, xmm_size * 1), xmm1);
+  __ movdqu(Address(rsp, xmm_size * 0), xmm0);
+
+  // Call VM
+  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);
+
+  // Restore registers
+  __ movdqu(xmm0, Address(rsp, xmm_size * 0));
+  __ movdqu(xmm1, Address(rsp, xmm_size * 1));
+  __ movdqu(xmm2, Address(rsp, xmm_size * 2));
+  __ movdqu(xmm3, Address(rsp, xmm_size * 3));
+  __ movdqu(xmm4, Address(rsp, xmm_size * 4));
+  __ movdqu(xmm5, Address(rsp, xmm_size * 5));
+  __ movdqu(xmm6, Address(rsp, xmm_size * 6));
+  __ movdqu(xmm7, Address(rsp, xmm_size * 7));
+  __ addptr(rsp, xmm_spill_size);
+
+  __ pop(r11);
+  __ pop(r10);
+  __ pop(r9);
+  __ pop(r8);
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(rdx);
+  __ pop(rcx);
+
+  if (dst == rax) {
+    __ addptr(rsp, wordSize);
+  } else {
+    __ movptr(dst, rax);
+    __ pop(rax);
+  }
+
+  __ bind(done);
+
+  // Restore scratch register
+  if (tmp1 == noreg) {
+    __ pop(scratch);
+  }
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::load_at");
+}
+
+#ifdef ASSERT
+
+void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
+                                    DecoratorSet decorators,
+                                    BasicType type,
+                                    Address dst,
+                                    Register src,
+                                    Register tmp1,
+                                    Register tmp2) {
+  BLOCK_COMMENT("ZBarrierSetAssembler::store_at {");
+
+  // Verify oop store
+  if (type == T_OBJECT || type == T_ARRAY) {
+    // Note that src could be noreg, which means we
+    // are storing null and can skip verification.
+    if (src != noreg) {
+      Label done;
+      __ testptr(src, address_bad_mask_from_thread(r15_thread));
+      __ jcc(Assembler::zero, done);
+      __ stop("Verify oop store failed");
+      __ should_not_reach_here();
+      __ bind(done);
+    }
+  }
+
+  // Store value
+  BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2);
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::store_at");
+}
+
+#endif // ASSERT
+
+void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
+                                              DecoratorSet decorators,
+                                              BasicType type,
+                                              Register src,
+                                              Register dst,
+                                              Register count) {
+  if (!ZBarrierSet::barrier_needed(decorators, type)) {
+    // Barrier not needed
+    return;
+  }
+
+  BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");
+
+  // Save registers
+  __ pusha();
+
+  // Call VM
+  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);
+
+  // Restore registers
+  __ popa();
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
+}
+
+void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
+                                                         Register jni_env,
+                                                         Register obj,
+                                                         Register tmp,
+                                                         Label& slowpath) {
+  BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");
+
+  // Resolve jobject
+  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
+
+  // Test address bad mask
+  __ testptr(obj, address_bad_mask_from_jni_env(jni_env));
+  __ jcc(Assembler::notZero, slowpath);
+
+  BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
+}
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
+                                                         LIR_Opr ref) const {
+  __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
+}
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
+                                                         ZLoadBarrierStubC1* stub) const {
+  // Stub entry
+  __ bind(*stub->entry());
+
+  Register ref = stub->ref()->as_register();
+  Register ref_addr = noreg;
+
+  if (stub->ref_addr()->is_register()) {
+    // Address already in register
+    ref_addr = stub->ref_addr()->as_pointer_register();
+  } else {
+    // Load address into tmp register
+    ce->leal(stub->ref_addr(), stub->tmp(), stub->patch_code(), stub->patch_info());
+    ref_addr = stub->tmp()->as_pointer_register();
+  }
+
+  assert_different_registers(ref, ref_addr, noreg);
+
+  // Save rax unless it is the result register
+  if (ref != rax) {
+    __ push(rax);
+  }
+
+  // Setup arguments and call runtime stub
+  __ subptr(rsp, 2 * BytesPerWord);
+  ce->store_parameter(ref_addr, 1);
+  ce->store_parameter(ref, 0);
+  __ call(RuntimeAddress(stub->runtime_stub()));
+  __ addptr(rsp, 2 * BytesPerWord);
+
+  // Verify result
+  __ verify_oop(rax, "Bad oop");
+
+  // Restore rax unless it is the result register
+  if (ref != rax) {
+    __ movptr(ref, rax);
+    __ pop(rax);
+  }
+
+  // Stub exit
+  __ jmp(*stub->continuation());
+}
+
+#undef __
+#define __ sasm->
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
+                                                                 DecoratorSet decorators) const {
+  // Enter and save registers
+  __ enter();
+  __ save_live_registers_no_oop_map(true /* save_fpu_registers */);
+
+  // Setup arguments
+  __ load_parameter(1, c_rarg1);
+  __ load_parameter(0, c_rarg0);
+
+  // Call VM
+  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+
+  // Restore registers and return
+  __ restore_live_registers_except_rax(true /* restore_fpu_registers */);
+  __ leave();
+  __ ret(0);
+}
+
+#endif // COMPILER1
+
+#undef __
+#define __ cgen->assembler()->
+
+// Generates a register specific stub for calling
+// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
+// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
+//
+// The raddr register serves as both input and output for this stub. When the stub is
+// called the raddr register contains the object field address (oop*) where the bad oop
+// was loaded from, which caused the slow path to be taken. On return from the stub the
+// raddr register contains the good/healed oop returned from
+// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
+// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
+static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
+  // Don't generate stub for invalid registers
+  if (raddr == rsp || raddr == r12 || raddr == r15) {
+    return NULL;
+  }
+
+  // Create stub name
+  char name[64];
+  const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
+  os::snprintf(name, sizeof(name), "load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
+
+  __ align(CodeEntryAlignment);
+  StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
+  address start = __ pc();
+
+  // Save live registers
+  if (raddr != rax) {
+    __ push(rax);
+  }
+  if (raddr != rcx) {
+    __ push(rcx);
+  }
+  if (raddr != rdx) {
+    __ push(rdx);
+  }
+  if (raddr != rsi) {
+    __ push(rsi);
+  }
+  if (raddr != rdi) {
+    __ push(rdi);
+  }
+  if (raddr != r8) {
+    __ push(r8);
+  }
+  if (raddr != r9) {
+    __ push(r9);
+  }
+  if (raddr != r10) {
+    __ push(r10);
+  }
+  if (raddr != r11) {
+    __ push(r11);
+  }
+
+  // Setup arguments
+  if (c_rarg1 != raddr) {
+    __ movq(c_rarg1, raddr);
+  }
+  __ movq(c_rarg0, Address(raddr, 0));
+
+  // Call barrier function
+  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+
+  // Move result returned in rax to raddr, if needed
+  if (raddr != rax) {
+    __ movq(raddr, rax);
+  }
+
+  // Restore saved registers
+  if (raddr != r11) {
+    __ pop(r11);
+  }
+  if (raddr != r10) {
+    __ pop(r10);
+  }
+  if (raddr != r9) {
+    __ pop(r9);
+  }
+  if (raddr != r8) {
+    __ pop(r8);
+  }
+  if (raddr != rdi) {
+    __ pop(rdi);
+  }
+  if (raddr != rsi) {
+    __ pop(rsi);
+  }
+  if (raddr != rdx) {
+    __ pop(rdx);
+  }
+  if (raddr != rcx) {
+    __ pop(rcx);
+  }
+  if (raddr != rax) {
+    __ pop(rax);
+  }
+
+  __ ret(0);
+
+  return start;
+}
+
+#undef __
+
+void ZBarrierSetAssembler::barrier_stubs_init() {
+  // Load barrier stubs
+  int stub_code_size = 256 * 16; // Rough estimate of code size
+
+  ResourceMark rm;
+  BufferBlob* bb = BufferBlob::create("zgc_load_barrier_stubs", stub_code_size);
+  CodeBuffer buf(bb);
+  StubCodeGenerator cgen(&buf);
+
+  Register rr = as_Register(0);
+  for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
+    _load_barrier_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_STRONG_OOP_REF);
+    _load_barrier_weak_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_WEAK_OOP_REF);
+    rr = rr->successor();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
+#define CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
+
+#ifdef COMPILER1
+class LIR_Assembler;
+class LIR_OprDesc;
+typedef LIR_OprDesc* LIR_Opr;
+class StubAssembler;
+class ZLoadBarrierStubC1;
+#endif // COMPILER1
+
+class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
+  address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
+  address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
+
+public:
+  ZBarrierSetAssembler() :
+    _load_barrier_slow_stub(),
+    _load_barrier_weak_slow_stub() {}
+
+  address load_barrier_slow_stub(Register reg) { return _load_barrier_slow_stub[reg->encoding()]; }
+  address load_barrier_weak_slow_stub(Register reg) { return _load_barrier_weak_slow_stub[reg->encoding()]; }
+
+  virtual void load_at(MacroAssembler* masm,
+                       DecoratorSet decorators,
+                       BasicType type,
+                       Register dst,
+                       Address src,
+                       Register tmp1,
+                       Register tmp_thread);
+
+#ifdef ASSERT
+  virtual void store_at(MacroAssembler* masm,
+                        DecoratorSet decorators,
+                        BasicType type,
+                        Address dst,
+                        Register src,
+                        Register tmp1,
+                        Register tmp2);
+#endif // ASSERT
+
+  virtual void arraycopy_prologue(MacroAssembler* masm,
+                                  DecoratorSet decorators,
+                                  BasicType type,
+                                  Register src,
+                                  Register dst,
+                                  Register count);
+
+  virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
+                                             Register jni_env,
+                                             Register obj,
+                                             Register tmp,
+                                             Label& slowpath);
+
+#ifdef COMPILER1
+  void generate_c1_load_barrier_test(LIR_Assembler* ce,
+                                     LIR_Opr ref) const;
+
+  void generate_c1_load_barrier_stub(LIR_Assembler* ce,
+                                     ZLoadBarrierStubC1* stub) const;
+
+  void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
+                                             DecoratorSet decorators) const;
+#endif // COMPILER1
+
+  virtual void barrier_stubs_init();
+};
+
+#endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -44,6 +44,9 @@
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/zThreadLocalData.hpp"
+#endif
 
 // Declaration and definition of StubGenerator (no .hpp file).
 // For a more detailed description of the stub routine structure
@@ -1026,6 +1029,15 @@
     // make sure object is 'reasonable'
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
+
+#if INCLUDE_ZGC
+    if (UseZGC) {
+      // Check if metadata bits indicate a bad oop
+      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+      __ jcc(Assembler::notZero, error);
+    }
+#endif
+
     // Check if the oop is in the right area of memory
     __ movptr(c_rarg2, rax);
     __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
--- a/src/hotspot/cpu/x86/x86.ad	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/cpu/x86/x86.ad	Tue Jun 12 17:40:28 2018 +0200
@@ -1067,6 +1067,138 @@
 #endif
                       );
 
+reg_class xmm0_reg(XMM0, XMM0b, XMM0c, XMM0d);
+reg_class ymm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h);
+reg_class zmm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p);
+
+reg_class xmm1_reg(XMM1, XMM1b, XMM1c, XMM1d);
+reg_class ymm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h);
+reg_class zmm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p);
+
+reg_class xmm2_reg(XMM2, XMM2b, XMM2c, XMM2d);
+reg_class ymm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h);
+reg_class zmm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p);
+
+reg_class xmm3_reg(XMM3, XMM3b, XMM3c, XMM3d);
+reg_class ymm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h);
+reg_class zmm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p);
+
+reg_class xmm4_reg(XMM4, XMM4b, XMM4c, XMM4d);
+reg_class ymm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h);
+reg_class zmm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p);
+
+reg_class xmm5_reg(XMM5, XMM5b, XMM5c, XMM5d);
+reg_class ymm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h);
+reg_class zmm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p);
+
+reg_class xmm6_reg(XMM6, XMM6b, XMM6c, XMM6d);
+reg_class ymm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h);
+reg_class zmm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p);
+
+reg_class xmm7_reg(XMM7, XMM7b, XMM7c, XMM7d);
+reg_class ymm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h);
+reg_class zmm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p);
+
+#ifdef _LP64
+
+reg_class xmm8_reg(XMM8, XMM8b, XMM8c, XMM8d);
+reg_class ymm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h);
+reg_class zmm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p);
+
+reg_class xmm9_reg(XMM9, XMM9b, XMM9c, XMM9d);
+reg_class ymm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h);
+reg_class zmm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p);
+
+reg_class xmm10_reg(XMM10, XMM10b, XMM10c, XMM10d);
+reg_class ymm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h);
+reg_class zmm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p);
+
+reg_class xmm11_reg(XMM11, XMM11b, XMM11c, XMM11d);
+reg_class ymm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h);
+reg_class zmm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p);
+
+reg_class xmm12_reg(XMM12, XMM12b, XMM12c, XMM12d);
+reg_class ymm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h);
+reg_class zmm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p);
+
+reg_class xmm13_reg(XMM13, XMM13b, XMM13c, XMM13d);
+reg_class ymm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h);
+reg_class zmm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p);
+
+reg_class xmm14_reg(XMM14, XMM14b, XMM14c, XMM14d);
+reg_class ymm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h);
+reg_class zmm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p);
+
+reg_class xmm15_reg(XMM15, XMM15b, XMM15c, XMM15d);
+reg_class ymm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h);
+reg_class zmm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p);
+
+reg_class xmm16_reg(XMM16, XMM16b, XMM16c, XMM16d);
+reg_class ymm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h);
+reg_class zmm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p);
+
+reg_class xmm17_reg(XMM17, XMM17b, XMM17c, XMM17d);
+reg_class ymm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h);
+reg_class zmm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p);
+
+reg_class xmm18_reg(XMM18, XMM18b, XMM18c, XMM18d);
+reg_class ymm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h);
+reg_class zmm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p);
+
+reg_class xmm19_reg(XMM19, XMM19b, XMM19c, XMM19d);
+reg_class ymm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h);
+reg_class zmm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p);
+
+reg_class xmm20_reg(XMM20, XMM20b, XMM20c, XMM20d);
+reg_class ymm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h);
+reg_class zmm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p);
+
+reg_class xmm21_reg(XMM21, XMM21b, XMM21c, XMM21d);
+reg_class ymm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h);
+reg_class zmm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p);
+
+reg_class xmm22_reg(XMM22, XMM22b, XMM22c, XMM22d);
+reg_class ymm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h);
+reg_class zmm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p);
+
+reg_class xmm23_reg(XMM23, XMM23b, XMM23c, XMM23d);
+reg_class ymm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h);
+reg_class zmm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p);
+
+reg_class xmm24_reg(XMM24, XMM24b, XMM24c, XMM24d);
+reg_class ymm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h);
+reg_class zmm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p);
+
+reg_class xmm25_reg(XMM25, XMM25b, XMM25c, XMM25d);
+reg_class ymm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h);
+reg_class zmm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p);
+
+reg_class xmm26_reg(XMM26, XMM26b, XMM26c, XMM26d);
+reg_class ymm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h);
+reg_class zmm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p);
+
+reg_class xmm27_reg(XMM27, XMM27b, XMM27c, XMM27d);
+reg_class ymm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h);
+reg_class zmm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p);
+
+reg_class xmm28_reg(XMM28, XMM28b, XMM28c, XMM28d);
+reg_class ymm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h);
+reg_class zmm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p);
+
+reg_class xmm29_reg(XMM29, XMM29b, XMM29c, XMM29d);
+reg_class ymm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h);
+reg_class zmm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p);
+
+reg_class xmm30_reg(XMM30, XMM30b, XMM30c, XMM30d);
+reg_class ymm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h);
+reg_class zmm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p);
+
+reg_class xmm31_reg(XMM31, XMM31b, XMM31c, XMM31d);
+reg_class ymm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h);
+reg_class zmm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p);
+
+#endif
+
 %}
 
 
--- a/src/hotspot/cpu/x86/x86_64.ad	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/cpu/x86/x86_64.ad	Tue Jun 12 17:40:28 2018 +0200
@@ -538,6 +538,12 @@
 
 %}
 
+source_hpp %{
+#if INCLUDE_ZGC
+#include "gc/z/zBarrierSetAssembler.hpp"
+#endif
+%}
+
 //----------SOURCE BLOCK-------------------------------------------------------
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
@@ -4221,6 +4227,135 @@
   %}
 %}
 
+// Operands for bound floating pointer register arguments
+operand rxmm0() %{
+  constraint(ALLOC_IN_RC(xmm0_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm1() %{
+  constraint(ALLOC_IN_RC(xmm1_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm2() %{
+  constraint(ALLOC_IN_RC(xmm2_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm3() %{
+  constraint(ALLOC_IN_RC(xmm3_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm4() %{
+  constraint(ALLOC_IN_RC(xmm4_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm5() %{
+  constraint(ALLOC_IN_RC(xmm5_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm6() %{
+  constraint(ALLOC_IN_RC(xmm6_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm7() %{
+  constraint(ALLOC_IN_RC(xmm7_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm8() %{
+  constraint(ALLOC_IN_RC(xmm8_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm9() %{
+  constraint(ALLOC_IN_RC(xmm9_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm10() %{
+  constraint(ALLOC_IN_RC(xmm10_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm11() %{
+  constraint(ALLOC_IN_RC(xmm11_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm12() %{
+  constraint(ALLOC_IN_RC(xmm12_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm13() %{
+  constraint(ALLOC_IN_RC(xmm13_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm14() %{
+  constraint(ALLOC_IN_RC(xmm14_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm15() %{
+  constraint(ALLOC_IN_RC(xmm15_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm16() %{
+  constraint(ALLOC_IN_RC(xmm16_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm17() %{
+  constraint(ALLOC_IN_RC(xmm17_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm18() %{
+  constraint(ALLOC_IN_RC(xmm18_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm19() %{
+  constraint(ALLOC_IN_RC(xmm19_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm20() %{
+  constraint(ALLOC_IN_RC(xmm20_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm21() %{
+  constraint(ALLOC_IN_RC(xmm21_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm22() %{
+  constraint(ALLOC_IN_RC(xmm22_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm23() %{
+  constraint(ALLOC_IN_RC(xmm23_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm24() %{
+  constraint(ALLOC_IN_RC(xmm24_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm25() %{
+  constraint(ALLOC_IN_RC(xmm25_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm26() %{
+  constraint(ALLOC_IN_RC(xmm26_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm27() %{
+  constraint(ALLOC_IN_RC(xmm27_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm28() %{
+  constraint(ALLOC_IN_RC(xmm28_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm29() %{
+  constraint(ALLOC_IN_RC(xmm29_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm30() %{
+  constraint(ALLOC_IN_RC(xmm30_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm31() %{
+  constraint(ALLOC_IN_RC(xmm31_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
 
 //----------OPERAND CLASSES----------------------------------------------------
 // Operand Classes are groups of operands that are used as to simplify
@@ -11547,6 +11682,16 @@
   ins_pipe(ialu_cr_reg_mem);
 %}
 
+instruct testL_reg_mem2(rFlagsReg cr, rRegP src, memory mem, immL0 zero)
+%{
+  match(Set cr (CmpL (AndL (CastP2X src) (LoadL mem)) zero));
+
+  format %{ "testq   $src, $mem" %}
+  opcode(0x85);
+  ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
+  ins_pipe(ialu_cr_reg_mem);
+%}
+
 // Manifest a CmpL result in an integer register.  Very painful.
 // This is the test to avoid.
 instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
@@ -12320,6 +12465,223 @@
   ins_pipe(pipe_jmp);
 %}
 
+//
+// Execute ZGC load barrier (strong) slow path
+//
+
+// When running without XMM regs
+instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
+  // Passes the oop field address in dst; the stub returns the healed oop in dst.
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate(MaxVectorSize < 16);
+
+  effect(DEF dst, KILL cr);
+
+  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+                                     rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                     rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                     rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                     rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15);
+
+  format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For ZMM enabled processors
+instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+                               rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                               rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                               rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                               rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+                               rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+                               rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+                               rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+                               rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15,
+         KILL x16, KILL x17, KILL x18, KILL x19,
+         KILL x20, KILL x21, KILL x22, KILL x23,
+         KILL x24, KILL x25, KILL x26, KILL x27,
+         KILL x28, KILL x29, KILL x30, KILL x31);
+
+  format %{"LoadBarrierSlowRegZmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+//
+// Execute ZGC load barrier (weak) slow path
+//
+
+// When running without XMM regs
+instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
+  // Must match the WEAK node; matching LoadBarrierSlowReg here duplicated the strong rule.
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate(MaxVectorSize < 16);
+
+  effect(DEF dst, KILL cr);
+
+  format %{"LoadBarrierWeakSlowRegNoVec $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+                                         rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                         rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                         rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                         rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15);
+
+  format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For ZMM enabled processors
+instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+                                   rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                   rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                   rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                   rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+                                   rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+                                   rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+                                   rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+                                   rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem))
+  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15,
+         KILL x16, KILL x17, KILL x18, KILL x19,
+         KILL x20, KILL x21, KILL x22, KILL x23,
+         KILL x24, KILL x25, KILL x26, KILL x27,
+         KILL x28, KILL x29, KILL x30, KILL x31);
+
+  format %{"LoadBarrierWeakSlowRegZmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
 
 // ============================================================================
 // This name is KNOWN by the ADLC and cannot be changed.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zAddress_linux_x86.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
+#define OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
+
+inline uintptr_t ZAddress::address(uintptr_t value) {
+  return value;
+}
+
+#endif // OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zBackingPath_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/init.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Filesystem names
+#define ZFILESYSTEM_TMPFS                "tmpfs"
+#define ZFILESYSTEM_HUGETLBFS            "hugetlbfs"
+
+// Sysfs file for transparent huge page on tmpfs
+#define ZFILENAME_SHMEM_ENABLED          "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
+
+// Default mount points
+#define ZMOUNTPOINT_TMPFS                "/dev/shm"
+#define ZMOUNTPOINT_HUGETLBFS            "/hugepages"
+
+// Java heap filename
+#define ZFILENAME_HEAP                   "java_heap"
+
+// Support for building on older Linux systems
+#ifndef __NR_memfd_create
+#define __NR_memfd_create                319
+#endif
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC                      0x0001U
+#endif
+#ifndef MFD_HUGETLB
+#define MFD_HUGETLB                      0x0004U
+#endif
+#ifndef O_CLOEXEC
+#define O_CLOEXEC                        02000000
+#endif
+#ifndef O_TMPFILE
+#define O_TMPFILE                        (020000000 | O_DIRECTORY)
+#endif
+
+// Filesystem types, see statfs(2)
+#ifndef TMPFS_MAGIC
+#define TMPFS_MAGIC                      0x01021994
+#endif
+#ifndef HUGETLBFS_MAGIC
+#define HUGETLBFS_MAGIC                  0x958458f6
+#endif
+
+// Wrapper for the memfd_create(2) syscall, invoked directly via syscall(2)
+// so the code also builds on older systems whose C library does not
+// provide a wrapper (see the __NR_memfd_create fallback define above).
+static int z_memfd_create(const char *name, unsigned int flags) {
+  return syscall(__NR_memfd_create, name, flags);
+}
+
+// Create and sanity-check the file backing the Java heap. On any failure
+// an error is logged and the object is left with _initialized == false,
+// which callers must check via is_initialized(). Fix: corrected grammar in
+// the UseTransparentHugePages error message ("enable" -> "enabled").
+ZBackingFile::ZBackingFile() :
+    _fd(-1),
+    _filesystem(0),
+    _initialized(false) {
+
+  // Create backing file
+  _fd = create_fd(ZFILENAME_HEAP);
+  if (_fd == -1) {
+    return;
+  }
+
+  // Get filesystem type
+  struct statfs statfs_buf;
+  if (fstatfs(_fd, &statfs_buf) == -1) {
+    ZErrno err;
+    log_error(gc, init)("Failed to determine filesystem type for backing file (%s)", err.to_string());
+    return;
+  }
+  _filesystem = statfs_buf.f_type;
+
+  // Make sure we're on a supported filesystem
+  if (!is_tmpfs() && !is_hugetlbfs()) {
+    log_error(gc, init)("Backing file must be located on a %s or a %s filesystem", ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  // Make sure the filesystem type matches requested large page type
+  if (ZLargePages::is_transparent() && !is_tmpfs()) {
+    log_error(gc, init)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem", ZFILESYSTEM_TMPFS);
+    return;
+  }
+
+  if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
+    log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", ZFILESYSTEM_TMPFS);
+    return;
+  }
+
+  if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
+    log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
+    log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  // Successfully initialized
+  _initialized = true;
+}
+
+// Try to create an anonymous heap backing file with memfd_create(2).
+// When explicit large pages are in use, a ".hugetlb" suffix is added to
+// the name and MFD_HUGETLB is requested. Returns the file descriptor, or
+// -1 on failure (logged at debug level only, since the caller falls back
+// to a filesystem-based file).
+int ZBackingFile::create_mem_fd(const char* name) const {
+  // Create file name
+  char filename[PATH_MAX];
+  snprintf(filename, sizeof(filename), "%s%s", name, ZLargePages::is_explicit() ? ".hugetlb" : "");
+
+  // Create file
+  const int extra_flags = ZLargePages::is_explicit() ? MFD_HUGETLB : 0;
+  const int fd = z_memfd_create(filename, MFD_CLOEXEC | extra_flags);
+  if (fd == -1) {
+    ZErrno err;
+    log_debug(gc, init)("Failed to create memfd file (%s)",
+                        ((UseLargePages && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
+    return -1;
+  }
+
+  log_debug(gc, init)("Heap backed by file /memfd:%s", filename);
+
+  return fd;
+}
+
+// Create a heap backing file on a tmpfs or hugetlbfs mount point. First
+// tries to create an anonymous file with O_TMPFILE, falling back to
+// open+unlink on kernels without O_TMPFILE support. Returns the file
+// descriptor, or -1 on failure. Fixes: "anonymouns" typo in the debug log
+// message, and file descriptor leaks on the fstat/unlink error paths.
+int ZBackingFile::create_file_fd(const char* name) const {
+  const char* const filesystem = ZLargePages::is_explicit() ? ZFILESYSTEM_HUGETLBFS : ZFILESYSTEM_TMPFS;
+  const char* const mountpoint = ZLargePages::is_explicit() ? ZMOUNTPOINT_HUGETLBFS : ZMOUNTPOINT_TMPFS;
+
+  // Find mountpoint
+  ZBackingPath path(filesystem, mountpoint);
+  if (path.get() == NULL) {
+    log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
+    return -1;
+  }
+
+  // Try to create an anonymous file using the O_TMPFILE flag. Note that this
+  // flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
+  const int fd_anon = open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+  if (fd_anon == -1) {
+    ZErrno err;
+    log_debug(gc, init)("Failed to create anonymous file in %s (%s)", path.get(),
+                        (err == EINVAL ? "Not supported" : err.to_string()));
+  } else {
+    // Get inode number for anonymous file
+    struct stat stat_buf;
+    if (fstat(fd_anon, &stat_buf) == -1) {
+      ZErrno err;
+      log_error(gc, init)("Failed to determine inode number for anonymous file (%s)", err.to_string());
+      close(fd_anon);
+      return -1;
+    }
+
+    log_debug(gc, init)("Heap backed by file %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);
+
+    return fd_anon;
+  }
+
+  log_debug(gc, init)("Falling back to open/unlink");
+
+  // Create file name
+  char filename[PATH_MAX];
+  snprintf(filename, sizeof(filename), "%s/%s.%d", path.get(), name, os::current_process_id());
+
+  // Create file
+  const int fd = open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+  if (fd == -1) {
+    ZErrno err;
+    log_error(gc, init)("Failed to create file %s (%s)", filename, err.to_string());
+    return -1;
+  }
+
+  // Unlink file, so that it is automatically removed when the last file
+  // descriptor referring to it is closed
+  if (unlink(filename) == -1) {
+    ZErrno err;
+    log_error(gc, init)("Failed to unlink file %s (%s)", filename, err.to_string());
+    close(fd);
+    return -1;
+  }
+
+  log_debug(gc, init)("Heap backed by file %s", filename);
+
+  return fd;
+}
+
+// Create the heap backing file. If -XX:ZPath was not specified, first try
+// memfd_create() and only fall back to a file on a tmpfs/hugetlbfs mount
+// point if that fails. Returns the file descriptor, or -1 on failure.
+// Fixes: "moint point" typo in the debug log message and "tmpfd" typo in
+// the comment below.
+int ZBackingFile::create_fd(const char* name) const {
+  if (ZPath == NULL) {
+    // If the path is not explicitly specified, then we first try to create a memfd file
+    // instead of looking for a tmpfs/hugetlbfs mount point. Note that memfd_create() might
+    // not be supported at all (requires kernel >= 3.17), or it might not support large
+    // pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
+    // file on an accessible tmpfs or hugetlbfs mount point.
+    const int fd = create_mem_fd(name);
+    if (fd != -1) {
+      return fd;
+    }
+
+    log_debug(gc, init)("Falling back to searching for an accessible mount point");
+  }
+
+  return create_file_fd(name);
+}
+
+// Returns true if the backing file was successfully created and verified.
+bool ZBackingFile::is_initialized() const {
+  return _initialized;
+}
+
+// Returns the backing file descriptor, or -1 if creation failed.
+int ZBackingFile::fd() const {
+  return _fd;
+}
+
+// Returns true if the backing file is located on a tmpfs filesystem.
+bool ZBackingFile::is_tmpfs() const {
+  return _filesystem == TMPFS_MAGIC;
+}
+
+// Returns true if the backing file is located on a hugetlbfs filesystem.
+bool ZBackingFile::is_hugetlbfs() const {
+  return _filesystem == HUGETLBFS_MAGIC;
+}
+
+// Probe kernel support for transparent huge pages on tmpfs.
+bool ZBackingFile::tmpfs_supports_transparent_huge_pages() const {
+  // If the shmem_enabled file exists and is readable then we
+  // know the kernel supports transparent huge pages for tmpfs.
+  return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
+}
+
+// Expand the range in two halves. Used to work around posix_fallocate()
+// being interrupted by signals when expanding a large range in one call
+// (see try_expand_tmpfs()).
+bool ZBackingFile::try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
+  // Expand the first (aligned) half, then the remainder. Each half may in
+  // turn be split again by try_expand_tmpfs() if it too gets interrupted.
+  const size_t first_half = align_up(length / 2, alignment);
+  return try_expand_tmpfs(offset, first_half, alignment) &&
+         try_expand_tmpfs(offset + first_half, length - first_half, alignment);
+}
+
+// Expand the tmpfs-backed file with posix_fallocate(). If the call is
+// interrupted (EINTR) and the range can still be split on the given
+// alignment, recursively retry in smaller pieces. Returns false and logs
+// an error on any other failure.
+bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
+  assert(length > 0, "Invalid length");
+  assert(is_aligned(length, alignment), "Invalid length");
+
+  ZErrno err = posix_fallocate(_fd, offset, length);
+
+  if (err == EINTR && length > alignment) {
+    // Calling posix_fallocate() with a large length can take a long
+    // time to complete. When running profilers, such as VTune, this
+    // syscall will be constantly interrupted by signals. Expanding
+    // the file in smaller steps avoids this problem.
+    return try_split_and_expand_tmpfs(offset, length, alignment);
+  }
+
+  if (err) {
+    log_error(gc)("Failed to allocate backing file (%s)", err.to_string());
+    return false;
+  }
+
+  return true;
+}
+
+// Ensure [offset, offset + length) of the tmpfs file is allocated,
+// splitting on vm page size granularity if interrupted.
+bool ZBackingFile::expand_tmpfs(size_t offset, size_t length) const {
+  assert(is_tmpfs(), "Wrong filesystem");
+  return try_expand_tmpfs(offset, length, os::vm_page_size());
+}
+
+// Ensure [offset, offset + length) of the hugetlbfs file is backed by
+// huge pages. Uses ftruncate() plus a verification mmap()/munmap() probe
+// instead of posix_fallocate(), which hugetlbfs did not support prior to
+// kernel 4.3. Returns false and logs an error if not enough huge pages
+// are available.
+bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
+  assert(is_hugetlbfs(), "Wrong filesystem");
+
+  // Prior to kernel 4.3, hugetlbfs did not support posix_fallocate().
+  // Instead of posix_fallocate() we can use a well-known workaround,
+  // which involves truncating the file to requested size and then try
+  // to map it to verify that there are enough huge pages available to
+  // back it.
+  while (ftruncate(_fd, offset + length) == -1) {
+    ZErrno err;
+    if (err != EINTR) {
+      log_error(gc)("Failed to truncate backing file (%s)", err.to_string());
+      return false;
+    }
+  }
+
+  // If we fail mapping during initialization, i.e. when we are pre-mapping
+  // the heap, then we wait and retry a few times before giving up. Otherwise
+  // there is a risk that running JVMs back-to-back will fail, since there
+  // is a delay between process termination and the huge pages owned by that
+  // process being returned to the huge page pool and made available for new
+  // allocations.
+  void* addr = MAP_FAILED;
+  const int max_attempts = 3;
+  for (int attempt = 1; attempt <= max_attempts; attempt++) {
+    addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
+    if (addr != MAP_FAILED || is_init_completed()) {
+      // Mapping was successful or initialization phase has completed
+      break;
+    }
+
+    ZErrno err;
+    log_debug(gc)("Failed to map backing file (%s), attempt %d of %d",
+                  err.to_string(), attempt, max_attempts);
+
+    // Wait and retry in one second, in the hope that
+    // huge pages will be available by then.
+    sleep(1);
+  }
+
+  if (addr == MAP_FAILED) {
+    // Not enough huge pages left
+    ZErrno err;
+    log_error(gc)("Failed to map backing file (%s)", err.to_string());
+    return false;
+  }
+
+  // Successful mapping, unmap again. From now on the pages we mapped
+  // will be reserved for this file.
+  if (munmap(addr, length) == -1) {
+    ZErrno err;
+    log_error(gc)("Failed to unmap backing file (%s)", err.to_string());
+    return false;
+  }
+
+  return true;
+}
+
+// Expand the backing file so that [offset, offset + length) is usable,
+// dispatching on the filesystem type the file lives on.
+bool ZBackingFile::expand(size_t offset, size_t length) const {
+  if (is_hugetlbfs()) {
+    return expand_hugetlbfs(offset, length);
+  }
+  return expand_tmpfs(offset, length);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
+
+#include "memory/allocation.hpp"
+
+// Manages the file that backs the Java heap. The file is created with
+// memfd_create() or placed on a tmpfs/hugetlbfs mount point, and is
+// expanded on demand via expand().
+class ZBackingFile {
+private:
+  int      _fd;          // Backing file descriptor, or -1 on failure
+  uint64_t _filesystem;  // Filesystem magic of the backing file, see statfs(2)
+  bool     _initialized; // True if creation and verification succeeded
+
+  // File creation strategies (memfd vs. filesystem-based)
+  int create_mem_fd(const char* name) const;
+  int create_file_fd(const char* name) const;
+  int create_fd(const char* name) const;
+
+  // Filesystem type queries
+  bool is_tmpfs() const;
+  bool is_hugetlbfs() const;
+  bool tmpfs_supports_transparent_huge_pages() const;
+
+  // tmpfs expansion, with splitting to work around EINTR
+  bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
+  bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
+  bool expand_tmpfs(size_t offset, size_t length) const;
+
+  // hugetlbfs expansion via ftruncate() + mmap() probe
+  bool expand_hugetlbfs(size_t offset, size_t length) const;
+
+public:
+  ZBackingFile();
+
+  // True if the constructor succeeded; must be checked before use
+  bool is_initialized() const;
+
+  int fd() const;
+  bool expand(size_t offset, size_t length) const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zBackingPath_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "logging/log.hpp"
+
+#include <stdio.h>
+#include <unistd.h>
+
+// Mount information, see proc(5) for more details.
+#define PROC_SELF_MOUNTINFO        "/proc/self/mountinfo"
+
+// Determine the filesystem path in which to place the heap backing file.
+// Uses -XX:ZPath when specified, otherwise searches the process's mount
+// table for an accessible mount point of the requested filesystem type.
+// The resulting path (or NULL on failure) is available via get().
+ZBackingPath::ZBackingPath(const char* filesystem, const char* preferred_path) {
+  if (ZPath != NULL) {
+    // Use specified path
+    _path = strdup(ZPath);
+  } else {
+    // Find suitable path
+    _path = find_mountpoint(filesystem, preferred_path);
+  }
+}
+
+// Release the path string allocated by strdup()/find_mountpoint().
+// free(NULL) is a no-op, so a failed lookup is handled safely.
+ZBackingPath::~ZBackingPath() {
+  free(_path);
+  _path = NULL;
+}
+
+// Parse one /proc/self/mountinfo line. Note that the "%ms" conversions
+// make sscanf() allocate the matched strings; the returned mountpoint is
+// owned by the caller and must be free()'d. Returns NULL if the line does
+// not match the filesystem or the mount point is not accessible.
+char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) const {
+  char* line_mountpoint = NULL;
+  char* line_filesystem = NULL;
+
+  // Parse line and return a newly allocated string containing the mountpoint if
+  // the line contains a matching filesystem and the mountpoint is accessible by
+  // the current user.
+  if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
+      strcmp(line_filesystem, filesystem) != 0 ||
+      access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
+    // Not a matching or accessible filesystem
+    free(line_mountpoint);
+    line_mountpoint = NULL;
+  }
+
+  free(line_filesystem);
+
+  return line_mountpoint;
+}
+
+// Collect all accessible mount points of the given filesystem type by
+// scanning /proc/self/mountinfo. Each match is added to the array as a
+// malloc()'d string owned by the caller (released via free_mountpoints()).
+void ZBackingPath::get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const {
+  FILE* const stream = fopen(PROC_SELF_MOUNTINFO, "r");
+  if (stream == NULL) {
+    ZErrno err;
+    log_error(gc, init)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
+    return;
+  }
+
+  char* line = NULL;
+  size_t line_capacity = 0;
+
+  for (;;) {
+    // getline() grows the buffer as needed and returns -1 at EOF
+    if (getline(&line, &line_capacity, stream) == -1) {
+      break;
+    }
+
+    char* const mountpoint = get_mountpoint(line, filesystem);
+    if (mountpoint != NULL) {
+      mountpoints->add(mountpoint);
+    }
+  }
+
+  free(line);
+  fclose(stream);
+}
+
+// Free the strings collected by get_mountpoints() and empty the array.
+void ZBackingPath::free_mountpoints(ZArray<char*>* mountpoints) const {
+  ZArrayIterator<char*> it(mountpoints);
+  char* mountpoint;
+  while (it.next(&mountpoint)) {
+    free(mountpoint);
+  }
+  mountpoints->clear();
+}
+
+// Select a mount point for the backing file. A single accessible mount
+// point of the given filesystem is used as-is; with several candidates,
+// only the preferred one is accepted, otherwise all candidates are logged
+// as an error. Returns a malloc()'d string owned by the caller, or NULL
+// if no suitable mount point was found.
+char* ZBackingPath::find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const {
+  char* path = NULL;
+  ZArray<char*> mountpoints;
+
+  get_mountpoints(&mountpoints, filesystem);
+
+  if (mountpoints.size() == 0) {
+    // No filesystem found
+    log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem);
+  } else if (mountpoints.size() == 1) {
+    // One filesystem found
+    path = strdup(mountpoints.at(0));
+  } else if (mountpoints.size() > 1) {
+    // More than one filesystem found
+    ZArrayIterator<char*> iter(&mountpoints);
+    for (char* mountpoint; iter.next(&mountpoint);) {
+      if (!strcmp(mountpoint, preferred_mountpoint)) {
+        // Preferred mount point found
+        path = strdup(mountpoint);
+        break;
+      }
+    }
+
+    if (path == NULL) {
+      // Preferred mount point not found
+      log_error(gc, init)("More than one %s filesystem found:", filesystem);
+      ZArrayIterator<char*> iter2(&mountpoints);
+      for (char* mountpoint; iter2.next(&mountpoint);) {
+        log_error(gc, init)("  %s", mountpoint);
+      }
+    }
+  }
+
+  // Always release the collected strings; path (if any) is a copy
+  free_mountpoints(&mountpoints);
+
+  return path;
+}
+
+// Returns the selected path, or NULL if no usable path was found.
+const char* ZBackingPath::get() const {
+  return _path;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
+
+#include "gc/z/zArray.hpp"
+#include "memory/allocation.hpp"
+
+// Resolves, for the lifetime of a stack-allocated instance, the directory
+// in which the heap backing file should be created (-XX:ZPath or a
+// discovered mount point of the requested filesystem type).
+class ZBackingPath : public StackObj {
+private:
+  char* _path; // malloc()'d path owned by this object, or NULL on failure
+
+  // /proc/self/mountinfo parsing helpers
+  char* get_mountpoint(const char* line, const char* filesystem) const;
+  void get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const;
+  void free_mountpoints(ZArray<char*>* mountpoints) const;
+  char* find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const;
+
+public:
+  ZBackingPath(const char* filesystem, const char* preferred_path);
+  ~ZBackingPath();
+
+  // Returns the resolved path, or NULL if none was found
+  const char* get() const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
+
+// Lowest address of the reserved ZGC address space on this platform
+// (the start of the marked0 metadata view).
+uintptr_t ZAddressReservedStart() {
+  return ZAddressMetadataMarked0;
+}
+
+// End (exclusive) of the reserved ZGC address space on this platform
+// (the end of the remapped metadata view).
+uintptr_t ZAddressReservedEnd() {
+  return ZAddressMetadataRemapped + ZAddressOffsetMax;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
+
+//
+// Page Allocation Tiers
+// ---------------------
+//
+//  Page Type     Page Size     Object Size Limit     Object Alignment
+//  ------------------------------------------------------------------
+//  Small         2M            <= 256K               <MinObjAlignmentInBytes>
+//  Medium        32M           <= 4M                 4K
+//  Large         X*M           > 4M                  2M
+//  ------------------------------------------------------------------
+//
+//
+// Address Space & Pointer Layout
+// ------------------------------
+//
+//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
+//  .                                .
+//  .                                .
+//  .                                .
+//  +--------------------------------+ 0x0000140000000000 (20TB)
+//  |         Remapped View          |
+//  +--------------------------------+ 0x0000100000000000 (16TB)
+//  |     (Reserved, but unused)     |
+//  +--------------------------------+ 0x00000c0000000000 (12TB)
+//  |         Marked1 View           |
+//  +--------------------------------+ 0x0000080000000000 (8TB)
+//  |         Marked0 View           |
+//  +--------------------------------+ 0x0000040000000000 (4TB)
+//  .                                .
+//  +--------------------------------+ 0x0000000000000000
+//
+//
+//   6                 4 4 4  4 4                                             0
+//   3                 7 6 5  2 1                                             0
+//  +-------------------+-+----+-----------------------------------------------+
+//  |00000000 00000000 0|0|1111|11 11111111 11111111 11111111 11111111 11111111|
+//  +-------------------+-+----+-----------------------------------------------+
+//  |                   | |    |
+//  |                   | |    * 41-0 Object Offset (42-bits, 4TB address space)
+//  |                   | |
+//  |                   | * 45-42 Metadata Bits (4-bits)  0001 = Marked0      (Address view 4-8TB)
+//  |                   |                                 0010 = Marked1      (Address view 8-12TB)
+//  |                   |                                 0100 = Remapped     (Address view 16-20TB)
+//  |                   |                                 1000 = Finalizable  (Address view N/A)
+//  |                   |
+//  |                   * 46-46 Unused (1-bit, always zero)
+//  |
+//  * 63-47 Fixed (17-bits, always zero)
+//
+
+// Small page size shift: 2^21 = 2M
+const size_t    ZPlatformPageSizeSmallShift   = 21; // 2M
+
+// Number of low-order pointer bits used for the object offset: 2^42 = 4TB
+const size_t    ZPlatformAddressOffsetBits    = 42; // 4TB
+
+// The metadata (color) bits sit directly above the offset bits
+const uintptr_t ZPlatformAddressMetadataShift = ZPlatformAddressOffsetBits;
+
+// Reserved address space: starts at 4TB and spans four 4TB views
+// (marked0, marked1, reserved/unused, remapped), see diagram above
+const uintptr_t ZPlatformAddressSpaceStart    = (uintptr_t)1 << ZPlatformAddressOffsetBits;
+const uintptr_t ZPlatformAddressSpaceSize     = ((uintptr_t)1 << ZPlatformAddressOffsetBits) * 4;
+
+// Cache line size used for padding to avoid false sharing
+const size_t    ZPlatformCacheLineSize        = 64;
+
+#endif // OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zLargePages_linux_x86.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLargePages.hpp"
+#include "runtime/globals.hpp"
+
+// Map the -XX:+UseLargePages/-XX:+UseTransparentHugePages flags onto
+// ZGC's large page state.
+void ZLargePages::initialize_platform() {
+  if (!UseLargePages) {
+    _state = Disabled;
+    return;
+  }
+
+  _state = UseTransparentHugePages ? Transparent : Explicit;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zCPU.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+
+#include <unistd.h>
+#include <sys/syscall.h>
+
+#ifndef MPOL_F_NODE
+#define MPOL_F_NODE     (1<<0)  /* return next IL mode instead of node mask */
+#endif
+
+#ifndef MPOL_F_ADDR
+#define MPOL_F_ADDR     (1<<1)  /* look up vma using address */
+#endif
+
+// Wrapper for the get_mempolicy(2) syscall, invoked directly via
+// syscall(2). Used with MPOL_F_NODE|MPOL_F_ADDR to query which NUMA node
+// backs a given address.
+static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) {
+  return syscall(__NR_get_mempolicy, mode, nmask, maxnode, addr, flags);
+}
+
+// ZGC's NUMA awareness simply follows the -XX:+UseNUMA flag.
+void ZNUMA::initialize_platform() {
+  _enabled = UseNUMA;
+}
+
+// Returns the number of NUMA nodes, or 1 when NUMA support is disabled.
+uint32_t ZNUMA::count() {
+  if (!_enabled) {
+    // NUMA support not enabled
+    return 1;
+  }
+
+  return os::Linux::numa_max_node() + 1;
+}
+
+// Returns the NUMA node of the CPU the current thread is running on,
+// or 0 when NUMA support is disabled.
+uint32_t ZNUMA::id() {
+  if (!_enabled) {
+    // NUMA support not enabled
+    return 0;
+  }
+
+  return os::Linux::get_node_by_cpu(ZCPU::id());
+}
+
+// Returns the NUMA node backing the memory at addr, queried via
+// get_mempolicy(2). Raises a fatal error if the node cannot be
+// determined; returns 0 when NUMA support is disabled.
+uint32_t ZNUMA::memory_id(uintptr_t addr) {
+  if (!_enabled) {
+    // NUMA support not enabled, assume everything belongs to node zero
+    return 0;
+  }
+
+  uint32_t id = (uint32_t)-1;
+
+  if (z_get_mempolicy(&id, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
+    ZErrno err;
+    fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
+  }
+
+  assert(id < count(), "Invalid NUMA id");
+
+  return id;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zMemory.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zPhysicalMemoryBacking_linux_x86.hpp"
+#include "logging/log.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+// Support for building on older Linux systems
+#ifndef MADV_HUGEPAGE
+#define MADV_HUGEPAGE                        14
+#endif
+
+// Proc file entry for max map count
+#define ZFILENAME_PROC_MAX_MAP_COUNT         "/proc/sys/vm/max_map_count"
+
+// Creates the physical memory backing. 'granule_size' is the allocation
+// unit for physical segments; 'max_capacity' is only used to sanity-check
+// the system's mapping limit, it does not reserve anything up front.
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size) :
+    _manager(),
+    _file(),
+    _granule_size(granule_size) {
+
+  // Check and warn if max map count seems too low
+  check_max_map_count(max_capacity, granule_size);
+}
+
+// Best-effort sanity check of /proc/sys/vm/max_map_count against the number
+// of mappings ZGC may need for the given heap size. Emits warnings only;
+// silently (at debug level) skips the check if the proc file is unreadable.
+void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const {
+  const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
+  FILE* const file = fopen(filename, "r");
+  if (file == NULL) {
+    // Failed to open file, skip check
+    log_debug(gc)("Failed to open %s", filename);
+    return;
+  }
+
+  size_t actual_max_map_count = 0;
+  const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
+  fclose(file);
+  if (result != 1) {
+    // Failed to read file, skip check
+    log_debug(gc)("Failed to read %s", filename);
+    return;
+  }
+
+  // The required max map count is impossible to calculate exactly since subsystems
+  // other than ZGC are also creating memory mappings, and we have no control over that.
+  // However, ZGC tends to create the most mappings and dominate the total count.
+  // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
+  // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
+  const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2;
+  if (actual_max_map_count < required_max_map_count) {
+    // Warning is split across several lines to keep each line readable
+    log_warning(gc)("The system limit on number of memory mappings "
+                    "per process might be too low for the given");
+    log_warning(gc)("Java heap size (" SIZE_FORMAT "M). Please "
+                    "adjust %s to allow for at least", max_capacity / M, filename);
+    log_warning(gc)(SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). "
+                    "Continuing execution with the current limit could",
+                    required_max_map_count, actual_max_map_count);
+    log_warning(gc)("lead to a fatal error down the line, due to failed "
+                    "attempts to map memory.");
+  }
+}
+
+// Returns true if the backing file was successfully set up.
+bool ZPhysicalMemoryBacking::is_initialized() const {
+  return _file.is_initialized();
+}
+
+// Grows the backing file from offset 'from' to offset 'to' and makes the
+// new range [from, to) available for allocation. Returns false if the
+// file could not be expanded.
+bool ZPhysicalMemoryBacking::expand(size_t from, size_t to) {
+  const size_t size = to - from;
+
+  // Expand
+  if (!_file.expand(from, size)) {
+    return false;
+  }
+
+  // Add expanded space to free list
+  _manager.free(from, size);
+
+  return true;
+}
+
+// Allocates 'size' bytes of physical memory as a set of granule-sized
+// segments. 'size' must be granule-aligned. Callers are expected to have
+// ensured capacity beforehand, hence allocation from the manager can
+// never fail here.
+ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
+  assert(is_aligned(size, _granule_size), "Invalid size");
+
+  ZPhysicalMemory pmem;
+
+  // Allocate segments
+  for (size_t allocated = 0; allocated < size; allocated += _granule_size) {
+    const uintptr_t start = _manager.alloc_from_front(_granule_size);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+    pmem.add_segment(ZPhysicalMemorySegment(start, _granule_size));
+  }
+
+  return pmem;
+}
+
+// Returns all segments of the given physical memory to the free list.
+void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) {
+  const size_t nsegments = pmem.nsegments();
+
+  // Free segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment segment = pmem.segment(i);
+    _manager.free(segment.start(), segment.size());
+  }
+}
+
+// Reports a fatal mmap failure. ENOMEM gets a dedicated message since the
+// most likely cause is hitting the vm.max_map_count limit (see the warning
+// emitted by check_max_map_count).
+void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
+  if (err == ENOMEM) {
+    fatal("Failed to map memory. Please check the system limit on number of "
+          "memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
+  } else {
+    fatal("Failed to map memory (%s)", err.to_string());
+  }
+}
+
+// Advises the kernel to back [addr, addr + size) with transparent huge
+// pages. Failure is logged but not fatal — THP is an optimization only.
+void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size) const {
+  if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
+    ZErrno err;
+    log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
+  }
+}
+
+// Touches every page in [addr, addr + size) to force the kernel to commit
+// backing memory up front. Uses the large page size when explicit large
+// pages are in use, so each touch hits a distinct page.
+void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
+  const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
+  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+}
+
+// Maps the physical memory segments of 'pmem' contiguously at virtual
+// address 'addr', by mmap:ing each segment's range of the backing file
+// with MAP_FIXED|MAP_SHARED. Optionally applies THP advice, NUMA
+// interleaving and pretouch to each segment as it is mapped. Fatal on
+// mmap failure.
+void ZPhysicalMemoryBacking::map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const {
+  const size_t nsegments = pmem.nsegments();
+
+  // Map segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment segment = pmem.segment(i);
+    const size_t size = segment.size();
+    const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
+    if (res == MAP_FAILED) {
+      ZErrno err;
+      map_failed(err);
+    }
+
+    // Advise on use of transparent huge pages before touching it
+    if (ZLargePages::is_transparent()) {
+      advise_view(addr, size);
+    }
+
+    // NUMA interleave memory before touching it
+    ZNUMA::memory_interleave(addr, size);
+
+    if (pretouch) {
+      pretouch_view(addr, size);
+    }
+
+    // Next segment is mapped directly after this one
+    addr += size;
+  }
+}
+
+// Detaches the backing memory from the view at 'addr' while keeping the
+// address space reservation intact.
+void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const {
+  // Note that we must keep the address space reservation intact and just detach
+  // the backing memory. For this reason we map a new anonymous, non-accessible
+  // and non-reserved page over the mapping instead of actually unmapping.
+  const size_t size = pmem.size();
+  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    map_failed(err);
+  }
+}
+
+// Returns the address that Native Memory Tracking should account for the
+// given offset. Although a granule may be mapped at up to three views,
+// only one of them (marked0) is reported to NMT as committed, to avoid
+// triple-counting the same physical memory.
+uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
+  // From an NMT point of view we treat the first heap mapping (marked0) as committed
+  return ZAddress::marked0(offset);
+}
+
+// Maps the physical memory at the heap view(s) for 'offset'. With
+// ZUnmapBadViews (a debugging aid), only the currently-good view is
+// mapped and flip() remaps on each phase change; otherwise all three
+// views are mapped up front.
+void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
+  if (ZUnmapBadViews) {
+    // Only map the good view, for debugging only
+    map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
+  } else {
+    // Map all views
+    map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
+  }
+}
+
+// Unmaps the heap view(s) for 'offset', mirroring map() above: only the
+// good view when ZUnmapBadViews is enabled, otherwise all three views.
+void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
+  if (ZUnmapBadViews) {
+    // Only unmap the good view, for debugging only
+    unmap_view(pmem, ZAddress::good(offset));
+  } else {
+    // Unmap all views
+    unmap_view(pmem, ZAddress::marked0(offset));
+    unmap_view(pmem, ZAddress::marked1(offset));
+    unmap_view(pmem, ZAddress::remapped(offset));
+  }
+}
+
+// Called on a GC phase change when ZUnmapBadViews is enabled: maps the
+// view that just became good and unmaps the one that became bad, so that
+// stray accesses through a bad view fault immediately. The newly-good
+// view is not pretouched since its physical memory is already committed.
+void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
+  assert(ZUnmapBadViews, "Should be enabled");
+  const uintptr_t addr_good = ZAddress::good(offset);
+  const uintptr_t addr_bad = ZAddress::is_marked(ZAddressGoodMask) ? ZAddress::remapped(offset) : ZAddress::marked(offset);
+  // Map/Unmap views
+  map_view(pmem, addr_good, false /* pretouch */);
+  unmap_view(pmem, addr_bad);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
+
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zMemory.hpp"
+
+class ZErrno;
+class ZPhysicalMemory;
+
+// Linux/x86 implementation of ZGC's physical memory backing. Physical
+// memory is carved out of a single backing file (tmpfs/hugetlbfs) in
+// granule-sized segments tracked by a memory manager, and mapped into
+// the heap's virtual address views on demand.
+class ZPhysicalMemoryBacking {
+private:
+  ZMemoryManager _manager;       // Free-list of offsets into the backing file
+  ZBackingFile   _file;          // The file providing the physical memory
+  const size_t   _granule_size;  // Allocation unit for physical segments
+
+  void check_max_map_count(size_t max_capacity, size_t granule_size) const;
+  void map_failed(ZErrno err) const;
+
+  void advise_view(uintptr_t addr, size_t size) const;
+  void pretouch_view(uintptr_t addr, size_t size) const;
+  void map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const;
+  void unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const;
+
+public:
+  ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size);
+
+  // True if the backing file was successfully set up
+  bool is_initialized() const;
+
+  // Capacity management and segment allocation
+  bool expand(size_t from, size_t to);
+  ZPhysicalMemory alloc(size_t size);
+  void free(ZPhysicalMemory pmem);
+
+  // Address reported to Native Memory Tracking for a given offset
+  uintptr_t nmt_address(uintptr_t offset) const;
+
+  // Mapping/unmapping of heap views, and view flipping on phase change
+  void map(ZPhysicalMemory pmem, uintptr_t offset) const;
+  void unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
+  void flip(ZPhysicalMemory pmem, uintptr_t offset) const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zVirtualMemory_linux_x86.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+#include "logging/log.hpp"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+
+// Reserves [start, start + size) of address space for the heap views with
+// an inaccessible, non-reserved anonymous mapping. Returns true only if
+// the kernel placed the mapping exactly at the requested address.
+bool ZVirtualMemoryManager::reserve(uintptr_t start, size_t size) {
+  // Reserve address space
+  const uintptr_t actual_start = (uintptr_t)mmap((void*)start, size, PROT_NONE,
+                                                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  if (actual_start != start) {
+    // Without MAP_FIXED the kernel is free to place the mapping elsewhere.
+    // Release such a misplaced mapping so the address space is not leaked
+    // before reporting failure. MAP_FAILED means nothing was mapped at all.
+    if (actual_start != (uintptr_t)MAP_FAILED) {
+      munmap((void*)actual_start, size);
+    }
+    log_error(gc)("Failed to reserve address space for Java heap");
+    return false;
+  }
+
+  return true;
+}
--- a/src/hotspot/share/adlc/formssel.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/adlc/formssel.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -2282,6 +2282,9 @@
   if (strcmp(name, "RegD") == 0) size = 2;
   if (strcmp(name, "RegL") == 0) size = 2;
   if (strcmp(name, "RegN") == 0) size = 1;
+  if (strcmp(name, "VecX") == 0) size = 4;
+  if (strcmp(name, "VecY") == 0) size = 8;
+  if (strcmp(name, "VecZ") == 0) size = 16;
   if (strcmp(name, "RegP") == 0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
   if (size == 0) {
     return false;
@@ -3509,6 +3512,7 @@
     "ClearArray",
     "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
     "GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
+    "LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
   };
   int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
   if( strcmp(_opType,"PrefetchAllocation")==0 )
--- a/src/hotspot/share/classfile/vmSymbols.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/classfile/vmSymbols.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -756,6 +756,9 @@
 #endif // COMPILER1
 #ifdef COMPILER2
   case vmIntrinsics::_clone:
+#if INCLUDE_ZGC
+    if (UseZGC) return true;
+#endif
   case vmIntrinsics::_copyOf:
   case vmIntrinsics::_copyOfRange:
     // These intrinsics use both the objectcopy and the arraycopy
--- a/src/hotspot/share/compiler/compilerDirectives.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/compiler/compilerDirectives.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -66,7 +66,8 @@
     cflags(VectorizeDebug,          uintx, 0, VectorizeDebug) \
     cflags(CloneMapDebug,           bool, false, CloneMapDebug) \
     cflags(IGVPrintLevel,           intx, PrintIdealGraphLevel, IGVPrintLevel) \
-    cflags(MaxNodeLimit,            intx, MaxNodeLimit, MaxNodeLimit)
+    cflags(MaxNodeLimit,            intx, MaxNodeLimit, MaxNodeLimit) \
+ZGC_ONLY(cflags(ZOptimizeLoadBarriers, bool, ZOptimizeLoadBarriers, ZOptimizeLoadBarriers))
 #else
   #define compilerdirectives_c2_flags(cflags)
 #endif
--- a/src/hotspot/share/compiler/oopMap.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/compiler/oopMap.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -380,8 +380,12 @@
           continue;
         }
 #ifdef ASSERT
-        if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
-            !Universe::heap()->is_in_or_null(*loc)) {
+        // We can not verify the oop here if we are using ZGC, the oop
+        // will be bad in case we had a safepoint between a load and a
+        // load barrier.
+        if (!UseZGC &&
+            ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
+             !Universe::heap()->is_in_or_null(*loc))) {
           tty->print_cr("# Found non oop pointer.  Dumping state at failure");
           // try to dump out some helpful debugging information
           trace_codeblob_maps(fr, reg_map);
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -31,7 +31,8 @@
 #define FOR_EACH_CONCRETE_BARRIER_SET_DO(f)          \
   f(CardTableBarrierSet)                             \
   EPSILONGC_ONLY(f(EpsilonBarrierSet))               \
-  G1GC_ONLY(f(G1BarrierSet))
+  G1GC_ONLY(f(G1BarrierSet))                         \
+  ZGC_ONLY(f(ZBarrierSet))
 
 #define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f)          \
   f(ModRef)
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -31,10 +31,13 @@
 #include "gc/shared/cardTableBarrierSet.inline.hpp"
 
 #if INCLUDE_EPSILONGC
-#include "gc/epsilon/epsilonBarrierSet.hpp" // Epsilon support
+#include "gc/epsilon/epsilonBarrierSet.hpp"
 #endif
 #if INCLUDE_G1GC
-#include "gc/g1/g1BarrierSet.inline.hpp" // G1 support
+#include "gc/g1/g1BarrierSet.inline.hpp"
+#endif
+#if INCLUDE_ZGC
+#include "gc/z/zBarrierSet.inline.hpp"
 #endif
 
 #endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -89,6 +89,7 @@
 //     CMSHeap
 //   G1CollectedHeap
 //   ParallelScavengeHeap
+//   ZCollectedHeap
 //
 class CollectedHeap : public CHeapObj<mtInternal> {
   friend class VMStructs;
@@ -207,7 +208,8 @@
     Parallel,
     CMS,
     G1,
-    Epsilon
+    Epsilon,
+    Z
   };
 
   static inline size_t filler_array_max_size() {
--- a/src/hotspot/share/gc/shared/gcCause.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcCause.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -105,6 +105,21 @@
     case _dcmd_gc_run:
       return "Diagnostic Command";
 
+    case _z_timer:
+      return "Timer";
+
+    case _z_warmup:
+      return "Warmup";
+
+    case _z_allocation_rate:
+      return "Allocation Rate";
+
+    case _z_allocation_stall:
+      return "Allocation Stall";
+
+    case _z_proactive:
+      return "Proactive";
+
     case _last_gc_cause:
       return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";
 
--- a/src/hotspot/share/gc/shared/gcCause.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcCause.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -78,6 +78,12 @@
 
     _dcmd_gc_run,
 
+    _z_timer,
+    _z_warmup,
+    _z_allocation_rate,
+    _z_allocation_stall,
+    _z_proactive,
+
     _last_gc_cause
   };
 
--- a/src/hotspot/share/gc/shared/gcConfig.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -43,6 +43,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serialArguments.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/zArguments.hpp"
+#endif
 
 struct SupportedGC {
   bool&               _flag;
@@ -59,6 +62,7 @@
       G1GC_ONLY(static G1Arguments       g1Arguments;)
 PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
   SERIALGC_ONLY(static SerialArguments   serialArguments;)
+       ZGC_ONLY(static ZArguments        zArguments;)
 
 // Table of supported GCs, for translating between command
 // line flag, CollectedHeap::Name and GCArguments instance.
@@ -69,6 +73,7 @@
   PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC,      CollectedHeap::Parallel, parallelArguments, "parallel gc"))
   PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC,   CollectedHeap::Parallel, parallelArguments, "parallel gc"))
     SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC,        CollectedHeap::Serial,   serialArguments,   "serial gc"))
+         ZGC_ONLY_ARG(SupportedGC(UseZGC,             CollectedHeap::Z,        zArguments,        "z gc"))
 };
 
 #define FOR_EACH_SUPPORTED_GC(var) \
@@ -98,6 +103,7 @@
   NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelGC);)
   NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelOldGC));
   NOT_SERIALGC(  UNSUPPORTED_OPTION(UseSerialGC);)
+  NOT_ZGC(       UNSUPPORTED_OPTION(UseZGC);)
 }
 
 bool GCConfig::is_no_gc_selected() {
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -43,6 +43,10 @@
     return ParNew;
   }
 
+  if (UseZGC) {
+    return NA;
+  }
+
   return DefNew;
 }
 
@@ -59,6 +63,10 @@
     return ParallelOld;
   }
 
+  if (UseZGC) {
+    return Z;
+  }
+
   return SerialOld;
 }
 
--- a/src/hotspot/share/gc/shared/gcName.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcName.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -38,6 +38,8 @@
   ConcurrentMarkSweep,
   G1Old,
   G1Full,
+  Z,
+  NA,
   GCNameEndSentinel
 };
 
@@ -55,6 +57,8 @@
       case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
       case G1Old: return "G1Old";
       case G1Full: return "G1Full";
+      case Z: return "Z";
+      case NA: return "N/A";
       default: ShouldNotReachHere(); return NULL;
     }
   }
--- a/src/hotspot/share/gc/shared/gcThreadLocalData.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcThreadLocalData.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -40,6 +40,6 @@
 // should consider placing frequently accessed fields first in
 // T, so that field offsets relative to Thread are small, which
 // often allows for a more compact instruction encoding.
-typedef uint64_t GCThreadLocalData[14]; // 112 bytes
+typedef uint64_t GCThreadLocalData[18]; // 144 bytes
 
 #endif // SHARE_GC_SHARED_GCTHREADLOCALDATA_HPP
--- a/src/hotspot/share/gc/shared/gc_globals.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -41,6 +41,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serial_globals.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/z_globals.hpp"
+#endif
 
 #define GC_FLAGS(develop,                                                   \
                  develop_pd,                                                \
@@ -137,6 +140,22 @@
     constraint,                                                             \
     writeable))                                                             \
                                                                             \
+  ZGC_ONLY(GC_Z_FLAGS(                                                      \
+    develop,                                                                \
+    develop_pd,                                                             \
+    product,                                                                \
+    product_pd,                                                             \
+    diagnostic,                                                             \
+    diagnostic_pd,                                                          \
+    experimental,                                                           \
+    notproduct,                                                             \
+    manageable,                                                             \
+    product_rw,                                                             \
+    lp64_product,                                                           \
+    range,                                                                  \
+    constraint,                                                             \
+    writeable))                                                             \
+                                                                            \
   /* gc */                                                                  \
                                                                             \
   product(bool, UseConcMarkSweepGC, false,                                  \
@@ -157,6 +176,9 @@
   experimental(bool, UseEpsilonGC, false,                                   \
           "Use the Epsilon (no-op) garbage collector")                      \
                                                                             \
+  experimental(bool, UseZGC, false,                                         \
+          "Use the Z garbage collector")                                    \
+                                                                            \
   product(uint, ParallelGCThreads, 0,                                       \
           "Number of parallel threads parallel gc will use")                \
           constraint(ParallelGCThreadsConstraintFunc,AfterErgo)             \
--- a/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -35,6 +35,9 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serial_specialized_oop_closures.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/z_specialized_oop_closures.hpp"
+#endif
 
 // The following OopClosure types get specialized versions of
 // "oop_oop_iterate" that invoke the closures' do_oop methods
@@ -67,7 +70,8 @@
   SERIALGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f))       \
      CMSGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f))      \
       G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f))       \
-      G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))
+      G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))   \
+       ZGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_Z(f))
 
 // We separate these out, because sometime the general one has
 // a different definition from the specialized ones, and sometimes it
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -50,6 +50,9 @@
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/vmStructs_serial.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/vmStructs_z.hpp"
+#endif
 
 #define VM_STRUCTS_GC(nonstatic_field,                                                                                               \
                       volatile_nonstatic_field,                                                                                      \
@@ -70,6 +73,10 @@
   SERIALGC_ONLY(VM_STRUCTS_SERIALGC(nonstatic_field,                                                                                 \
                                     volatile_nonstatic_field,                                                                        \
                                     static_field))                                                                                   \
+  ZGC_ONLY(VM_STRUCTS_ZGC(nonstatic_field,                                                                                           \
+                          volatile_nonstatic_field,                                                                                  \
+                          static_field))                                                                                             \
+                                                                                                                                     \
   /**********************************************************************************/                                               \
   /* Generation and Space hierarchies                                               */                                               \
   /**********************************************************************************/                                               \
@@ -171,6 +178,10 @@
   SERIALGC_ONLY(VM_TYPES_SERIALGC(declare_type,                           \
                                   declare_toplevel_type,                  \
                                   declare_integer_type))                  \
+  ZGC_ONLY(VM_TYPES_ZGC(declare_type,                                     \
+                        declare_toplevel_type,                            \
+                        declare_integer_type))                            \
+                                                                          \
   /******************************************/                            \
   /* Generation and space hierarchies       */                            \
   /* (needed for run-time type information) */                            \
@@ -242,6 +253,8 @@
                                               declare_constant_with_value)) \
   SERIALGC_ONLY(VM_INT_CONSTANTS_SERIALGC(declare_constant,                 \
                                           declare_constant_with_value))     \
+  ZGC_ONLY(VM_INT_CONSTANTS_ZGC(declare_constant,                           \
+                                declare_constant_with_value))               \
                                                                             \
   /********************************************/                            \
   /* Generation and Space Hierarchy Constants */                            \
@@ -285,5 +298,7 @@
   declare_constant(Generation::LogOfGenGrain)                               \
   declare_constant(Generation::GenGrain)                                    \
 
+#define VM_LONG_CONSTANTS_GC(declare_constant)                              \
+  ZGC_ONLY(VM_LONG_CONSTANTS_ZGC(declare_constant))
 
 #endif // SHARE_GC_SHARED_VMSTRUCTS_GC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_LIR.hpp"
+#include "c1/c1_LIRGenerator.hpp"
+#include "c1/c1_CodeStubs.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "utilities/macros.hpp"
+
+// C1 slow-path stub for a ZGC load barrier.  Records everything needed to
+// emit the out-of-line slow path: the access decorators, the address the
+// reference was loaded from, the register holding the loaded reference,
+// and the runtime stub to call.
+ZLoadBarrierStubC1::ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) :
+    _decorators(access.decorators()),
+    _ref_addr(access.resolved_addr()),
+    _ref(ref),
+    _tmp(LIR_OprFact::illegalOpr),
+    _patch_info(access.patch_emit_info()),
+    _runtime_stub(runtime_stub) {
+
+  // Allocate tmp register if needed
+  if (!_ref_addr->is_register()) {
+    assert(_ref_addr->is_address(), "Must be an address");
+    if (_ref_addr->as_address_ptr()->index()->is_valid() ||
+        _ref_addr->as_address_ptr()->disp() != 0) {
+      // Has index or displacement, need tmp register to load address into
+      _tmp = access.gen()->new_pointer_register();
+    } else {
+      // No index or displacement, address available in base register
+      _ref_addr = _ref_addr->as_address_ptr()->base();
+    }
+  }
+
+  // Invariant: exactly one of _ref_addr / _tmp is a valid register, so the
+  // stub emitter knows where the address lives.
+  assert(_ref->is_register(), "Must be a register");
+  assert(_ref_addr->is_register() != _tmp->is_register(), "Only one should be a register");
+}
+
+// -- Trivial accessors for the recorded stub state ---------------------------
+
+DecoratorSet ZLoadBarrierStubC1::decorators() const {
+  return _decorators;
+}
+
+LIR_Opr ZLoadBarrierStubC1::ref() const {
+  return _ref;
+}
+
+LIR_Opr ZLoadBarrierStubC1::ref_addr() const {
+  return _ref_addr;
+}
+
+LIR_Opr ZLoadBarrierStubC1::tmp() const {
+  return _tmp;
+}
+
+// Patch only when the access itself carries C1_NEEDS_PATCHING.
+LIR_PatchCode ZLoadBarrierStubC1::patch_code() const {
+  return (_decorators & C1_NEEDS_PATCHING) != 0 ? lir_patch_normal : lir_patch_none;
+}
+
+CodeEmitInfo*& ZLoadBarrierStubC1::patch_info() {
+  return _patch_info;
+}
+
+address ZLoadBarrierStubC1::runtime_stub() const {
+  return _runtime_stub;
+}
+
+// Report operands to the LIR framework/register allocator: _ref_addr is an
+// input, _ref is the output, and _tmp (when allocated) is a temp.
+void ZLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) {
+  if (_patch_info != NULL) {
+    visitor->do_slow_case(_patch_info);
+  } else {
+    visitor->do_slow_case();
+  }
+
+  visitor->do_input(_ref_addr);
+  visitor->do_output(_ref);
+
+  if (_tmp->is_valid()) {
+    visitor->do_temp(_tmp);
+  }
+}
+
+// Actual code emission is delegated to the platform-specific assembler.
+void ZLoadBarrierStubC1::emit_code(LIR_Assembler* ce) {
+  ZBarrierSet::assembler()->generate_c1_load_barrier_stub(ce, this);
+}
+
+#ifndef PRODUCT
+void ZLoadBarrierStubC1::print_name(outputStream* out) const {
+  out->print("ZLoadBarrierStubC1");
+}
+#endif // PRODUCT
+
+// LIR op emitting the barrier fast path: a platform-specific test of the
+// loaded reference whose condition is consumed by the branch to the slow
+// path (see ZBarrierSetC1::load_barrier below).
+class LIR_OpZLoadBarrierTest : public LIR_Op {
+private:
+  LIR_Opr _opr;   // The loaded reference being tested
+
+public:
+  LIR_OpZLoadBarrierTest(LIR_Opr opr) :
+      LIR_Op(),
+      _opr(opr) {}
+
+  virtual void visit(LIR_OpVisitState* state) {
+    state->do_input(_opr);
+  }
+
+  virtual void emit_code(LIR_Assembler* ce) {
+    ZBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr);
+  }
+
+  virtual void print_instr(outputStream* out) const {
+    _opr->print(out);
+    out->print(" ");
+  }
+
+#ifndef PRODUCT
+  virtual const char* name() const {
+    return "lir_z_load_barrier_test";
+  }
+#endif // PRODUCT
+};
+
+// A barrier is required only when ZBarrierSet says so for these decorators
+// and this access type.
+static bool barrier_needed(LIRAccess& access) {
+  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
+}
+
+// Runtime stub entry points are filled in later by generate_c1_runtime_stubs().
+ZBarrierSetC1::ZBarrierSetC1() :
+    _load_barrier_on_oop_field_preloaded_runtime_stub(NULL),
+    _load_barrier_on_weak_oop_field_preloaded_runtime_stub(NULL) {}
+
+// Select the runtime stub matching the reference strength of the access
+// (weak vs. strong).  Phantom references are not supported here.
+address ZBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const {
+  assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator");
+  // NOTE(review): the assert below is disabled -- confirm whether
+  // ON_UNKNOWN_OOP_REF is intentionally tolerated by this path.
+  //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unsupported decorator");
+
+  if ((decorators & ON_WEAK_OOP_REF) != 0) {
+    return _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
+  } else {
+    return _load_barrier_on_oop_field_preloaded_runtime_stub;
+  }
+}
+
+#ifdef ASSERT
+#define __ access.gen()->lir(__FILE__, __LINE__)->
+#else
+#define __ access.gen()->lir()->
+#endif
+
+// Emit the load barrier for a loaded reference: an inline fast-path test
+// followed by a conditional branch to an out-of-line stub that calls the
+// selected runtime stub, rejoining at the stub's continuation label.
+void ZBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const {
+  // Fast path
+  __ append(new LIR_OpZLoadBarrierTest(result));
+
+  // Slow path
+  const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators());
+  CodeStub* const stub = new ZLoadBarrierStubC1(access, result, runtime_stub);
+  __ branch(lir_cond_notEqual, T_ADDRESS, stub);
+  __ branch_destination(stub->continuation());
+}
+
+#undef __
+
+// Loads: perform the normal load, then apply the load barrier to the
+// result register when this access needs one.
+void ZBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
+  BarrierSetC1::load_at_resolved(access, result);
+
+  if (barrier_needed(access)) {
+    load_barrier(access, result);
+  }
+}
+
+// Emit a synthetic barriered load of the field prior to an atomic operation;
+// the loaded value is discarded (presumably the point is the barrier's side
+// effect on the field before the atomic runs -- see ZGC barrier design).
+static void pre_load_barrier(LIRAccess& access) {
+  DecoratorSet decorators = access.decorators();
+
+  // Downgrade access to MO_UNORDERED
+  decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED;
+
+  // Remove C1_WRITE_ACCESS
+  decorators = (decorators & ~C1_WRITE_ACCESS);
+
+  // Generate synthetic load at
+  access.gen()->access_load_at(decorators,
+                               access.type(),
+                               access.base().item(),
+                               access.offset().opr(),
+                               access.gen()->new_register(access.type()),
+                               NULL /* patch_emit_info */,
+                               NULL /* load_emit_info */);
+}
+
+// Atomic xchg: pre-barrier the field, then defer to the generic implementation.
+LIR_Opr ZBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
+  if (barrier_needed(access)) {
+    pre_load_barrier(access);
+  }
+
+  return BarrierSetC1::atomic_xchg_at_resolved(access, value);
+}
+
+// Atomic cmpxchg: same pre-barrier treatment as xchg.
+LIR_Opr ZBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+  if (barrier_needed(access)) {
+    pre_load_barrier(access);
+  }
+
+  return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
+}
+
+// StubAssembler closure that emits the body of a C1 load barrier runtime
+// stub for the given decorators; produces no oop maps.
+class ZLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure {
+private:
+  const DecoratorSet _decorators;
+
+public:
+  ZLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) :
+      _decorators(decorators) {}
+
+  virtual OopMapSet* generate_code(StubAssembler* sasm) {
+    ZBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators);
+    return NULL;
+  }
+};
+
+// Generate one runtime stub blob and return its entry point.
+static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
+  ZLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
+  CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
+  return code_blob->code_begin();
+}
+
+// Pre-generate both stub variants: strong and weak reference loads.
+void ZBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) {
+  _load_barrier_on_oop_field_preloaded_runtime_stub =
+    generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub");
+  _load_barrier_on_weak_oop_field_preloaded_runtime_stub =
+    generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
+#define SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
+
+#include "c1/c1_CodeStubs.hpp"
+#include "c1/c1_IR.hpp"
+#include "c1/c1_LIR.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
+#include "oops/accessDecorators.hpp"
+
+// Declaration of the C1 load barrier slow-path code stub (see
+// zBarrierSetC1.cpp for semantics of each field).
+class ZLoadBarrierStubC1 : public CodeStub {
+private:
+  DecoratorSet  _decorators;    // Access decorators (reference strength etc.)
+  LIR_Opr       _ref_addr;      // Address the reference was loaded from
+  LIR_Opr       _ref;           // Register holding the loaded reference
+  LIR_Opr       _tmp;           // Scratch register, illegalOpr when unused
+  CodeEmitInfo* _patch_info;    // Non-NULL when the access needs patching
+  address       _runtime_stub;  // Runtime stub entry called by the slow path
+
+public:
+  ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub);
+
+  DecoratorSet decorators() const;
+  LIR_Opr ref() const;
+  LIR_Opr ref_addr() const;
+  LIR_Opr tmp() const;
+  LIR_PatchCode patch_code() const;
+  CodeEmitInfo*& patch_info();
+  address runtime_stub() const;
+
+  virtual void emit_code(LIR_Assembler* ce);
+  virtual void visit(LIR_OpVisitState* visitor);
+
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const;
+#endif // PRODUCT
+};
+
+// C1 barrier set for ZGC: applies load barriers after oop loads and before
+// atomic oop operations, and owns the pre-generated runtime stubs used by
+// the barrier slow paths.
+class ZBarrierSetC1 : public BarrierSetC1 {
+private:
+  // Entry points of the pre-generated runtime stubs (strong / weak variants)
+  address _load_barrier_on_oop_field_preloaded_runtime_stub;
+  address _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
+
+  address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const;
+  void load_barrier(LIRAccess& access, LIR_Opr result) const;
+
+protected:
+  virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
+  virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
+  virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
+
+public:
+  ZBarrierSetC1();
+
+  virtual void generate_c1_runtime_stubs(BufferBlob* blob);
+};
+
+#endif // SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,1480 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "opto/compile.hpp"
+#include "opto/castnode.hpp"
+#include "opto/graphKit.hpp"
+#include "opto/idealKit.hpp"
+#include "opto/loopnode.hpp"
+#include "opto/macro.hpp"
+#include "opto/node.hpp"
+#include "opto/type.hpp"
+#include "utilities/macros.hpp"
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+
+// Per-compilation state: the list of LoadBarrierNodes created so far, kept
+// so later phases can find all barriers for cleanup and expansion.
+ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena)
+  : _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8,  0, NULL)) {}
+
+int ZBarrierSetC2State::load_barrier_count() const {
+  return _load_barrier_nodes->length();
+}
+
+void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode * n) {
+  assert(!_load_barrier_nodes->contains(n), " duplicate entry in expand list");
+  _load_barrier_nodes->append(n);
+}
+
+void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode * n) {
+  // this function may be called twice for a node so check
+  // that the node is in the array before attempting to remove it
+  if (_load_barrier_nodes->contains(n)) {
+    _load_barrier_nodes->remove(n);
+  }
+}
+
+LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
+  return _load_barrier_nodes->at(idx);
+}
+
+// State is arena-allocated and attached to the current Compile object.
+void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
+  return new(comp_arena) ZBarrierSetC2State(comp_arena);
+}
+
+ZBarrierSetC2State* ZBarrierSetC2::state() const {
+  return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
+}
+
+bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
+  return node->is_LoadBarrier();
+}
+
+// Keep the state's barrier list in sync as nodes enter/leave the graph.
+void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
+  if (node->is_LoadBarrier()) {
+    state()->add_load_barrier_node(node->as_LoadBarrier());
+  }
+}
+
+void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
+  if (node->is_LoadBarrier()) {
+    state()->remove_load_barrier_node(node->as_LoadBarrier());
+  }
+}
+
+// Drop barriers that are no longer reachable from the useful-node set.
+void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
+  // Remove useless LoadBarrier nodes
+  ZBarrierSetC2State* s = state();
+  for (int i = s->load_barrier_count()-1; i >= 0; i--) {
+    LoadBarrierNode* n = s->load_barrier_node(i);
+    if (!useful.member(n)) {
+      unregister_potential_barrier_node(n);
+    }
+  }
+}
+
+// Push barriers without true uses so IGVN can eliminate them.
+void ZBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
+  if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
+    worklist.push(node);
+  }
+}
+
+// Run one more PhaseIdealLoop (when there are at least two barriers) to find
+// dominating barriers with exact dominator information.
+void ZBarrierSetC2::find_dominating_barriers(PhaseIterGVN& igvn) {
+  // Look for dominating barriers on the same address only once all
+  // other loop opts are over: loop opts may cause a safepoint to be
+  // inserted between a barrier and its dominating barrier.
+  Compile* C = Compile::current();
+  ZBarrierSetC2* bs = (ZBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2();
+  ZBarrierSetC2State* s = bs->state();
+  if (s->load_barrier_count() >= 2) {
+    Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
+    PhaseIdealLoop ideal_loop(igvn, true, false, true);
+    if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
+  }
+}
+
+void ZBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {
+  // Permanent temporary workaround
+  // Loadbarriers may have non-obvious dead uses keeping them alive during parsing. The use is
+  // removed by RemoveUseless (after parsing, before optimize) but the barriers won't be added to
+  // the worklist. Unless we add them explicitly they are not guaranteed to end up there.
+  ZBarrierSetC2State* s = state();
+
+  for (int i = 0; i < s->load_barrier_count(); i++) {
+    LoadBarrierNode* n = s->load_barrier_node(i);
+    worklist->push(n);
+  }
+}
+
+// Call type for the load barrier runtime call: two oop inputs, one oop result.
+const TypeFunc* ZBarrierSetC2::load_barrier_Type() const {
+  const Type** fields;
+
+  // Create input types (domain)
+  fields = TypeTuple::fields(2);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
+  fields[TypeFunc::Parms+1] = TypeOopPtr::BOTTOM;
+  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
+
+  // Create result type (range)
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
+
+  return TypeFunc::make(domain, range);
+}
+
+// == LoadBarrierNode ==
+
+// Multi-projection ideal node for a ZGC load barrier.  Inputs: Control,
+// Memory, Oop (the loaded reference), the Address it was loaded from, and
+// Similar -- a projection of a dominating barrier on the same address, or
+// top when there is none.
+LoadBarrierNode::LoadBarrierNode(Compile* C,
+                                 Node* c,
+                                 Node* mem,
+                                 Node* val,
+                                 Node* adr,
+                                 bool weak,
+                                 bool writeback,
+                                 bool oop_reload_allowed) :
+    MultiNode(Number_of_Inputs),
+    _weak(weak),
+    _writeback(writeback),
+    _oop_reload_allowed(oop_reload_allowed) {
+  init_req(Control, c);
+  init_req(Memory, mem);
+  init_req(Oop, val);
+  init_req(Address, adr);
+  init_req(Similar, C->top());
+
+  init_class_id(Class_LoadBarrier);
+  // Register with the barrier set so later phases can find this node.
+  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+  bs->register_potential_barrier_node(this);
+}
+
+// Tuple type of the node's outputs: control, memory and the oop (taken from
+// the Oop input's bottom type).
+const Type *LoadBarrierNode::bottom_type() const {
+  const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
+  Node* in_oop = in(Oop);
+  floadbarrier[Control] = Type::CONTROL;
+  floadbarrier[Memory] = Type::MEMORY;
+  floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
+  return TypeTuple::make(Number_of_Outputs, floadbarrier);
+}
+
+// Like bottom_type(), but uses the phase's (possibly narrowed) oop type.
+const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
+  const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
+  const Type* val_t = phase->type(in(Oop));
+  floadbarrier[Control] = Type::CONTROL;
+  floadbarrier[Memory] = Type::MEMORY;
+  floadbarrier[Oop] = val_t;
+  return TypeTuple::make(Number_of_Outputs, floadbarrier);
+}
+
+// Does d dominate n?  Exact when a PhaseIdealLoop is supplied; otherwise a
+// bounded linear walk (at most 10 steps) up the dominator approximation.
+bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) {
+  if (phase != NULL) {
+    return phase->is_dominator(d, n);
+  }
+
+  for (int i = 0; i < 10 && n != NULL; i++) {
+    n = IfNode::up_one_dom(n, linear_only);
+    if (n == d) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Find a load barrier that dominates this one and makes it redundant.
+// Strategy: (1) follow the Similar edge (and its chain), (2) scan other
+// barriers on the same Oop input, (3) when look_for_similar, scan barriers
+// on the same Address input, accepting one only if no safepoint lies on the
+// control paths between the two barriers.  Returns NULL when none is found.
+LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
+  Node* val = in(LoadBarrierNode::Oop);
+  if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
+    LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
+    assert(lb->in(Address) == in(Address), "");
+    // Load barrier on Similar edge dominates so if it now has the Oop field it can replace this barrier.
+    if (lb->in(Oop) == in(Oop)) {
+      return lb;
+    }
+    // Follow chain of load barrier through Similar edges
+    while (!lb->in(Similar)->is_top()) {
+      lb = lb->in(Similar)->in(0)->as_LoadBarrier();
+      assert(lb->in(Address) == in(Address), "");
+    }
+    if (lb != in(Similar)->in(0)) {
+      return lb;
+    }
+  }
+  // Scan other barriers on the same loaded oop for one whose control
+  // dominates ours.
+  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+    Node* u = val->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
+      Node* this_ctrl = in(LoadBarrierNode::Control);
+      Node* other_ctrl = u->in(LoadBarrierNode::Control);
+      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
+        return u->as_LoadBarrier();
+      }
+    }
+  }
+
+  if (ZVerifyLoadBarriers || can_be_eliminated()) {
+    return NULL;
+  }
+
+  if (!look_for_similar) {
+    return NULL;
+  }
+
+  // Scan barriers on the same address; walk the CFG backwards from our
+  // control toward the candidate, bailing out if a safepoint is found on
+  // any path in between.
+  Node* addr = in(LoadBarrierNode::Address);
+  for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
+    Node* u = addr->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
+      Node* this_ctrl = in(LoadBarrierNode::Control);
+      Node* other_ctrl = u->in(LoadBarrierNode::Control);
+      if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
+        ResourceMark rm;
+        Unique_Node_List wq;
+        wq.push(in(LoadBarrierNode::Control));
+        bool ok = true;
+        bool dom_found = false;
+        for (uint next = 0; next < wq.size(); ++next) {
+          Node *n = wq.at(next);
+          if (n->is_top()) {
+            return NULL;
+          }
+          assert(n->is_CFG(), "");
+          if (n->is_SafePoint()) {
+            ok = false;
+            break;
+          }
+          if (n == u) {
+            // Reached the dominating barrier; stop walking this path.
+            dom_found = true;
+            continue;
+          }
+          if (n->is_Region()) {
+            for (uint i = 1; i < n->req(); i++) {
+              Node* m = n->in(i);
+              if (m != NULL) {
+                wq.push(m);
+              }
+            }
+          } else {
+            Node* m = n->in(0);
+            if (m != NULL) {
+              wq.push(m);
+            }
+          }
+        }
+        if (ok) {
+          assert(dom_found, "");
+          return u->as_LoadBarrier();;
+        }
+        break;
+      }
+    }
+  }
+
+  return NULL;
+}
+
+// A change to this barrier may affect barriers it dominates (same Oop or
+// same Address); push those back on the IGVN worklist for reprocessing.
+void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
+  // change to that barrier may affect a dominated barrier so re-push those
+  Node* val = in(LoadBarrierNode::Oop);
+
+  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+    Node* u = val->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
+      Node* this_ctrl = in(Control);
+      Node* other_ctrl = u->in(Control);
+      if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
+        igvn->_worklist.push(u);
+      }
+    }
+
+    // NOTE(review): this Address scan is nested inside the Oop loop above
+    // (its i/imax shadow the outer ones), so it is repeated once per Oop
+    // use.  It looks like it was intended to be a sibling loop -- confirm
+    // intent before restructuring.
+    Node* addr = in(LoadBarrierNode::Address);
+    for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
+      Node* u = addr->fast_out(i);
+      if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
+        Node* this_ctrl = in(Control);
+        Node* other_ctrl = u->in(Control);
+        if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
+          igvn->_worklist.push(u);
+        }
+      }
+    }
+  }
+}
+
+// GVN identity: when barrier optimization is enabled, collapse this barrier
+// onto an identical dominating barrier on the same oop.
+Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
+  if (!phase->C->directive()->ZOptimizeLoadBarriersOption) {
+    return this;
+  }
+
+  bool redundant_addr = false;  // NOTE(review): unused -- candidate for removal
+  LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
+  if (dominating_barrier != NULL) {
+    assert(dominating_barrier->in(Oop) == in(Oop), "");
+    return dominating_barrier;
+  }
+
+  return this;
+}
+
+// Ideal transform for a load barrier:
+//  - normalize the Memory input past MergeMems (raw slice only),
+//  - link to a dominating barrier via the Similar edge,
+//  - eliminate the barrier entirely when it is redundant or has no true
+//    uses (routing control/result past it and returning a dummy ConI),
+//  - otherwise keep the Similar edge sane and re-push dependent barriers.
+Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if (remove_dead_region(phase, can_reshape)) {
+    return this;
+  }
+
+  Node* val = in(Oop);
+  Node* mem = in(Memory);
+  Node* ctrl = in(Control);
+  Node* adr = in(Address);
+  assert(val->Opcode() != Op_LoadN, "");
+
+  // The barrier only uses the raw memory slice; bypass MergeMem nodes.
+  if (mem->is_MergeMem()) {
+    Node* new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
+    set_req(Memory, new_mem);
+    if (mem->outcnt() == 0 && can_reshape) {
+      phase->is_IterGVN()->_worklist.push(mem);
+    }
+
+    return this;
+  }
+
+  bool optimizeLoadBarriers = phase->C->directive()->ZOptimizeLoadBarriersOption;
+  LoadBarrierNode* dominating_barrier = optimizeLoadBarriers ? has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress()) : NULL;
+  // Record the dominating barrier on the Similar edge (same address,
+  // different oop) so a later pass can merge them.
+  if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
+    assert(in(Address) == dominating_barrier->in(Address), "");
+    set_req(Similar, dominating_barrier->proj_out(Oop));
+    return this;
+  }
+
+  bool eliminate = (optimizeLoadBarriers && !(val->is_Phi() || val->Opcode() == Op_LoadP || val->Opcode() == Op_GetAndSetP || val->is_DecodeN())) ||
+                   (can_reshape && (dominating_barrier != NULL || !has_true_uses()));
+
+  if (eliminate) {
+    if (can_reshape) {
+      PhaseIterGVN* igvn = phase->is_IterGVN();
+      Node* out_ctrl = proj_out_or_null(Control);
+      Node* out_res = proj_out_or_null(Oop);
+
+      if (out_ctrl != NULL) {
+        igvn->replace_node(out_ctrl, ctrl);
+      }
+
+      // That transformation may cause the Similar edge on the load barrier to be invalid
+      fix_similar_in_uses(igvn);
+      if (out_res != NULL) {
+        if (dominating_barrier != NULL) {
+          igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
+        } else {
+          igvn->replace_node(out_res, val);
+        }
+      }
+    }
+
+    // All projections have been rerouted; return a dummy constant so the
+    // barrier itself goes dead.
+    return new ConINode(TypeInt::ZERO);
+  }
+
+  // If the Similar edge is no longer a load barrier, clear it
+  Node* similar = in(Similar);
+  if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
+    set_req(Similar, phase->C->top());
+    return this;
+  }
+
+  if (can_reshape) {
+    // If this barrier is linked through the Similar edge by a
+    // dominated barrier and both barriers have the same Oop field,
+    // the dominated barrier can go away, so push it for reprocessing.
+    // We also want to avoid a barrier to depend on another dominating
+    // barrier through its Similar edge that itself depend on another
+    // barrier through its Similar edge and rather have the first
+    // depend on the third.
+    PhaseIterGVN* igvn = phase->is_IterGVN();
+    Node* out_res = proj_out(Oop);
+    for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+      Node* u = out_res->fast_out(i);
+      if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
+          (u->in(Oop) == val || !u->in(Similar)->is_top())) {
+        igvn->_worklist.push(u);
+      }
+    }
+
+    push_dominated_barriers(igvn);
+  }
+
+  return NULL;
+}
+
+// Users whose Similar edge points at our Oop projection may become stale
+// when this barrier is transformed; reset those Similar edges to top.
+void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
+  Node* out_res = proj_out_or_null(Oop);
+  if (out_res == NULL) {
+    return;
+  }
+
+  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+    Node* u = out_res->fast_out(i);
+    if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
+      igvn->replace_input_of(u, Similar, igvn->C->top());
+      // replace_input_of removed a use; compensate the iterator.
+      --i;
+      --imax;
+    }
+  }
+}
+
+// A barrier has "true" uses when its Oop projection is consumed by anything
+// other than the Similar edge of another load barrier.
+bool LoadBarrierNode::has_true_uses() const {
+  Node* out_res = proj_out_or_null(Oop);
+  if (out_res == NULL) {
+    return false;
+  }
+
+  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+    Node* u = out_res->fast_out(i);
+    if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// == Accesses ==
+
+// Expand a CompareAndSwapP into a barrier-aware retry pattern: if the first
+// CAS fails, reload the field through a load barrier and, when the healed
+// value still equals the expected one, retry the CAS once.  Returns the phi
+// holding the final 0/1 result.
+Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicAccess& access) const {
+  assert(!UseCompressedOops, "Not allowed");
+  CompareAndSwapNode* cas = (CompareAndSwapNode*)access.raw_access();
+  PhaseGVN& gvn = access.kit()->gvn();
+  Compile* C = Compile::current();
+  GraphKit* kit = access.kit();
+
+  Node* in_ctrl     = cas->in(MemNode::Control);
+  Node* in_mem      = cas->in(MemNode::Memory);
+  Node* in_adr      = cas->in(MemNode::Address);
+  Node* in_val      = cas->in(MemNode::ValueIn);
+  Node* in_expected = cas->in(LoadStoreConditionalNode::ExpectedIn);
+
+  float likely                   = PROB_LIKELY(0.999);
+
+  const TypePtr *adr_type        = gvn.type(in_adr)->isa_ptr();
+  Compile::AliasType* alias_type = C->alias_type(adr_type);
+  int alias_idx                  = C->get_alias_index(adr_type);
+
+  // Outer check - true: continue, false: load and check
+  Node* region   = new RegionNode(3);
+  Node* phi      = new PhiNode(region, TypeInt::BOOL);
+  Node* phi_mem  = new PhiNode(region, Type::MEMORY, adr_type);
+
+  // Inner check - is the healed ref equal to the expected
+  Node* region2  = new RegionNode(3);
+  Node* phi2     = new PhiNode(region2, TypeInt::BOOL);
+  Node* phi_mem2 = new PhiNode(region2, Type::MEMORY, adr_type);
+
+  // CAS node returns 0 or 1
+  Node* cmp     = gvn.transform(new CmpINode(cas, kit->intcon(0)));
+  Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+  IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
+  Node* then    = gvn.transform(new IfTrueNode(iff));
+  Node* elsen   = gvn.transform(new IfFalseNode(iff));
+
+  Node* scmemproj1   = gvn.transform(new SCMemProjNode(cas));
+
+  kit->set_memory(scmemproj1, alias_idx);
+  phi_mem->init_req(1, scmemproj1);
+  phi_mem2->init_req(2, scmemproj1);
+
+  // CAS fail - reload and heal oop
+  Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
+  Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
+  Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
+  Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
+
+  // Check load
+  Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
+  Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
+  Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
+  Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
+  IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
+  Node* then2   = gvn.transform(new IfTrueNode(iff2));
+  Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
+
+  // redo CAS
+  Node* cas2       = gvn.transform(new CompareAndSwapPNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, cas->order()));
+  Node* scmemproj2 = gvn.transform(new SCMemProjNode(cas2));
+  kit->set_control(elsen2);
+  kit->set_memory(scmemproj2, alias_idx);
+
+  // Merge inner flow - check if healed oop was equal to expected.
+  region2->set_req(1, kit->control());
+  region2->set_req(2, then2);
+  phi2->set_req(1, cas2);
+  phi2->set_req(2, kit->intcon(0));
+  phi_mem2->init_req(1, scmemproj2);
+  kit->set_memory(phi_mem2, alias_idx);
+
+  // Merge outer flow - then check if first cas succeeded
+  region->set_req(1, then);
+  region->set_req(2, region2);
+  phi->set_req(1, kit->intcon(1));
+  phi->set_req(2, phi2);
+  phi_mem->init_req(2, phi_mem2);
+  kit->set_memory(phi_mem, alias_idx);
+
+  gvn.transform(region2);
+  gvn.transform(phi2);
+  gvn.transform(phi_mem2);
+  gvn.transform(region);
+  gvn.transform(phi);
+  gvn.transform(phi_mem);
+
+  kit->set_control(region);
+  kit->insert_mem_bar(Op_MemBarCPUOrder);
+
+  return phi;
+}
+
+// Expands a pointer CompareAndExchange (which returns the witnessed value)
+// into a barrier-aware graph: if the first CAS fails, the oop is reloaded
+// and healed through a LoadBarrierNode; only if the healed value still
+// differs from the expected value is the failure final, otherwise the CAS
+// is retried once with a clean oop. Returns a Phi merging the witnessed
+// oop from all paths.
+Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicAccess& access) const {
+  CompareAndExchangePNode* cmpx = (CompareAndExchangePNode*)access.raw_access();
+  GraphKit* kit = access.kit();
+  PhaseGVN& gvn = kit->gvn();
+  Compile* C = Compile::current();
+
+  Node* in_ctrl     = cmpx->in(MemNode::Control);
+  Node* in_mem      = cmpx->in(MemNode::Memory);
+  Node* in_adr      = cmpx->in(MemNode::Address);
+  Node* in_val      = cmpx->in(MemNode::ValueIn);
+  Node* in_expected = cmpx->in(LoadStoreConditionalNode::ExpectedIn);
+
+  float likely                   = PROB_LIKELY(0.999);
+
+  const TypePtr *adr_type        = cmpx->get_ptr_type();
+  Compile::AliasType* alias_type = C->alias_type(adr_type);
+  int alias_idx                  = C->get_alias_index(adr_type);
+
+  // Outer check - true: continue, false: load and check
+  Node* region  = new RegionNode(3);
+  Node* phi     = new PhiNode(region, adr_type);
+
+  // Inner check - is the healed ref equal to the expected
+  Node* region2 = new RegionNode(3);
+  Node* phi2    = new PhiNode(region2, adr_type);
+
+  // Check if the cmpx succeeded
+  Node* cmp     = gvn.transform(new CmpPNode(cmpx, in_expected));
+  Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool();
+  IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
+  Node* then    = gvn.transform(new IfTrueNode(iff));
+  Node* elsen   = gvn.transform(new IfFalseNode(iff));
+
+  Node* scmemproj1  = gvn.transform(new SCMemProjNode(cmpx));
+  kit->set_memory(scmemproj1, alias_idx);
+
+  // CAS fail - reload and heal oop
+  Node* reload      = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
+  Node* barrier     = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
+  Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
+  Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
+
+  // Check load - compare the healed oop against the expected value as raw
+  // machine words (CastP2X) to avoid further oop bookkeeping.
+  Node* tmpX    = gvn.transform(new CastP2XNode(NULL, barrierdata));
+  Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
+  Node* cmp2    = gvn.transform(new CmpXNode(tmpX, in_expX));
+  Node *bol2    = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
+  IfNode* iff2  = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
+  Node* then2   = gvn.transform(new IfTrueNode(iff2));
+  Node* elsen2  = gvn.transform(new IfFalseNode(iff2));
+
+  // Redo CAS - the first failure was only due to a stale (unhealed) oop
+  Node* cmpx2      = gvn.transform(new CompareAndExchangePNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, adr_type, cmpx->get_ptr_type(), cmpx->order()));
+  Node* scmemproj2 = gvn.transform(new SCMemProjNode(cmpx2));
+  kit->set_control(elsen2);
+  kit->set_memory(scmemproj2, alias_idx);
+
+  // Merge inner flow - check if the healed oop was equal to the expected value.
+  region2->set_req(1, kit->control());
+  region2->set_req(2, then2);
+  phi2->set_req(1, cmpx2);
+  phi2->set_req(2, barrierdata);
+
+  // Merge outer flow - then check if the first CAS succeeded
+  region->set_req(1, then);
+  region->set_req(2, region2);
+  phi->set_req(1, cmpx);
+  phi->set_req(2, phi2);
+
+  gvn.transform(region2);
+  gvn.transform(phi2);
+  gvn.transform(region);
+  gvn.transform(phi);
+
+  kit->set_control(region);
+  // NOTE(review): memory is reset to the incoming memory state here, unlike
+  // make_cas_loadbarrier which merges the SCMemProjs - TODO confirm intended.
+  kit->set_memory(in_mem, alias_idx);
+  kit->insert_mem_bar(Op_MemBarCPUOrder);
+
+  return phi;
+}
+
+// Inserts a LoadBarrierNode guarding the oop 'val' loaded from 'adr', and
+// returns the healed oop. If GVN folds the barrier away (e.g. it is
+// dominated by an equivalent barrier), the original value is returned as-is.
+Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak, bool writeback, bool oop_reload_allowed) const {
+  PhaseGVN& gvn = kit->gvn();
+  Node* const new_barrier = new LoadBarrierNode(Compile::current(), kit->control(), kit->memory(TypeRawPtr::BOTTOM), val, adr, weak, writeback, oop_reload_allowed);
+  Node* const transformed = gvn.transform(new_barrier);
+
+  // Barrier optimized away entirely - the incoming oop is already safe.
+  if (!transformed->is_LoadBarrier()) {
+    return val;
+  }
+
+  // Only hook control up to our node if it survived the transform as-is.
+  if (transformed == new_barrier) {
+    kit->set_control(gvn.transform(new ProjNode(new_barrier, LoadBarrierNode::Control)));
+  }
+  return gvn.transform(new ProjNode(transformed, LoadBarrierNode::Oop));
+}
+
+// Returns true if this access (decorators + basic type) requires a ZGC load
+// barrier. Takes the access by reference: passing by value copied (and for
+// C2AtomicAccess arguments, sliced) the access object on every call.
+static bool barrier_needed(C2Access& access) {
+  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
+}
+
+// Applies the ZGC load barrier to an oop load. For unsafe accesses the base
+// may not be a real oop (raw/off-heap access); when verifying barriers, the
+// barrier is then only applied on the path where the base is non-null.
+// Removed: unused local 'conc_root' (computed from IN_CONCURRENT_ROOT but
+// never read).
+Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
+  Node* p = BarrierSetC2::load_at_resolved(access, val_type);
+  if (!barrier_needed(access)) {
+    return p;
+  }
+
+  bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
+
+  GraphKit* kit = access.kit();
+  PhaseGVN& gvn = kit->gvn();
+  Node* adr = access.addr().node();
+  Node* heap_base_oop = access.base();
+  bool unsafe = (access.decorators() & C2_UNSAFE_ACCESS) != 0;
+  if (unsafe) {
+    if (!ZVerifyLoadBarriers) {
+      p = load_barrier(kit, p, adr);
+    } else {
+      if (!TypePtr::NULL_PTR->higher_equal(gvn.type(heap_base_oop))) {
+        // Base is provably non-null - apply the barrier unconditionally.
+        p = load_barrier(kit, p, adr);
+      } else {
+        // Base may be null (off-heap access) - barrier only the oop path.
+        IdealKit ideal(kit);
+        IdealVariable res(ideal);
+#define __ ideal.
+        __ declarations_done();
+        __ set(res, p);
+        __ if_then(heap_base_oop, BoolTest::ne, kit->null(), PROB_UNLIKELY(0.999)); {
+          kit->sync_kit(ideal);
+          p = load_barrier(kit, p, adr);
+          __ set(res, p);
+          __ sync_kit(kit);
+        } __ end_if();
+        kit->final_sync(ideal);
+        p = __ value(res);
+#undef __
+      }
+    }
+    return p;
+  } else {
+    return load_barrier(access.kit(), p, access.addr().node(), weak, true, true);
+  }
+}
+
+// Pointer compare-and-exchange returning the witnessed value. Pinning is
+// disabled before the raw access node is rewritten into the
+// loadbarrier-guarded graph shape.
+Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+                                                    Node* new_val, const Type* val_type) const {
+  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
+  if (!barrier_needed(access)) {
+    return result;
+  }
+
+  access.set_needs_pinning(false);
+  return make_cmpx_loadbarrier(access);
+}
+
+// Pointer compare-and-exchange returning success/failure. A CAS whose
+// expected value is null cannot spuriously fail on a stale (unhealed) oop,
+// so no barrier is needed in that case. The weak and strong variants used
+// identical barrier expansions, so the former duplicated if/else on
+// C2_WEAK_CMPXCHG has been collapsed into a single path.
+Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+                                                     Node* new_val, const Type* value_type) const {
+  Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
+  if (!barrier_needed(access)) {
+    return result;
+  }
+
+  Node* load_store = access.raw_access();
+  bool expected_is_null = (expected_val->get_ptr_type() == TypePtr::NULL_PTR);
+
+  if (!expected_is_null) {
+    access.set_needs_pinning(false);
+    load_store = make_cas_loadbarrier(access);
+  }
+
+  return load_store;
+}
+
+// Pointer swap. The previous value returned by the exchange must be healed,
+// but must not be written back (writeback = false), and the barrier may not
+// reload the oop from memory (oop_reload_allowed = false).
+Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const {
+  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
+  if (!barrier_needed(access)) {
+    return result;
+  }
+
+  return load_barrier(access.kit(), access.raw_access(), access.addr().node(), false, false, false);
+}
+
+// == Macro Expansion ==
+
+// Expands (or eliminates) one LoadBarrierNode macro node. An eliminable
+// barrier (one with a dominating barrier on the same address) is replaced by
+// a clone of its load, pinned below the dominating barrier so it cannot
+// float above it. Otherwise the barrier is expanded into either the basic
+// (runtime-call) or the optimized (stub-based) slow path.
+void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
+  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+  Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
+  Node* in_val  = barrier->in(LoadBarrierNode::Oop);
+  Node* in_adr  = barrier->in(LoadBarrierNode::Address);
+
+  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+  Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
+
+  PhaseIterGVN &igvn = phase->igvn();
+
+  // When verifying, barriers are checked but never actually expanded.
+  if (ZVerifyLoadBarriers) {
+    igvn.replace_node(out_res, in_val);
+    igvn.replace_node(out_ctrl, in_ctrl);
+    return;
+  }
+
+  if (barrier->can_be_eliminated()) {
+    // Clone and pin the load for this barrier below the dominating
+    // barrier: the load cannot be allowed to float above the
+    // dominating barrier
+    Node* load = in_val;
+
+    if (load->is_Load()) {
+      Node* new_load = load->clone();
+      Node* addp = new_load->in(MemNode::Address);
+      assert(addp->is_AddP() || addp->is_Phi() || addp->is_Load(), "bad address");
+      // Pin via a control-carrying CastPP on the address, controlled by the
+      // dominating barrier's control output.
+      Node* cast = new CastPPNode(addp, igvn.type(addp), true);
+      Node* ctrl = NULL;
+      Node* similar = barrier->in(LoadBarrierNode::Similar);
+      if (similar->is_Phi()) {
+        // already expanded
+        ctrl = similar->in(0);
+      } else {
+        assert(similar->is_Proj() && similar->in(0)->is_LoadBarrier(), "unexpected graph shape");
+        ctrl = similar->in(0)->as_LoadBarrier()->proj_out(LoadBarrierNode::Control);
+      }
+      assert(ctrl != NULL, "bad control");
+      cast->set_req(0, ctrl);
+      igvn.transform(cast);
+      new_load->set_req(MemNode::Address, cast);
+      igvn.transform(new_load);
+
+      igvn.replace_node(out_res, new_load);
+      igvn.replace_node(out_ctrl, in_ctrl);
+      return;
+    }
+    // cannot eliminate
+  }
+
+  // There are two cases that require the basic loadbarrier
+  // 1) When the writeback of a healed oop must be avoided (swap)
+  // 2) When we must guarantee that no reload of the oop is done (swap, cas, cmpx)
+  if (!barrier->is_writeback()) {
+    assert(!barrier->oop_reload_allowed(), "writeback barriers should be marked as requires oop");
+  }
+
+  if (!barrier->oop_reload_allowed()) {
+    expand_loadbarrier_basic(phase, barrier);
+  } else {
+    expand_loadbarrier_optimized(phase, barrier);
+  }
+}
+
+// Basic loadbarrier using conventional arg passing.
+// Builds: a test of (oop & thread-local bad_mask); on the unlikely bad-color
+// path, a leaf call into the ZBarrierSetRuntime slow path (weak or strong
+// flavor); then a region/phi merging the fast- and slow-path results.
+void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
+  PhaseIterGVN &igvn = phase->igvn();
+
+  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+  Node* in_mem  = barrier->in(LoadBarrierNode::Memory);
+  Node* in_val  = barrier->in(LoadBarrierNode::Oop);
+  Node* in_adr  = barrier->in(LoadBarrierNode::Address);
+
+  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+  Node* out_res  = barrier->proj_out(LoadBarrierNode::Oop);
+
+  float unlikely  = PROB_UNLIKELY(0.999);
+  const Type* in_val_maybe_null_t = igvn.type(in_val);
+
+  // Load the thread-local address bad mask and test (oop & bad_mask) != 0.
+  Node* jthread = igvn.transform(new ThreadLocalNode());
+  Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
+  Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
+  Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
+  Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
+  Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
+  Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+  IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
+  Node* then = igvn.transform(new IfTrueNode(iff));
+  Node* elsen = igvn.transform(new IfFalseNode(iff));
+
+  Node* result_region;
+  Node* result_val;
+
+  result_region = new RegionNode(3);
+  result_val = new PhiNode(result_region, TypeInstPtr::BOTTOM);
+
+  // Fast path (good color): pass the original oop through, pinned to the
+  // fast-path control via a CastPP.
+  result_region->set_req(1, elsen);
+  Node* res = igvn.transform(new CastPPNode(in_val, in_val_maybe_null_t));
+  res->init_req(0, elsen);
+  result_val->set_req(1, res);
+
+  // Slow path (bad color): call the runtime barrier, weak or strong flavor.
+  const TypeFunc *tf = load_barrier_Type();
+  Node* call;
+  if (barrier->is_weak()) {
+    call = new CallLeafNode(tf,
+                            ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr(),
+                            "ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded",
+                            TypeRawPtr::BOTTOM);
+  } else {
+    call = new CallLeafNode(tf,
+                            ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(),
+                            "ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded",
+                            TypeRawPtr::BOTTOM);
+  }
+
+  call->init_req(TypeFunc::Control, then);
+  call->init_req(TypeFunc::I_O    , phase->top());
+  call->init_req(TypeFunc::Memory , in_mem);
+  call->init_req(TypeFunc::FramePtr, phase->top());
+  call->init_req(TypeFunc::ReturnAdr, phase->top());
+  call->init_req(TypeFunc::Parms+0, in_val);
+  if (barrier->is_writeback()) {
+    call->init_req(TypeFunc::Parms+1, in_adr);
+  } else {
+    // when slow path is called with a null adr, the healed oop will not be written back
+    call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT));
+  }
+  call = igvn.transform(call);
+
+  Node* ctrl = igvn.transform(new ProjNode(call, TypeFunc::Control));
+  res = igvn.transform(new ProjNode(call, TypeFunc::Parms));
+  res = igvn.transform(new CheckCastPPNode(ctrl, res, in_val_maybe_null_t));
+
+  result_region->set_req(2, ctrl);
+  result_val->set_req(2, res);
+
+  result_region = igvn.transform(result_region);
+  result_val = igvn.transform(result_val);
+
+  // out_ctrl may be NULL if the control projection had no users.
+  if (out_ctrl != NULL) {
+    igvn.replace_node(out_ctrl, result_region);
+  }
+  igvn.replace_node(out_res, result_val);
+}
+
+// Optimized, low spill, loadbarrier variant using a stub specialized on the
+// register used. The slow path is carried by a LoadBarrier(Weak)SlowReg node
+// that later matches to a compact stub call, so this expansion only builds
+// the bad-mask test and the final region/phi merge.
+// Cleanups vs the original: removed commented-out dead code, the redundant
+// 'slow_path_surrogate' alias, stale review comments and a trailing return.
+void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
+  PhaseIterGVN &igvn = phase->igvn();
+#ifdef PRINT_NODE_TRAVERSALS
+  Node* preceding_barrier_node = barrier->in(LoadBarrierNode::Oop);
+#endif
+
+  Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+  Node* in_mem = barrier->in(LoadBarrierNode::Memory);
+  Node* in_val = barrier->in(LoadBarrierNode::Oop);
+  Node* in_adr = barrier->in(LoadBarrierNode::Address);
+
+  Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+  Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
+
+  assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null");
+
+#ifdef PRINT_NODE_TRAVERSALS
+  tty->print("\n\n\nBefore barrier optimization:\n");
+  traverse(barrier, out_ctrl, out_res, -1);
+
+  tty->print("\nBefore barrier optimization:  preceding_barrier_node\n");
+  traverse(preceding_barrier_node, out_ctrl, out_res, -1);
+#endif
+
+  float unlikely  = PROB_UNLIKELY(0.999);
+
+  // Load the thread-local address bad mask and test (oop & bad_mask) != 0.
+  Node* jthread = igvn.transform(new ThreadLocalNode());
+  Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
+  Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
+                                                 TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
+                                                 MemNode::unordered));
+  Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
+  Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
+  Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
+  Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+  IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
+  Node* then = igvn.transform(new IfTrueNode(iff));
+  Node* elsen = igvn.transform(new IfFalseNode(iff));
+
+  // Select the slow-path node matching the barrier flavor; the weak variant
+  // matches to the weak stub.
+  Node* new_loadp;
+  if (!barrier->is_weak()) {
+    new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
+                                                          (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
+  } else {
+    new_loadp = igvn.transform(new LoadBarrierWeakSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
+                                                              (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
+  }
+
+  // Create the final region/phi pair to converge control/data paths to downstream code.
+  Node* result_region = igvn.transform(new RegionNode(3));
+  result_region->set_req(1, then);
+  result_region->set_req(2, elsen);
+
+  Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
+  result_phi->set_req(1, new_loadp);
+  result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
+
+  // Connect the original outputs to the barrier region and phi to complete
+  // the expansion/substitution. out_ctrl may be NULL if the control
+  // projection had no users.
+  if (out_ctrl != NULL) {
+    igvn.replace_node(out_ctrl, result_region);
+  }
+  igvn.replace_node(out_res, result_phi);
+
+  assert(barrier->outcnt() == 0,"LoadBarrier macro node has non-null outputs after expansion!");
+
+#ifdef PRINT_NODE_TRAVERSALS
+  tty->print("\nAfter barrier optimization:  old out_ctrl\n");
+  traverse(out_ctrl, out_ctrl, out_res, -1);
+  tty->print("\nAfter barrier optimization:  old out_res\n");
+  traverse(out_res, out_ctrl, out_res, -1);
+  tty->print("\nAfter barrier optimization:  old barrier\n");
+  traverse(barrier, out_ctrl, out_res, -1);
+  tty->print("\nAfter barrier optimization:  preceding_barrier_node\n");
+  traverse(preceding_barrier_node, result_region, result_phi, -1);
+#endif
+}
+
+// Expands all registered LoadBarrierNode macro nodes in two passes:
+// first the eliminable barriers (those with a dominating barrier), skipping
+// the rest, then everything that remains. Returns true if compilation
+// failed during expansion.
+bool ZBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
+  Compile* C = Compile::current();
+  PhaseIterGVN &igvn = macro->igvn();
+  ZBarrierSetC2State* s = state();
+  if (s->load_barrier_count() > 0) {
+#ifdef ASSERT
+    verify_gc_barriers(false);
+#endif
+    igvn.set_delay_transform(true);
+    // Pass 1: expand eliminable barriers from the back of the list; count
+    // skipped (non-eliminable) entries so the loop terminates.
+    int skipped = 0;
+    while (s->load_barrier_count() > skipped) {
+      int load_barrier_count = s->load_barrier_count();
+      LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
+      if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
+        // node is unreachable, so don't try to expand it
+        s->remove_load_barrier_node(n);
+        continue;
+      }
+      if (!n->can_be_eliminated()) {
+        skipped++;
+        continue;
+      }
+      expand_loadbarrier_node(macro, n);
+      assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
+      if (C->failing())  return true;
+    }
+    // Pass 2: expand the remaining (non-eliminable) barriers.
+    while (s->load_barrier_count() > 0) {
+      int load_barrier_count = s->load_barrier_count();
+      LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
+      assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
+      assert(!n->can_be_eliminated(), "should have been processed already");
+      expand_loadbarrier_node(macro, n);
+      assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
+      if (C->failing())  return true;
+    }
+    igvn.set_delay_transform(false);
+    igvn.optimize();
+    if (C->failing())  return true;
+  }
+  return false;
+}
+
+// == Loop optimization ==
+
+// If lb has a dominating barrier on the same address: either record it on
+// the Similar edge (different oop inputs) so later passes can eliminate lb,
+// or, when the oop inputs match, remove lb entirely and reroute its users
+// to the dominating barrier. Returns true if lb was removed.
+static bool replace_with_dominating_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Compile* C = Compile::current();
+
+  LoadBarrierNode* lb2 = lb->has_dominating_barrier(phase, false, last_round);
+  if (lb2 != NULL) {
+    if (lb->in(LoadBarrierNode::Oop) != lb2->in(LoadBarrierNode::Oop)) {
+      assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "");
+      igvn.replace_input_of(lb, LoadBarrierNode::Similar, lb2->proj_out(LoadBarrierNode::Oop));
+      C->set_major_progress();
+    } else  {
+      // That transformation may cause the Similar edge on dominated load barriers to be invalid
+      lb->fix_similar_in_uses(&igvn);
+
+      Node* val = lb->proj_out(LoadBarrierNode::Oop);
+      assert(lb2->has_true_uses(), "");
+      assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "");
+
+      phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
+      phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
+      igvn.replace_node(val, lb2->proj_out(LoadBarrierNode::Oop));
+
+      return true;
+    }
+  }
+  return false;
+}
+
+// Walks the raw memory graph upwards from 'mem' until reaching a memory
+// state whose control dominates 'dom'. When 'dom' is a Region and i != -1,
+// a Phi at 'dom' is resolved through its i-th input (the path being split).
+static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom, int i) {
+  assert(dom->is_Region() || i == -1, "");
+  Node* m = mem;
+  while(phase->is_dominator(dom, phase->has_ctrl(m) ? phase->get_ctrl(m) : m->in(0))) {
+    if (m->is_Mem()) {
+      assert(m->as_Mem()->adr_type() == TypeRawPtr::BOTTOM, "");
+      m = m->in(MemNode::Memory);
+    } else if (m->is_MergeMem()) {
+      m = m->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
+    } else if (m->is_Phi()) {
+      if (m->in(0) == dom && i != -1) {
+        // Phi merges at 'dom' itself - take the memory on the split path.
+        m = m->in(i);
+        break;
+      } else {
+        m = m->in(LoopNode::EntryControl);
+      }
+    } else if (m->is_Proj()) {
+      m = m->in(0);
+    } else if (m->is_SafePoint() || m->is_MemBar()) {
+      m = m->in(TypeFunc::Memory);
+    } else {
+#ifdef ASSERT
+      m->dump();
+#endif
+      // Unexpected node kind in the raw memory chain.
+      ShouldNotReachHere();
+    }
+  }
+  return m;
+}
+
+// Clones 'lb' with new control/memory inputs (and optionally a new oop
+// input), registers the clone and its projections with the loop phase, and
+// clears the clone's Similar edge if its target no longer dominates the new
+// position. Returns the new barrier.
+static LoadBarrierNode* clone_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* ctl, Node* mem, Node* oop_in) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Compile* C = Compile::current();
+  Node* the_clone = lb->clone();
+  the_clone->set_req(LoadBarrierNode::Control, ctl);
+  the_clone->set_req(LoadBarrierNode::Memory, mem);
+  if (oop_in != NULL) {
+    the_clone->set_req(LoadBarrierNode::Oop, oop_in);
+  }
+
+  LoadBarrierNode* new_lb = the_clone->as_LoadBarrier();
+  igvn.register_new_node_with_optimizer(new_lb);
+  IdealLoopTree *loop = phase->get_loop(new_lb->in(0));
+  phase->set_ctrl(new_lb, new_lb->in(0));
+  phase->set_loop(new_lb, loop);
+  phase->set_idom(new_lb, new_lb->in(0), phase->dom_depth(new_lb->in(0))+1);
+  // Only leaf loops keep an explicit body list.
+  if (!loop->_child) {
+    loop->_body.push(new_lb);
+  }
+
+  Node* proj_ctl = new ProjNode(new_lb, LoadBarrierNode::Control);
+  igvn.register_new_node_with_optimizer(proj_ctl);
+  phase->set_ctrl(proj_ctl, proj_ctl->in(0));
+  phase->set_loop(proj_ctl, loop);
+  phase->set_idom(proj_ctl, new_lb, phase->dom_depth(new_lb)+1);
+  if (!loop->_child) {
+    loop->_body.push(proj_ctl);
+  }
+
+  Node* proj_oop = new ProjNode(new_lb, LoadBarrierNode::Oop);
+  phase->register_new_node(proj_oop, new_lb);
+
+  // The cloned Similar edge is only valid if its barrier still dominates
+  // the clone's new control.
+  if (!new_lb->in(LoadBarrierNode::Similar)->is_top()) {
+    LoadBarrierNode* similar = new_lb->in(LoadBarrierNode::Similar)->in(0)->as_LoadBarrier();
+    if (!phase->is_dominator(similar, ctl)) {
+      igvn.replace_input_of(new_lb, LoadBarrierNode::Similar, C->top());
+    }
+  }
+
+  return new_lb;
+}
+
+// Removes 'lb' from the graph: its oop users are rerouted to 'new_val' and
+// its control users to the barrier's incoming control.
+static void replace_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* new_val) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Node* val = lb->proj_out(LoadBarrierNode::Oop);
+  igvn.replace_node(val, new_val);
+  phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
+  phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
+}
+
+// If the barrier's oop input is a Phi, splits the barrier through the Phi:
+// one clone per Phi input, each placed on the corresponding control path,
+// merged by a new Phi that replaces the original barrier. Returns true on
+// success.
+static bool split_barrier_thru_phi(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Compile* C = Compile::current();
+
+  if (lb->in(LoadBarrierNode::Oop)->is_Phi()) {
+    Node* oop_phi = lb->in(LoadBarrierNode::Oop);
+
+    if (oop_phi->in(2) == oop_phi) {
+      // Ignore phis with only one input
+      return false;
+    }
+
+    // The address must be available above the Phi's region, otherwise the
+    // cloned barriers cannot be placed on the incoming paths.
+    if (phase->is_dominator(phase->get_ctrl(lb->in(LoadBarrierNode::Address)),
+                            oop_phi->in(0)) && phase->get_ctrl(lb->in(LoadBarrierNode::Address)) != oop_phi->in(0)) {
+      // That transformation may cause the Similar edge on dominated load barriers to be invalid
+      lb->fix_similar_in_uses(&igvn);
+
+      RegionNode* region = oop_phi->in(0)->as_Region();
+
+      int backedge = LoopNode::LoopBackControl;
+      if (region->is_Loop() && region->in(backedge)->is_Proj() && region->in(backedge)->in(0)->is_If()) {
+        // The backedge oop must be computable above the loop-exit test,
+        // where the cloned barrier would be placed.
+        Node* c = region->in(backedge)->in(0)->in(0);
+        assert(c->unique_ctrl_out() == region->in(backedge)->in(0), "");
+        Node* oop = lb->in(LoadBarrierNode::Oop)->in(backedge);
+        Node* oop_c = phase->has_ctrl(oop) ? phase->get_ctrl(oop) : oop;
+        if (!phase->is_dominator(oop_c, c)) {
+          return false;
+        }
+      }
+
+      // If the node on the backedge above the phi is the node itself - we have a self loop.
+      // Don't clone - this will be folded later.
+      if (oop_phi->in(LoopNode::LoopBackControl) == lb->proj_out(LoadBarrierNode::Oop)) {
+        return false;
+      }
+
+      bool is_strip_mined = region->is_CountedLoop() && region->as_CountedLoop()->is_strip_mined();
+      Node *phi = oop_phi->clone();
+
+      // Clone one barrier per region input and splice its control output
+      // into that incoming path.
+      for (uint i = 1; i < region->req(); i++) {
+        Node* ctrl = region->in(i);
+        if (ctrl != C->top()) {
+          assert(!phase->is_dominator(ctrl, region) || region->is_Loop(), "");
+
+          Node* mem = lb->in(LoadBarrierNode::Memory);
+          Node* m = find_dominating_memory(phase, mem, region, i);
+
+          if (region->is_Loop() && i == LoopNode::LoopBackControl && ctrl->is_Proj() && ctrl->in(0)->is_If()) {
+            ctrl = ctrl->in(0)->in(0);
+          } else if (region->is_Loop() && is_strip_mined) {
+            // If this is a strip mined loop, control must move above OuterStripMinedLoop
+            assert(i == LoopNode::EntryControl, "check");
+            assert(ctrl->is_OuterStripMinedLoop(), "sanity");
+            ctrl = ctrl->as_OuterStripMinedLoop()->in(LoopNode::EntryControl);
+          }
+
+          LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, ctrl, m, lb->in(LoadBarrierNode::Oop)->in(i));
+          Node* out_ctrl = new_lb->proj_out(LoadBarrierNode::Control);
+
+          if (is_strip_mined && (i == LoopNode::EntryControl)) {
+            assert(region->in(i)->is_OuterStripMinedLoop(), "");
+            igvn.replace_input_of(region->in(i), i, out_ctrl);
+          } else if (ctrl == region->in(i)) {
+            igvn.replace_input_of(region, i, out_ctrl);
+          } else {
+            Node* iff = region->in(i)->in(0);
+            igvn.replace_input_of(iff, 0, out_ctrl);
+            phase->set_idom(iff, out_ctrl, phase->dom_depth(out_ctrl)+1);
+          }
+          phi->set_req(i, new_lb->proj_out(LoadBarrierNode::Oop));
+        }
+      }
+      phase->register_new_node(phi, region);
+      replace_barrier(phase, lb, phi);
+
+      if (region->is_Loop()) {
+        // Load barrier moved to the back edge of the Loop may now
+        // have a safepoint on the path to the barrier on the Similar
+        // edge
+        igvn.replace_input_of(phi->in(LoopNode::LoopBackControl)->in(0), LoadBarrierNode::Similar, C->top());
+        Node* head = region->in(LoopNode::EntryControl);
+        phase->set_idom(region, head, phase->dom_depth(head)+1);
+        phase->recompute_dom_depth();
+        if (head->is_CountedLoop() && head->as_CountedLoop()->is_main_loop()) {
+          head->as_CountedLoop()->set_normal_loop();
+        }
+      }
+
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Hoists a loop-invariant barrier (oop and address both defined outside the
+// barrier's loop) to just above the loop head. Returns true on success.
+static bool move_out_of_loop(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+  PhaseIterGVN &igvn = phase->igvn();
+  IdealLoopTree *lb_loop = phase->get_loop(lb->in(0));
+  if (lb_loop != phase->ltree_root() && !lb_loop->_irreducible) {
+    Node* oop_ctrl = phase->get_ctrl(lb->in(LoadBarrierNode::Oop));
+    IdealLoopTree *oop_loop = phase->get_loop(oop_ctrl);
+    IdealLoopTree* adr_loop = phase->get_loop(phase->get_ctrl(lb->in(LoadBarrierNode::Address)));
+    if (!lb_loop->is_member(oop_loop) && !lb_loop->is_member(adr_loop)) {
+      // That transformation may cause the Similar edge on dominated load barriers to be invalid
+      lb->fix_similar_in_uses(&igvn);
+
+      Node* head = lb_loop->_head;
+      assert(head->is_Loop(), "");
+
+      if (phase->is_dominator(head, oop_ctrl)) {
+        // Oop control is inside an infinite-loop construct below the head -
+        // hoisting is not possible.
+        assert(oop_ctrl->Opcode() == Op_CProj && oop_ctrl->in(0)->Opcode() == Op_NeverBranch, "");
+        assert(lb_loop->is_member(phase->get_loop(oop_ctrl->in(0)->in(0))), "");
+        return false;
+      }
+
+      if (head->is_CountedLoop()) {
+        CountedLoopNode* cloop = head->as_CountedLoop();
+        if (cloop->is_main_loop()) {
+          cloop->set_normal_loop();
+        }
+        // When we are moving barrier out of a counted loop,
+        // make sure we move it all the way out of the strip mined outer loop.
+        if (cloop->is_strip_mined()) {
+          head = cloop->outer_loop();
+        }
+      }
+
+      Node* mem = lb->in(LoadBarrierNode::Memory);
+      Node* m = find_dominating_memory(phase, mem, head, -1);
+
+      LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, head->in(LoopNode::EntryControl), m, NULL);
+
+      // Splice the hoisted barrier into the loop entry control.
+      assert(phase->idom(head) == head->in(LoopNode::EntryControl), "");
+      Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
+      igvn.replace_input_of(head, LoopNode::EntryControl, proj_ctl);
+      phase->set_idom(head, proj_ctl, phase->dom_depth(proj_ctl) + 1);
+
+      replace_barrier(phase, lb, new_lb->proj_out(LoadBarrierNode::Oop));
+
+      phase->recompute_dom_depth();
+
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// If another barrier on the same oop sits on the opposite arm of the same
+// If, commons both barriers into a single one placed above the branch.
+// Returns true on success.
+static bool common_barriers(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+  PhaseIterGVN &igvn = phase->igvn();
+  Node* in_val = lb->in(LoadBarrierNode::Oop);
+  for (DUIterator_Fast imax, i = in_val->fast_outs(imax); i < imax; i++) {
+    Node* u = in_val->fast_out(i);
+    if (u != lb && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
+      Node* this_ctrl = lb->in(LoadBarrierNode::Control);
+      Node* other_ctrl = u->in(LoadBarrierNode::Control);
+
+      Node* lca = phase->dom_lca(this_ctrl, other_ctrl);
+      bool ok = true;
+
+      Node* proj1 = NULL;
+      Node* proj2 = NULL;
+
+      // Walk each barrier's control up to the LCA; only branch projections
+      // directly below the LCA (or uncommon-trap Ifs) are tolerated on the
+      // way, otherwise commoning is not safe.
+      while (this_ctrl != lca && ok) {
+        if (this_ctrl->in(0) != NULL &&
+            this_ctrl->in(0)->is_MultiBranch()) {
+          if (this_ctrl->in(0)->in(0) == lca) {
+            assert(proj1 == NULL, "");
+            assert(this_ctrl->is_Proj(), "");
+            proj1 = this_ctrl;
+          } else if (!(this_ctrl->in(0)->is_If() && this_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
+            ok = false;
+          }
+        }
+        this_ctrl = phase->idom(this_ctrl);
+      }
+      while (other_ctrl != lca && ok) {
+        if (other_ctrl->in(0) != NULL &&
+            other_ctrl->in(0)->is_MultiBranch()) {
+          if (other_ctrl->in(0)->in(0) == lca) {
+            assert(other_ctrl->is_Proj(), "");
+            assert(proj2 == NULL, "");
+            proj2 = other_ctrl;
+          } else if (!(other_ctrl->in(0)->is_If() && other_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
+            ok = false;
+          }
+        }
+        other_ctrl = phase->idom(other_ctrl);
+      }
+      assert(proj1 == NULL || proj2 == NULL || proj1->in(0) == proj2->in(0), "");
+      if (ok && proj1 && proj2 && proj1 != proj2 && proj1->in(0)->is_If()) {
+        // That transformation may cause the Similar edge on dominated load barriers to be invalid
+        lb->fix_similar_in_uses(&igvn);
+        u->as_LoadBarrier()->fix_similar_in_uses(&igvn);
+
+        // Insert the commoned barrier between the LCA and its unique branch.
+        Node* split = lca->unique_ctrl_out();
+        assert(split->in(0) == lca, "");
+
+        Node* mem = lb->in(LoadBarrierNode::Memory);
+        Node* m = find_dominating_memory(phase, mem, split, -1);
+        LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, lca, m, NULL);
+
+        Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
+        igvn.replace_input_of(split, 0, new_lb->proj_out(LoadBarrierNode::Control));
+        phase->set_idom(split, proj_ctl, phase->dom_depth(proj_ctl)+1);
+
+        Node* proj_oop = new_lb->proj_out(LoadBarrierNode::Oop);
+        replace_barrier(phase, lb, proj_oop);
+        replace_barrier(phase, u->as_LoadBarrier(), proj_oop);
+
+        phase->recompute_dom_depth();
+
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
+// Applies at most one load-barrier optimization per invocation, in order of
+// decreasing profitability; || short-circuits after the first that succeeds.
+static void optimize_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
+  Compile* C = Compile::current();
+
+  if (!C->directive()->ZOptimizeLoadBarriersOption) {
+    return;
+  }
+
+  if (!lb->has_true_uses()) {
+    return;
+  }
+
+  bool progress = replace_with_dominating_barrier(phase, lb, last_round) ||
+                  split_barrier_thru_phi(phase, lb) ||
+                  move_out_of_loop(phase, lb) ||
+                  common_barriers(phase, lb);
+  (void)progress;
+}
+
+// Loop-opts hook: only LoadBarrierNodes are of interest to ZGC.
+void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round) {
+  if (!node->is_LoadBarrier()) {
+    return;
+  }
+  optimize_load_barrier(phase, node->as_LoadBarrier(), last_round);
+}
+
+// == Verification ==
+
+#ifdef ASSERT
+
+// Recursively checks that every (transitive) use of oop-producing node 'n'
+// is covered by a load barrier. Phis/CMoves and Encode/Decode nodes are
+// looked through; any other use (except SCMemProj) is reported as bad.
+// Returns false if an unbarriered use was found.
+static bool look_for_barrier(Node* n, bool post_parse, VectorSet& visited) {
+  if (visited.test_set(n->_idx)) {
+    // Already checked along another path.
+    return true;
+  }
+
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node* u = n->fast_out(i);
+    if (u->is_LoadBarrier()) {
+      // This use is barriered - nothing further to check on this edge.
+    } else if ((u->is_Phi() || u->is_CMove()) && !post_parse) {
+      if (!look_for_barrier(u, post_parse, visited)) {
+        return false;
+      }
+    } else if (u->Opcode() == Op_EncodeP || u->Opcode() == Op_DecodeN) {
+      if (!look_for_barrier(u, post_parse, visited)) {
+        return false;
+      }
+    } else if (u->Opcode() != Op_SCMemProj) {
+      tty->print("bad use"); u->dump();
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Debug-only verification of all registered load barriers: checks Similar
+// edge invariants, that no barrier is unused, that Similar edges don't
+// chain, that no safepoint lies between a barrier and its Similar
+// dominator, and (with ZVerifyLoadBarriers) that every oop load's uses are
+// barriered.
+void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
+  ZBarrierSetC2State* s = state();
+  Compile* C = Compile::current();
+  ResourceMark rm;
+  VectorSet visited(Thread::current()->resource_area());
+  for (int i = 0; i < s->load_barrier_count(); i++) {
+    LoadBarrierNode* n = s->load_barrier_node(i);
+
+    // The dominating barrier on the same address if it exists and
+    // this barrier must not be applied on the value from the same
+    // load otherwise the value is not reloaded before it's used the
+    // second time.
+    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
+           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
+            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
+            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
+           "broken similar edge");
+
+    assert(post_parse || n->as_LoadBarrier()->has_true_uses(),
+           "found unneeded load barrier");
+
+    // Several load barrier nodes chained through their Similar edge
+    // break the code that remove the barriers in final graph reshape.
+    assert(n->in(LoadBarrierNode::Similar)->is_top() ||
+           (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
+            n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
+           "chain of Similar load barriers");
+
+    if (!n->in(LoadBarrierNode::Similar)->is_top()) {
+      // Walk the CFG upwards from the barrier towards its Similar
+      // dominator; the asserts inside check that no safepoint (or non-CFG
+      // node) lies on any path in between.
+      ResourceMark rm;
+      Unique_Node_List wq;
+      Node* other = n->in(LoadBarrierNode::Similar)->in(0);
+      wq.push(n);
+      // NOTE(review): 'ok' and 'dom_found' are never written or read after
+      // initialization - dead locals, candidates for removal.
+      bool ok = true;
+      bool dom_found = false;
+      for (uint next = 0; next < wq.size(); ++next) {
+        // NOTE(review): this 'n' shadows the outer loop's barrier 'n'.
+        Node *n = wq.at(next);
+        assert(n->is_CFG(), "");
+        assert(!n->is_SafePoint(), "");
+
+        if (n == other) {
+          continue;
+        }
+
+        if (n->is_Region()) {
+          for (uint i = 1; i < n->req(); i++) {
+            Node* m = n->in(i);
+            if (m != NULL) {
+              wq.push(m);
+            }
+          }
+        } else {
+          Node* m = n->in(0);
+          if (m != NULL) {
+            wq.push(m);
+          }
+        }
+      }
+    }
+
+    if (ZVerifyLoadBarriers) {
+      if ((n->is_Load() || n->is_LoadStore()) && n->bottom_type()->make_oopptr() != NULL) {
+        visited.Clear();
+        bool found = look_for_barrier(n, post_parse, visited);
+        if (!found) {
+          n->dump(1);
+          n->dump(-3);
+          stringStream ss;
+          C->method()->print_short_name(&ss);
+          tty->print_cr("-%s-", ss.as_string());
+          assert(found, "");
+        }
+      }
+    }
+  }
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
+#define SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
+
+#include "gc/shared/c2/barrierSetC2.hpp"
+#include "memory/allocation.hpp"
+#include "opto/node.hpp"
+#include "utilities/growableArray.hpp"
+
+// C2 IR node representing a ZGC load barrier applied to a loaded oop.
+// A MultiNode: the enum below names the input slots; the slots before
+// Number_of_Outputs also exist as output projections.
+class LoadBarrierNode : public MultiNode {
+private:
+  bool _weak;               // Barrier variant for weak loads (see is_weak())
+  bool _writeback;          // Controls if the barrier writes the healed oop back to memory
+                            // A swap on a memory location must never write back the healed oop
+  bool _oop_reload_allowed; // Controls if the barrier is allowed to reload the oop from memory
+                            // before healing, otherwise both the oop and the address must be passed to the
+                            // barrier from the oop
+
+  // Dominance helper; linear_only restricts the search to a linear chain of
+  // control nodes. NOTE(review): direction ("d dominates n") inferred from
+  // the parameter names -- confirm against the definition in zBarrierSetC2.cpp.
+  static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n);
+  // Re-enqueues barriers dominated by this one so IGVN can process them.
+  void push_dominated_barriers(PhaseIterGVN* igvn) const;
+
+public:
+  enum {
+    Control,
+    Memory,
+    Oop,
+    Address,
+    Number_of_Outputs = Address,  // Control, Memory and Oop also exist as projections
+    Similar,                      // Input-only: a dominating barrier on the same oop, or top
+    Number_of_Inputs
+  };
+
+  LoadBarrierNode(Compile* C,
+                  Node* c,
+                  Node* mem,
+                  Node* val,
+                  Node* adr,
+                  bool weak,
+                  bool writeback,
+                  bool oop_reload_allowed);
+
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const;
+  virtual const Type *Value(PhaseGVN *phase) const;
+  virtual Node *Identity(PhaseGVN *phase);
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+
+  // Searches for a barrier that dominates this one; linear_only limits the
+  // walk, look_for_similar also considers barriers recorded via Similar edges.
+  LoadBarrierNode* has_dominating_barrier(PhaseIdealLoop* phase,
+                                          bool linear_only,
+                                          bool look_for_similar);
+
+  // Fixes up Similar edges of other barriers that refer to this node.
+  void fix_similar_in_uses(PhaseIterGVN* igvn);
+
+  // True if any projection of this barrier has actual (non-dead) uses.
+  bool has_true_uses() const;
+
+  // Eliminable once a dominating barrier is recorded on the Similar edge.
+  bool can_be_eliminated() const {
+    return !in(Similar)->is_top();
+  }
+
+  bool is_weak() const {
+    return _weak;
+  }
+
+  bool is_writeback() const {
+    return _writeback;
+  }
+
+  bool oop_reload_allowed() const {
+    return _oop_reload_allowed;
+  }
+};
+
+// Oop load node used for the expanded barrier slow path. Subclasses
+// LoadPNode but carries its own Opcode so the matcher can select dedicated
+// instructions for it, and disables Ideal() so the node is not transformed
+// away before matching.
+class LoadBarrierSlowRegNode : public LoadPNode {
+public:
+  LoadBarrierSlowRegNode(Node *c,
+                         Node *mem,
+                         Node *adr,
+                         const TypePtr *at,
+                         const TypePtr* t,
+                         MemOrd mo,
+                         ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
+
+  virtual const char * name() {
+    return "LoadBarrierSlowRegNode";
+  }
+
+  // No idealization: keep the node exactly as created.
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+// Weak-barrier variant of the slow-path oop load: same structure as
+// LoadBarrierSlowRegNode but with a distinct Opcode, and Ideal() disabled
+// so the node survives untouched until matching.
+class LoadBarrierWeakSlowRegNode : public LoadPNode {
+public:
+  LoadBarrierWeakSlowRegNode(Node *c,
+                             Node *mem,
+                             Node *adr,
+                             const TypePtr *at,
+                             const TypePtr* t,
+                             MemOrd mo,
+                             ControlDependency control_dependency = DependsOnlyOnTest)
+    : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
+
+  virtual const char * name() {
+    return "LoadBarrierWeakSlowRegNode";
+  }
+
+  // No idealization: keep the node exactly as created.
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+    return NULL;
+  }
+
+  virtual int Opcode() const;
+};
+
+// Per-compilation (arena-allocated) state kept by ZBarrierSetC2.
+class ZBarrierSetC2State : public ResourceObj {
+private:
+  // List of load barrier nodes which need to be expanded before matching
+  GrowableArray<LoadBarrierNode*>* _load_barrier_nodes;
+
+public:
+  ZBarrierSetC2State(Arena* comp_arena);
+  int load_barrier_count() const;
+  void add_load_barrier_node(LoadBarrierNode* n);
+  void remove_load_barrier_node(LoadBarrierNode* n);
+  LoadBarrierNode* load_barrier_node(int idx) const;
+};
+
+// BarrierSetC2 implementation for ZGC: emits LoadBarrierNodes for oop
+// accesses at parse time, supports eliminating dominated barriers during
+// optimization, and expands the remaining barriers before matching.
+class ZBarrierSetC2 : public BarrierSetC2 {
+private:
+  // Per-compilation state (registered load barrier nodes).
+  ZBarrierSetC2State* state() const;
+  // Barrier construction for atomic accesses (CAS / compare-exchange).
+  Node* make_cas_loadbarrier(C2AtomicAccess& access) const;
+  Node* make_cmpx_loadbarrier(C2AtomicAccess& access) const;
+  // Expansion of a LoadBarrierNode into explicit IR; expand_loadbarrier_node
+  // is the entry point, 'basic' and 'optimized' are the two strategies.
+  void expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
+  void expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
+  void expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
+  // Type (signature) of the load barrier runtime entry.
+  const TypeFunc* load_barrier_Type() const;
+
+protected:
+  // Access hooks that attach load barriers to loads and atomic oop operations.
+  virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
+  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access,
+                                               Node* expected_val,
+                                               Node* new_val,
+                                               const Type* val_type) const;
+  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access,
+                                                Node* expected_val,
+                                                Node* new_val,
+                                                const Type* value_type) const;
+  virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access,
+                                        Node* new_val,
+                                        const Type* val_type) const;
+
+public:
+  // Inserts a LoadBarrierNode for val (loaded from adr) and returns the
+  // barrier's oop result.
+  Node* load_barrier(GraphKit* kit,
+                     Node* val,
+                     Node* adr,
+                     bool weak = false,
+                     bool writeback = true,
+                     bool oop_reload_allowed = true) const;
+
+  virtual void* create_barrier_state(Arena* comp_arena) const;
+  virtual bool is_gc_barrier_node(Node* node) const;
+  virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
+  virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful) const;
+  virtual void add_users_to_worklist(Unique_Node_List* worklist) const;
+  virtual void enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const;
+  virtual void register_potential_barrier_node(Node* node) const;
+  virtual void unregister_potential_barrier_node(Node* node) const;
+  // Oop array copies always need barriers under ZGC.
+  virtual bool array_copy_requires_gc_barriers(BasicType type) const { return true; }
+  virtual Node* step_over_gc_barrier(Node* c) const { return c; }
+  // If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
+  // expanded later, then now is the time to do so.
+  virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const;
+
+  static void find_dominating_barriers(PhaseIterGVN& igvn);
+  static void loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round);
+
+#ifdef ASSERT
+  virtual void verify_gc_barriers(bool post_parse) const;
+#endif
+};
+
+#endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/vmStructs_z.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/vmStructs_z.hpp"
+
+// Records the addresses of the ZGC globals listed in vmStructs_z.hpp so
+// the SA agent can read them through a single well-known object.
+ZGlobalsForVMStructs::ZGlobalsForVMStructs() :
+    _ZGlobalPhase(&ZGlobalPhase),
+    _ZAddressGoodMask(&ZAddressGoodMask),
+    _ZAddressBadMask(&ZAddressBadMask),
+    _ZAddressWeakBadMask(&ZAddressWeakBadMask),
+    _ZObjectAlignmentSmallShift(&ZObjectAlignmentSmallShift),
+    _ZObjectAlignmentSmall(&ZObjectAlignmentSmall) {
+}
+
+// The singleton instance and the exported pointer to it (_instance_p is
+// the field exposed via VM_STRUCTS_ZGC).
+ZGlobalsForVMStructs ZGlobalsForVMStructs::_instance;
+ZGlobalsForVMStructs* ZGlobalsForVMStructs::_instance_p = &ZGlobalsForVMStructs::_instance;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
+#define SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
+
+#include "gc/z/zAddressRangeMap.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zHeap.hpp"
+#include "gc/z/zPageAllocator.hpp"
+#include "gc/z/zPhysicalMemory.hpp"
+#include "utilities/macros.hpp"
+
+// Expose some ZGC globals to the SA agent.
+class ZGlobalsForVMStructs {
+  static ZGlobalsForVMStructs _instance;  // Singleton, defined in vmStructs_z.cpp
+
+public:
+  static ZGlobalsForVMStructs* _instance_p;  // Exported pointer the SA agent reads
+
+  ZGlobalsForVMStructs();
+
+  // Pointers to the corresponding ZGC globals (captured in the constructor)
+  uint32_t* _ZGlobalPhase;
+
+  uintptr_t* _ZAddressGoodMask;
+  uintptr_t* _ZAddressBadMask;
+  uintptr_t* _ZAddressWeakBadMask;
+
+  const int* _ZObjectAlignmentSmallShift;
+  const int* _ZObjectAlignmentSmall;
+};
+
+// Shorthand for the page table's address range map instantiation; also used
+// by the field declarations below.
+typedef ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> ZAddressRangeMapForPageTable;
+
+// ZGC fields exposed to the SA agent.
+#define VM_STRUCTS_ZGC(nonstatic_field, volatile_nonstatic_field, static_field)                      \
+  static_field(ZGlobalsForVMStructs,            _instance_p,          ZGlobalsForVMStructs*)         \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZGlobalPhase,        uint32_t*)                     \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZAddressGoodMask,    uintptr_t*)                    \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZAddressBadMask,     uintptr_t*)                    \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZAddressWeakBadMask, uintptr_t*)                    \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZObjectAlignmentSmallShift, const int*)             \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZObjectAlignmentSmall, const int*)                  \
+                                                                                                     \
+  nonstatic_field(ZCollectedHeap,               _heap,                ZHeap)                         \
+                                                                                                     \
+  nonstatic_field(ZHeap,                        _page_allocator,      ZPageAllocator)                \
+  nonstatic_field(ZHeap,                        _pagetable,           ZPageTable)                    \
+                                                                                                     \
+  nonstatic_field(ZPage,                        _type,                const uint8_t)                 \
+  nonstatic_field(ZPage,                        _virtual,             const ZVirtualMemory)          \
+  nonstatic_field(ZPage,                        _forwarding,          ZForwardingTable)              \
+                                                                                                     \
+  nonstatic_field(ZPageAllocator,               _physical,            ZPhysicalMemoryManager)        \
+  nonstatic_field(ZPageAllocator,               _used,                size_t)                        \
+                                                                                                     \
+  nonstatic_field(ZPageTable,                   _map,                 ZAddressRangeMapForPageTable)  \
+                                                                                                     \
+  nonstatic_field(ZAddressRangeMapForPageTable, _map,                 ZPageTableEntry* const)        \
+                                                                                                     \
+  nonstatic_field(ZVirtualMemory,                _start,              uintptr_t)                     \
+  nonstatic_field(ZVirtualMemory,                _end,                uintptr_t)                     \
+                                                                                                     \
+  nonstatic_field(ZForwardingTable,              _table,              ZForwardingTableEntry*)        \
+  nonstatic_field(ZForwardingTable,              _size,               size_t)                        \
+                                                                                                     \
+  nonstatic_field(ZPhysicalMemoryManager,        _max_capacity,       const size_t)                  \
+  nonstatic_field(ZPhysicalMemoryManager,        _capacity,           size_t)
+
+// Integer-sized ZGC constants exposed to the SA agent.
+#define VM_INT_CONSTANTS_ZGC(declare_constant, declare_constant_with_value)                          \
+  declare_constant(ZPhaseRelocate)                                                                   \
+  declare_constant(ZPageTypeSmall)                                                                   \
+  declare_constant(ZPageTypeMedium)                                                                  \
+  declare_constant(ZPageTypeLarge)                                                                   \
+  declare_constant(ZObjectAlignmentMediumShift)                                                      \
+  declare_constant(ZObjectAlignmentLargeShift)
+
+// Long-sized ZGC constants exposed to the SA agent.
+#define VM_LONG_CONSTANTS_ZGC(declare_constant)                                                      \
+  declare_constant(ZPageSizeSmallShift)                                                              \
+  declare_constant(ZPageSizeMediumShift)                                                             \
+  declare_constant(ZPageSizeMinShift)                                                                \
+  declare_constant(ZAddressOffsetShift)                                                              \
+  declare_constant(ZAddressOffsetBits)                                                               \
+  declare_constant(ZAddressOffsetMask)                                                               \
+  declare_constant(ZAddressSpaceStart)
+
+// ZGC types known to the SA agent.
+#define VM_TYPES_ZGC(declare_type, declare_toplevel_type, declare_integer_type)                      \
+  declare_toplevel_type(ZGlobalsForVMStructs)                                                        \
+  declare_type(ZCollectedHeap, CollectedHeap)                                                        \
+  declare_toplevel_type(ZHeap)                                                                       \
+  declare_toplevel_type(ZPage)                                                                       \
+  declare_toplevel_type(ZPageAllocator)                                                              \
+  declare_toplevel_type(ZPageTable)                                                                  \
+  declare_toplevel_type(ZPageTableEntry)                                                             \
+  declare_toplevel_type(ZAddressRangeMapForPageTable)                                                \
+  declare_toplevel_type(ZVirtualMemory)                                                              \
+  declare_toplevel_type(ZForwardingTable)                                                            \
+  declare_toplevel_type(ZForwardingTableEntry)                                                       \
+  declare_toplevel_type(ZPhysicalMemoryManager)
+
+#endif // SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddress.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "runtime/thread.hpp"
+
+// Installs a new good mask and derives the bad and weak-bad masks from it.
+// The bad mask is the complement of the good mask within the metadata bits;
+// the weak-bad mask additionally excludes the remapped and finalizable bits
+// (i.e. such oops are not considered weak-bad).
+void ZAddressMasks::set_good_mask(uintptr_t mask) {
+  // Removed unused local 'old_bad_mask' (dead store read from ZAddressBadMask).
+  ZAddressGoodMask = mask;
+  ZAddressBadMask = ZAddressGoodMask ^ ZAddressMetadataMask;
+  ZAddressWeakBadMask = (ZAddressGoodMask | ZAddressMetadataRemapped | ZAddressMetadataFinalizable) ^ ZAddressMetadataMask;
+}
+
+// One-time setup: marked == marked0, and start with remapped as the good mask.
+void ZAddressMasks::initialize() {
+  ZAddressMetadataMarked = ZAddressMetadataMarked0;
+  set_good_mask(ZAddressMetadataRemapped);
+}
+
+// Flips to the next marking phase: toggles the current marked bit between
+// marked0/marked1 and makes the new marked bit the good mask.
+void ZAddressMasks::flip_to_marked() {
+  ZAddressMetadataMarked ^= (ZAddressMetadataMarked0 | ZAddressMetadataMarked1);
+  set_good_mask(ZAddressMetadataMarked);
+}
+
+// Flips to the remap phase: remapped becomes the good mask.
+void ZAddressMasks::flip_to_remapped() {
+  set_good_mask(ZAddressMetadataRemapped);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddress.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESS_HPP
+#define SHARE_GC_Z_ZADDRESS_HPP
+
+#include "memory/allocation.hpp"
+
+// AllStatic helper for inspecting and manipulating the metadata ("color")
+// bits of ZGC oop addresses. Implementations live in zAddress.inline.hpp
+// and the OS/CPU specific inline header.
+class ZAddress : public AllStatic {
+public:
+  // Predicates on the address metadata bits
+  static bool is_null(uintptr_t value);
+  static bool is_bad(uintptr_t value);
+  static bool is_good(uintptr_t value);
+  static bool is_good_or_null(uintptr_t value);
+  static bool is_weak_bad(uintptr_t value);
+  static bool is_weak_good(uintptr_t value);
+  static bool is_weak_good_or_null(uintptr_t value);
+  static bool is_marked(uintptr_t value);
+  static bool is_finalizable(uintptr_t value);
+  static bool is_remapped(uintptr_t value);
+
+  // Conversions: strip or apply metadata bits
+  static uintptr_t address(uintptr_t value);
+  static uintptr_t offset(uintptr_t value);
+  static uintptr_t good(uintptr_t value);
+  static uintptr_t good_or_null(uintptr_t value);
+  static uintptr_t finalizable_good(uintptr_t value);
+  static uintptr_t marked(uintptr_t value);
+  static uintptr_t marked0(uintptr_t value);
+  static uintptr_t marked1(uintptr_t value);
+  static uintptr_t remapped(uintptr_t value);
+  static uintptr_t remapped_or_null(uintptr_t value);
+};
+
+// Maintains the global address masks (ZAddressGoodMask, ZAddressBadMask,
+// ZAddressWeakBadMask) that are flipped as the collector changes phase.
+class ZAddressMasks : public AllStatic {
+  friend class ZAddressTest;
+
+private:
+  // Derives and installs the bad/weak-bad masks from a new good mask.
+  static void set_good_mask(uintptr_t mask);
+
+public:
+  static void initialize();
+  static void flip_to_marked();
+  static void flip_to_remapped();
+};
+
+#endif // SHARE_GC_Z_ZADDRESS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddress.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESS_INLINE_HPP
+#define SHARE_GC_Z_ZADDRESS_INLINE_HPP
+
+#include "gc/z/zAddress.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "utilities/macros.hpp"
+#include OS_CPU_HEADER_INLINE(gc/z/zAddress)
+
+inline bool ZAddress::is_null(uintptr_t value) {
+  return value == 0;
+}
+
+// An address is bad if any of the current bad-mask bits are set.
+inline bool ZAddress::is_bad(uintptr_t value) {
+  return value & ZAddressBadMask;
+}
+
+inline bool ZAddress::is_good(uintptr_t value) {
+  return !is_bad(value) && !is_null(value);
+}
+
+inline bool ZAddress::is_good_or_null(uintptr_t value) {
+  // Checking if an address is "not bad" is an optimized version of
+  // checking if it's "good or null", which eliminates an explicit
+  // null check. However, the implicit null check only checks that
+  // the mask bits are zero, not that the entire address is zero.
+  // This means that an address without mask bits would pass through
+  // the barrier as if it was null. This should be harmless as such
+  // addresses should never be passed through the barrier.
+  const bool result = !is_bad(value);
+  assert((is_good(value) || is_null(value)) == result, "Bad address");
+  return result;
+}
+
+// Weak variants check against ZAddressWeakBadMask instead of ZAddressBadMask.
+inline bool ZAddress::is_weak_bad(uintptr_t value) {
+  return value & ZAddressWeakBadMask;
+}
+
+inline bool ZAddress::is_weak_good(uintptr_t value) {
+  return !is_weak_bad(value) && !is_null(value);
+}
+
+inline bool ZAddress::is_weak_good_or_null(uintptr_t value) {
+  return !is_weak_bad(value);
+}
+
+// Tests for individual metadata bits.
+inline bool ZAddress::is_marked(uintptr_t value) {
+  return value & ZAddressMetadataMarked;
+}
+
+inline bool ZAddress::is_finalizable(uintptr_t value) {
+  return value & ZAddressMetadataFinalizable;
+}
+
+inline bool ZAddress::is_remapped(uintptr_t value) {
+  return value & ZAddressMetadataRemapped;
+}
+
+// Strips the metadata bits, leaving the heap offset.
+inline uintptr_t ZAddress::offset(uintptr_t value) {
+  return value & ZAddressOffsetMask;
+}
+
+// The converters below rebuild an address from the offset plus the requested
+// metadata bits. address() is not defined in this file -- presumably it comes
+// from the OS/CPU inline header included above (OS_CPU_HEADER_INLINE).
+inline uintptr_t ZAddress::good(uintptr_t value) {
+  return address(offset(value) | ZAddressGoodMask);
+}
+
+inline uintptr_t ZAddress::good_or_null(uintptr_t value) {
+  return is_null(value) ? 0 : good(value);
+}
+
+inline uintptr_t ZAddress::finalizable_good(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataFinalizable | ZAddressGoodMask);
+}
+
+inline uintptr_t ZAddress::marked(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataMarked);
+}
+
+inline uintptr_t ZAddress::marked0(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataMarked0);
+}
+
+inline uintptr_t ZAddress::marked1(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataMarked1);
+}
+
+inline uintptr_t ZAddress::remapped(uintptr_t value) {
+  return address(offset(value) | ZAddressMetadataRemapped);
+}
+
+inline uintptr_t ZAddress::remapped_or_null(uintptr_t value) {
+  return is_null(value) ? 0 : remapped(value);
+}
+
+#endif // SHARE_GC_Z_ZADDRESS_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddressRangeMap.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
+#define SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
+
+#include "memory/allocation.hpp"
+
+template<typename T, size_t AddressRangeShift>
+class ZAddressRangeMapIterator;
+
+// Flat array map from ZGC address ranges (of size 2^AddressRangeShift bytes)
+// to values of type T, indexed by heap offset. The backing array is
+// mmap-allocated (see zAddressRangeMap.inline.hpp).
+template <typename T, size_t AddressRangeShift>
+class ZAddressRangeMap {
+  friend class VMStructs;
+  friend class ZAddressRangeMapIterator<T, AddressRangeShift>;
+
+private:
+  T* const _map; // Backing array, one slot per address range
+
+  // Maps an address to its slot index (offset >> AddressRangeShift).
+  size_t index_for_addr(uintptr_t addr) const;
+  // Number of slots (ZAddressOffsetMax >> AddressRangeShift).
+  size_t size() const;
+
+public:
+  ZAddressRangeMap();
+  ~ZAddressRangeMap();
+
+  T get(uintptr_t addr) const;
+  void put(uintptr_t addr, T value);
+};
+
+// Simple forward iterator over all slots of a ZAddressRangeMap.
+template <typename T, size_t AddressRangeShift>
+class ZAddressRangeMapIterator : public StackObj {
+public:
+  // NOTE(review): this first section looks like it was intended to be
+  // 'private:' (there is a second 'public:' below and _map/_next are
+  // implementation state) -- confirm no external users before changing.
+  const ZAddressRangeMap<T, AddressRangeShift>* const _map;
+  size_t                                              _next; // Next slot index to visit
+
+public:
+  ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map);
+
+  // Copies the next value into *value and advances; false at the end.
+  bool next(T* value);
+};
+
+#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAddressRangeMap.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
+#define SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zAddressRangeMap.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "memory/allocation.inline.hpp"
+
+// Allocates the backing array with mmap (freed again in the destructor).
+template <typename T, size_t AddressRangeShift>
+ZAddressRangeMap<T, AddressRangeShift>::ZAddressRangeMap() :
+    _map(MmapArrayAllocator<T>::allocate(size(), mtGC)) {}
+
+template <typename T, size_t AddressRangeShift>
+ZAddressRangeMap<T, AddressRangeShift>::~ZAddressRangeMap() {
+  MmapArrayAllocator<T>::free(_map, size());
+}
+
+template <typename T, size_t AddressRangeShift>
+size_t ZAddressRangeMap<T, AddressRangeShift>::index_for_addr(uintptr_t addr) const {
+  assert(!ZAddress::is_null(addr), "Invalid address");
+
+  // The slot index is the heap offset scaled down by the range size.
+  const size_t index = ZAddress::offset(addr) >> AddressRangeShift;
+  assert(index < size(), "Invalid index");
+
+  return index;
+}
+
+// Total number of slots covering the maximal heap offset range.
+template <typename T, size_t AddressRangeShift>
+size_t ZAddressRangeMap<T, AddressRangeShift>::size() const {
+  return ZAddressOffsetMax >> AddressRangeShift;
+}
+
+// Returns the value stored for the address range containing addr.
+template <typename T, size_t AddressRangeShift>
+T ZAddressRangeMap<T, AddressRangeShift>::get(uintptr_t addr) const {
+  // Use size_t for the index, matching index_for_addr()'s return type
+  // (was uintptr_t, which forced a needless implicit conversion).
+  const size_t index = index_for_addr(addr);
+  return _map[index];
+}
+
+// Stores value for the address range containing addr.
+template <typename T, size_t AddressRangeShift>
+void ZAddressRangeMap<T, AddressRangeShift>::put(uintptr_t addr, T value) {
+  const size_t index = index_for_addr(addr);
+  _map[index] = value;
+}
+
+template <typename T, size_t AddressRangeShift>
+inline ZAddressRangeMapIterator<T, AddressRangeShift>::ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map) :
+    _map(map),
+    _next(0) {}
+
+// Copies the next slot into *value and advances; returns false once all
+// slots have been visited.
+template <typename T, size_t AddressRangeShift>
+inline bool ZAddressRangeMapIterator<T, AddressRangeShift>::next(T* value) {
+  if (_next < _map->size()) {
+    *value = _map->_map[_next++];
+    return true;
+  }
+
+  // End of map
+  return false;
+}
+
+#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zAllocationFlags.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
+#define SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
+
+#include "gc/z/zBitField.hpp"
+#include "memory/allocation.hpp"
+
+//
+// Allocation flags layout
+// -----------------------
+//
+//   7   4 3 2 1 0
+//  +---+-+-+-+-+-+
+//  |000|1|1|1|1|1|
+//  +---+-+-+-+-+-+
+//  |   | | | | |
+//  |   | | | | * 0-0 Java Thread Flag (1-bit)
+//  |   | | | |
+//  |   | | | * 1-1 Worker Thread Flag (1-bit)
+//  |   | | |
+//  |   | | * 2-2 Non-Blocking Flag (1-bit)
+//  |   | |
+//  |   | * 3-3 Relocation Flag (1-bit)
+//  |   |
+//  |   * 4-4 No Reserve Flag (1-bit)
+//  |
+//  * 7-5 Unused (3-bits)
+//
+
+// Describes an allocation request. All flags are packed into a single
+// uint8_t via ZBitField (bit layout documented above). A default-
+// constructed instance has every flag off; the setters only ever set
+// bits — there is no way to clear a flag once set.
+class ZAllocationFlags {
+private:
+  // One single-bit field per flag, all sharing the same uint8_t storage.
+  typedef ZBitField<uint8_t, bool, 0, 1> field_java_thread;
+  typedef ZBitField<uint8_t, bool, 1, 1> field_worker_thread;
+  typedef ZBitField<uint8_t, bool, 2, 1> field_non_blocking;
+  typedef ZBitField<uint8_t, bool, 3, 1> field_relocation;
+  typedef ZBitField<uint8_t, bool, 4, 1> field_no_reserve;
+
+  uint8_t _flags;
+
+public:
+  ZAllocationFlags() :
+      _flags(0) {}
+
+  void set_java_thread() {
+    _flags |= field_java_thread::encode(true);
+  }
+
+  void set_worker_thread() {
+    _flags |= field_worker_thread::encode(true);
+  }
+
+  void set_non_blocking() {
+    _flags |= field_non_blocking::encode(true);
+  }
+
+  void set_relocation() {
+    _flags |= field_relocation::encode(true);
+  }
+
+  void set_no_reserve() {
+    _flags |= field_no_reserve::encode(true);
+  }
+
+  bool java_thread() const {
+    return field_java_thread::decode(_flags);
+  }
+
+  bool worker_thread() const {
+    return field_worker_thread::decode(_flags);
+  }
+
+  bool non_blocking() const {
+    return field_non_blocking::decode(_flags);
+  }
+
+  bool relocation() const {
+    return field_relocation::decode(_flags);
+  }
+
+  bool no_reserve() const {
+    return field_no_reserve::decode(_flags);
+  }
+};
+
+#endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArguments.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArguments.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zCollectorPolicy.hpp"
+#include "gc/z/zWorkers.hpp"
+#include "gc/shared/gcArguments.inline.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+
+// ZGC contributes no extra heap alignment constraint beyond the default.
+size_t ZArguments::conservative_max_heap_alignment() {
+  return 0;
+}
+
+// Processes and validates JVM flags for running with ZGC: applies
+// ZGC-friendly defaults where the user did not specify a value, and
+// force-disables features ZGC does not (yet) support. Exits the VM via
+// vm_exit_during_initialization() on flag combinations that cannot work.
+void ZArguments::initialize() {
+  GCArguments::initialize();
+
+  // Enable NUMA by default
+  if (FLAG_IS_DEFAULT(UseNUMA)) {
+    FLAG_SET_DEFAULT(UseNUMA, true);
+  }
+
+  // Disable biased locking by default
+  if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
+    FLAG_SET_DEFAULT(UseBiasedLocking, false);
+  }
+
+  // Select number of parallel threads
+  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
+    FLAG_SET_DEFAULT(ParallelGCThreads, ZWorkers::calculate_nparallel());
+  }
+
+  if (ParallelGCThreads == 0) {
+    vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0");
+  }
+
+  // Select number of concurrent threads
+  if (FLAG_IS_DEFAULT(ConcGCThreads)) {
+    FLAG_SET_DEFAULT(ConcGCThreads, ZWorkers::calculate_nconcurrent());
+  }
+
+  if (ConcGCThreads == 0) {
+    vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
+  }
+
+#ifdef COMPILER2
+  // Enable loop strip mining by default
+  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
+    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
+    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
+    }
+  }
+#endif
+
+  // To avoid asserts in set_active_workers()
+  FLAG_SET_DEFAULT(UseDynamicNumberOfGCThreads, true);
+
+  // CompressedOops/UseCompressedClassPointers not supported
+  FLAG_SET_DEFAULT(UseCompressedOops, false);
+  FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
+
+  // ClassUnloading not (yet) supported
+  FLAG_SET_DEFAULT(ClassUnloading, false);
+  FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
+
+  // Verification before startup and after exit not (yet) supported
+  FLAG_SET_DEFAULT(VerifyDuringStartup, false);
+  FLAG_SET_DEFAULT(VerifyBeforeExit, false);
+
+  // Verification of stacks not (yet) supported, for the same reason
+  // we need fixup_partial_loads
+  DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));
+
+  // JVMCI not (yet) supported
+  if (EnableJVMCI) {
+    vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:+EnableJVMCI");
+  }
+}
+
+// Instantiates the ZGC heap together with its collector policy.
+CollectedHeap* ZArguments::create_heap() {
+  return create_heap_with_policy<ZCollectedHeap, ZCollectorPolicy>();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArguments.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARGUMENTS_HPP
+#define SHARE_GC_Z_ZARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class CollectedHeap;
+
+// ZGC's implementation of the GCArguments interface: flag processing and
+// heap creation when running with -XX:+UseZGC.
+class ZArguments : public GCArguments {
+public:
+  virtual void initialize();
+  virtual size_t conservative_max_heap_alignment();
+  virtual CollectedHeap* create_heap();
+};
+
+#endif // SHARE_GC_Z_ZARGUMENTS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArray.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARRAY_HPP
+#define SHARE_GC_Z_ZARRAY_HPP
+
+#include "memory/allocation.hpp"
+
+// A minimal dynamically growing array of T, backed by C-heap memory
+// (mtGC). Capacity doubles on growth, starting at initial_capacity.
+// Copy construction and assignment are disallowed.
+template <typename T>
+class ZArray {
+private:
+  static const size_t initial_capacity = 32;
+
+  T*     _array;    // Backing storage (NULL until first add)
+  size_t _size;     // Number of elements in use
+  size_t _capacity; // Number of elements allocated
+
+  // Copy and assignment are not allowed
+  ZArray(const ZArray<T>& array);
+  ZArray<T>& operator=(const ZArray<T>& array);
+
+  // Grow the backing storage to new_capacity elements
+  void expand(size_t new_capacity);
+
+public:
+  ZArray();
+  ~ZArray();
+
+  size_t size() const;
+  bool is_empty() const;
+
+  T at(size_t index) const;
+
+  void add(T value);
+  void clear();
+};
+
+// Iterator over a ZArray. The "parallel" template parameter selects how
+// the cursor is advanced (see zArray.inline.hpp): atomically when shared
+// between threads, or with a plain increment for single-threaded use.
+template <typename T, bool parallel>
+class ZArrayIteratorImpl : public StackObj {
+private:
+  ZArray<T>* const _array;
+  size_t           _next;
+
+public:
+  ZArrayIteratorImpl(ZArray<T>* array);
+
+  bool next(T* elem);
+};
+
+// Iterator types
+#define ZARRAY_SERIAL      false
+#define ZARRAY_PARALLEL    true
+
+// Single-threaded iterator.
+template <typename T>
+class ZArrayIterator : public ZArrayIteratorImpl<T, ZARRAY_SERIAL> {
+public:
+  ZArrayIterator(ZArray<T>* array) :
+      ZArrayIteratorImpl<T, ZARRAY_SERIAL>(array) {}
+};
+
+// Iterator that multiple threads may pull elements from concurrently;
+// each element is handed out to exactly one thread.
+template <typename T>
+class ZArrayParallelIterator : public ZArrayIteratorImpl<T, ZARRAY_PARALLEL> {
+public:
+  ZArrayParallelIterator(ZArray<T>* array) :
+      ZArrayIteratorImpl<T, ZARRAY_PARALLEL>(array) {}
+};
+
+#endif // SHARE_GC_Z_ZARRAY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARRAY_INLINE_HPP
+#define SHARE_GC_Z_ZARRAY_INLINE_HPP
+
+#include "gc/z/zArray.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/atomic.hpp"
+
+// Starts out empty; backing storage is allocated lazily on first add().
+template <typename T>
+inline ZArray<T>::ZArray() :
+    _array(NULL),
+    _size(0),
+    _capacity(0) {}
+
+// Frees the backing C-heap array, if one was ever allocated.
+template <typename T>
+inline ZArray<T>::~ZArray() {
+  if (_array != NULL) {
+    FREE_C_HEAP_ARRAY(T, _array);
+  }
+}
+
+template <typename T>
+inline size_t ZArray<T>::size() const {
+  return _size;
+}
+
+template <typename T>
+inline bool ZArray<T>::is_empty() const {
+  return size() == 0;
+}
+
+// Returns the element at index; bounds are only checked by the assert in
+// debug builds.
+template <typename T>
+inline T ZArray<T>::at(size_t index) const {
+  assert(index < _size, "Index out of bounds");
+  return _array[index];
+}
+
+// Grows the backing storage to new_capacity elements, copying the old
+// contents over. NOTE(review): the copy uses memcpy, so this assumes T
+// is trivially copyable.
+template <typename T>
+inline void ZArray<T>::expand(size_t new_capacity) {
+  T* new_array = NEW_C_HEAP_ARRAY(T, new_capacity, mtGC);
+  if (_array != NULL) {
+    memcpy(new_array, _array, sizeof(T) * _capacity);
+    FREE_C_HEAP_ARRAY(T, _array);
+  }
+
+  _array = new_array;
+  _capacity = new_capacity;
+}
+
+// Appends value, doubling the capacity when full (amortized O(1)).
+template <typename T>
+inline void ZArray<T>::add(T value) {
+  if (_size == _capacity) {
+    const size_t new_capacity = (_capacity > 0) ? _capacity * 2 : initial_capacity;
+    expand(new_capacity);
+  }
+
+  _array[_size++] = value;
+}
+
+// Drops all elements but keeps the allocated capacity for reuse.
+template <typename T>
+inline void ZArray<T>::clear() {
+  _size = 0;
+}
+
+template <typename T, bool parallel>
+inline ZArrayIteratorImpl<T, parallel>::ZArrayIteratorImpl(ZArray<T>* array) :
+    _array(array),
+    _next(0) {}
+
+// Stores the next element in *elem and returns true, or returns false
+// when the array is exhausted. In parallel mode the cursor is claimed
+// with an atomic fetch-and-add, so each index is handed out to exactly
+// one thread; in serial mode a plain increment is used. The "parallel"
+// branch is resolved at compile time.
+template <typename T, bool parallel>
+inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
+  if (parallel) {
+    const size_t next = Atomic::add(1u, &_next) - 1u;
+    if (next < _array->size()) {
+      *elem = _array->at(next);
+      return true;
+    }
+  } else {
+    if (_next < _array->size()) {
+      *elem = _array->at(_next++);
+      return true;
+    }
+  }
+
+  // No more elements
+  return false;
+}
+
+#endif // SHARE_GC_Z_ZARRAY_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrier.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "utilities/debug.hpp"
+
+// True while the global GC phase is marking.
+bool ZBarrier::during_mark() {
+  return ZGlobalPhase == ZPhaseMark;
+}
+
+// True while the global GC phase is relocation.
+bool ZBarrier::during_relocate() {
+  return ZGlobalPhase == ZPhaseRelocate;
+}
+
+// Decides whether mark() should actually mark the object (push it for
+// tracing), as opposed to only converting its address to a good oop.
+template <bool finalizable>
+bool ZBarrier::should_mark_through(uintptr_t addr) {
+  // Finalizable marked oops can still exists on the heap after marking
+  // has completed, in which case we just want to convert this into a
+  // good oop and not push it on the mark stack.
+  if (!during_mark()) {
+    assert(ZAddress::is_marked(addr), "Should be marked");
+    assert(ZAddress::is_finalizable(addr), "Should be finalizable");
+    return false;
+  }
+
+  // During marking, we mark through already marked oops to avoid having
+  // some large part of the object graph hidden behind a pushed, but not
+  // yet flushed, entry on a mutator mark stack. Always marking through
+  // allows the GC workers to proceed through the object graph even if a
+  // mutator touched an oop first, which in turn will reduce the risk of
+  // having to flush mark stacks multiple times to terminate marking.
+  //
+  // However, when doing finalizable marking we don't always want to mark
+  // through. First, marking through an already strongly marked oop would
+  // be wasteful, since we will then proceed to do finalizable marking on
+  // an object which is, or will be, marked strongly. Second, marking
+  // through an already finalizable marked oop would also be wasteful,
+  // since such oops can never end up on a mutator mark stack and can
+  // therefore not hide some part of the object graph from GC workers.
+  if (finalizable) {
+    return !ZAddress::is_marked(addr);
+  }
+
+  // Mark through
+  return true;
+}
+
+// Returns the good address for addr, remapping/forwarding it first if
+// needed, and marks the object through when should_mark_through() says
+// so. "finalizable" selects marking strength and "publish" selects how
+// mark stack entries are published (both forwarded to mark_object()).
+template <bool finalizable, bool publish>
+uintptr_t ZBarrier::mark(uintptr_t addr) {
+  uintptr_t good_addr;
+
+  if (ZAddress::is_marked(addr)) {
+    // Already marked, but try to mark though anyway
+    good_addr = ZAddress::good(addr);
+  } else if (ZAddress::is_remapped(addr)) {
+    // Already remapped, but also needs to be marked
+    good_addr = ZAddress::good(addr);
+  } else {
+    // Needs to be both remapped and marked
+    good_addr = remap(addr);
+  }
+
+  // Mark
+  if (should_mark_through<finalizable>(addr)) {
+    ZHeap::heap()->mark_object<finalizable, publish>(good_addr);
+  }
+
+  return good_addr;
+}
+
+// Returns the current good address for a stale addr: the forwarded
+// address if the object's page is being relocated, otherwise the same
+// address with the good color.
+uintptr_t ZBarrier::remap(uintptr_t addr) {
+  assert(!ZAddress::is_good(addr), "Should not be good");
+  assert(!ZAddress::is_weak_good(addr), "Should not be weak good");
+
+  if (ZHeap::heap()->is_relocating(addr)) {
+    // Forward
+    return ZHeap::heap()->forward_object(addr);
+  }
+
+  // Remap
+  return ZAddress::good(addr);
+}
+
+// Like remap(), but actually relocates the object (rather than only
+// following an existing forwarding) when its page is being relocated.
+uintptr_t ZBarrier::relocate(uintptr_t addr) {
+  assert(!ZAddress::is_good(addr), "Should not be good");
+  assert(!ZAddress::is_weak_good(addr), "Should not be weak good");
+
+  if (ZHeap::heap()->is_relocating(addr)) {
+    // Relocate
+    return ZHeap::heap()->relocate_object(addr);
+  }
+
+  // Remap
+  return ZAddress::good(addr);
+}
+
+// Phase-dependent slow path: relocate during relocation, otherwise mark.
+uintptr_t ZBarrier::relocate_or_mark(uintptr_t addr) {
+  return during_relocate() ? relocate(addr) : mark<Strong, Publish>(addr);
+}
+
+// Phase-dependent slow path: relocate during relocation, otherwise remap.
+uintptr_t ZBarrier::relocate_or_remap(uintptr_t addr) {
+  return during_relocate() ? relocate(addr) : remap(addr);
+}
+
+//
+// Load barrier
+//
+uintptr_t ZBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) {
+  return relocate_or_mark(addr);
+}
+
+// Applies the load barrier to every oop field of the (good) object o.
+void ZBarrier::load_barrier_on_oop_fields(oop o) {
+  assert(ZOop::is_good(o), "Should be good");
+  ZLoadBarrierOopClosure cl;
+  o->oop_iterate(&cl);
+}
+
+//
+// Weak load barrier
+//
+// An already weak-good address only needs re-coloring to good; anything
+// else goes through the phase-dependent relocate/remap handling.
+uintptr_t ZBarrier::weak_load_barrier_on_oop_slow_path(uintptr_t addr) {
+  return ZAddress::is_weak_good(addr) ? ZAddress::good(addr) : relocate_or_remap(addr);
+}
+
+// Returns the good address if the object is strongly live, otherwise 0
+// (i.e. the weak reference should be treated as cleared).
+uintptr_t ZBarrier::weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  if (ZHeap::heap()->is_object_strongly_live(good_addr)) {
+    return good_addr;
+  }
+
+  // Not strongly live
+  return 0;
+}
+
+// Returns the good address if the object is live at all, otherwise 0.
+uintptr_t ZBarrier::weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  if (ZHeap::heap()->is_object_live(good_addr)) {
+    return good_addr;
+  }
+
+  // Not live
+  return 0;
+}
+
+//
+// Keep alive barrier
+//
+// Like the weak load barrier, but the caller guarantees the object is
+// alive, so liveness is asserted rather than checked.
+uintptr_t ZBarrier::keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  assert(ZHeap::heap()->is_object_strongly_live(good_addr), "Should be live");
+  return good_addr;
+}
+
+uintptr_t ZBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr);
+  assert(ZHeap::heap()->is_object_live(good_addr), "Should be live");
+  return good_addr;
+}
+
+//
+// Mark barrier
+//
+// Strong marking; uses the Overflow publishing mode for mark stacks.
+uintptr_t ZBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) {
+  return mark<Strong, Overflow>(addr);
+}
+
+uintptr_t ZBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) {
+  const uintptr_t good_addr = mark<Finalizable, Overflow>(addr);
+  if (ZAddress::is_good(addr)) {
+    // If the oop was already strongly marked/good, then we do
+    // not want to downgrade it to finalizable marked/good.
+    return good_addr;
+  }
+
+  // Make the oop finalizable marked/good, instead of normal marked/good.
+  // This is needed because an object might first become finalizable
+  // marked by the GC, and then loaded by a mutator thread. In this case,
+  // the mutator thread must be able to tell that the object needs to be
+  // strongly marked. The finalizable bit in the oop exists to make sure
+  // that a load of a finalizable marked oop will fall into the barrier
+  // slow path so that we can mark the object as strongly reachable.
+  return ZAddress::finalizable_good(good_addr);
+}
+
+// Root marking; only runs inside a safepoint during the mark phase.
+uintptr_t ZBarrier::mark_barrier_on_root_oop_slow_path(uintptr_t addr) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+  assert(during_mark(), "Invalid phase");
+
+  // Mark
+  return mark<Strong, Publish>(addr);
+}
+
+//
+// Relocate barrier
+//
+// Root relocation; only runs inside a safepoint during the relocate phase.
+uintptr_t ZBarrier::relocate_barrier_on_root_oop_slow_path(uintptr_t addr) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+  assert(during_relocate(), "Invalid phase");
+
+  // Relocate
+  return relocate(addr);
+}
+
+//
+// Narrow oop variants, never used.
+//
+// ZGC force-disables UseCompressedOops (see ZArguments::initialize()),
+// so none of these narrowOop overloads can ever be reached. They exist
+// only to satisfy the BarrierSet interface.
+oop ZBarrier::load_barrier_on_oop_field(volatile narrowOop* p) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+void ZBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) {
+  ShouldNotReachHere();
+}
+
+oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
+
+oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) {
+  ShouldNotReachHere();
+  return NULL;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrier.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIER_HPP
+#define SHARE_GC_Z_ZBARRIER_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+
+// Function pointer types for a barrier's fast path check and slow path
+// handler, both operating on raw (colored) oop addresses.
+typedef bool (*ZBarrierFastPath)(uintptr_t);
+typedef uintptr_t (*ZBarrierSlowPath)(uintptr_t);
+
+// ZGC barrier machinery. Each public barrier entry point is built from a
+// generic fast-path/slow-path combinator (barrier/weak_barrier/
+// root_barrier) instantiated with one of the fast path checks and slow
+// path handlers below.
+class ZBarrier : public AllStatic {
+private:
+  // Marking strength, passed as the "finalizable" template argument
+  static const bool Strong      = false;
+  static const bool Finalizable = true;
+
+  // Mark stack publishing mode, passed as the "publish" template argument
+  static const bool Publish     = true;
+  static const bool Overflow    = false;
+
+  // Generic combinators: run fast_path on the loaded address and fall
+  // back to slow_path (self-healing *p where applicable) when it fails
+  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static oop barrier(volatile oop* p, oop o);
+  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static oop weak_barrier(volatile oop* p, oop o);
+  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static void root_barrier(oop* p, oop o);
+
+  // Fast path checks
+  static bool is_null_fast_path(uintptr_t addr);
+  static bool is_good_or_null_fast_path(uintptr_t addr);
+  static bool is_weak_good_or_null_fast_path(uintptr_t addr);
+
+  static bool is_resurrection_blocked(volatile oop* p, oop* o);
+
+  static bool during_mark();
+  static bool during_relocate();
+  template <bool finalizable> static bool should_mark_through(uintptr_t addr);
+  template <bool finalizable, bool publish> static uintptr_t mark(uintptr_t addr);
+  static uintptr_t remap(uintptr_t addr);
+  static uintptr_t relocate(uintptr_t addr);
+  static uintptr_t relocate_or_mark(uintptr_t addr);
+  static uintptr_t relocate_or_remap(uintptr_t addr);
+
+  // Slow path handlers (see zBarrier.cpp)
+  static uintptr_t load_barrier_on_oop_slow_path(uintptr_t addr);
+
+  static uintptr_t weak_load_barrier_on_oop_slow_path(uintptr_t addr);
+  static uintptr_t weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr);
+  static uintptr_t weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr);
+
+  static uintptr_t keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr);
+  static uintptr_t keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr);
+
+  static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr);
+  static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr);
+  static uintptr_t mark_barrier_on_root_oop_slow_path(uintptr_t addr);
+
+  static uintptr_t relocate_barrier_on_root_oop_slow_path(uintptr_t addr);
+
+public:
+  // Load barrier
+  static  oop load_barrier_on_oop(oop o);
+  static  oop load_barrier_on_oop_field(volatile oop* p);
+  static  oop load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
+  static void load_barrier_on_oop_array(volatile oop* p, size_t length);
+  static void load_barrier_on_oop_fields(oop o);
+  static  oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
+  static  oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
+
+  // Weak load barrier
+  static oop weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o);
+  static oop weak_load_barrier_on_weak_oop(oop o);
+  static oop weak_load_barrier_on_weak_oop_field(volatile oop* p);
+  static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
+  static oop weak_load_barrier_on_phantom_oop(oop o);
+  static oop weak_load_barrier_on_phantom_oop_field(volatile oop* p);
+  static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
+
+  // Is alive barrier
+  static bool is_alive_barrier_on_weak_oop(oop o);
+  static bool is_alive_barrier_on_phantom_oop(oop o);
+
+  // Keep alive barrier
+  static void keep_alive_barrier_on_weak_oop_field(volatile oop* p);
+  static void keep_alive_barrier_on_phantom_oop_field(volatile oop* p);
+
+  // Mark barrier
+  static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable);
+  static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable);
+  static void mark_barrier_on_root_oop_field(oop* p);
+
+  // Relocate barrier
+  static void relocate_barrier_on_root_oop_field(oop* p);
+
+  // Narrow oop variants, never used (compressed oops is disabled by ZGC).
+  static oop  load_barrier_on_oop_field(volatile narrowOop* p);
+  static oop  load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
+  static void load_barrier_on_oop_array(volatile narrowOop* p, size_t length);
+  static oop  load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
+  static oop  load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
+  static oop  weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o);
+  static oop  weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o);
+  static oop  weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o);
+};
+
+#endif // SHARE_GC_Z_ZBARRIER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
+#define SHARE_GC_Z_ZBARRIER_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zBarrier.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zResurrection.inline.hpp"
+#include "runtime/atomic.hpp"
+
+template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
+inline oop ZBarrier::barrier(volatile oop* p, oop o) {
+  uintptr_t addr = ZOop::to_address(o);
+
+retry:
+  // Fast path
+  if (fast_path(addr)) {
+    return ZOop::to_oop(addr);
+  }
+
+  // Slow path
+  const uintptr_t good_addr = slow_path(addr);
+
+  // Self heal, but only if the address was actually updated by the slow path,
+  // which might not be the case, e.g. when marking through an already good oop.
+  if (p != NULL && good_addr != addr) {
+    const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
+    if (prev_addr != addr) {
+      // Some other thread overwrote the oop. If this oop was updated by a
+      // weak barrier the new oop might not be good, in which case we need
+      // to re-apply this barrier.
+      addr = prev_addr;
+      goto retry;
+    }
+  }
+
+  return ZOop::to_oop(good_addr);
+}
+
+template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
+inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
+  const uintptr_t addr = ZOop::to_address(o);
+
+  // Fast path
+  if (fast_path(addr)) {
+    // Return the good address instead of the weak good address
+    // to ensure that the currently active heap view is used.
+    return ZOop::to_oop(ZAddress::good_or_null(addr));
+  }
+
+  // Slow path
+  uintptr_t good_addr = slow_path(addr);
+
+  // Self heal unless the address returned from the slow path is null,
+  // in which case resurrection was blocked and we must let the reference
+  // processor clear the oop. Mutators are not allowed to clear oops in
+  // these cases, since that would be similar to calling Reference.clear(),
+  // which would make the reference non-discoverable or silently dropped
+  // by the reference processor.
+  if (p != NULL && good_addr != 0) {
+    // The slow path returns a good/marked address, but we never mark oops
+    // in a weak load barrier so we always self heal with the remapped address.
+    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
+    const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
+    if (prev_addr != addr) {
+      // Some other thread overwrote the oop. The new
+      // oop is guaranteed to be weak good or null.
+      assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");
+
+      // Return the good address instead of the weak good address
+      // to ensure that the currently active heap view is used.
+      good_addr = ZAddress::good_or_null(prev_addr);
+    }
+  }
+
+  return ZOop::to_oop(good_addr);
+}
+
+template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
+inline void ZBarrier::root_barrier(oop* p, oop o) {
+  const uintptr_t addr = ZOop::to_address(o);
+
+  // Fast path
+  if (fast_path(addr)) {
+    return;
+  }
+
+  // Slow path
+  const uintptr_t good_addr = slow_path(addr);
+
+  // Non-atomic healing helps speed up root scanning. This is safe to do
+  // since we are always healing roots in a safepoint, which means we are
+  // never racing with mutators modifying roots while we are healing them.
+  // It's also safe in case multiple GC threads try to heal the same root,
+  // since they would always heal the root in the same way and it does not
+  // matter in which order it happens.
+  *p = ZOop::to_oop(good_addr);
+}
+
+inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
+  return ZAddress::is_null(addr);
+}
+
+inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
+  return ZAddress::is_good_or_null(addr);
+}
+
+inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
+  return ZAddress::is_weak_good_or_null(addr);
+}
+
+inline bool ZBarrier::is_resurrection_blocked(volatile oop* p, oop* o) {
+  const bool is_blocked = ZResurrection::is_blocked();
+
+  // Reload oop after checking the resurrection blocked state. This is
+  // done to prevent a race where we first load an oop, which is logically
+  // null but not yet cleared, then this oop is cleared by the reference
+  // processor and resurrection is unblocked. At this point the mutator
+  // would see the unblocked state and pass this invalid oop through the
+  // normal barrier path, which would incorrectly try to mark this oop.
+  if (p != NULL) {
+    // First assign to reloaded_o to avoid compiler warning about
+    // implicit dereference of volatile oop.
+    const oop reloaded_o = *p;
+    *o = reloaded_o;
+  }
+
+  return is_blocked;
+}
+
+//
+// Load barrier
+//
+inline oop ZBarrier::load_barrier_on_oop(oop o) {
+  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
+}
+
+inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
+  const oop o = *p;
+  return load_barrier_on_oop_field_preloaded(p, o);
+}
+
+inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
+  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
+}
+
+inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
+  for (volatile const oop* const end = p + length; p < end; p++) {
+    load_barrier_on_oop_field(p);
+  }
+}
+
+inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
+  if (is_resurrection_blocked(p, &o)) {
+    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
+  }
+
+  return load_barrier_on_oop_field_preloaded(p, o);
+}
+
+inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
+  if (is_resurrection_blocked(p, &o)) {
+    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
+  }
+
+  return load_barrier_on_oop_field_preloaded(p, o);
+}
+
+//
+// Weak load barrier
+//
+inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
+  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
+  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
+  const oop o = *p;
+  return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
+  if (is_resurrection_blocked(p, &o)) {
+    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
+  }
+
+  return weak_load_barrier_on_oop_field_preloaded(p, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
+  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
+  const oop o = *p;
+  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
+}
+
+inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
+  if (is_resurrection_blocked(p, &o)) {
+    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
+  }
+
+  return weak_load_barrier_on_oop_field_preloaded(p, o);
+}
+
+//
+// Is alive barrier
+//
+inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
+  // Check if oop is logically non-null. This operation
+  // is only valid when resurrection is blocked.
+  assert(ZResurrection::is_blocked(), "Invalid phase");
+  return weak_load_barrier_on_weak_oop(o) != NULL;
+}
+
+inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
+  // Check if oop is logically non-null. This operation
+  // is only valid when resurrection is blocked.
+  assert(ZResurrection::is_blocked(), "Invalid phase");
+  return weak_load_barrier_on_phantom_oop(o) != NULL;
+}
+
+//
+// Keep alive barrier
+//
+inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
+  // This operation is only valid when resurrection is blocked.
+  assert(ZResurrection::is_blocked(), "Invalid phase");
+  const oop o = *p;
+  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
+}
+
+inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
+  // This operation is only valid when resurrection is blocked.
+  assert(ZResurrection::is_blocked(), "Invalid phase");
+  const oop o = *p;
+  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
+}
+
+//
+// Mark barrier
+//
+inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
+  // The fast path only checks for null since the GC worker
+  // threads doing marking want to mark through good oops.
+  const oop o = *p;
+
+  if (finalizable) {
+    barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
+  } else {
+    barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
+  }
+}
+
+inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
+  for (volatile const oop* const end = p + length; p < end; p++) {
+    mark_barrier_on_oop_field(p, finalizable);
+  }
+}
+
+inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
+  const oop o = *p;
+  root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
+}
+
+//
+// Relocate barrier
+//
+inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
+  const oop o = *p;
+  root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
+}
+
+#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSet.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "runtime/thread.hpp"
+
+ZBarrierSet::ZBarrierSet() :
+    BarrierSet(make_barrier_set_assembler<ZBarrierSetAssembler>(),
+               make_barrier_set_c1<ZBarrierSetC1>(),
+               make_barrier_set_c2<ZBarrierSetC2>(),
+               BarrierSet::FakeRtti(BarrierSet::ZBarrierSet)) {}
+
+ZBarrierSetAssembler* ZBarrierSet::assembler() {
+  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
+  return reinterpret_cast<ZBarrierSetAssembler*>(bsa);
+}
+
+bool ZBarrierSet::barrier_needed(DecoratorSet decorators, BasicType type) {
+  assert((decorators & AS_RAW) == 0, "Unexpected decorator");
+  assert((decorators & AS_NO_KEEPALIVE) == 0, "Unexpected decorator");
+  assert((decorators & IN_ARCHIVE_ROOT) == 0, "Unexpected decorator");
+  //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unexpected decorator");
+
+  if (type == T_OBJECT || type == T_ARRAY) {
+    if (((decorators & IN_HEAP) != 0) ||
+        ((decorators & IN_CONCURRENT_ROOT) != 0) ||
+        ((decorators & ON_PHANTOM_OOP_REF) != 0)) {
+      // Barrier needed
+      return true;
+    }
+  }
+
+  // Barrier not needed
+  return false;
+}
+
+void ZBarrierSet::on_thread_create(Thread* thread) {
+  // Create thread local data
+  ZThreadLocalData::create(thread);
+}
+
+void ZBarrierSet::on_thread_destroy(Thread* thread) {
+  // Destroy thread local data
+  ZThreadLocalData::destroy(thread);
+}
+
+void ZBarrierSet::on_thread_attach(JavaThread* thread) {
+  // Set thread local address bad mask
+  ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
+}
+
+void ZBarrierSet::on_thread_detach(JavaThread* thread) {
+  // Flush and free any remaining mark stacks
+  ZHeap::heap()->mark_flush_and_free(thread);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSet.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIERSET_HPP
+#define SHARE_GC_Z_ZBARRIERSET_HPP
+
+#include "gc/shared/barrierSet.hpp"
+
+class ZBarrierSetAssembler;
+
+class ZBarrierSet : public BarrierSet {
+public:
+  ZBarrierSet();
+
+  static ZBarrierSetAssembler* assembler();
+  static bool barrier_needed(DecoratorSet decorators, BasicType type);
+
+  virtual void on_thread_create(Thread* thread);
+  virtual void on_thread_destroy(Thread* thread);
+  virtual void on_thread_attach(JavaThread* thread);
+  virtual void on_thread_detach(JavaThread* thread);
+
+  virtual void print_on(outputStream* st) const {}
+
+  template <DecoratorSet decorators, typename BarrierSetT = ZBarrierSet>
+  class AccessBarrier : public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
+  private:
+    typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
+
+    template <DecoratorSet expected>
+    static void verify_decorators_present();
+
+    template <DecoratorSet expected>
+    static void verify_decorators_absent();
+
+    static oop* field_addr(oop base, ptrdiff_t offset);
+
+    template <typename T>
+    static oop load_barrier_on_oop_field_preloaded(T* addr, oop o);
+
+    template <typename T>
+    static oop load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o);
+
+  public:
+    //
+    // In heap
+    //
+    template <typename T>
+    static oop oop_load_in_heap(T* addr);
+    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
+
+    template <typename T>
+    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
+    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
+
+    template <typename T>
+    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
+    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
+
+    template <typename T>
+    static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                      size_t length);
+
+    static void clone_in_heap(oop src, oop dst, size_t size);
+
+    //
+    // Not in heap
+    //
+    template <typename T>
+    static oop oop_load_not_in_heap(T* addr);
+
+    template <typename T>
+    static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
+
+    template <typename T>
+    static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
+  };
+};
+
+template<> struct BarrierSet::GetName<ZBarrierSet> {
+  static const BarrierSet::Name value = BarrierSet::ZBarrierSet;
+};
+
+template<> struct BarrierSet::GetType<BarrierSet::ZBarrierSet> {
+  typedef ::ZBarrierSet type;
+};
+
+#endif // SHARE_GC_Z_ZBARRIERSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
+#define SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
+
+#include "gc/shared/accessBarrierSupport.inline.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "utilities/debug.hpp"
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <DecoratorSet expected>
+inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_present() {
+  if ((decorators & expected) == 0) {
+    fatal("Using unsupported access decorators");
+  }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <DecoratorSet expected>
+inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::verify_decorators_absent() {
+  if ((decorators & expected) != 0) {
+    fatal("Using unsupported access decorators");
+  }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop* ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::field_addr(oop base, ptrdiff_t offset) {
+  assert(base != NULL, "Invalid base");
+  return reinterpret_cast<oop*>(reinterpret_cast<intptr_t>((void*)base) + offset);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_oop_field_preloaded(T* addr, oop o) {
+  verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
+
+  if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    if (HasDecorator<decorators, ON_STRONG_OOP_REF>::value) {
+      return ZBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o);
+    } else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
+      return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
+    } else {
+      return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
+    }
+  } else {
+    if (HasDecorator<decorators, ON_STRONG_OOP_REF>::value) {
+      return ZBarrier::load_barrier_on_oop_field_preloaded(addr, o);
+    } else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
+      return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
+    } else {
+      return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
+    }
+  }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o) {
+  verify_decorators_present<ON_UNKNOWN_OOP_REF>();
+
+  const DecoratorSet decorators_known_strength =
+    AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset);
+
+  if (HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    if (decorators_known_strength & ON_STRONG_OOP_REF) {
+      return ZBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o);
+    } else if (decorators_known_strength & ON_WEAK_OOP_REF) {
+      return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
+    } else {
+      return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
+    }
+  } else {
+    if (decorators_known_strength & ON_STRONG_OOP_REF) {
+      return ZBarrier::load_barrier_on_oop_field_preloaded(addr, o);
+    } else if (decorators_known_strength & ON_WEAK_OOP_REF) {
+      return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
+    } else {
+      return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
+    }
+  }
+}
+
+//
+// In heap
+//
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
+  verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
+
+  const oop o = Raw::oop_load_in_heap(addr);
+  return load_barrier_on_oop_field_preloaded(addr, o);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
+  oop* const addr = field_addr(base, offset);
+  const oop o = Raw::oop_load_in_heap(addr);
+
+  if (HasDecorator<decorators, ON_UNKNOWN_OOP_REF>::value) {
+    return load_barrier_on_unknown_oop_field_preloaded(base, offset, addr, o);
+  }
+
+  return load_barrier_on_oop_field_preloaded(addr, o);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  ZBarrier::load_barrier_on_oop_field(addr);
+  return Raw::oop_atomic_cmpxchg_in_heap(new_value, addr, compare_value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
+  verify_decorators_present<ON_STRONG_OOP_REF | ON_UNKNOWN_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  // Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can receive
+  // calls with ON_UNKNOWN_OOP_REF set. However, we treat these as ON_STRONG_OOP_REF,
+  // with the motivation that if you're doing Unsafe operations on a Reference.referent
+  // field, then you're on your own anyway.
+  ZBarrier::load_barrier_on_oop_field(field_addr(base, offset));
+  return Raw::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  const oop o = Raw::oop_atomic_xchg_in_heap(new_value, addr);
+  return ZBarrier::load_barrier_on_oop(o);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  const oop o = Raw::oop_atomic_xchg_in_heap_at(new_value, base, offset);
+  return ZBarrier::load_barrier_on_oop(o);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline bool ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                                                                       arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                                                                       size_t length) {
+  T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
+  T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
+
+  if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
+    // No check cast, bulk barrier and bulk copy
+    ZBarrier::load_barrier_on_oop_array(src, length);
+    return Raw::oop_arraycopy_in_heap(NULL, 0, src, NULL, 0, dst, length);
+  }
+
+  // Check cast and copy each element
+  Klass* const dst_klass = objArrayOop(dst_obj)->element_klass();
+  for (const T* const end = src + length; src < end; src++, dst++) {
+    const oop elem = ZBarrier::load_barrier_on_oop_field(src);
+    if (!oopDesc::is_instanceof_or_null(elem, dst_klass)) {
+      // Check cast failed
+      return false;
+    }
+
+    // Cast is safe, since we know it's never a narrowOop
+    *(oop*)dst = elem;
+  }
+
+  return true;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
+  ZBarrier::load_barrier_on_oop_fields(src);
+  Raw::clone_in_heap(src, dst, size);
+}
+
+//
+// Not in heap
+//
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
+  const oop o = Raw::oop_load_not_in_heap(addr);
+
+  if (HasDecorator<decorators, ON_PHANTOM_OOP_REF>::value) {
+    return load_barrier_on_oop_field_preloaded(addr, o);
+  }
+
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  return o;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  return Raw::oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
+  verify_decorators_present<ON_STRONG_OOP_REF>();
+  verify_decorators_absent<AS_NO_KEEPALIVE>();
+
+  return Raw::oop_atomic_xchg_not_in_heap(new_value, addr);
+}
+
+#endif // SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSetAssembler.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "runtime/thread.hpp"
+
+Address ZBarrierSetAssemblerBase::address_bad_mask_from_thread(Register thread) {
+  return Address(thread, ZThreadLocalData::address_bad_mask_offset());
+}
+
+Address ZBarrierSetAssemblerBase::address_bad_mask_from_jni_env(Register env) {
+  return Address(env, ZThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset());
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP
+#define SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+#include "oops/accessDecorators.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+class ZBarrierSetAssemblerBase : public BarrierSetAssembler {
+public:
+  static Address address_bad_mask_from_thread(Register thread);
+  static Address address_bad_mask_from_jni_env(Register env);
+};
+
+#include CPU_HEADER(gc/z/zBarrierSetAssembler)
+
+#endif // SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSetRuntime.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+
+// Runtime entry points called from generated code when the load-barrier
+// fast path fails. Each forwards to the corresponding ZBarrier slow path.
+// Note the (o, p) -> (p, o) argument swap when delegating: callers pass
+// the preloaded oop first, ZBarrier takes the field address first.
+JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p))
+  return ZBarrier::load_barrier_on_oop_field_preloaded(p, o);
+JRT_END
+
+// As above, for fields accessed with ON_WEAK_OOP_REF semantics.
+JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p))
+  return ZBarrier::load_barrier_on_weak_oop_field_preloaded(p, o);
+JRT_END
+
+// As above, for fields accessed with ON_PHANTOM_OOP_REF semantics.
+JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p))
+  return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(p, o);
+JRT_END
+
+// Applies the load barrier to 'length' consecutive oop slots starting at p.
+JRT_LEAF(void, ZBarrierSetRuntime::load_barrier_on_oop_array(oop* p, size_t length))
+  ZBarrier::load_barrier_on_oop_array(p, length);
+JRT_END
+
+// Selects the runtime stub address matching the access decorators.
+// Phantom is tested before weak; any other decorator set gets the
+// strong-field barrier.
+address ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators) {
+  if (decorators & ON_PHANTOM_OOP_REF) {
+    return load_barrier_on_phantom_oop_field_preloaded_addr();
+  } else if (decorators & ON_WEAK_OOP_REF) {
+    return load_barrier_on_weak_oop_field_preloaded_addr();
+  } else {
+    return load_barrier_on_oop_field_preloaded_addr();
+  }
+}
+
+// Accessors exposing the entry-point addresses of the private runtime
+// functions above, for use by code generators when emitting calls.
+address ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() {
+  return reinterpret_cast<address>(load_barrier_on_oop_field_preloaded);
+}
+
+address ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr() {
+  return reinterpret_cast<address>(load_barrier_on_weak_oop_field_preloaded);
+}
+
+address ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr() {
+  return reinterpret_cast<address>(load_barrier_on_phantom_oop_field_preloaded);
+}
+
+address ZBarrierSetRuntime::load_barrier_on_oop_array_addr() {
+  return reinterpret_cast<address>(load_barrier_on_oop_array);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSetRuntime.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP
+#define SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/accessDecorators.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class oopDesc;
+
+// Holder for the ZGC load-barrier runtime entry points called from
+// generated code. The entries themselves are private; code generators
+// obtain their addresses through the *_addr() accessors.
+class ZBarrierSetRuntime : public AllStatic {
+private:
+  static oopDesc* load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p);
+  static oopDesc* load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p);
+  static oopDesc* load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p);
+  static void load_barrier_on_oop_array(oop* p, size_t length);
+
+public:
+  // Returns the entry matching the given access decorators
+  // (strong/weak/phantom reference strength).
+  static address load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators);
+  static address load_barrier_on_oop_field_preloaded_addr();
+  static address load_barrier_on_weak_oop_field_preloaded_addr();
+  static address load_barrier_on_phantom_oop_field_preloaded_addr();
+  static address load_barrier_on_oop_array_addr();
+};
+
+#endif // SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBitField.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBITFIELD_HPP
+#define SHARE_GC_Z_ZBITFIELD_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+//
+//  Example
+//  -------
+//
+//  typedef ZBitField<uint64_t, uint8_t,  0,  2, 3> field_word_aligned_size;
+//  typedef ZBitField<uint64_t, uint32_t, 2, 30>    field_length;
+//
+//
+//   6                                 3 3
+//   3                                 2 1                               2 10
+//  +-----------------------------------+---------------------------------+--+
+//  |11111111 11111111 11111111 11111111|11111111 11111111 11111111 111111|11|
+//  +-----------------------------------+---------------------------------+--+
+//  |                                   |                                 |
+//  |       31-2 field_length (30-bits) *                                 |
+//  |                                                                     |
+//  |                                1-0 field_word_aligned_size (2-bits) *
+//  |
+//  * 63-32 Unused (32-bits)
+//
+//
+//  field_word_aligned_size::encode(16) = 2
+//  field_length::encode(2342) = 9368
+//
+//  field_word_aligned_size::decode(9368 | 2) = 16
+//  field_length::decode(9368 | 2) = 2342
+//
+
+// Compile-time description of a bit field within an integral container
+// type. FieldShift/FieldBits select the bits inside the container;
+// ValueShift lets the stored value be pre-shifted, e.g. a word-aligned
+// size kept in fewer bits (see the example above).
+template <typename ContainerType, typename ValueType, int FieldShift, int FieldBits, int ValueShift = 0>
+class ZBitField : public AllStatic {
+private:
+  static const int ContainerBits = sizeof(ContainerType) * BitsPerByte;
+
+  // FieldBits must be strictly less than ContainerBits so the FieldMask
+  // computation below never shifts by the full container width (which
+  // would be undefined behavior).
+  STATIC_ASSERT(FieldBits < ContainerBits);
+  STATIC_ASSERT(FieldShift + FieldBits <= ContainerBits);
+  STATIC_ASSERT(ValueShift + FieldBits <= ContainerBits);
+
+  static const ContainerType FieldMask = (((ContainerType)1 << FieldBits) - 1);
+
+public:
+  // Extracts the field from the container and re-applies ValueShift.
+  static ValueType decode(ContainerType container) {
+    return (ValueType)(((container >> FieldShift) & FieldMask) << ValueShift);
+  }
+
+  // Packs a value into the field's position. Asserts that the value fits
+  // in the field, including its ValueShift alignment.
+  static ContainerType encode(ValueType value) {
+    assert(((ContainerType)value & (FieldMask << ValueShift)) == (ContainerType)value, "Invalid value");
+    return ((ContainerType)value >> ValueShift) << FieldShift;
+  }
+};
+
+#endif // SHARE_GC_Z_ZBITFIELD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBitMap.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBITMAP_HPP
+#define SHARE_GC_Z_ZBITMAP_HPP
+
+#include "utilities/bitMap.hpp"
+
+// CHeapBitMap extension used by ZGC marking. Mark state is kept as
+// adjacent bit pairs (bit N plus bit N+1), updated atomically by
+// concurrent marking threads.
+class ZBitMap : public CHeapBitMap {
+private:
+  // Mask covering both bits of the pair starting at 'bit'.
+  static bm_word_t bit_mask_pair(idx_t bit);
+
+  bool par_set_bit_pair_finalizable(idx_t bit, bool& inc_live);
+  bool par_set_bit_pair_strong(idx_t bit, bool& inc_live);
+
+public:
+  ZBitMap(idx_t size_in_bits);
+
+  // Atomically sets the mark bit(s) for 'bit'. Returns true if this call
+  // changed the map; inc_live reports whether live accounting should be
+  // incremented (the first bit of the pair was previously clear).
+  bool par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live);
+};
+
+#endif // SHARE_GC_Z_ZBITMAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBitMap.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBITMAP_INLINE_HPP
+#define SHARE_GC_Z_ZBITMAP_INLINE_HPP
+
+#include "gc/z/zBitMap.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/bitMap.inline.hpp"
+#include "utilities/debug.hpp"
+
+// Bitmap backed by C heap (mtGC); not cleared on construction, since the
+// marking code clears it explicitly when needed.
+inline ZBitMap::ZBitMap(idx_t size_in_bits) :
+    CHeapBitMap(size_in_bits, mtGC, false /* clear */) {}
+
+// Mask covering the two adjacent bits starting at 'bit'. The pair must
+// not straddle a word boundary, hence the < BitsPerWord - 1 assert.
+inline BitMap::bm_word_t ZBitMap::bit_mask_pair(idx_t bit) {
+  assert(bit_in_word(bit) < BitsPerWord - 1, "Invalid bit index");
+  return (bm_word_t)3 << bit_in_word(bit);
+}
+
+// Finalizable marking sets only the first bit of the pair. Live
+// accounting is incremented exactly when the bit was newly set.
+inline bool ZBitMap::par_set_bit_pair_finalizable(idx_t bit, bool& inc_live) {
+  inc_live = par_set_bit(bit);
+  return inc_live;
+}
+
+// Strong marking: atomically sets both bits of the pair using a CAS
+// retry loop. Returns true if this thread set at least one new bit.
+// inc_live is true only when the first ("marked") bit was previously
+// clear, i.e. the object had not been counted live yet (it may have
+// been finalizable-marked before by another thread).
+inline bool ZBitMap::par_set_bit_pair_strong(idx_t bit, bool& inc_live) {
+  verify_index(bit);
+  volatile bm_word_t* const addr = word_addr(bit);
+  const bm_word_t pair_mask = bit_mask_pair(bit);
+  bm_word_t old_val = *addr;
+
+  do {
+    const bm_word_t new_val = old_val | pair_mask;
+    if (new_val == old_val) {
+      inc_live = false;
+      return false;     // Someone else beat us to it.
+    }
+    // HotSpot cmpxchg argument order is (exchange_value, dest,
+    // compare_value); the return value is what was found at *addr.
+    const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
+    if (cur_val == old_val) {
+      const bm_word_t marked_mask = bit_mask(bit);
+      inc_live = !(old_val & marked_mask);
+      return true;      // Success.
+    }
+    old_val = cur_val;  // The value changed, try again.
+  } while (true);
+}
+
+// Dispatches to finalizable (single-bit) or strong (bit-pair) marking.
+inline bool ZBitMap::par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live) {
+  if (finalizable) {
+    return par_set_bit_pair_finalizable(bit, inc_live);
+  } else {
+    return par_set_bit_pair_strong(bit, inc_live);
+  }
+}
+
+#endif // SHARE_GC_Z_ZBITMAP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCPU.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zCPU.hpp"
+#include "logging/log.hpp"
+#include "memory/padded.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/debug.hpp"
+
+// Sentinel values for the per-CPU affinity table and the thread-local
+// self cache. Defined without a trailing semicolon (CERT PRE11-C) so
+// expansions compose cleanly in statement and initializer context; the
+// previous definitions ended in ';', silently producing a stray empty
+// statement/declaration at every use site (e.g. ';;' on the _self
+// initializer below). Parenthesized to bind safely in any expression.
+#define ZCPU_UNKNOWN_AFFINITY ((Thread*)-1)
+#define ZCPU_UNKNOWN_SELF     ((Thread*)-2)
+
+PaddedEnd<ZCPU::ZCPUAffinity>* ZCPU::_affinity = NULL;
+__thread Thread*  ZCPU::_self                  = ZCPU_UNKNOWN_SELF;
+__thread uint32_t ZCPU::_cpu                   = 0;
+
+// Allocates and resets the per-CPU affinity table. Called once during
+// ZGC initialization, before any call to id().
+void ZCPU::initialize() {
+  assert(_affinity == NULL, "Already initialized");
+  const uint32_t ncpus = count();
+
+  // Padded to keep entries on separate cache lines; never freed.
+  _affinity = PaddedArray<ZCPUAffinity, mtGC>::create_unfreeable(ncpus);
+
+  for (uint32_t i = 0; i < ncpus; i++) {
+    _affinity[i]._thread = ZCPU_UNKNOWN_AFFINITY;
+  }
+
+  log_info(gc, init)("CPUs: %u total, %u available",
+                     os::processor_count(),
+                     os::initial_active_processor_count());
+}
+
+// Total number of processors, as reported by the OS layer.
+uint32_t ZCPU::count() {
+  return os::processor_count();
+}
+
+// Best-effort id of the CPU the current thread runs on. The fast path
+// reuses the thread-local cached _cpu if this thread is still the one
+// recorded in the affinity slot; otherwise the OS is queried and the
+// table updated. The result is inherently racy (the thread may migrate
+// right after), which is acceptable for its use as a CPU-local hint.
+uint32_t ZCPU::id() {
+  assert(_affinity != NULL, "Not initialized");
+
+  // Fast path
+  if (_affinity[_cpu]._thread == _self) {
+    return _cpu;
+  }
+
+  // Slow path
+  _self = Thread::current();
+  _cpu = os::processor_id();
+
+  // Update affinity table
+  _affinity[_cpu]._thread = _self;
+
+  return _cpu;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCPU.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZCPU_HPP
+#define SHARE_GC_Z_ZCPU_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/padded.hpp"
+
+class Thread;
+
+// Best-effort mapping from the current thread to the CPU it runs on,
+// used to index per-CPU data structures. Combines thread-local caches
+// (_self/_cpu) with a shared, padded affinity table.
+class ZCPU : public AllStatic {
+private:
+  struct ZCPUAffinity {
+    Thread* _thread;  // Last thread observed running on this CPU
+  };
+
+  static PaddedEnd<ZCPUAffinity>* _affinity;
+  static __thread Thread*         _self;
+  static __thread uint32_t        _cpu;
+
+public:
+  static void initialize();
+
+  static uint32_t count();
+  static uint32_t id();
+};
+
+#endif // SHARE_GC_Z_ZCPU_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/gcHeapSummary.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zServiceability.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+
+// Returns the singleton heap instance, downcast to ZCollectedHeap.
+// Must not be called before the heap has been created.
+ZCollectedHeap* ZCollectedHeap::heap() {
+  CollectedHeap* heap = Universe::heap();
+  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
+  // Fixed assert message: the predicate checks the heap kind, not a name.
+  assert(heap->kind() == CollectedHeap::Z, "Invalid kind");
+  return (ZCollectedHeap*)heap;
+}
+
+// Member initialization order matters: _barrier_set is constructed
+// before _initialize (which receives its address) and before _heap.
+ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
+    _collector_policy(policy),
+    _soft_ref_policy(),
+    _barrier_set(),
+    _initialize(&_barrier_set),
+    _heap(),
+    _director(new ZDirector()),
+    _driver(new ZDriver()),
+    _stat(new ZStat()),
+    _runtime_workers() {}
+
+CollectedHeap::Name ZCollectedHeap::kind() const {
+  return CollectedHeap::Z;
+}
+
+const char* ZCollectedHeap::name() const {
+  return ZGCName;
+}
+
+// Completes heap setup. Most of the work happened when _heap was
+// constructed, so this only checks the outcome and registers the
+// reserved address range with the base class.
+jint ZCollectedHeap::initialize() {
+  if (!_heap.is_initialized()) {
+    return JNI_ENOMEM;
+  }
+
+  initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
+                             (HeapWord*)ZAddressReservedEnd());
+
+  return JNI_OK;
+}
+
+void ZCollectedHeap::initialize_serviceability() {
+  _heap.serviceability_initialize();
+}
+
+// Stops the concurrent GC threads; called at VM shutdown.
+void ZCollectedHeap::stop() {
+  _director->stop();
+  _driver->stop();
+  _stat->stop();
+}
+
+CollectorPolicy* ZCollectedHeap::collector_policy() const {
+  return _collector_policy;
+}
+
+SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
+  return &_soft_ref_policy;
+}
+
+// Capacity/usage queries all delegate to ZHeap.
+size_t ZCollectedHeap::max_capacity() const {
+  return _heap.max_capacity();
+}
+
+size_t ZCollectedHeap::capacity() const {
+  return _heap.capacity();
+}
+
+size_t ZCollectedHeap::used() const {
+  return _heap.used();
+}
+
+bool ZCollectedHeap::is_maximal_no_gc() const {
+  // Not supported
+  ShouldNotReachHere();
+  return false;
+}
+
+bool ZCollectedHeap::is_scavengable(oop obj) {
+  return false;
+}
+
+// A pointer is in the heap if it lies in the reserved range and ZHeap
+// recognizes the address.
+bool ZCollectedHeap::is_in(const void* p) const {
+  return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
+}
+
+bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
+  return is_in(p);
+}
+
+// TLAB allocation. On success the full requested size is granted;
+// *actual_size is left untouched when allocation fails (NULL return).
+HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
+  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
+  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);
+
+  if (addr != 0) {
+    *actual_size = requested_size;
+  }
+
+  return (HeapWord*)addr;
+}
+
+// Non-TLAB object allocation. The gc_overhead_limit_was_exceeded out
+// parameter is never set by ZGC.
+HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
+  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
+  return (HeapWord*)_heap.alloc_object(size_in_bytes);
+}
+
+// Called after a metadata allocation failed. Tries, in order: an
+// asynchronous GC plus metaspace expansion, then a synchronous
+// soft-ref-clearing GC followed by a plain retry, and finally another
+// expansion. Returns NULL if all attempts fail (out of memory).
+MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
+                                                             size_t size,
+                                                             Metaspace::MetadataType mdtype) {
+  MetaWord* result;
+
+  // Start asynchronous GC
+  collect(GCCause::_metadata_GC_threshold);
+
+  // Expand and retry allocation
+  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
+  if (result != NULL) {
+    return result;
+  }
+
+  // Start synchronous GC
+  collect(GCCause::_metadata_GC_clear_soft_refs);
+
+  // Retry allocation
+  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
+  if (result != NULL) {
+    return result;
+  }
+
+  // Expand and retry allocation
+  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
+  if (result != NULL) {
+    return result;
+  }
+
+  // Out of memory
+  return NULL;
+}
+
+// Forwards collection requests to the GC driver thread.
+void ZCollectedHeap::collect(GCCause::Cause cause) {
+  _driver->collect(cause);
+}
+
+void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
+  // These collection requests are ignored since ZGC can't run a synchronous
+  // GC cycle from within the VM thread. This is considered benign, since the
+  // only GC causes coming in here should be heap dumper and heap inspector.
+  // However, neither the heap dumper nor the heap inspector really need a GC
+  // to happen, but the result of their heap iterations might in that case be
+  // less accurate since they might include objects that would otherwise have
+  // been collected by a GC.
+  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
+  guarantee(cause == GCCause::_heap_dump ||
+            cause == GCCause::_heap_inspection, "Invalid cause");
+}
+
+void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
+  // Not supported
+  ShouldNotReachHere();
+}
+
+// TLAB support. ZGC keeps no per-thread statistics here; the Thread*
+// parameters are ignored and heap-wide values are returned.
+bool ZCollectedHeap::supports_tlab_allocation() const {
+  return true;
+}
+
+size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
+  return _heap.tlab_capacity();
+}
+
+size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
+  return _heap.tlab_used();
+}
+
+size_t ZCollectedHeap::max_tlab_size() const {
+  return _heap.max_tlab_size();
+}
+
+size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
+  return _heap.unsafe_max_tlab_alloc();
+}
+
+bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
+  return false;
+}
+
+// The deferred-store-barrier/card-marking queries below only apply to
+// card-marking collectors and must never be reached for ZGC.
+bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
+  // Not supported
+  ShouldNotReachHere();
+  return true;
+}
+
+bool ZCollectedHeap::card_mark_must_follow_store() const {
+  // Not supported
+  ShouldNotReachHere();
+  return false;
+}
+
+// Serviceability: ZGC exposes a single memory manager and a single
+// memory pool (GrowableArray constructed with capacity 1, length 1).
+GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
+  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
+}
+
+GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
+  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
+}
+
+void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
+  _heap.object_iterate(cl);
+}
+
+// Identical to object_iterate for ZGC.
+void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
+  _heap.object_iterate(cl);
+}
+
+// Block queries, expressed in HeapWords on the CollectedHeap side and
+// in bytes/uintptr_t on the ZHeap side.
+HeapWord* ZCollectedHeap::block_start(const void* addr) const {
+  return (HeapWord*)_heap.block_start((uintptr_t)addr);
+}
+
+size_t ZCollectedHeap::block_size(const HeapWord* addr) const {
+  size_t size_in_bytes = _heap.block_size((uintptr_t)addr);
+  return ZUtils::bytes_to_words(size_in_bytes);
+}
+
+bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
+  return _heap.block_is_obj((uintptr_t)addr);
+}
+
+// nmethod (compiled code) registration: ZGC tracks oops embedded in
+// compiled code via ZNMethodTable. Callers must hold CodeCache_lock or
+// be at a safepoint.
+void ZCollectedHeap::register_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  ZNMethodTable::register_nmethod(nm);
+}
+
+void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  ZNMethodTable::unregister_nmethod(nm);
+}
+
+void ZCollectedHeap::verify_nmethod(nmethod* nm) {
+  // Does nothing
+}
+
+// Safepoint work (e.g. parallel thread scanning) is done by the
+// runtime worker gang rather than the GC worker threads.
+WorkGang* ZCollectedHeap::get_safepoint_workers() {
+  return _runtime_workers.workers();
+}
+
+// Time since the last GC cycle, divided by MILLIUNITS to yield
+// milliseconds. NOTE(review): assumes ZStatCycle::time_since_last()
+// returns a MILLIUNITS-finer unit - confirm against zStat.
+jlong ZCollectedHeap::millis_since_last_gc() {
+  return ZStatCycle::time_since_last() / MILLIUNITS;
+}
+
+// Applies the closure to every GC-owned thread: director, driver, stat,
+// the heap's worker threads and the runtime workers.
+void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
+  tc->do_thread(_director);
+  tc->do_thread(_driver);
+  tc->do_thread(_stat);
+  _heap.worker_threads_do(tc);
+  _runtime_workers.threads_do(tc);
+}
+
+// Summarizes the reserved range as [start, start + capacity,
+// start + max_capacity), expressed in heap words.
+VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
+  const size_t capacity_in_words = capacity() / HeapWordSize;
+  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
+  return VirtualSpaceSummary(reserved_region().start(),
+                             reserved_region().start() + capacity_in_words,
+                             reserved_region().start() + max_capacity_in_words);
+}
+
+void ZCollectedHeap::prepare_for_verify() {
+  // Does nothing
+}
+
+void ZCollectedHeap::print_on(outputStream* st) const {
+  _heap.print_on(st);
+}
+
+// Extended state dump for hs_err crash logs: address-space layout,
+// global GC phase/sequence numbers, page sizes, and the current
+// metadata (colored pointer) masks.
+void ZCollectedHeap::print_on_error(outputStream* st) const {
+  CollectedHeap::print_on_error(st);
+
+  st->print_cr("Address Space");
+  st->print_cr( "     Start:             " PTR_FORMAT, ZAddressSpaceStart);
+  st->print_cr( "     End:               " PTR_FORMAT, ZAddressSpaceEnd);
+  st->print_cr( "     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
+  st->print_cr( "Heap");
+  st->print_cr( "     GlobalPhase:       %u", ZGlobalPhase);
+  st->print_cr( "     GlobalSeqNum:      %u", ZGlobalSeqNum);
+  st->print_cr( "     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
+  st->print_cr( "     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
+  st->print_cr( "     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
+  st->print_cr( "Metadata Bits");
+  st->print_cr( "     Good:              " PTR_FORMAT, ZAddressGoodMask);
+  st->print_cr( "     Bad:               " PTR_FORMAT, ZAddressBadMask);
+  st->print_cr( "     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
+  st->print_cr( "     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
+  st->print_cr( "     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
+}
+
+void ZCollectedHeap::print_extended_on(outputStream* st) const {
+  _heap.print_extended_on(st);
+}
+
+void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
+  _director->print_on(st);
+  st->cr();
+  _driver->print_on(st);
+  st->cr();
+  _stat->print_on(st);
+  st->cr();
+  _heap.print_worker_threads_on(st);
+  _runtime_workers.print_threads_on(st);
+}
+
+void ZCollectedHeap::print_tracing_info() const {
+  // Does nothing
+}
+
+void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
+  _heap.verify();
+}
+
+// An oop is valid only if it passes both the generic CollectedHeap
+// checks and ZGC's own checks in ZHeap.
+bool ZCollectedHeap::is_oop(oop object) const {
+  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
+#define SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/softRefPolicy.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zCollectorPolicy.hpp"
+#include "gc/z/zDirector.hpp"
+#include "gc/z/zDriver.hpp"
+#include "gc/z/zInitialize.hpp"
+#include "gc/z/zHeap.hpp"
+#include "gc/z/zRuntimeWorkers.hpp"
+#include "gc/z/zStat.hpp"
+
+// CollectedHeap implementation for ZGC. A thin adapter that forwards
+// the CollectedHeap interface to ZHeap and the ZGC-owned threads
+// (director, driver, stat, runtime workers).
+class ZCollectedHeap : public CollectedHeap {
+  friend class VMStructs;
+
+private:
+  ZCollectorPolicy* _collector_policy;
+  SoftRefPolicy     _soft_ref_policy;
+  ZBarrierSet       _barrier_set;
+  ZInitialize       _initialize;    // Constructed after _barrier_set, whose address it receives
+  ZHeap             _heap;
+  ZDirector*        _director;
+  ZDriver*          _driver;
+  ZStat*            _stat;
+  ZRuntimeWorkers   _runtime_workers;
+
+  virtual HeapWord* allocate_new_tlab(size_t min_size,
+                                      size_t requested_size,
+                                      size_t* actual_size);
+
+public:
+  // Returns the singleton heap, downcast to ZCollectedHeap.
+  static ZCollectedHeap* heap();
+
+  using CollectedHeap::ensure_parsability;
+  using CollectedHeap::accumulate_statistics_all_tlabs;
+  using CollectedHeap::resize_all_tlabs;
+
+  ZCollectedHeap(ZCollectorPolicy* policy);
+  virtual Name kind() const;
+  virtual const char* name() const;
+  virtual jint initialize();
+  virtual void initialize_serviceability();
+  virtual void stop();
+
+  virtual CollectorPolicy* collector_policy() const;
+  virtual SoftRefPolicy* soft_ref_policy();
+
+  virtual size_t max_capacity() const;
+  virtual size_t capacity() const;
+  virtual size_t used() const;
+
+  virtual bool is_maximal_no_gc() const;
+  virtual bool is_scavengable(oop obj);
+  virtual bool is_in(const void* p) const;
+  virtual bool is_in_closed_subset(const void* p) const;
+
+  // Allocation and GC requests
+  virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
+  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
+                                                       size_t size,
+                                                       Metaspace::MetadataType mdtype);
+  virtual void collect(GCCause::Cause cause);
+  virtual void collect_as_vm_thread(GCCause::Cause cause);
+  virtual void do_full_collection(bool clear_all_soft_refs);
+
+  // TLAB support (Thread* parameters are ignored; see the .cpp)
+  virtual bool supports_tlab_allocation() const;
+  virtual size_t tlab_capacity(Thread* thr) const;
+  virtual size_t tlab_used(Thread* thr) const;
+  virtual size_t max_tlab_size() const;
+  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
+
+  virtual bool can_elide_tlab_store_barriers() const;
+  virtual bool can_elide_initializing_store_barrier(oop new_obj);
+  virtual bool card_mark_must_follow_store() const;
+
+  virtual GrowableArray<GCMemoryManager*> memory_managers();
+  virtual GrowableArray<MemoryPool*> memory_pools();
+
+  virtual void object_iterate(ObjectClosure* cl);
+  virtual void safe_object_iterate(ObjectClosure* cl);
+
+  virtual HeapWord* block_start(const void* addr) const;
+  virtual size_t block_size(const HeapWord* addr) const;
+  virtual bool block_is_obj(const HeapWord* addr) const;
+
+  virtual void register_nmethod(nmethod* nm);
+  virtual void unregister_nmethod(nmethod* nm);
+  virtual void verify_nmethod(nmethod* nmethod);
+
+  virtual WorkGang* get_safepoint_workers();
+
+  virtual jlong millis_since_last_gc();
+
+  virtual void gc_threads_do(ThreadClosure* tc) const;
+
+  virtual VirtualSpaceSummary create_heap_space_summary();
+
+  virtual void print_on(outputStream* st) const;
+  virtual void print_on_error(outputStream* st) const;
+  virtual void print_extended_on(outputStream* st) const;
+  virtual void print_gc_threads_on(outputStream* st) const;
+  virtual void print_tracing_info() const;
+
+  virtual void prepare_for_verify();
+  virtual void verify(VerifyOption option /* ignored */);
+  virtual bool is_oop(oop object) const;
+};
+
+#endif // SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCollectorPolicy.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zCollectorPolicy.hpp"
+#include "gc/z/zGlobals.hpp"
+
+// Use the minimum Z page size for both space and heap alignment, so the
+// heap can always be carved into whole (small) pages.
+void ZCollectorPolicy::initialize_alignments() {
+  _space_alignment = ZPageSizeMin;
+  _heap_alignment = _space_alignment;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zCollectorPolicy.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
+#define SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
+
+#include "gc/shared/collectorPolicy.hpp"
+
+// Collector policy for ZGC. Only alignment initialization is specialized;
+// all other policy behavior is inherited from CollectorPolicy.
+class ZCollectorPolicy : public CollectorPolicy {
+public:
+  virtual void initialize_alignments();
+};
+
+#endif // SHARE_GC_Z_ZCOLLECTORPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zDebug.gdb	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,147 @@
+#
+# GDB functions for debugging the Z Garbage Collector
+#
+
+printf "Loading zDebug.gdb\n"
+
+# Print Klass*
+# $arg0: a Klass* (or address castable to one); prints its symbol name
+define zpk
+    printf "Klass: %s\n", (char*)((Klass*)($arg0))->_name->_body
+end
+
+# Print oop
+# $arg0: an oop (colored pointer). Decodes the ZGC metadata bits in the
+# pointer against the current global good mask to classify the pointer as
+# Good/Bad and Marked/Remapped, then dumps the object header and fields.
+define zpo
+    set $obj = (oopDesc*)($arg0)
+
+    printf "Oop:   0x%016llx\tState: ", (uintptr_t)$obj
+    if ((uintptr_t)$obj & (uintptr_t)ZAddressGoodMask)
+        printf "Good "
+        if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataRemapped)
+            printf "(Remapped)"
+        else
+            if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataMarked)
+                printf "(Marked)"
+            else
+                printf "(Unknown)"
+            end
+        end
+    else
+        printf "Bad "
+        if ((uintptr_t)ZAddressGoodMask & (uintptr_t)ZAddressMetadataMarked)
+            # Should be marked
+            if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataRemapped)
+                printf "(Not Marked, Remapped)"
+            else
+                printf "(Not Marked, Not Remapped)"
+            end
+        else
+            if ((uintptr_t)ZAddressGoodMask & (uintptr_t)ZAddressMetadataRemapped)
+                # Should be remapped
+                if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataMarked)
+                    printf "(Marked, Not Remapped)"
+                else
+                    printf "(Not Marked, Not Remapped)"
+                end
+            else
+                # Unknown
+                printf "(Unknown)"
+            end
+        end
+    end
+    # Page index = heap offset divided by the minimum page size
+    printf "\t Page: %llu\n", ((uintptr_t)$obj & ZAddressOffsetMask) >> ZPageSizeMinShift
+    # Dump the first 16 words of the object
+    x/16gx $obj
+    printf "Mark:  0x%016llx\tKlass: %s\n", (uintptr_t)$obj->_mark, (char*)$obj->_metadata->_klass->_name->_body
+end
+
+# Print heap page by pagetable index
+# $arg0: index into ZHeap's pagetable. The low bit of a pagetable entry is
+# used as a tag and is masked off to recover the ZPage pointer.
+define zpp
+    set $page = (ZPage*)((uintptr_t)ZHeap::_heap._pagetable._map._map[($arg0)] & ~1)
+    printf "Page %p\n", $page
+    print *$page
+end
+
+# Print pagetable
+# Dumps the raw entries (tagged ZPage pointers, see zpp) of the first
+# 128 pagetable slots.
+define zpt
+    printf "Pagetable (first 128 slots)\n"
+    x/128gx ZHeap::_heap._pagetable._map._map
+end
+
+# Print live map
+define __zmarked
+    set $livemap   = $arg0
+    set $bit        = $arg1
+    set $size       = $livemap._bitmap._size
+    set $segment    = $size / ZLiveMap::nsegments
+    set $segment_bit = 1 << $segment
+
+    printf "Segment is "
+    if !($livemap._segment_live_bits & $segment_bit)
+        printf "NOT "
+    end
+    printf "live (segment %d)\n", $segment
+
+    if $bit >= $size
+        print "Error: Bit %z out of bounds (bitmap size %z)\n", $bit, $size
+    else
+        set $word_index = $bit / 64
+        set $bit_index  = $bit % 64
+        set $word       = $livemap._bitmap._map[$word_index]
+        set $live_bit   = $word & (1 << $bit_index)
+
+        printf "Object is "
+        if $live_bit == 0
+            printf "NOT "
+        end
+        printf "live (word index %d, bit index %d)\n", $word_index, $bit_index
+    end
+end
+
+# Print liveness information for the object at address $arg0: looks up the
+# object's ZPage via the pagetable, sanity-checks the address against the
+# page bounds, then reports page and object liveness.
+define zmarked
+    set $addr          = $arg0
+    set $obj           = ((uintptr_t)$addr & ZAddressOffsetMask)
+    set $page_index    = $obj >> ZPageSizeMinShift
+    set $page_entry    = (uintptr_t)ZHeap::_heap._pagetable._map._map[$page_index]
+    set $page          = (ZPage*)($page_entry & ~1)
+    set $page_start    = (uintptr_t)$page._virtual._start
+    set $page_end      = (uintptr_t)$page._virtual._end
+    set $page_seqnum   = $page._livemap._seqnum
+    set $global_seqnum = ZGlobalSeqNum
+
+    if $obj < $page_start || $obj >= $page_end
+        printf "Error: %p not in page %p (start %p, end %p)\n", $obj, $page, $page_start, $page_end
+    else
+        printf "Page is "
+        if $page_seqnum != $global_seqnum
+            printf "NOT "
+        end
+        printf "live (page %p, page seqnum %d, global seqnum %d)\n", $page, $page_seqnum, $global_seqnum
+
+        # NOTE(review): the seqnum guard below is deliberately left commented
+        # out so the raw bits are always shown, even when the livemap is
+        # stale — confirm this is intentional. Bit index assumes 8-byte
+        # object alignment.
+        #if $page_seqnum == $global_seqnum
+            set $offset = $obj - $page_start
+            set $bit = $offset / 8
+            __zmarked $page._livemap $bit
+        #end
+    end
+end
+
+# Print heap information
+# Dumps the global ZGC constants: address space layout, current phase and
+# sequence number, page sizes, and the current metadata (color) masks.
+define zph
+    printf "Address Space\n"
+    printf "     Start:             0x%llx\n", ZAddressSpaceStart
+    printf "     End:               0x%llx\n", ZAddressSpaceEnd
+    printf "     Size:              %-15llu (0x%llx)\n", ZAddressSpaceSize, ZAddressSpaceSize
+    printf "Heap\n"
+    printf "     GlobalPhase:       %u\n", ZGlobalPhase
+    printf "     GlobalSeqNum:      %u\n", ZGlobalSeqNum
+    printf "     Offset Max:        %-15llu (0x%llx)\n", ZAddressOffsetMax, ZAddressOffsetMax
+    printf "     Page Size Small:   %-15llu (0x%llx)\n", ZPageSizeSmall, ZPageSizeSmall
+    printf "     Page Size Medium:  %-15llu (0x%llx)\n", ZPageSizeMedium, ZPageSizeMedium
+    printf "Metadata Bits\n"
+    printf "     Good:              0x%016llx\n", ZAddressGoodMask
+    printf "     Bad:               0x%016llx\n", ZAddressBadMask
+    printf "     WeakBad:           0x%016llx\n", ZAddressWeakBadMask
+    printf "     Marked:            0x%016llx\n", ZAddressMetadataMarked
+    printf "     Remapped:          0x%016llx\n", ZAddressMetadataRemapped
+end
+
+# End of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zDirector.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zDirector.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zUtils.hpp"
+#include "logging/log.hpp"
+
+const double ZDirector::one_in_1000 = 3.290527;
+
+// Start the director thread. The metronome ticks at the allocation rate
+// sampler's frequency, so each tick produces exactly one sample.
+ZDirector::ZDirector() :
+    _metronome(ZStatAllocRate::sample_hz) {
+  set_name("ZDirector");
+  create_and_start();
+}
+
+// Take one allocation-rate sample and log it (values logged in MB/s).
+void ZDirector::sample_allocation_rate() const {
+  // Sample allocation rate. This is needed by rule_allocation_rate()
+  // below to estimate the time we have until we run out of memory.
+  const double bytes_per_second = ZStatAllocRate::sample_and_reset();
+
+  log_debug(gc, alloc)("Allocation Rate: %.3fMB/s, Avg: %.3f(+/-%.3f)MB/s",
+                       bytes_per_second / M,
+                       ZStatAllocRate::avg() / M,
+                       ZStatAllocRate::avg_sd() / M);
+}
+
+// True before the first GC cycle has completed.
+bool ZDirector::is_first() const {
+  return ZStatCycle::ncycles() == 0;
+}
+
+// True once at least three GC cycles have completed, i.e. once the
+// duration/rate statistics have enough samples to be trusted.
+bool ZDirector::is_warm() const {
+  return ZStatCycle::ncycles() >= 3;
+}
+
+// Rule 0: start a GC when ZCollectionInterval seconds have passed since
+// the previous cycle. Disabled when ZCollectionInterval is 0.
+bool ZDirector::rule_timer() const {
+  if (ZCollectionInterval == 0) {
+    // Rule disabled
+    return false;
+  }
+
+  // Perform GC if timer has expired.
+  const double time_since_last_gc = ZStatCycle::time_since_last();
+  const double time_until_gc = ZCollectionInterval - time_since_last_gc;
+
+  log_debug(gc, director)("Rule: Timer, Interval: %us, TimeUntilGC: %.3lfs",
+                          ZCollectionInterval, time_until_gc);
+
+  return time_until_gc <= 0;
+}
+
+// Rule 1: trigger early GCs while the statistics are still cold, so the
+// other rules get duration samples to work with. Disabled once warm.
+bool ZDirector::rule_warmup() const {
+  if (is_warm()) {
+    // Rule disabled
+    return false;
+  }
+
+  // Perform GC if heap usage passes 10/20/30% and no other GC has been
+  // performed yet. This allows us to get some early samples of the GC
+  // duration, which is needed by the other rules.
+  const size_t max_capacity = ZHeap::heap()->max_capacity();
+  const size_t used = ZHeap::heap()->used();
+  const double used_threshold_percent = (ZStatCycle::ncycles() + 1) * 0.1;
+  const size_t used_threshold = max_capacity * used_threshold_percent;
+
+  log_debug(gc, director)("Rule: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB",
+                          used_threshold_percent * 100, used / M, used_threshold / M);
+
+  return used >= used_threshold;
+}
+
+// Rule 2: start a GC early enough that, at the estimated max allocation
+// rate, the cycle finishes before the heap runs out of free memory.
+bool ZDirector::rule_allocation_rate() const {
+  if (is_first()) {
+    // Rule disabled
+    return false;
+  }
+
+  // Perform GC if the estimated max allocation rate indicates that we
+  // will run out of memory. The estimated max allocation rate is based
+  // on the moving average of the sampled allocation rate plus a safety
+  // margin based on variations in the allocation rate and unforeseen
+  // allocation spikes.
+
+  // Calculate amount of free memory available to Java threads. Note that
+  // the heap reserve is not available to Java threads and is therefore not
+  // considered part of the free memory.
+  const size_t max_capacity = ZHeap::heap()->max_capacity();
+  const size_t max_reserve = ZHeap::heap()->max_reserve();
+  const size_t used = ZHeap::heap()->used();
+  const size_t free_with_reserve = max_capacity - used;
+  const size_t free = free_with_reserve - MIN2(free_with_reserve, max_reserve);
+
+  // Calculate time until OOM given the max allocation rate and the amount
+  // of free memory. The allocation rate is a moving average and we multiply
+  // that with an allocation spike tolerance factor to guard against unforeseen
+  // phase changes in the allocation rate. We then add ~3.3 sigma to account for
+  // the allocation rate variance, which means the probability is 1 in 1000
+  // that a sample is outside of the confidence interval.
+  const double max_alloc_rate = (ZStatAllocRate::avg() * ZAllocationSpikeTolerance) + (ZStatAllocRate::avg_sd() * one_in_1000);
+  const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero
+
+  // Calculate max duration of a GC cycle. The duration of GC is a moving
+  // average, we add ~3.3 sigma to account for the GC duration variance.
+  const AbsSeq& duration_of_gc = ZStatCycle::normalized_duration();
+  const double max_duration_of_gc = duration_of_gc.davg() + (duration_of_gc.dsd() * one_in_1000);
+
+  // Calculate time until GC given the time until OOM and max duration of GC.
+  // We also deduct the sample interval, so that we don't overshoot the target
+  // time and end up starting the GC too late in the next interval.
+  const double sample_interval = 1.0 / ZStatAllocRate::sample_hz;
+  const double time_until_gc = time_until_oom - max_duration_of_gc - sample_interval;
+
+  log_debug(gc, director)("Rule: Allocation Rate, MaxAllocRate: %.3lfMB/s, Free: " SIZE_FORMAT "MB, MaxDurationOfGC: %.3lfs, TimeUntilGC: %.3lfs",
+                          max_alloc_rate / M, free / M, max_duration_of_gc, time_until_gc);
+
+  return time_until_gc <= 0;
+}
+
+// Rule 3: proactively GC when the throughput cost of doing so is small,
+// to keep heap size down and let reference processing happen. Disabled
+// when ZProactive is off or statistics are not yet warm.
+bool ZDirector::rule_proactive() const {
+  if (!ZProactive || !is_warm()) {
+    // Rule disabled
+    return false;
+  }
+
+  // Perform GC if the impact of doing so, in terms of application throughput
+  // reduction, is considered acceptable. This rule allows us to keep the heap
+  // size down and allow reference processing to happen even when we have a lot
+  // of free space on the heap.
+
+  // Only consider doing a proactive GC if the heap usage has grown by at least
+  // 10% of the max capacity since the previous GC, or more than 5 minutes has
+  // passed since the previous GC. This helps avoid superfluous GCs when running
+  // applications with very low allocation rate.
+  const size_t used_after_last_gc = ZStatHeap::used_at_relocate_end();
+  const size_t used_increase_threshold = ZHeap::heap()->max_capacity() * 0.10; // 10%
+  const size_t used_threshold = used_after_last_gc + used_increase_threshold;
+  const size_t used = ZHeap::heap()->used();
+  const double time_since_last_gc = ZStatCycle::time_since_last();
+  const double time_since_last_gc_threshold = 5 * 60; // 5 minutes
+  if (used < used_threshold && time_since_last_gc < time_since_last_gc_threshold) {
+    // Don't even consider doing a proactive GC
+    log_debug(gc, director)("Rule: Proactive, UsedUntilEnabled: " SIZE_FORMAT "MB, TimeUntilEnabled: %.3lfs",
+                            (used_threshold - used) / M,
+                            time_since_last_gc_threshold - time_since_last_gc);
+    return false;
+  }
+
+  // GC is acceptable when the amortized throughput drop (assumed 50% during
+  // a cycle) over the interval since the last GC stays within 1%.
+  const double assumed_throughput_drop_during_gc = 0.50; // 50%
+  const double acceptable_throughput_drop = 0.01;        // 1%
+  const AbsSeq& duration_of_gc = ZStatCycle::normalized_duration();
+  const double max_duration_of_gc = duration_of_gc.davg() + (duration_of_gc.dsd() * one_in_1000);
+  const double acceptable_gc_interval = max_duration_of_gc * ((assumed_throughput_drop_during_gc / acceptable_throughput_drop) - 1.0);
+  const double time_until_gc = acceptable_gc_interval - time_since_last_gc;
+
+  log_debug(gc, director)("Rule: Proactive, AcceptableGCInterval: %.3lfs, TimeSinceLastGC: %.3lfs, TimeUntilGC: %.3lfs",
+                          acceptable_gc_interval, time_since_last_gc, time_until_gc);
+
+  return time_until_gc <= 0;
+}
+
+// Evaluate the rules in priority order and return the cause of the first
+// rule that fires, or _no_gc if none did.
+GCCause::Cause ZDirector::make_gc_decision() const {
+  // Rule 0: Timer
+  if (rule_timer()) {
+    return GCCause::_z_timer;
+  }
+
+  // Rule 1: Warmup
+  if (rule_warmup()) {
+    return GCCause::_z_warmup;
+  }
+
+  // Rule 2: Allocation rate
+  if (rule_allocation_rate()) {
+    return GCCause::_z_allocation_rate;
+  }
+
+  // Rule 3: Proactive
+  if (rule_proactive()) {
+    return GCCause::_z_proactive;
+  }
+
+  // No GC
+  return GCCause::_no_gc;
+}
+
+// Director main loop: once per metronome tick, sample the allocation rate
+// and, if any rule fires, ask the heap to start a GC with that cause.
+void ZDirector::run_service() {
+  // Main loop
+  while (_metronome.wait_for_tick()) {
+    sample_allocation_rate();
+    const GCCause::Cause cause = make_gc_decision();
+    if (cause != GCCause::_no_gc) {
+      ZCollectedHeap::heap()->collect(cause);
+    }
+  }
+}
+
+// Stop the metronome so wait_for_tick() returns false and run_service()
+// terminates.
+void ZDirector::stop_service() {
+  _metronome.stop();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zDirector.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZDIRECTOR_HPP
+#define SHARE_GC_Z_ZDIRECTOR_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "gc/z/zMetronome.hpp"
+
+// Heuristics thread that decides when to start a GC cycle. Once per
+// metronome tick it samples the allocation rate, evaluates its rules in
+// priority order, and requests a collection when one fires.
+class ZDirector : public ConcurrentGCThread {
+private:
+  // ~3.3 sigma; the probability that a sample falls outside this
+  // confidence interval is 1 in 1000 (see rule_allocation_rate()).
+  static const double one_in_1000;
+
+  ZMetronome _metronome;
+
+  void sample_allocation_rate() const;
+
+  bool is_first() const;
+  bool is_warm() const;
+
+  bool rule_timer() const;
+  bool rule_warmup() const;
+  bool rule_allocation_rate() const;
+  bool rule_proactive() const;
+  GCCause::Cause make_gc_decision() const;
+
+protected:
+  virtual void run_service();
+  virtual void stop_service();
+
+public:
+  ZDirector();
+};
+
+#endif // SHARE_GC_Z_ZDIRECTOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zDriver.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcLocker.hpp"
+#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/shared/vmGCOperations.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zDriver.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zMessagePort.inline.hpp"
+#include "gc/z/zServiceability.hpp"
+#include "gc/z/zStat.hpp"
+#include "logging/log.hpp"
+#include "runtime/vm_operations.hpp"
+#include "runtime/vmThread.hpp"
+
+static const ZStatPhaseCycle      ZPhaseCycle("Garbage Collection Cycle");
+static const ZStatPhasePause      ZPhasePauseMarkStart("Pause Mark Start");
+static const ZStatPhaseConcurrent ZPhaseConcurrentMark("Concurrent Mark");
+static const ZStatPhaseConcurrent ZPhaseConcurrentMarkContinue("Concurrent Mark Continue");
+static const ZStatPhasePause      ZPhasePauseMarkEnd("Pause Mark End");
+static const ZStatPhaseConcurrent ZPhaseConcurrentProcessNonStrongReferences("Concurrent Process Non-Strong References");
+static const ZStatPhaseConcurrent ZPhaseConcurrentResetRelocationSet("Concurrent Reset Relocation Set");
+static const ZStatPhaseConcurrent ZPhaseConcurrentDestroyDetachedPages("Concurrent Destroy Detached Pages");
+static const ZStatPhaseConcurrent ZPhaseConcurrentSelectRelocationSet("Concurrent Select Relocation Set");
+static const ZStatPhaseConcurrent ZPhaseConcurrentPrepareRelocationSet("Concurrent Prepare Relocation Set");
+static const ZStatPhasePause      ZPhasePauseRelocateStart("Pause Relocate Start");
+static const ZStatPhaseConcurrent ZPhaseConcurrentRelocated("Concurrent Relocate");
+static const ZStatCriticalPhase   ZCriticalPhaseGCLockerStall("GC Locker Stall", false /* verbose */);
+static const ZStatSampler         ZSamplerJavaThreads("System", "Java Threads", ZStatUnitThreads);
+
+// Base class for the payload of a ZGC VM operation (executed at a
+// safepoint by VM_ZOperation). do_operation() returns whether the
+// operation succeeded.
+class ZOperationClosure : public StackObj {
+public:
+  virtual const char* name() const = 0;
+
+  virtual bool needs_inactive_gc_locker() const {
+    // An inactive GC locker is needed in operations where we change the good
+    // mask or move objects. Changing the good mask will invalidate all oops,
+    // which makes it conceptually the same thing as moving all objects.
+    return false;
+  }
+
+  virtual bool do_operation() = 0;
+};
+
+// VM operation wrapper that runs a ZOperationClosure at a safepoint,
+// holding the Heap_lock across the operation. If the closure requires an
+// inactive GC locker and the locker is active, the operation bails out
+// and reports gc_locked() so the caller can retry.
+class VM_ZOperation : public VM_Operation {
+private:
+  ZOperationClosure* _cl;
+  uint               _gc_id;      // GC id captured at construction, reinstalled in doit()
+  bool               _gc_locked;  // Set if the operation was blocked by the GC locker
+  bool               _success;    // Result of the closure's do_operation()
+
+public:
+  VM_ZOperation(ZOperationClosure* cl) :
+      _cl(cl),
+      _gc_id(GCId::current()),
+      _gc_locked(false),
+      _success(false) {}
+
+  virtual VMOp_Type type() const {
+    return VMOp_ZOperation;
+  }
+
+  virtual const char* name() const {
+    return _cl->name();
+  }
+
+  // Take the Heap_lock before the safepoint; released in doit_epilogue().
+  virtual bool doit_prologue() {
+    Heap_lock->lock();
+    return true;
+  }
+
+  virtual void doit() {
+    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+
+    ZStatSample(ZSamplerJavaThreads, Threads::number_of_threads());
+
+    // JVMTI support
+    SvcGCMarker sgcm(SvcGCMarker::OTHER);
+
+    // Setup GC id
+    GCIdMark gcid(_gc_id);
+
+    if (_cl->needs_inactive_gc_locker() && GCLocker::check_active_before_gc()) {
+      // GC locker is active, bail out
+      _gc_locked = true;
+    } else {
+      // Execute operation
+      IsGCActiveMark mark;
+      _success = _cl->do_operation();
+    }
+  }
+
+  virtual void doit_epilogue() {
+    Heap_lock->unlock();
+  }
+
+  bool gc_locked() {
+    return _gc_locked;
+  }
+
+  bool success() const {
+    return _success;
+  }
+};
+
+// Pause Mark Start: flips the good mask (hence needs an inactive GC
+// locker), bumps the collection counter and starts marking.
+class ZMarkStartClosure : public ZOperationClosure {
+public:
+  virtual const char* name() const {
+    return "ZMarkStart";
+  }
+
+  virtual bool needs_inactive_gc_locker() const {
+    return true;
+  }
+
+  virtual bool do_operation() {
+    ZStatTimer timer(ZPhasePauseMarkStart);
+    ZServiceabilityMarkStartTracer tracer;
+
+    ZCollectedHeap::heap()->increment_total_collections(true /* full */);
+
+    ZHeap::heap()->mark_start();
+    return true;
+  }
+};
+
+// Pause Mark End: returns false if marking did not complete, in which
+// case the driver continues marking concurrently and retries.
+class ZMarkEndClosure : public ZOperationClosure {
+public:
+  virtual const char* name() const {
+    return "ZMarkEnd";
+  }
+
+  virtual bool do_operation() {
+    ZStatTimer timer(ZPhasePauseMarkEnd);
+    ZServiceabilityMarkEndTracer tracer;
+
+    return ZHeap::heap()->mark_end();
+  }
+};
+
+// Pause Relocate Start: begins moving objects (hence needs an inactive
+// GC locker).
+class ZRelocateStartClosure : public ZOperationClosure {
+public:
+  virtual const char* name() const {
+    return "ZRelocateStart";
+  }
+
+  virtual bool needs_inactive_gc_locker() const {
+    return true;
+  }
+
+  virtual bool do_operation() {
+    ZStatTimer timer(ZPhasePauseRelocateStart);
+    ZServiceabilityRelocateStartTracer tracer;
+
+    ZHeap::heap()->relocate_start();
+    return true;
+  }
+};
+
+// Start the driver thread, which executes GC cycles requested through
+// the GC cycle port.
+ZDriver::ZDriver() :
+    _gc_cycle_port(),
+    _gc_locker_port() {
+  set_name("ZDriver");
+  create_and_start();
+}
+
+// Execute the given closure as a VM operation, retrying if it was
+// blocked by an active GC locker. Returns the closure's success status.
+bool ZDriver::vm_operation(ZOperationClosure* cl) {
+  for (;;) {
+    VM_ZOperation op(cl);
+    VMThread::execute(&op);
+    if (op.gc_locked()) {
+      // Wait for GC to become unlocked and restart the VM operation
+      ZStatTimer timer(ZCriticalPhaseGCLockerStall);
+      _gc_locker_port.wait();
+      continue;
+    }
+
+    // Notify VM operation completed
+    _gc_locker_port.ack();
+
+    return op.success();
+  }
+}
+
+// Request a GC. Explicit/diagnostic causes block until the cycle has
+// completed; ZGC-internal and metadata-threshold causes return
+// immediately; _gc_locker only unblocks a stalled VM operation.
+void ZDriver::collect(GCCause::Cause cause) {
+  switch (cause) {
+  case GCCause::_wb_young_gc:
+  case GCCause::_wb_conc_mark:
+  case GCCause::_wb_full_gc:
+  case GCCause::_dcmd_gc_run:
+  case GCCause::_java_lang_system_gc:
+  case GCCause::_full_gc_alot:
+  case GCCause::_scavenge_alot:
+  case GCCause::_jvmti_force_gc:
+  case GCCause::_metadata_GC_clear_soft_refs:
+    // Start synchronous GC
+    _gc_cycle_port.send_sync(cause);
+    break;
+
+  case GCCause::_z_timer:
+  case GCCause::_z_warmup:
+  case GCCause::_z_allocation_rate:
+  case GCCause::_z_allocation_stall:
+  case GCCause::_z_proactive:
+  case GCCause::_metadata_GC_threshold:
+    // Start asynchronous GC
+    _gc_cycle_port.send_async(cause);
+    break;
+
+  case GCCause::_gc_locker:
+    // Restart VM operation previously blocked by the GC locker
+    _gc_locker_port.signal();
+    break;
+
+  default:
+    // Other causes not supported
+    fatal("Unsupported GC cause (%s)", GCCause::to_string(cause));
+    break;
+  }
+}
+
+// Block until a GC request arrives and return its cause.
+GCCause::Cause ZDriver::start_gc_cycle() {
+  // Wait for GC request
+  return _gc_cycle_port.receive();
+}
+
+// Scope that installs the soft reference clearing policy for one GC
+// cycle and, on exit, refreshes the heap info used by soft reference
+// clock updates.
+class ZSoftReferencePolicyScope : public StackObj {
+private:
+  bool should_clear_soft_reference(GCCause::Cause cause) const {
+    const bool clear = ZCollectedHeap::heap()->soft_ref_policy()->should_clear_all_soft_refs();
+
+    // Clear all soft references if the policy says so, or if
+    // the GC cause indicates that we're running low on memory.
+    return clear ||
+           cause == GCCause::_z_allocation_stall ||
+           cause == GCCause::_metadata_GC_clear_soft_refs;
+  }
+
+  // Reset the one-shot "clear all soft refs" flag now that it has been
+  // consumed for this cycle.
+  void clear_should_clear_soft_reference() const {
+    ZCollectedHeap::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false);
+  }
+
+public:
+  ZSoftReferencePolicyScope(GCCause::Cause cause) {
+    const bool clear = should_clear_soft_reference(cause);
+    ZHeap::heap()->set_soft_reference_policy(clear);
+    clear_should_clear_soft_reference();
+  }
+
+  ~ZSoftReferencePolicyScope() {
+    Universe::update_heap_info_at_gc();
+  }
+};
+
+// Scope wrapping one full GC cycle: installs GC id/cause and the soft
+// reference policy, times the cycle, configures worker-thread boosting,
+// and records cycle statistics on entry and exit.
+class ZDriverCycleScope : public StackObj {
+private:
+  GCIdMark                  _gc_id;
+  GCCauseSetter             _gc_cause_setter;
+  ZSoftReferencePolicyScope _soft_ref_policy;
+  ZStatTimer                _timer;
+
+  // Boost worker threads for explicit GCs and allocation stalls, where
+  // finishing the cycle quickly matters more than throughput.
+  bool should_boost_worker_threads(GCCause::Cause cause) const {
+    return cause == GCCause::_java_lang_system_gc ||
+           cause == GCCause::_z_allocation_stall;
+  }
+
+public:
+  ZDriverCycleScope(GCCause::Cause cause) :
+      _gc_id(),
+      _gc_cause_setter(ZCollectedHeap::heap(), cause),
+      _soft_ref_policy(cause),
+      _timer(ZPhaseCycle) {
+    // Update statistics
+    ZStatCycle::at_start();
+
+    // Set boost mode
+    const bool boost = should_boost_worker_threads(cause);
+    ZHeap::heap()->set_boost_worker_threads(boost);
+  }
+
+  ~ZDriverCycleScope() {
+    // Calculate boost factor (boosted vs. non-boosted worker count;
+    // 1.0 when boosting was not in effect)
+    const double boost_factor = (double)ZHeap::heap()->nconcurrent_worker_threads() /
+                                (double)ZHeap::heap()->nconcurrent_no_boost_worker_threads();
+
+    // Update statistics
+    ZStatCycle::at_end(boost_factor);
+  }
+};
+
+// Execute one complete GC cycle: three short pauses (mark start, mark
+// end, relocate start) interleaved with the concurrent phases. Mark end
+// is retried (with more concurrent marking) until marking terminates.
+void ZDriver::run_gc_cycle(GCCause::Cause cause) {
+  ZDriverCycleScope scope(cause);
+
+  // Phase 1: Pause Mark Start
+  {
+    ZMarkStartClosure cl;
+    vm_operation(&cl);
+  }
+
+  // Phase 2: Concurrent Mark
+  {
+    ZStatTimer timer(ZPhaseConcurrentMark);
+    ZHeap::heap()->mark();
+  }
+
+  // Phase 3: Pause Mark End
+  {
+    ZMarkEndClosure cl;
+    while (!vm_operation(&cl)) {
+      // Phase 3.5: Concurrent Mark Continue
+      ZStatTimer timer(ZPhaseConcurrentMarkContinue);
+      ZHeap::heap()->mark();
+    }
+  }
+
+  // Phase 4: Concurrent Process Non-Strong References
+  {
+    ZStatTimer timer(ZPhaseConcurrentProcessNonStrongReferences);
+    ZHeap::heap()->process_non_strong_references();
+  }
+
+  // Phase 5: Concurrent Reset Relocation Set
+  {
+    ZStatTimer timer(ZPhaseConcurrentResetRelocationSet);
+    ZHeap::heap()->reset_relocation_set();
+  }
+
+  // Phase 6: Concurrent Destroy Detached Pages
+  {
+    ZStatTimer timer(ZPhaseConcurrentDestroyDetachedPages);
+    ZHeap::heap()->destroy_detached_pages();
+  }
+
+  // Phase 7: Concurrent Select Relocation Set
+  {
+    ZStatTimer timer(ZPhaseConcurrentSelectRelocationSet);
+    ZHeap::heap()->select_relocation_set();
+  }
+
+  // Phase 8: Prepare Relocation Set
+  {
+    ZStatTimer timer(ZPhaseConcurrentPrepareRelocationSet);
+    ZHeap::heap()->prepare_relocation_set();
+  }
+
+  // Phase 9: Pause Relocate Start
+  {
+    ZRelocateStartClosure cl;
+    vm_operation(&cl);
+  }
+
+  // Phase 10: Concurrent Relocate
+  {
+    ZStatTimer timer(ZPhaseConcurrentRelocated);
+    ZHeap::heap()->relocate();
+  }
+}
+
+// Finish a cycle: wake any synchronous requester waiting on the cycle
+// port, then re-check whether stalled allocations must OOM.
+void ZDriver::end_gc_cycle() {
+  // Notify GC cycle completed
+  _gc_cycle_port.ack();
+
+  // Check for out of memory condition
+  ZHeap::heap()->check_out_of_memory();
+}
+
+// Driver main loop: run one GC cycle per request until terminated.
+void ZDriver::run_service() {
+  // Main loop
+  while (!should_terminate()) {
+    const GCCause::Cause cause = start_gc_cycle();
+    if (cause != GCCause::_no_gc) {
+      run_gc_cycle(cause);
+      end_gc_cycle();
+    }
+  }
+}
+
+// Unblock the main loop with a dummy (_no_gc) request so it can observe
+// the termination flag.
+void ZDriver::stop_service() {
+  _gc_cycle_port.send_async(GCCause::_no_gc);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zDriver.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZDRIVER_HPP
+#define SHARE_GC_Z_ZDRIVER_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "gc/z/zMessagePort.hpp"
+
+class ZOperationClosure;
+
+class ZDriver : public ConcurrentGCThread {
+private:
+  ZMessagePort<GCCause::Cause> _gc_cycle_port;
+  ZRendezvousPort              _gc_locker_port;
+
+  bool vm_operation(ZOperationClosure* cl);
+
+  GCCause::Cause start_gc_cycle();
+  void run_gc_cycle(GCCause::Cause cause);
+  void end_gc_cycle();
+
+protected:
+  virtual void run_service();
+  virtual void stop_service();
+
+public:
+  ZDriver();
+
+  void collect(GCCause::Cause cause);
+};
+
+#endif // SHARE_GC_Z_ZDRIVER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zErrno.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zErrno.hpp"
+
+#include <errno.h>
+#include <string.h>
+
+ZErrno::ZErrno() :
+    _error(errno) {}
+
+ZErrno::ZErrno(int error) :
+    _error(error) {}
+
+ZErrno::operator bool() const {
+  return _error != 0;
+}
+
+bool ZErrno::operator==(int error) const {
+  return _error == error;
+}
+
+bool ZErrno::operator!=(int error) const {
+  return _error != error;
+}
+
+const char* ZErrno::to_string() const {
+  return strerror(_error);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zErrno.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZERRNO_HPP
+#define SHARE_GC_Z_ZERRNO_HPP
+
+#include "memory/allocation.hpp"
+
+class ZErrno : public StackObj {
+private:
+  const int _error;
+
+public:
+  ZErrno();
+  ZErrno(int error);
+
+  operator bool() const;
+  bool operator==(int error) const;
+  bool operator!=(int error) const;
+  const char* to_string() const;
+};
+
+#endif // SHARE_GC_Z_ZERRNO_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zForwardingTable.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zForwardingTable.inline.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "memory/allocation.inline.hpp"
+#include "utilities/debug.hpp"
+
+void ZForwardingTable::setup(size_t live_objects) {
+  assert(is_null(), "Should be empty");
+  assert(live_objects > 0, "Invalid size");
+
+  // Allocate table for linear probing. The size of the table must be
+  // a power of two to allow for quick and inexpensive indexing/masking.
+  // The table is sized to have a load factor of 50%, i.e. sized to have
+  // double the number of entries actually inserted.
+  _size = ZUtils::round_up_power_of_2(live_objects * 2);
+  _table = MallocArrayAllocator<ZForwardingTableEntry>::allocate(_size, mtGC);
+
+  // Clear table
+  memset(_table, ZForwardingTableEntry::empty(), _size * sizeof(ZForwardingTableEntry));
+}
+
+void ZForwardingTable::reset() {
+  // Free table
+  MallocArrayAllocator<ZForwardingTableEntry>::free(_table);
+  _table = NULL;
+  _size = 0;
+}
+
+void ZForwardingTable::verify(size_t object_max_count, size_t live_objects) const {
+  size_t count = 0;
+
+  for (size_t i = 0; i < _size; i++) {
+    const ZForwardingTableEntry entry = _table[i];
+    if (entry.is_empty()) {
+      // Skip empty entries
+      continue;
+    }
+
+    // Check from index
+    guarantee(entry.from_index() < object_max_count, "Invalid from index");
+
+    // Check for duplicates
+    for (size_t j = i + 1; j < _size; j++) {
+      const ZForwardingTableEntry other = _table[j];
+      guarantee(entry.from_index() != other.from_index(), "Duplicate from");
+      guarantee(entry.to_offset() != other.to_offset(), "Duplicate to");
+    }
+
+    count++;
+  }
+
+  // Check number of non-null entries
+  guarantee(live_objects == count, "Count mismatch");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zForwardingTable.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZFORWARDING_HPP
+#define SHARE_GC_Z_ZFORWARDING_HPP
+
+#include "gc/z/zForwardingTableEntry.hpp"
+#include "memory/allocation.hpp"
+
+typedef size_t ZForwardingTableCursor;
+
+class ZForwardingTable {
+  friend class VMStructs;
+  friend class ZForwardingTableTest;
+
+private:
+  ZForwardingTableEntry* _table;
+  size_t                 _size;
+
+  ZForwardingTableEntry at(ZForwardingTableCursor* cursor) const;
+  ZForwardingTableEntry first(uintptr_t from_index, ZForwardingTableCursor* cursor) const;
+  ZForwardingTableEntry next(ZForwardingTableCursor* cursor) const;
+
+public:
+  ZForwardingTable();
+  ~ZForwardingTable();
+
+  bool is_null() const;
+  void setup(size_t live_objects);
+  void reset();
+
+  ZForwardingTableEntry find(uintptr_t from_index) const;
+  ZForwardingTableEntry find(uintptr_t from_index, ZForwardingTableCursor* cursor) const;
+  uintptr_t insert(uintptr_t from_index, uintptr_t to_offset, ZForwardingTableCursor* cursor);
+
+  void verify(size_t object_max_count, size_t live_objects) const;
+};
+
+#endif // SHARE_GC_Z_ZFORWARDING_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zForwardingTable.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZFORWARDING_INLINE_HPP
+#define SHARE_GC_Z_ZFORWARDING_INLINE_HPP
+
+#include "gc/z/zForwardingTable.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHash.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/debug.hpp"
+
+inline ZForwardingTable::ZForwardingTable() :
+    _table(NULL),
+    _size(0) {}
+
+inline ZForwardingTable::~ZForwardingTable() {
+  assert(is_null(), "Should be empty");
+}
+
+inline ZForwardingTableEntry ZForwardingTable::at(ZForwardingTableCursor* cursor) const {
+  return _table[*cursor];
+}
+
+inline ZForwardingTableEntry ZForwardingTable::first(uintptr_t from_index, ZForwardingTableCursor* cursor) const {
+  const size_t mask = _size - 1;
+  const size_t hash = ZHash::uint32_to_uint32((uint32_t)from_index);
+  *cursor = hash & mask;
+  return at(cursor);
+}
+
+inline ZForwardingTableEntry ZForwardingTable::next(ZForwardingTableCursor* cursor) const {
+  const size_t mask = _size - 1;
+  *cursor = (*cursor + 1) & mask;
+  return at(cursor);
+}
+
+inline bool ZForwardingTable::is_null() const {
+  return _table == NULL;
+}
+
+inline ZForwardingTableEntry ZForwardingTable::find(uintptr_t from_index) const {
+  ZForwardingTableCursor dummy;
+  return find(from_index, &dummy);
+}
+
+inline ZForwardingTableEntry ZForwardingTable::find(uintptr_t from_index, ZForwardingTableCursor* cursor) const {
+  // Reading entries in the table races with the atomic cas done for
+  // insertion into the table. This is safe because each entry is at
+  // most updated once (from -1 to something else).
+  ZForwardingTableEntry entry = first(from_index, cursor);
+  while (!entry.is_empty()) {
+    if (entry.from_index() == from_index) {
+      // Match found, return matching entry
+      return entry;
+    }
+
+    entry = next(cursor);
+  }
+
+  // Match not found, return empty entry
+  return entry;
+}
+
+inline uintptr_t ZForwardingTable::insert(uintptr_t from_index, uintptr_t to_offset, ZForwardingTableCursor* cursor) {
+  const ZForwardingTableEntry new_entry(from_index, to_offset);
+  const ZForwardingTableEntry old_entry; // empty
+
+  for (;;) {
+    const ZForwardingTableEntry prev_entry = Atomic::cmpxchg(new_entry, _table + *cursor, old_entry);
+    if (prev_entry.is_empty()) {
+      // Success
+      return to_offset;
+    }
+
+    // Find next empty or matching entry
+    ZForwardingTableEntry entry = at(cursor);
+    while (!entry.is_empty()) {
+      if (entry.from_index() == from_index) {
+        // Match found, return already inserted address
+        return entry.to_offset();
+      }
+
+      entry = next(cursor);
+    }
+  }
+}
+
+#endif // SHARE_GC_Z_ZFORWARDING_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zForwardingTableEntry.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZFORWARDINGTABLEENTRY_HPP
+#define SHARE_GC_Z_ZFORWARDINGTABLEENTRY_HPP
+
+#include "gc/z/zBitField.hpp"
+#include "memory/allocation.hpp"
+#include "metaprogramming/primitiveConversions.hpp"
+
+//
+// Forwarding table entry layout
+// -----------------------------
+//
+//   6                      4 4                                             0
+//   3                      2 1                                             0
+//  +------------------------+-----------------------------------------------+
+//  |11111111 11111111 111111|11 11111111 11111111 11111111 11111111 11111111|
+//  +------------------------+-----------------------------------------------+
+//  |                        |
+//  |                        * 41-0 To Object Offset (42-bits)
+//  |
+//  * 63-42 From Object Index (22-bits)
+//
+
+class ZForwardingTableEntry {
+  friend struct PrimitiveConversions;
+
+private:
+  typedef ZBitField<uint64_t, size_t, 0,  42> field_to_offset;
+  typedef ZBitField<uint64_t, size_t, 42, 22> field_from_index;
+
+  uint64_t _entry;
+
+public:
+  ZForwardingTableEntry() :
+      _entry(empty()) {}
+
+  ZForwardingTableEntry(size_t from_index, size_t to_offset) :
+      _entry(field_from_index::encode(from_index) |
+             field_to_offset::encode(to_offset)) {}
+
+  static uintptr_t empty() {
+    return (uintptr_t)-1;
+  }
+
+  bool is_empty() const {
+    return _entry == empty();
+  }
+
+  size_t to_offset() const {
+    return field_to_offset::decode(_entry);
+  }
+
+  size_t from_index() const {
+    return field_from_index::decode(_entry);
+  }
+};
+
+// Needed to allow atomic operations on ZForwardingTableEntry
+template <>
+struct PrimitiveConversions::Translate<ZForwardingTableEntry> : public TrueType {
+  typedef ZForwardingTableEntry Value;
+  typedef uint64_t              Decayed;
+
+  static Decayed decay(Value v) {
+    return v._entry;
+  }
+
+  static Value recover(Decayed d) {
+    ZForwardingTableEntry entry;
+    entry._entry = d;
+    return entry;
+  }
+};
+
+#endif // SHARE_GC_Z_ZFORWARDINGTABLEENTRY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zFuture.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZFUTURE_HPP
+#define SHARE_GC_Z_ZFUTURE_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/semaphore.hpp"
+
+template <typename T>
+class ZFuture {
+private:
+  Semaphore _sema;
+  T         _value;
+
+public:
+  void set(T value);
+  T get();
+};
+
+#endif // SHARE_GC_Z_ZFUTURE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zFuture.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZFUTURE_INLINE_HPP
+#define SHARE_GC_Z_ZFUTURE_INLINE_HPP
+
+#include "gc/z/zFuture.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/semaphore.inline.hpp"
+#include "runtime/thread.hpp"
+
+template <typename T>
+inline void ZFuture<T>::set(T value) {
+  // Set value
+  _value = value;
+
+  // Notify waiter
+  _sema.signal();
+}
+
+template <typename T>
+inline T ZFuture<T>::get() {
+  // Wait for notification
+  Thread* const thread = Thread::current();
+  if (thread->is_Java_thread()) {
+    _sema.wait_with_safepoint_check((JavaThread*)thread);
+  } else {
+    _sema.wait();
+  }
+
+  // Return value
+  return _value;
+}
+
+#endif // SHARE_GC_Z_ZFUTURE_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zGlobals.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
+
+uint32_t ZGlobalPhase                 = ZPhaseRelocate;
+uint32_t ZGlobalSeqNum                = 1;
+
+const int& ZObjectAlignmentSmallShift = LogMinObjAlignmentInBytes;
+const int& ZObjectAlignmentSmall      = MinObjAlignmentInBytes;
+
+uintptr_t ZAddressGoodMask;
+uintptr_t ZAddressBadMask             = 0;
+uintptr_t ZAddressWeakBadMask;
+
+uintptr_t ZAddressMetadataMarked;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zGlobals.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZGLOBALS_HPP
+#define SHARE_GC_Z_ZGLOBALS_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#include OS_CPU_HEADER(gc/z/zGlobals)
+
+// Collector name
+const char* const ZGCName                       = "The Z Garbage Collector";
+
+// Global phase state
+extern uint32_t   ZGlobalPhase;
+const uint32_t    ZPhaseMark                    = 0;
+const uint32_t    ZPhaseMarkCompleted           = 1;
+const uint32_t    ZPhaseRelocate                = 2;
+
+// Global sequence number
+extern uint32_t   ZGlobalSeqNum;
+
+// Page types
+const uint8_t     ZPageTypeSmall                = 0;
+const uint8_t     ZPageTypeMedium               = 1;
+const uint8_t     ZPageTypeLarge                = 2;
+
+// Page size shifts
+const size_t      ZPageSizeSmallShift           = ZPlatformPageSizeSmallShift;
+const size_t      ZPageSizeMediumShift          = ZPageSizeSmallShift + 4;
+const size_t      ZPageSizeMinShift             = ZPageSizeSmallShift;
+
+// Page sizes
+const size_t      ZPageSizeSmall                = (size_t)1 << ZPageSizeSmallShift;
+const size_t      ZPageSizeMedium               = (size_t)1 << ZPageSizeMediumShift;
+const size_t      ZPageSizeMin                  = (size_t)1 << ZPageSizeMinShift;
+
+// Object size limits
+const size_t      ZObjectSizeLimitSmall         = (ZPageSizeSmall / 8);  // Allow 12.5% waste
+const size_t      ZObjectSizeLimitMedium        = (ZPageSizeMedium / 8); // Allow 12.5% waste
+
+// Object alignment shifts
+extern const int& ZObjectAlignmentSmallShift;
+const int         ZObjectAlignmentMediumShift   = ZPageSizeMediumShift - 13; // 8192 objects per page
+const int         ZObjectAlignmentLargeShift    = ZPageSizeSmallShift;
+
+// Object alignments
+extern const int& ZObjectAlignmentSmall;
+const int         ZObjectAlignmentMedium        = 1 << ZObjectAlignmentMediumShift;
+const int         ZObjectAlignmentLarge         = 1 << ZObjectAlignmentLargeShift;
+
+// Pointer part of address
+const uintptr_t   ZAddressOffsetShift           = 0;
+const uintptr_t   ZAddressOffsetBits            = ZPlatformAddressOffsetBits;
+const uintptr_t   ZAddressOffsetMask            = (((uintptr_t)1 << ZAddressOffsetBits) - 1) << ZAddressOffsetShift;
+const size_t      ZAddressOffsetMax             = (uintptr_t)1 << ZAddressOffsetBits;
+
+// Metadata part of address
+const uintptr_t   ZAddressMetadataShift         = ZPlatformAddressMetadataShift;
+const uintptr_t   ZAddressMetadataBits          = 4;
+const uintptr_t   ZAddressMetadataMask          = (((uintptr_t)1 << ZAddressMetadataBits) - 1) << ZAddressMetadataShift;
+
+// Metadata types
+const uintptr_t   ZAddressMetadataMarked0       = (uintptr_t)1 << (ZAddressMetadataShift + 0);
+const uintptr_t   ZAddressMetadataMarked1       = (uintptr_t)1 << (ZAddressMetadataShift + 1);
+const uintptr_t   ZAddressMetadataRemapped      = (uintptr_t)1 << (ZAddressMetadataShift + 2);
+const uintptr_t   ZAddressMetadataFinalizable   = (uintptr_t)1 << (ZAddressMetadataShift + 3);
+
+// Address space start/end/size
+const uintptr_t   ZAddressSpaceStart            = ZPlatformAddressSpaceStart;
+const uintptr_t   ZAddressSpaceSize             = ZPlatformAddressSpaceSize;
+const uintptr_t   ZAddressSpaceEnd              = ZAddressSpaceStart + ZAddressSpaceSize;
+
+// Cache line size
+const size_t      ZCacheLineSize                = ZPlatformCacheLineSize;
+
+// Reserved start/end
+uintptr_t ZAddressReservedStart();
+uintptr_t ZAddressReservedEnd();
+
+//
+// Good/Bad mask states
+// --------------------
+//
+//                 GoodMask         BadMask          WeakGoodMask     WeakBadMask
+//                 --------------------------------------------------------------
+//  Marked0        001              110              101              010
+//  Marked1        010              101              110              001
+//  Remapped       100              011              100              011
+//
+
+// Good/bad masks
+extern uintptr_t  ZAddressGoodMask;
+extern uintptr_t  ZAddressBadMask;
+extern uintptr_t  ZAddressWeakBadMask;
+
+// Marked state
+extern uintptr_t  ZAddressMetadataMarked;
+
+// Address space for mark stack allocations
+const size_t      ZMarkStackSpaceSizeShift      = 40; // 1TB
+const size_t      ZMarkStackSpaceSize           = (size_t)1 << ZMarkStackSpaceSizeShift;
+const uintptr_t   ZMarkStackSpaceStart          = ZAddressSpaceEnd + ZMarkStackSpaceSize;
+const uintptr_t   ZMarkStackSpaceEnd            = ZMarkStackSpaceStart + ZMarkStackSpaceSize;
+const size_t      ZMarkStackSpaceExpandSize     = (size_t)1 << 25; // 32M
+
+// Mark stack and magazine sizes
+const size_t      ZMarkStackSizeShift           = 11; // 2K
+const size_t      ZMarkStackSize                = (size_t)1 << ZMarkStackSizeShift;
+const size_t      ZMarkStackHeaderSize          = (size_t)1 << 4; // 16B
+const size_t      ZMarkStackSlots               = (ZMarkStackSize - ZMarkStackHeaderSize) / sizeof(uintptr_t);
+const size_t      ZMarkStackMagazineSize        = (size_t)1 << 15; // 32K
+const size_t      ZMarkStackMagazineSlots       = (ZMarkStackMagazineSize / ZMarkStackSize) - 1;
+
+// Mark stripe size
+const size_t      ZMarkStripeShift              = ZPageSizeMinShift;
+
+// Max number of mark stripes
+const size_t      ZMarkStripesMax               = 16; // Must be a power of two
+
+// Mark cache size
+const size_t      ZMarkCacheSize                = 1024; // Must be a power of two
+
+// Partial array minimum size
+const size_t      ZMarkPartialArrayMinSizeShift = 12; // 4K
+const size_t      ZMarkPartialArrayMinSize      = (size_t)1 << ZMarkPartialArrayMinSizeShift;
+
+// Max number of proactive/terminate flush attempts
+const size_t      ZMarkProactiveFlushMax        = 10;
+const size_t      ZMarkTerminateFlushMax        = 3;
+
+// Try complete mark timeout
+const uint64_t    ZMarkCompleteTimeout          = 1; // ms
+
+#endif // SHARE_GC_Z_ZGLOBALS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zHash.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZHASH_HPP
+#define SHARE_GC_Z_ZHASH_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class ZHash : public AllStatic {
+public:
+  static uint32_t uint32_to_uint32(uint32_t key);
+  static uint32_t address_to_uint32(uintptr_t key);
+};
+
+#endif // SHARE_GC_Z_ZHASH_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zHash.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZHASH_INLINE_HPP
+#define SHARE_GC_Z_ZHASH_INLINE_HPP
+
+#include "gc/z/zHash.hpp"
+
+inline uint32_t ZHash::uint32_to_uint32(uint32_t key) {  // 32-bit integer bit-mixing hash (shift/xor/multiply avalanche)
+  key = ~key + (key << 15);
+  key = key ^ (key >> 12);
+  key = key + (key << 2);
+  key = key ^ (key >> 4);
+  key = key * 2057;
+  key = key ^ (key >> 16);
+  return key;
+}
+
+inline uint32_t ZHash::address_to_uint32(uintptr_t key) {
+  return uint32_to_uint32((uint32_t)(key >> 3));  // Discard low 3 bits (presumably object alignment — verify) before truncating to 32 bits
+}
+
+#endif // SHARE_GC_Z_ZHASH_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zHeap.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/oopStorage.hpp"
+#include "gc/z/zAddress.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zHeapIterator.hpp"
+#include "gc/z/zList.inline.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zMark.inline.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPageTable.inline.hpp"
+#include "gc/z/zRelocationSet.inline.hpp"
+#include "gc/z/zResurrection.hpp"
+#include "gc/z/zRootsIterator.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zTask.hpp"
+#include "gc/z/zThread.hpp"
+#include "gc/z/zTracer.inline.hpp"
+#include "gc/z/zVirtualMemory.inline.hpp"
+#include "gc/z/zWorkers.inline.hpp"
+#include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+static const ZStatSampler  ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
+static const ZStatSampler  ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
+static const ZStatSampler  ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
+static const ZStatSampler  ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
+static const ZStatCounter  ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
+static const ZStatCounter  ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
+
+ZHeap* ZHeap::_heap = NULL;  // Singleton instance, installed by the ZHeap constructor
+
+ZHeap::ZHeap() :
+    _workers(),
+    _object_allocator(_workers.nworkers()),
+    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
+    _pagetable(),
+    _mark(&_workers, &_pagetable),
+    _reference_processor(&_workers),
+    _weak_roots_processor(&_workers),
+    _relocate(&_workers),
+    _relocation_set(),
+    _serviceability(heap_min_size(), heap_max_size()) {
+  // Install global heap instance
+  assert(_heap == NULL, "Already initialized");
+  _heap = this;
+
+  // Update statistics
+  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
+}
+
+size_t ZHeap::heap_min_size() const {
+  const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);  // Round -Xms up to min page size
+  return MIN2(aligned_min_size, heap_max_size());  // Never larger than the max heap size
+}
+
+size_t ZHeap::heap_max_size() const {
+  const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);  // Round -Xmx up to min page size
+  return MIN2(aligned_max_size, ZAddressOffsetMax);  // Capped by the addressable heap offset range
+}
+
+size_t ZHeap::heap_max_reserve_size() const {
+  // Reserve one small page per worker plus one shared medium page. This is still just
+  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
+  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
+  return MIN2(max_reserve_size, heap_max_size());
+}
+
+bool ZHeap::is_initialized() const {  // Heap metrics below simply delegate to the page/object allocators
+  return _page_allocator.is_initialized();
+}
+
+size_t ZHeap::min_capacity() const {
+  return heap_min_size();
+}
+
+size_t ZHeap::max_capacity() const {
+  return _page_allocator.max_capacity();
+}
+
+size_t ZHeap::capacity() const {
+  return _page_allocator.capacity();
+}
+
+size_t ZHeap::max_reserve() const {
+  return _page_allocator.max_reserve();
+}
+
+size_t ZHeap::used_high() const {
+  return _page_allocator.used_high();
+}
+
+size_t ZHeap::used_low() const {
+  return _page_allocator.used_low();
+}
+
+size_t ZHeap::used() const {
+  return _page_allocator.used();
+}
+
+size_t ZHeap::allocated() const {
+  return _page_allocator.allocated();
+}
+
+size_t ZHeap::reclaimed() const {
+  return _page_allocator.reclaimed();
+}
+
+size_t ZHeap::tlab_capacity() const {
+  return capacity();
+}
+
+size_t ZHeap::tlab_used() const {
+  return _object_allocator.used();
+}
+
+size_t ZHeap::max_tlab_size() const {
+  return ZObjectSizeLimitSmall;  // A TLAB never exceeds the small-page object size limit
+}
+
+size_t ZHeap::unsafe_max_tlab_alloc() const {
+  size_t size = _object_allocator.remaining();
+
+  if (size < MinTLABSize) {
+    // The remaining space in the allocator is not enough to
+    // fit the smallest possible TLAB. This means that the next
+    // TLAB allocation will force the allocator to get a new
+    // backing page anyway, which in turn means that we can then
+    // fit the largest possible TLAB.
+    size = max_tlab_size();
+  }
+
+  return MIN2(size, max_tlab_size());
+}
+
+bool ZHeap::is_in(uintptr_t addr) const {
+  if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
+    return false;  // Outside the reserved address range
+  }
+
+  const ZPage* const page = _pagetable.get(addr);
+  if (page != NULL) {
+    return page->is_in(addr);
+  }
+
+  return false;  // No page maps this address
+}
+
+uintptr_t ZHeap::block_start(uintptr_t addr) const {  // NOTE(review): no NULL check on page, unlike is_in() — callers must pass an address inside the heap
+  const ZPage* const page = _pagetable.get(addr);
+  return page->block_start(addr);
+}
+
+size_t ZHeap::block_size(uintptr_t addr) const {
+  const ZPage* const page = _pagetable.get(addr);
+  return page->block_size(addr);
+}
+
+bool ZHeap::block_is_obj(uintptr_t addr) const {
+  const ZPage* const page = _pagetable.get(addr);
+  return page->block_is_obj(addr);
+}
+
+uint ZHeap::nconcurrent_worker_threads() const {  // Worker-thread queries/operations delegate to ZWorkers
+  return _workers.nconcurrent();
+}
+
+uint ZHeap::nconcurrent_no_boost_worker_threads() const {
+  return _workers.nconcurrent_no_boost();
+}
+
+void ZHeap::set_boost_worker_threads(bool boost) {
+  _workers.set_boost(boost);
+}
+
+void ZHeap::worker_threads_do(ThreadClosure* tc) const {
+  _workers.threads_do(tc);
+}
+
+void ZHeap::print_worker_threads_on(outputStream* st) const {
+  _workers.print_threads_on(st);
+}
+
+void ZHeap::out_of_memory() {  // Records and logs an OOM event; does not throw — the caller sees a 0 address
+  ResourceMark rm;
+
+  ZStatInc(ZCounterOutOfMemory);
+  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
+}
+
+ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
+  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
+  if (page != NULL) {
+    // Update pagetable
+    _pagetable.insert(page);
+  }
+
+  return page;  // NULL on allocation failure
+}
+
+void ZHeap::undo_alloc_page(ZPage* page) {
+  assert(page->is_allocating(), "Invalid page state");
+
+  ZStatInc(ZCounterUndoPageAllocation);
+  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
+                ZThread::id(), ZThread::name(), p2i(page), page->size());
+
+  release_page(page, false /* reclaimed */);
+}
+
+bool ZHeap::retain_page(ZPage* page) {  // True if the refcount was successfully incremented
+  return page->inc_refcount();
+}
+
+void ZHeap::release_page(ZPage* page, bool reclaimed) {
+  if (page->dec_refcount()) {
+    _page_allocator.free_page(page, reclaimed);  // Last reference dropped — return page to the allocator
+  }
+}
+
+void ZHeap::flip_views() {
+  // For debugging only
+  if (ZUnmapBadViews) {
+    // Flip pages
+    ZPageTableIterator iter(&_pagetable);
+    for (ZPage* page; iter.next(&page);) {
+      if (!page->is_detached()) {
+        _page_allocator.flip_page(page);
+      }
+    }
+
+    // Flip pre-mapped memory
+    _page_allocator.flip_pre_mapped();
+  }
+}
+
+void ZHeap::mark_start() {  // Pause Mark Start: must run in a safepoint
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+
+  // Update statistics
+  ZStatSample(ZSamplerHeapUsedBeforeMark, used());
+
+  // Retire TLABs
+  _object_allocator.retire_tlabs();
+
+  // Flip address view
+  ZAddressMasks::flip_to_marked();
+  flip_views();
+
+  // Reset allocated/reclaimed/used statistics
+  _page_allocator.reset_statistics();
+
+  // Reset encountered/dropped/enqueued statistics
+  _reference_processor.reset_statistics();
+
+  // Enter mark phase
+  ZGlobalPhase = ZPhaseMark;
+
+  // Reset marking information and mark roots
+  _mark.start();
+
+  // Update statistics
+  ZStatHeap::set_at_mark_start(capacity(), used());
+}
+
+void ZHeap::mark() {  // Concurrent marking
+  _mark.mark();
+}
+
+void ZHeap::mark_flush_and_free(Thread* thread) {  // Flush/free the given thread's local mark stacks
+  _mark.flush_and_free(thread);
+}
+
+class ZFixupPartialLoadsTask : public ZTask {  // Re-marks oops found on thread stacks (see fixup_partial_loads below)
+private:
+  ZThreadRootsIterator _thread_roots;
+
+public:
+  ZFixupPartialLoadsTask() :
+      ZTask("ZFixupPartialLoadsTask"),
+      _thread_roots() {}
+
+  virtual void work() {
+    ZMarkRootOopClosure cl;
+    _thread_roots.oops_do(&cl);
+  }
+};
+
+void ZHeap::fixup_partial_loads() {
+  ZFixupPartialLoadsTask task;
+  _workers.run_parallel(&task);
+}
+
+bool ZHeap::mark_end() {  // Pause Mark End: returns false if marking must continue concurrently
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+
+  // C2 can generate code where a safepoint poll is inserted
+  // between a load and the associated load barrier. To handle
+  // this case we need to rescan the thread stack here to make
+  // sure such oops are marked.
+  fixup_partial_loads();
+
+  // Try end marking
+  if (!_mark.end()) {
+    // Marking not completed, continue concurrent mark
+    return false;
+  }
+
+  // Enter mark completed phase
+  ZGlobalPhase = ZPhaseMarkCompleted;
+
+  // Resize metaspace
+  MetaspaceGC::compute_new_size();
+
+  // Update statistics
+  ZStatSample(ZSamplerHeapUsedAfterMark, used());
+  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
+
+  // Block resurrection of weak/phantom references
+  ZResurrection::block();
+
+  // Process weak roots
+  _weak_roots_processor.process_weak_roots();
+
+  // Verification
+  if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
+    Universe::verify();
+  }
+
+  return true;
+}
+
+void ZHeap::set_soft_reference_policy(bool clear) {  // clear == true forces SoftReferences to be cleared this cycle
+  _reference_processor.set_soft_reference_policy(clear);
+}
+
+void ZHeap::process_non_strong_references() {
+  // Process Soft/Weak/Final/PhantomReferences
+  _reference_processor.process_references();
+
+  // Process concurrent weak roots
+  _weak_roots_processor.process_concurrent_weak_roots();
+
+  // Unblock resurrection of weak/phantom references
+  ZResurrection::unblock();
+
+  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
+  // must be done after unblocking resurrection. Otherwise the
+  // Finalizer thread could call Reference.get() on the Finalizers
+  // that were just enqueued, which would incorrectly return null
+  // during the resurrection block window, since such referents
+  // are only Finalizable marked.
+  _reference_processor.enqueue_references();
+}
+
+void ZHeap::destroy_detached_pages() {  // Flushes and deletes pages detached by the page allocator
+  ZList<ZPage> list;
+
+  _page_allocator.flush_detached_pages(&list);
+
+  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
+    // Remove pagetable entry
+    _pagetable.remove(page);
+
+    // Delete the page
+    _page_allocator.destroy_page(page);
+  }
+}
+
+void ZHeap::select_relocation_set() {  // Partitions relocatable pages into live (candidates) and garbage (freed now)
+  // Register relocatable pages with selector
+  ZRelocationSetSelector selector;
+  ZPageTableIterator iter(&_pagetable);
+  for (ZPage* page; iter.next(&page);) {
+    if (!page->is_relocatable()) {
+      // Not relocatable, don't register
+      continue;
+    }
+
+    if (page->is_marked()) {
+      // Register live page
+      selector.register_live_page(page);
+    } else {
+      // Register garbage page
+      selector.register_garbage_page(page);
+
+      // Reclaim page immediately
+      release_page(page, true /* reclaimed */);
+    }
+  }
+
+  // Select pages to relocate
+  selector.select(&_relocation_set);
+
+  // Update statistics
+  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
+  ZStatHeap::set_at_select_relocation_set(selector.live(),
+                                          selector.garbage(),
+                                          reclaimed());
+}
+
+void ZHeap::prepare_relocation_set() {
+  ZRelocationSetIterator iter(&_relocation_set);
+  for (ZPage* page; iter.next(&page);) {
+    // Prepare for relocation
+    page->set_forwarding();
+
+    // Update pagetable
+    _pagetable.set_relocating(page);
+  }
+}
+
+void ZHeap::reset_relocation_set() {  // Inverse of prepare_relocation_set()
+  ZRelocationSetIterator iter(&_relocation_set);
+  for (ZPage* page; iter.next(&page);) {
+    // Reset relocation information
+    page->reset_forwarding();
+
+    // Update pagetable
+    _pagetable.clear_relocating(page);
+  }
+}
+
+void ZHeap::relocate_start() {  // Pause Relocate Start: must run in a safepoint
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+
+  // Update statistics
+  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
+
+  // Flip address view
+  ZAddressMasks::flip_to_remapped();
+  flip_views();
+
+  // Remap TLABs
+  _object_allocator.remap_tlabs();
+
+  // Enter relocate phase
+  ZGlobalPhase = ZPhaseRelocate;
+
+  // Update statistics
+  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());
+
+  // Remap/Relocate roots
+  _relocate.start();
+}
+
+uintptr_t ZHeap::relocate_object(uintptr_t addr) {  // Relocates the object at addr, returning its new address
+  assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
+  ZPage* const page = _pagetable.get(addr);
+  const bool retained = retain_page(page);
+  const uintptr_t new_addr = page->relocate_object(addr);
+  if (retained) {
+    release_page(page, true /* reclaimed */);  // Only release if we successfully retained above
+  }
+
+  return new_addr;
+}
+
+uintptr_t ZHeap::forward_object(uintptr_t addr) {  // Looks up the forwarded address without relocating
+  assert(ZGlobalPhase == ZPhaseMark ||
+         ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed");
+  ZPage* const page = _pagetable.get(addr);
+  return page->forward_object(addr);
+}
+
+void ZHeap::relocate() {  // Concurrent relocation of the selected relocation set
+  // Relocate relocation set
+  const bool success = _relocate.relocate(&_relocation_set);
+
+  // Update statistics
+  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
+  ZStatRelocation::set_at_relocate_end(success);
+  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
+                                 used(), used_high(), used_low());
+}
+
+void ZHeap::object_iterate(ObjectClosure* cl) {  // Applies cl to every reachable object
+  // Should only be called in a safepoint after mark end.
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+
+  ZHeapIterator iter;
+  iter.objects_do(cl);
+}
+
+void ZHeap::serviceability_initialize() {  // Serviceability accessors delegate to ZServiceability
+  _serviceability.initialize();
+}
+
+GCMemoryManager* ZHeap::serviceability_memory_manager() {
+  return _serviceability.memory_manager();
+}
+
+MemoryPool* ZHeap::serviceability_memory_pool() {
+  return _serviceability.memory_pool();
+}
+
+ZServiceabilityCounters* ZHeap::serviceability_counters() {
+  return _serviceability.counters();
+}
+
+void ZHeap::print_on(outputStream* st) const {
+  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
+               used() / M,
+               capacity() / M,
+               max_capacity() / M);
+  MetaspaceUtils::print_on(st);
+}
+
+void ZHeap::print_extended_on(outputStream* st) const {  // print_on() output plus a per-page dump
+  print_on(st);
+  st->cr();
+
+  ZPageTableIterator iter(&_pagetable);
+  for (ZPage* page; iter.next(&page);) {
+    page->print_on(st);
+  }
+
+  st->cr();
+}
+
+class ZVerifyRootsTask : public ZTask {  // Applies ZVerifyRootOopClosure to all strong and weak roots
+private:
+  ZRootsIterator     _strong_roots;
+  ZWeakRootsIterator _weak_roots;
+
+public:
+  ZVerifyRootsTask() :
+      ZTask("ZVerifyRootsTask"),
+      _strong_roots(),
+      _weak_roots() {}
+
+  virtual void work() {
+    ZVerifyRootOopClosure cl;
+    _strong_roots.oops_do(&cl);
+    _weak_roots.oops_do(&cl);
+  }
+};
+
+void ZHeap::verify() {
+  // Heap verification can only be done between mark end and
+  // relocate start. This is the only window where all oops are
+  // good and the whole heap is in a consistent state.
+  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");
+
+  {
+    ZVerifyRootsTask task;
+    _workers.run_parallel(&task);
+  }
+
+  {
+    ZVerifyObjectClosure cl;
+    object_iterate(&cl);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zHeap.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZHEAP_HPP
+#define SHARE_GC_Z_ZHEAP_HPP
+
+#include "gc/shared/gcTimer.hpp"
+#include "gc/z/zAllocationFlags.hpp"
+#include "gc/z/zArray.hpp"
+#include "gc/z/zList.hpp"
+#include "gc/z/zLock.hpp"
+#include "gc/z/zMark.hpp"
+#include "gc/z/zObjectAllocator.hpp"
+#include "gc/z/zPage.hpp"
+#include "gc/z/zPageAllocator.hpp"
+#include "gc/z/zPageTable.hpp"
+#include "gc/z/zReferenceProcessor.hpp"
+#include "gc/z/zRelocate.hpp"
+#include "gc/z/zRelocationSet.hpp"
+#include "gc/z/zRelocationSetSelector.hpp"
+#include "gc/z/zRootsIterator.hpp"
+#include "gc/z/zWeakRootsProcessor.hpp"
+#include "gc/z/zServiceability.hpp"
+#include "gc/z/zWorkers.hpp"
+#include "memory/allocation.hpp"
+
+class ZHeap {  // The ZGC Java heap; one singleton instance per JVM, accessible via heap()
+  friend class VMStructs;
+
+private:
+  static ZHeap*       _heap;  // Singleton instance
+
+  ZWorkers            _workers;
+  ZObjectAllocator    _object_allocator;
+  ZPageAllocator      _page_allocator;
+  ZPageTable          _pagetable;  // Maps heap addresses to ZPage*
+  ZMark               _mark;
+  ZReferenceProcessor _reference_processor;
+  ZWeakRootsProcessor _weak_roots_processor;
+  ZRelocate           _relocate;
+  ZRelocationSet      _relocation_set;
+  ZServiceability     _serviceability;
+
+  size_t heap_min_size() const;
+  size_t heap_max_size() const;
+  size_t heap_max_reserve_size() const;
+
+  void out_of_memory();
+  void flip_views();
+  void fixup_partial_loads();
+
+public:
+  static ZHeap* heap();
+
+  ZHeap();
+
+  bool is_initialized() const;
+
+  // Heap metrics
+  size_t min_capacity() const;
+  size_t max_capacity() const;
+  size_t capacity() const;
+  size_t max_reserve() const;
+  size_t used_high() const;
+  size_t used_low() const;
+  size_t used() const;
+  size_t allocated() const;
+  size_t reclaimed() const;
+
+  size_t tlab_capacity() const;
+  size_t tlab_used() const;
+  size_t max_tlab_size() const;
+  size_t unsafe_max_tlab_alloc() const;
+
+  bool is_in(uintptr_t addr) const;
+
+  // Block
+  uintptr_t block_start(uintptr_t addr) const;
+  size_t block_size(uintptr_t addr) const;
+  bool block_is_obj(uintptr_t addr) const;
+
+  // Workers
+  uint nconcurrent_worker_threads() const;
+  uint nconcurrent_no_boost_worker_threads() const;
+  void set_boost_worker_threads(bool boost);
+  void worker_threads_do(ThreadClosure* tc) const;
+  void print_worker_threads_on(outputStream* st) const;
+
+  // Reference processing
+  ReferenceDiscoverer* reference_discoverer();
+  void set_soft_reference_policy(bool clear);
+
+  // Non-strong reference processing
+  void process_non_strong_references();
+
+  // Page allocation
+  ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
+  void undo_alloc_page(ZPage* page);
+  bool retain_page(ZPage* page);
+  void release_page(ZPage* page, bool reclaimed);
+
+  // Object allocation
+  uintptr_t alloc_tlab(size_t size);
+  uintptr_t alloc_object(size_t size);
+  uintptr_t alloc_object_for_relocation(size_t size);
+  void undo_alloc_object_for_relocation(uintptr_t addr, size_t size);
+  void check_out_of_memory();
+
+  // Marking
+  bool is_object_live(uintptr_t addr) const;
+  bool is_object_strongly_live(uintptr_t addr) const;
+  template <bool finalizable, bool publish> void mark_object(uintptr_t addr);
+  void mark_start();
+  void mark();
+  void mark_flush_and_free(Thread* thread);
+  bool mark_end();
+
+  // Post-marking & Pre-relocation
+  void destroy_detached_pages();
+
+  // Relocation set
+  void select_relocation_set();
+  void prepare_relocation_set();
+  void reset_relocation_set();
+
+  // Relocation
+  bool is_relocating(uintptr_t addr) const;
+  void relocate_start();
+  uintptr_t relocate_object(uintptr_t addr);
+  uintptr_t forward_object(uintptr_t addr);
+  void relocate();
+
+  // Iteration
+  void object_iterate(ObjectClosure* cl);
+
+  // Serviceability
+  void serviceability_initialize();
+  GCMemoryManager* serviceability_memory_manager();
+  MemoryPool* serviceability_memory_pool();
+  ZServiceabilityCounters* serviceability_counters();
+
+  // Printing
+  void print_on(outputStream* st) const;
+  void print_extended_on(outputStream* st) const;
+
+  // Verification
+  bool is_oop(oop object) const;
+  void verify();
+};
+
+#endif // SHARE_GC_Z_ZHEAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zHeap.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZHEAP_INLINE_HPP
+#define SHARE_GC_Z_ZHEAP_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zHeap.hpp"
+#include "gc/z/zMark.inline.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPageTable.inline.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "utilities/debug.hpp"
+
+inline ZHeap* ZHeap::heap() {  // Singleton accessor; valid only after the ZHeap constructor has run
+  assert(_heap != NULL, "Not initialized");
+  return _heap;
+}
+
+inline ReferenceDiscoverer* ZHeap::reference_discoverer() {
+  return &_reference_processor;
+}
+
+inline bool ZHeap::is_relocating(uintptr_t addr) const {
+  return _pagetable.is_relocating(addr);
+}
+
+inline bool ZHeap::is_object_live(uintptr_t addr) const {  // NOTE(review): no NULL check on page — addr must be inside the heap
+  ZPage* page = _pagetable.get(addr);
+  return page->is_object_live(addr);
+}
+
+inline bool ZHeap::is_object_strongly_live(uintptr_t addr) const {
+  ZPage* page = _pagetable.get(addr);
+  return page->is_object_strongly_live(addr);
+}
+
+template <bool finalizable, bool publish>
+inline void ZHeap::mark_object(uintptr_t addr) {
+  assert(ZGlobalPhase == ZPhaseMark, "Mark not allowed");
+  _mark.mark_object<finalizable, publish>(addr);
+}
+
+inline uintptr_t ZHeap::alloc_tlab(size_t size) {
+  guarantee(size <= max_tlab_size(), "TLAB too large");
+  return _object_allocator.alloc_object(size);
+}
+
+inline uintptr_t ZHeap::alloc_object(size_t size) {  // Returns 0 on allocation failure (after logging OOM)
+  uintptr_t addr = _object_allocator.alloc_object(size);
+  assert(ZAddress::is_good_or_null(addr), "Bad address");
+
+  if (addr == 0) {
+    out_of_memory();
+  }
+
+  return addr;
+}
+
+inline uintptr_t ZHeap::alloc_object_for_relocation(size_t size) {  // No OOM logging here — relocation handles failure itself
+  uintptr_t addr = _object_allocator.alloc_object_for_relocation(size);
+  assert(ZAddress::is_good_or_null(addr), "Bad address");
+  return addr;
+}
+
+inline void ZHeap::undo_alloc_object_for_relocation(uintptr_t addr, size_t size) {
+  ZPage* const page = _pagetable.get(addr);
+  _object_allocator.undo_alloc_object_for_relocation(page, addr, size);
+}
+
+inline void ZHeap::check_out_of_memory() {
+  _page_allocator.check_out_of_memory();
+}
+
+inline bool ZHeap::is_oop(oop object) const {
+  return ZOop::is_good(object);  // A valid oop must carry the current "good" color bits
+}
+
+#endif // SHARE_GC_Z_ZHEAP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zHeapIterator.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddressRangeMap.inline.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHeapIterator.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zRootsIterator.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/bitMap.inline.hpp"
+#include "utilities/stack.inline.hpp"
+
+class ZHeapIteratorBitMap : public CHeapObj<mtGC> {  // Visited-object bitmap; one bit per possible object index
+private:
+  CHeapBitMap _map;
+
+public:
+  ZHeapIteratorBitMap(size_t size_in_bits) :
+      _map(size_in_bits) {}
+
+  bool try_set_bit(size_t index) {  // True iff this call newly set the bit (first visit)
+    if (_map.at(index)) {
+      return false;
+    }
+
+    _map.set_bit(index);
+    return true;
+  }
+};
+
+class ZHeapIteratorRootOopClosure : public OopClosure {  // Pushes each root oop, then drains the visit stack immediately
+private:
+  ZHeapIterator* const _iter;
+  ObjectClosure* const _cl;
+
+public:
+  ZHeapIteratorRootOopClosure(ZHeapIterator* iter, ObjectClosure* cl) :
+      _iter(iter),
+      _cl(cl) {}
+
+  virtual void do_oop(oop* p) {
+    // Load barrier needed here for the same reason we
+    // need fixup_partial_loads() in ZHeap::mark_end()
+    const oop obj = RootAccess<>::oop_load(p);
+    _iter->push(obj);
+    _iter->drain(_cl);
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    ShouldNotReachHere();  // Narrow (compressed) oops are not used here
+  }
+};
+
+class ZHeapIteratorPushOopClosure : public ExtendedOopClosure {  // Pushes the object's field references onto the visit stack
+private:
+  ZHeapIterator* const _iter;
+  const oop            _base;  // Holder object whose fields are being visited
+
+public:
+  ZHeapIteratorPushOopClosure(ZHeapIterator* iter, oop base) :
+      _iter(iter),
+      _base(base) {}
+
+  void do_oop_nv(oop* p) {
+    const oop obj = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
+    _iter->push(obj);
+  }
+
+  void do_oop_nv(narrowOop* p) {
+    ShouldNotReachHere();  // Narrow (compressed) oops are not used here
+  }
+
+  virtual void do_oop(oop* p) {
+    do_oop_nv(p);
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    do_oop_nv(p);
+  }
+
+#ifdef ASSERT
+  virtual bool should_verify_oops() {
+    return false;  // Oops here may be visited through the "unknown ref" path; skip verification
+  }
+#endif
+};
+
+ZHeapIterator::ZHeapIterator() :
+    _visit_stack(),
+    _visit_map() {}
+
+ZHeapIterator::~ZHeapIterator() {  // Frees the lazily allocated per-range visited bitmaps
+  ZVisitMapIterator iter(&_visit_map);
+  for (ZHeapIteratorBitMap* map; iter.next(&map);) {
+    delete map;
+  }
+}
+
+size_t ZHeapIterator::object_index_max() const {  // Max objects per map granule: min page size / smallest object alignment
+  return ZPageSizeMin >> ZObjectAlignmentSmallShift;
+}
+
+size_t ZHeapIterator::object_index(oop obj) const {
+  const uintptr_t addr = ZOop::to_address(obj);
+  const uintptr_t offset = ZAddress::offset(addr);
+  const uintptr_t mask = (1 << ZPageSizeMinShift) - 1;  // NOTE(review): literal 1 is int; OK while ZPageSizeMinShift < 31 — confirm
+  return (offset & mask) >> ZObjectAlignmentSmallShift;
+}
+
+ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {  // Gets (or lazily creates) the visited bitmap covering obj's address
+  const uintptr_t addr = ZOop::to_address(obj);
+  ZHeapIteratorBitMap* map = _visit_map.get(addr);
+  if (map == NULL) {
+    map = new ZHeapIteratorBitMap(object_index_max());
+    _visit_map.put(addr, map);
+  }
+
+  return map;
+}
+
+void ZHeapIterator::push(oop obj) {  // Pushes obj for visiting unless NULL or already seen
+  if (obj == NULL) {
+    // Ignore
+    return;
+  }
+
+  ZHeapIteratorBitMap* const map = object_map(obj);
+  const size_t index = object_index(obj);
+  if (!map->try_set_bit(index)) {
+    // Already pushed
+    return;
+  }
+
+  // Push
+  _visit_stack.push(obj);
+}
+
+void ZHeapIterator::drain(ObjectClosure* cl) {  // Depth-first traversal: visit each popped object, push its references
+  while (!_visit_stack.is_empty()) {
+    const oop obj = _visit_stack.pop();
+
+    // Visit
+    cl->do_object(obj);
+
+    // Push members to visit
+    ZHeapIteratorPushOopClosure push_cl(this, obj);
+    obj->oop_iterate(&push_cl);
+  }
+}
+
+void ZHeapIterator::objects_do(ObjectClosure* cl) {  // Applies cl to every object reachable from the roots
+  ZHeapIteratorRootOopClosure root_cl(this, cl);
+  ZRootsIterator roots;
+
+  // Follow roots. Note that we also visit the JVMTI weak tag map
+  // as if they were strong roots to make sure we visit all tagged
+  // objects, even those that might now have become unreachable.
+  // If we didn't do this the user would have expected to see
+  // ObjectFree events for unreachable objects in the tag map.
+  roots.oops_do(&root_cl, true /* visit_jvmti_weak_export */);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zHeapIterator.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZHEAPITERATOR_HPP
+#define SHARE_GC_Z_ZHEAPITERATOR_HPP
+
+#include "gc/z/zAddressRangeMap.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/stack.hpp"
+
+class ZHeapIteratorBitMap;
+
+// Iterates over all objects reachable from the roots, visiting each
+// object exactly once. Visited objects are tracked in lazily allocated
+// per-address-range bitmaps, and pending objects in an explicit stack,
+// avoiding deep recursion while walking the object graph.
+class ZHeapIterator : public StackObj {
+  friend class ZHeapIteratorRootOopClosure;
+  friend class ZHeapIteratorPushOopClosure;
+
+private:
+  typedef ZAddressRangeMap<ZHeapIteratorBitMap*, ZPageSizeMinShift>         ZVisitMap;
+  typedef ZAddressRangeMapIterator<ZHeapIteratorBitMap*, ZPageSizeMinShift> ZVisitMapIterator;
+  typedef Stack<oop, mtGC>                                                  ZVisitStack;
+
+  ZVisitStack _visit_stack; // Objects discovered but not yet visited
+  ZVisitMap   _visit_map;   // Per-address-range "already visited" bitmaps
+
+  // Number of bits needed in each per-range visited bitmap
+  size_t object_index_max() const;
+  // Bit index of an object within its address range
+  size_t object_index(oop obj) const;
+  // Lookup, lazily creating, the visited bitmap covering an object
+  ZHeapIteratorBitMap* object_map(oop obj);
+
+  // Queue an object for visiting, if not already queued
+  void push(oop obj);
+  // Visit queued objects until the stack is empty
+  void drain(ObjectClosure* cl);
+
+public:
+  ZHeapIterator();
+  ~ZHeapIterator();
+
+  // Apply cl to every reachable object, once per object
+  void objects_do(ObjectClosure* cl);
+};
+
+#endif // SHARE_GC_Z_ZHEAPITERATOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zInitialize.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zCPU.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zInitialize.hpp"
+#include "gc/z/zLargePages.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zTracer.hpp"
+#include "logging/log.hpp"
+#include "runtime/vm_version.hpp"
+
+// One-time initialization of ZGC subsystems, run during GC bootstrap.
+ZInitialize::ZInitialize(ZBarrierSet* barrier_set) {
+  log_info(gc, init)("Initializing %s", ZGCName);
+  log_info(gc, init)("Version: %s (%s)",
+                     Abstract_VM_Version::vm_release(),
+                     Abstract_VM_Version::jdk_debug_level());
+
+  // Early initialization
+  // NOTE(review): the order below looks significant (e.g. address
+  // masks first, stat values before the tracer) - confirm subsystem
+  // dependencies before reordering.
+  ZAddressMasks::initialize();
+  ZNUMA::initialize();
+  ZCPU::initialize();
+  ZStatValue::initialize();
+  ZTracer::initialize();
+  ZLargePages::initialize();
+  ZBarrierSet::set_barrier_set(barrier_set);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zInitialize.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZINITIALIZE_HPP
+#define SHARE_GC_Z_ZINITIALIZE_HPP
+
+#include "memory/allocation.hpp"
+
+class ZBarrierSet;
+
+// Helper whose constructor performs one-time initialization of the
+// ZGC subsystems. Instantiated once during collected-heap setup.
+class ZInitialize {
+public:
+  ZInitialize(ZBarrierSet* barrier_set);
+};
+
+#endif // SHARE_GC_Z_ZINITIALIZE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zLargePages.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLargePages.hpp"
+#include "logging/log.hpp"
+#include "runtime/os.hpp"
+
+// Established once by initialize_platform(); read-only afterwards
+ZLargePages::State ZLargePages::_state;
+
+// Determine platform large page support and log the outcome.
+void ZLargePages::initialize() {
+  initialize_platform();
+
+  log_info(gc, init)("Memory: " JULONG_FORMAT "M", os::physical_memory() / M);
+  log_info(gc, init)("Large Page Support: %s", to_string());
+}
+
+// Human-readable description of the current large page state, used by
+// the initialization log message.
+const char* ZLargePages::to_string() {
+  if (_state == Explicit) {
+    return "Enabled (Explicit)";
+  } else if (_state == Transparent) {
+    return "Enabled (Transparent)";
+  } else {
+    return "Disabled";
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zLargePages.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZLARGEPAGES_HPP
+#define SHARE_GC_Z_ZLARGEPAGES_HPP
+
+#include "memory/allocation.hpp"
+
+// Tracks whether the heap is backed by explicit large pages,
+// transparent large pages, or neither. The state is established once
+// during initialization and only queried thereafter.
+class ZLargePages : public AllStatic {
+private:
+  enum State {
+    Disabled,
+    Explicit,
+    Transparent
+  };
+
+  static State _state;
+
+  // Platform-specific probe that sets _state (defined per OS/CPU)
+  static void initialize_platform();
+
+public:
+  static void initialize();
+
+  static bool is_enabled();
+  static bool is_explicit();
+  static bool is_transparent();
+
+  // Human-readable state, for logging
+  static const char* to_string();
+};
+
+#endif // SHARE_GC_Z_ZLARGEPAGES_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zLargePages.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZLARGEPAGES_INLINE_HPP
+#define SHARE_GC_Z_ZLARGEPAGES_INLINE_HPP
+
+#include "gc/z/zLargePages.hpp"
+
+// True when any kind of large pages (explicit or transparent) is in use
+inline bool ZLargePages::is_enabled() {
+  return _state != Disabled;
+}
+
+// True when explicitly requested large pages are in use
+inline bool ZLargePages::is_explicit() {
+  return _state == Explicit;
+}
+
+// True when transparent large pages are in use
+inline bool ZLargePages::is_transparent() {
+  return _state == Transparent;
+}
+
+#endif // SHARE_GC_Z_ZLARGEPAGES_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zList.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZLIST_HPP
+#define SHARE_GC_Z_ZLIST_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+template <typename T> class ZList;
+
+// Element in a doubly-linked list. Intended to be embedded as a member
+// (conventionally named _node) inside the element type T; ZList
+// recovers the enclosing object from the node via offset_of().
+template <typename T>
+class ZListNode {
+  friend class ZList<T>;
+
+private:
+  ZListNode* _next;
+  ZListNode* _prev;
+
+  // Used by ZList to create its self-referencing sentinel head
+  ZListNode(ZListNode* next, ZListNode* prev) :
+      _next(next),
+      _prev(prev) {}
+
+  // NULL links mean "not currently on any list"
+  void set_unused() {
+    _next = NULL;
+    _prev = NULL;
+  }
+
+public:
+  ZListNode() {
+    set_unused();
+  }
+
+  ~ZListNode() {
+    set_unused();
+  }
+
+  bool is_unused() const {
+    return _next == NULL && _prev == NULL;
+  }
+};
+
+// Intrusive doubly-linked list built around a sentinel head node.
+// Elements of type T must embed a ZListNode<T> member named _node.
+// The list only links elements; it never owns or allocates them.
+template <typename T>
+class ZList {
+private:
+  ZListNode<T> _head;   // Sentinel; links form a circular list
+  size_t       _size;
+
+  // Passing by value and assignment is not allowed
+  ZList(const ZList<T>& list);
+  ZList<T>& operator=(const ZList<T>& list);
+
+  // Check the sentinel's links for consistency (debug builds only)
+  void verify() const {
+    assert(_head._next->_prev == &_head, "List corrupt");
+    assert(_head._prev->_next == &_head, "List corrupt");
+  }
+
+  // Link node into the list immediately after 'before'
+  void insert(ZListNode<T>* before, ZListNode<T>* node) {
+    verify();
+
+    assert(node->is_unused(), "Already in a list");
+    node->_prev = before;
+    node->_next = before->_next;
+    before->_next = node;
+    node->_next->_prev = node;
+
+    _size++;
+  }
+
+  // Map an element to its embedded list node
+  ZListNode<T>* cast_to_inner(T* elem) const {
+    return &elem->_node;
+  }
+
+  // Recover the enclosing element from its embedded _node member
+  T* cast_to_outer(ZListNode<T>* node) const {
+    return (T*)((uintptr_t)node - offset_of(T, _node));
+  }
+
+public:
+  ZList() :
+      _head(&_head, &_head),
+      _size(0) {
+    verify();
+  }
+
+  size_t size() const {
+    verify();
+    return _size;
+  }
+
+  bool is_empty() const {
+    return _size == 0;
+  }
+
+  // First/last element, or NULL if the list is empty
+  T* first() const {
+    return is_empty() ? NULL : cast_to_outer(_head._next);
+  }
+
+  T* last() const {
+    return is_empty() ? NULL : cast_to_outer(_head._prev);
+  }
+
+  // Successor/predecessor of elem, or NULL at the end of the list
+  T* next(T* elem) const {
+    verify();
+    ZListNode<T>* next = cast_to_inner(elem)->_next;
+    return (next == &_head) ? NULL : cast_to_outer(next);
+  }
+
+  T* prev(T* elem) const {
+    verify();
+    ZListNode<T>* prev = cast_to_inner(elem)->_prev;
+    return (prev == &_head) ? NULL : cast_to_outer(prev);
+  }
+
+  void insert_first(T* elem) {
+    insert(&_head, cast_to_inner(elem));
+  }
+
+  void insert_last(T* elem) {
+    insert(_head._prev, cast_to_inner(elem));
+  }
+
+  void insert_before(T* before, T* elem) {
+    insert(cast_to_inner(before)->_prev, cast_to_inner(elem));
+  }
+
+  void insert_after(T* after, T* elem) {
+    insert(cast_to_inner(after), cast_to_inner(elem));
+  }
+
+  // Unlink elem from this list; elem must currently be on it
+  void remove(T* elem) {
+    verify();
+
+    ZListNode<T>* const node = cast_to_inner(elem);
+    assert(!node->is_unused(), "Not in a list");
+
+    ZListNode<T>* const next = node->_next;
+    ZListNode<T>* const prev = node->_prev;
+    assert(next->_prev == node, "List corrupt");
+    assert(prev->_next == node, "List corrupt");
+
+    prev->_next = next;
+    next->_prev = prev;
+    node->set_unused();
+
+    _size--;
+  }
+
+  // Unlink and return the first/last element, or NULL if empty
+  T* remove_first() {
+    T* elem = first();
+    if (elem != NULL) {
+      remove(elem);
+    }
+
+    return elem;
+  }
+
+  T* remove_last() {
+    T* elem = last();
+    if (elem != NULL) {
+      remove(elem);
+    }
+
+    return elem;
+  }
+
+  // Append all elements of 'list' to the end of this list, leaving
+  // 'list' empty. Note that the statement order below matters:
+  // _head._prev->_next is read before it is overwritten.
+  void transfer(ZList<T>* list) {
+    verify();
+
+    if (!list->is_empty()) {
+      list->_head._next->_prev = _head._prev;
+      list->_head._prev->_next = _head._prev->_next;
+
+      _head._prev->_next = list->_head._next;
+      _head._prev = list->_head._prev;
+
+      list->_head._next = &list->_head;
+      list->_head._prev = &list->_head;
+
+      _size += list->_size;
+      list->_size = 0;
+
+      list->verify();
+      verify();
+    }
+  }
+};
+
+// Common list iterator implementation; the 'forward' template
+// parameter selects the traversal direction at compile time.
+template <typename T, bool forward>
+class ZListIteratorImpl : public StackObj {
+private:
+  ZList<T>* const _list;
+  T*              _next;  // Next element to hand out, NULL when done
+
+public:
+  ZListIteratorImpl(ZList<T>* list);
+
+  // Store the next element in *elem and advance; false when exhausted
+  bool next(T** elem);
+};
+
+// Iterator types
+#define ZLIST_FORWARD        true
+#define ZLIST_REVERSE        false
+
+// Forward iterator over a ZList
+template <typename T>
+class ZListIterator : public ZListIteratorImpl<T, ZLIST_FORWARD> {
+public:
+  ZListIterator(ZList<T>* list) :
+      ZListIteratorImpl<T, ZLIST_FORWARD>(list) {}
+};
+
+// Reverse iterator over a ZList
+template <typename T>
+class ZListReverseIterator : public ZListIteratorImpl<T, ZLIST_REVERSE> {
+public:
+  ZListReverseIterator(ZList<T>* list) :
+      ZListIteratorImpl<T, ZLIST_REVERSE>(list) {}
+};
+
+#endif // SHARE_GC_Z_ZLIST_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zList.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZLIST_INLINE_HPP
+#define SHARE_GC_Z_ZLIST_INLINE_HPP
+
+#include "gc/z/zList.hpp"
+
+// Position the iterator at the first element in traversal order
+template <typename T, bool forward>
+ZListIteratorImpl<T, forward>::ZListIteratorImpl(ZList<T>* list) :
+    _list(list),
+    _next(forward ? list->first() : list->last()) {}
+
+// Store the next element in *elem and advance the iterator in the
+// chosen direction. Returns false once the list has been exhausted,
+// in which case *elem is left untouched.
+template <typename T, bool forward>
+bool ZListIteratorImpl<T, forward>::next(T** elem) {
+  if (_next == NULL) {
+    // No more elements
+    return false;
+  }
+
+  *elem = _next;
+  _next = forward ? _list->next(_next) : _list->prev(_next);
+  return true;
+}
+
+#endif // SHARE_GC_Z_ZLIST_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zLiveMap.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zLiveMap.inline.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zThread.hpp"
+#include "logging/log.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "utilities/debug.hpp"
+
+// Contention counters for the lazy, racy reset of per-page mark
+// state; see ZLiveMap::reset() and ZLiveMap::reset_segment() below.
+static const ZStatCounter ZCounterMarkSeqNumResetContention("Contention", "Mark SeqNum Reset Contention", ZStatUnitOpsPerSecond);
+static const ZStatCounter ZCounterMarkSegmentResetContention("Contention", "Mark Segment Reset Contention", ZStatUnitOpsPerSecond);
+
+// 'size' is the number of objects the map covers; the bitmap holds
+// two bits per object (set_atomic() sets bit pairs, and iteration
+// divides bit indices by two).
+ZLiveMap::ZLiveMap(uint32_t size) :
+    _seqnum(0),
+    _live_objects(0),
+    _live_bytes(0),
+    _segment_live_bits(0),
+    _segment_claim_bits(0),
+    // We need at least one bit per segment.
+    _bitmap(MAX2<size_t>(size, nsegments) * 2),
+    _shift(exact_log2(segment_size())) {}
+
+// Lazily reset this page's marking information at the start of a new
+// marking cycle. Racing threads compete on a cmpxchg of _seqnum: the
+// winner tags _seqnum with a sentinel while it reinitializes, and the
+// losers spin until _seqnum reaches ZGlobalSeqNum. 'index' is only
+// used for logging.
+void ZLiveMap::reset(size_t index) {
+  const uint32_t seqnum_initializing = (uint32_t)-1;
+  bool contention = false;
+
+  // Multiple threads can enter here, make sure only one of them
+  // resets the marking information while the others busy wait.
+  for (uint32_t seqnum = _seqnum; seqnum != ZGlobalSeqNum; seqnum = _seqnum) {
+    if ((seqnum != seqnum_initializing) &&
+        (Atomic::cmpxchg(seqnum_initializing, &_seqnum, seqnum) == seqnum)) {
+      // Reset marking information
+      _live_bytes = 0;
+      _live_objects = 0;
+
+      // Clear segment claimed/live bits
+      segment_live_bits().clear();
+      segment_claim_bits().clear();
+
+      // Make sure the newly reset marking information is
+      // globally visible before updating the page seqnum.
+      OrderAccess::storestore();
+
+      // Update seqnum
+      assert(_seqnum == seqnum_initializing, "Invalid");
+      _seqnum = ZGlobalSeqNum;
+      break;
+    }
+
+    // Mark reset contention
+    if (!contention) {
+      // Count contention once, not every loop
+      ZStatInc(ZCounterMarkSeqNumResetContention);
+      contention = true;
+
+      log_trace(gc)("Mark seqnum reset contention, thread: " PTR_FORMAT " (%s), map: " PTR_FORMAT ", bit: " SIZE_FORMAT,
+                    ZThread::id(), ZThread::name(), p2i(this), index);
+    }
+  }
+}
+
+// Lazily clear one bitmap segment the first time an object in it is
+// marked during a cycle. Exactly one thread claims the segment (via
+// the claim bit), clears it, and then publishes it by setting its
+// live bit; racing threads busy-wait on the live bit.
+void ZLiveMap::reset_segment(BitMap::idx_t segment) {
+  bool contention = false;
+
+  if (!claim_segment(segment)) {
+    // Already claimed, wait for live bit to be set
+    while (!is_segment_live(segment)) {
+      // Busy wait. The loadload barrier is needed to make
+      // sure we re-read the live bit every time we loop.
+      OrderAccess::loadload();
+
+      // Mark reset contention
+      if (!contention) {
+        // Count contention once, not every loop
+        ZStatInc(ZCounterMarkSegmentResetContention);
+        contention = true;
+
+        log_trace(gc)("Mark segment reset contention, thread: " PTR_FORMAT " (%s), map: " PTR_FORMAT ", segment: " SIZE_FORMAT,
+                      ZThread::id(), ZThread::name(), p2i(this), segment);
+      }
+    }
+
+    // Segment is live
+    return;
+  }
+
+  // Segment claimed, clear it
+  const BitMap::idx_t start_index = segment_start(segment);
+  const BitMap::idx_t end_index   = segment_end(segment);
+  if (segment_size() / BitsPerWord >= 32) {
+    _bitmap.clear_large_range(start_index, end_index);
+  } else {
+    _bitmap.clear_range(start_index, end_index);
+  }
+
+  // Set live bit
+  const bool success = set_segment_live_atomic(segment);
+  assert(success, "Should never fail");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zLiveMap.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZLIVEMAP_HPP
+#define SHARE_GC_Z_ZLIVEMAP_HPP
+
+#include "gc/z/zBitMap.hpp"
+#include "memory/allocation.hpp"
+
+class ObjectClosure;
+
+// Per-page mark state: a segmented mark bitmap plus live object/byte
+// counters. The state is reset lazily at the start of each marking
+// cycle, keyed off the global mark sequence number, so untouched
+// pages pay nothing for a new cycle.
+class ZLiveMap {
+  friend class ZLiveMapTest;
+
+private:
+  static const size_t nsegments = 64;
+
+  volatile uint32_t _seqnum;              // Mark sequence number
+  volatile uint32_t _live_objects;        // Number of live objects
+  volatile size_t   _live_bytes;          // Number of live bytes
+  BitMap::bm_word_t _segment_live_bits;   // Segment live bits
+  BitMap::bm_word_t _segment_claim_bits;  // Segment claim bits
+  ZBitMap           _bitmap;              // Mark bitmap
+  const size_t      _shift;               // Segment shift
+
+  // Single-word bitmap views over the segment live/claim bits
+  const BitMapView segment_live_bits() const;
+  const BitMapView segment_claim_bits() const;
+
+  BitMapView segment_live_bits();
+  BitMapView segment_claim_bits();
+
+  BitMap::idx_t segment_size() const;
+
+  // Bit index range [start, end) covered by a segment
+  BitMap::idx_t segment_start(BitMap::idx_t segment) const;
+  BitMap::idx_t segment_end(BitMap::idx_t segment) const;
+
+  bool is_segment_live(BitMap::idx_t segment) const;
+  bool set_segment_live_atomic(BitMap::idx_t segment);
+
+  BitMap::idx_t first_live_segment() const;
+  BitMap::idx_t next_live_segment(BitMap::idx_t segment) const;
+  BitMap::idx_t index_to_segment(BitMap::idx_t index) const;
+
+  bool claim_segment(BitMap::idx_t segment);
+
+  // Lazy per-cycle reinitialization (see zLiveMap.cpp)
+  void reset(size_t index);
+  void reset_segment(BitMap::idx_t segment);
+
+  void iterate_segment(ObjectClosure* cl, BitMap::idx_t segment, uintptr_t page_start, size_t page_object_alignment_shift);
+
+public:
+  ZLiveMap(uint32_t size);
+
+  // Invalidate the map so the next cycle reinitializes it
+  void reset();
+
+  bool is_marked() const;
+
+  uint32_t live_objects() const;
+  size_t live_bytes() const;
+
+  bool get(size_t index) const;
+  bool set_atomic(size_t index, bool finalizable, bool& inc_live);
+
+  void inc_live_atomic(uint32_t objects, size_t bytes);
+
+  // Apply cl to each marked object in the page
+  void iterate(ObjectClosure* cl, uintptr_t page_start, size_t page_object_alignment_shift);
+};
+
+#endif // SHARE_GC_Z_ZLIVEMAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZLIVEMAP_INLINE_HPP
+#define SHARE_GC_Z_ZLIVEMAP_INLINE_HPP
+
+#include "gc/z/zBitMap.inline.hpp"
+#include "gc/z/zLiveMap.hpp"
+#include "gc/z/zMark.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/bitMap.inline.hpp"
+#include "utilities/debug.hpp"
+
+// Invalidate the map by clearing the seqnum; the next marking cycle
+// will lazily reinitialize it (see ZLiveMap::reset(size_t)).
+inline void ZLiveMap::reset() {
+  _seqnum = 0;
+}
+
+// The map is valid for the current cycle only when its seqnum
+// matches the global mark sequence number.
+inline bool ZLiveMap::is_marked() const {
+  return _seqnum == ZGlobalSeqNum;
+}
+
+inline uint32_t ZLiveMap::live_objects() const {
+  // Counters are only stable once marking has completed
+  assert(ZGlobalPhase != ZPhaseMark, "Invalid phase");
+  return _live_objects;
+}
+
+inline size_t ZLiveMap::live_bytes() const {
+  assert(ZGlobalPhase != ZPhaseMark, "Invalid phase");
+  return _live_bytes;
+}
+
+// Const views: const_cast is needed because BitMapView takes a
+// non-const word pointer; these overloads are used for reading only.
+inline const BitMapView ZLiveMap::segment_live_bits() const {
+  return BitMapView(const_cast<BitMap::bm_word_t*>(&_segment_live_bits), nsegments);
+}
+
+inline const BitMapView ZLiveMap::segment_claim_bits() const {
+  return BitMapView(const_cast<BitMap::bm_word_t*>(&_segment_claim_bits), nsegments);
+}
+
+inline BitMapView ZLiveMap::segment_live_bits() {
+  return BitMapView(&_segment_live_bits, nsegments);
+}
+
+inline BitMapView ZLiveMap::segment_claim_bits() {
+  return BitMapView(&_segment_claim_bits, nsegments);
+}
+
+inline bool ZLiveMap::is_segment_live(BitMap::idx_t segment) const {
+  return segment_live_bits().at(segment);
+}
+
+inline bool ZLiveMap::set_segment_live_atomic(BitMap::idx_t segment) {
+  return segment_live_bits().par_set_bit(segment);
+}
+
+// Returns true only for the single thread that wins the claim
+inline bool ZLiveMap::claim_segment(BitMap::idx_t segment) {
+  return segment_claim_bits().par_set_bit(segment);
+}
+
+// Returns nsegments when no (further) live segment exists
+inline BitMap::idx_t ZLiveMap::first_live_segment() const {
+  return segment_live_bits().get_next_one_offset(0, nsegments);
+}
+
+inline BitMap::idx_t ZLiveMap::next_live_segment(BitMap::idx_t segment) const {
+  return segment_live_bits().get_next_one_offset(segment + 1, nsegments);
+}
+
+inline BitMap::idx_t ZLiveMap::segment_size() const {
+  return _bitmap.size() / nsegments;
+}
+
+inline BitMap::idx_t ZLiveMap::index_to_segment(BitMap::idx_t index) const {
+  return index >> _shift;
+}
+
+// An object is marked only if the page, its segment, and its own bit
+// are all valid for the current cycle.
+inline bool ZLiveMap::get(size_t index) const {
+  BitMap::idx_t segment = index_to_segment(index);
+  return is_marked() &&               // Page is marked
+         is_segment_live(segment) &&  // Segment is marked
+         _bitmap.at(index);          // Object is marked
+}
+
+// Atomically mark the object at 'index', lazily resetting page and
+// segment state on first use in a cycle. Sets a bit pair (strong +
+// finalizable); 'inc_live' tells the caller whether to account the
+// object as newly live.
+inline bool ZLiveMap::set_atomic(size_t index, bool finalizable, bool& inc_live) {
+  if (!is_marked()) {
+    // First object to be marked during this
+    // cycle, reset marking information.
+    reset(index);
+  }
+
+  const BitMap::idx_t segment = index_to_segment(index);
+  if (!is_segment_live(segment)) {
+    // First object to be marked in this segment during
+    // this cycle, reset segment bitmap.
+    reset_segment(segment);
+  }
+
+  return _bitmap.par_set_bit_pair(index, finalizable, inc_live);
+}
+
+// Lock-free accumulation of live counters by marking threads
+inline void ZLiveMap::inc_live_atomic(uint32_t objects, size_t bytes) {
+  Atomic::add(objects, &_live_objects);
+  Atomic::add(bytes, &_live_bytes);
+}
+
+inline BitMap::idx_t ZLiveMap::segment_start(BitMap::idx_t segment) const {
+  return segment_size() * segment;
+}
+
+inline BitMap::idx_t ZLiveMap::segment_end(BitMap::idx_t segment) const {
+  return segment_start(segment) + segment_size();
+}
+
+// Apply cl to every marked object whose bit lies in the given
+// segment. Bit indices are twice the object index (two bits per
+// object), hence the /2 and *2 conversions below.
+inline void ZLiveMap::iterate_segment(ObjectClosure* cl, BitMap::idx_t segment, uintptr_t page_start, size_t page_object_alignment_shift) {
+  assert(is_segment_live(segment), "Must be");
+
+  const BitMap::idx_t start_index = segment_start(segment);
+  const BitMap::idx_t end_index   = segment_end(segment);
+  BitMap::idx_t index = _bitmap.get_next_one_offset(start_index, end_index);
+
+  while (index < end_index) {
+    // Calculate object address
+    const uintptr_t addr = page_start + ((index / 2) << page_object_alignment_shift);
+
+    // Apply closure
+    cl->do_object(ZOop::to_oop(addr));
+
+    // Find next bit after this object, skipping the bits covered by
+    // the object's own body
+    const size_t size = ZUtils::object_size(addr);
+    const uintptr_t next_addr = align_up(addr + size, 1 << page_object_alignment_shift);
+    const BitMap::idx_t next_index = ((next_addr - page_start) >> page_object_alignment_shift) * 2;
+    if (next_index >= end_index) {
+      // End of live map
+      break;
+    }
+
+    index = _bitmap.get_next_one_offset(next_index, end_index);
+  }
+}
+
+// Apply cl to every marked object in the page, segment by segment.
+// A stale map (previous cycle's seqnum) is treated as all-dead.
+inline void ZLiveMap::iterate(ObjectClosure* cl, uintptr_t page_start, size_t page_object_alignment_shift) {
+  if (is_marked()) {
+    for (BitMap::idx_t segment = first_live_segment(); segment < nsegments; segment = next_live_segment(segment)) {
+      // For each live segment
+      iterate_segment(cl, segment, page_start, page_object_alignment_shift);
+    }
+  }
+}
+
+#endif // SHARE_GC_Z_ZLIVEMAP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zLock.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZLOCK_HPP
+#define SHARE_GC_Z_ZLOCK_HPP
+
+#include "memory/allocation.hpp"
+#include <pthread.h>
+
+// Simple mutual exclusion lock implemented directly on a POSIX mutex
+// (this header is pthread-specific, not portable to non-POSIX platforms).
+// NOTE(review): pthread_mutex_init()'s return value is ignored in the
+// constructor (see zLock.inline.hpp) -- with default attributes this is
+// presumably not expected to fail, but worth confirming.
+class ZLock {
+private:
+  pthread_mutex_t _lock;
+
+public:
+  ZLock();
+
+  void lock();
+  bool try_lock();
+  void unlock();
+};
+
+// Scoped lock holder: acquires the given ZLock on construction and
+// releases it on destruction. Stack-allocated only (StackObj).
+class ZLocker : public StackObj {
+private:
+  ZLock* const _lock;
+
+public:
+  ZLocker(ZLock* lock);
+  ~ZLocker();
+};
+
+#endif // SHARE_GC_Z_ZLOCK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zLock.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZLOCK_INLINE_HPP
+#define SHARE_GC_Z_ZLOCK_INLINE_HPP
+
+#include "gc/z/zLock.hpp"
+
+// Initialize the underlying pthread mutex with default attributes.
+// NOTE(review): the return value of pthread_mutex_init() is ignored.
+inline ZLock::ZLock() {
+  pthread_mutex_init(&_lock, NULL);
+}
+
+inline void ZLock::lock() {
+  pthread_mutex_lock(&_lock);
+}
+
+// Returns true if the lock was acquired, false if it was already held.
+inline bool ZLock::try_lock() {
+  return pthread_mutex_trylock(&_lock) == 0;
+}
+
+inline void ZLock::unlock() {
+  pthread_mutex_unlock(&_lock);
+}
+
+// Acquire the lock for the lifetime of this scope.
+inline ZLocker::ZLocker(ZLock* lock) :
+    _lock(lock) {
+  _lock->lock();
+}
+
+inline ZLocker::~ZLocker() {
+  _lock->unlock();
+}
+
+#endif // SHARE_GC_Z_ZLOCK_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMark.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,684 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zMark.inline.hpp"
+#include "gc/z/zMarkCache.inline.hpp"
+#include "gc/z/zMarkStack.inline.hpp"
+#include "gc/z/zMarkTerminate.inline.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zPage.hpp"
+#include "gc/z/zPageTable.inline.hpp"
+#include "gc/z/zRootsIterator.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zTask.hpp"
+#include "gc/z/zThread.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "gc/z/zWorkers.inline.hpp"
+#include "logging/log.hpp"
+#include "oops/objArrayOop.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/handshake.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/align.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ticks.hpp"
+
+static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
+static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
+static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
+static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
+static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");
+
+ZMark::ZMark(ZWorkers* workers, ZPageTable* pagetable) :
+    _workers(workers),
+    _pagetable(pagetable),
+    _allocator(),
+    _stripes(),
+    _terminate(),
+    _work_terminateflush(true),
+    _work_nproactiveflush(0),
+    _work_nterminateflush(0),
+    _nproactiveflush(0),
+    _nterminateflush(0),
+    _ntrycomplete(0),
+    _ncontinue(0),
+    _nworkers(0) {}
+
+size_t ZMark::calculate_nstripes(uint nworkers) const {
+  // Calculate the number of stripes from the number of workers we use,
+  // where the number of stripes must be a power of two and we want to
+  // have at least one worker per stripe.
+  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
+  return MIN2(nstripes, ZMarkStripesMax);
+}
+
+void ZMark::prepare_mark() {
+  // Increment global sequence number to invalidate
+  // marking information for all pages.
+  ZGlobalSeqNum++;
+
+  // Reset flush/continue counters
+  _nproactiveflush = 0;
+  _nterminateflush = 0;
+  _ntrycomplete = 0;
+  _ncontinue = 0;
+
+  // Set number of workers to use
+  _nworkers = _workers->nconcurrent();
+
+  // Set number of mark stripes to use, based on number
+  // of workers we will use in the concurrent mark phase.
+  const size_t nstripes = calculate_nstripes(_nworkers);
+  _stripes.set_nstripes(nstripes);
+
+  // Update statistics
+  ZStatMark::set_at_mark_start(nstripes);
+
+  // Print worker/stripe distribution
+  LogTarget(Debug, gc, marking) log;
+  if (log.is_enabled()) {
+    log.print("Mark Worker/Stripe Distribution");
+    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
+      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
+      const size_t stripe_id = _stripes.stripe_id(stripe);
+      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
+                worker_id, _nworkers, stripe_id, nstripes);
+    }
+  }
+}
+
+class ZMarkRootsTask : public ZTask {
+private:
+  ZMark* const   _mark;
+  ZRootsIterator _roots;
+
+public:
+  ZMarkRootsTask(ZMark* mark) :
+      ZTask("ZMarkRootsTask"),
+      _mark(mark),
+      _roots() {}
+
+  virtual void work() {
+    ZMarkRootOopClosure cl;
+    _roots.oops_do(&cl);
+
+    // Flush and free worker stacks. Needed here since
+    // the set of workers executing during root scanning
+    // can be different from the set of workers executing
+    // during mark.
+    _mark->flush_and_free();
+  }
+};
+
+void ZMark::start() {
+  // Verification
+  if (ZVerifyMarking) {
+    verify_all_stacks_empty();
+  }
+
+  // Prepare for concurrent mark
+  prepare_mark();
+
+  // Mark roots
+  ZMarkRootsTask task(this);
+  _workers->run_parallel(&task);
+}
+
+void ZMark::prepare_work() {
+  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");
+
+  // Set number of active workers
+  _terminate.reset(_nworkers);
+
+  // Reset flush counters
+  _work_nproactiveflush = _work_nterminateflush = 0;
+  _work_terminateflush = true;
+}
+
+void ZMark::finish_work() {
+  // Accumulate proactive/terminate flush counters
+  _nproactiveflush += _work_nproactiveflush;
+  _nterminateflush += _work_nterminateflush;
+}
+
+bool ZMark::is_array(uintptr_t addr) const {
+  return ZOop::to_oop(addr)->is_objArray();
+}
+
+// Push a slice of an object array onto this thread's mark stack for
+// later processing. The address is encoded as an offset in units of
+// ZMarkPartialArrayMinSize and the size as a length in oops, to fit
+// in a ZMarkStackEntry.
+void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
+  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
+  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
+  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
+  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
+  const uintptr_t length = size / oopSize;
+  const ZMarkStackEntry entry(offset, length, finalizable);
+
+  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
+                                 addr, size, _stripes.stripe_id(stripe));
+
+  // Pushed without publishing -- NOTE(review): presumably because the
+  // pushing thread will continue processing this array itself; confirm.
+  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
+}
+
+void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
+  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
+  const size_t length = size / oopSize;
+
+  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);
+
+  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
+}
+
+void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
+  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
+  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
+  const uintptr_t start = addr;
+  const uintptr_t end = start + size;
+
+  // Calculate the aligned middle start/end/size, where the middle start
+  // should always be greater than the start (hence the +1 below) to make
+  // sure we always do some follow work, not just split the array into pieces.
+  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
+  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
+  const uintptr_t middle_end = middle_start + middle_size;
+
+  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT" (" SIZE_FORMAT "), "
+                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
+                                 start, end, size, middle_start, middle_end, middle_size);
+
+  // Push unaligned trailing part
+  if (end > middle_end) {
+    const uintptr_t trailing_addr = middle_end;
+    const size_t trailing_size = end - middle_end;
+    push_partial_array(trailing_addr, trailing_size, finalizable);
+  }
+
+  // Push aligned middle part(s)
+  uintptr_t partial_addr = middle_end;
+  while (partial_addr > middle_start) {
+    const size_t parts = 2;
+    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
+    partial_addr -= partial_size;
+    push_partial_array(partial_addr, partial_size, finalizable);
+  }
+
+  // Follow leading part
+  assert(start < middle_start, "Miscalculated middle start");
+  const uintptr_t leading_addr = start;
+  const size_t leading_size = middle_start - start;
+  follow_small_array(leading_addr, leading_size, finalizable);
+}
+
+void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
+  if (size <= ZMarkPartialArrayMinSize) {
+    follow_small_array(addr, size, finalizable);
+  } else {
+    follow_large_array(addr, size, finalizable);
+  }
+}
+
+void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
+  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
+  const size_t size = entry.partial_array_length() * oopSize;
+
+  follow_array(addr, size, finalizable);
+}
+
+void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
+  const uintptr_t addr = (uintptr_t)obj->base();
+  const size_t size = (size_t)obj->length() * oopSize;
+
+  follow_array(addr, size, finalizable);
+}
+
+void ZMark::follow_object(oop obj, bool finalizable) {
+  if (finalizable) {
+    ZMarkBarrierOopClosure<true /* finalizable */> cl;
+    obj->oop_iterate(&cl);
+  } else {
+    ZMarkBarrierOopClosure<false /* finalizable */> cl;
+    obj->oop_iterate(&cl);
+  }
+}
+
+// Try to mark the object at addr. Returns true if this call marked the
+// object (caller should follow it), false if it was already marked or
+// lives on a page that is still being allocated (implicitly live).
+bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
+  ZPage* const page = _pagetable->get(addr);
+  if (page->is_allocating()) {
+    // Newly allocated objects are implicitly marked
+    return false;
+  }
+
+  // Try mark object
+  bool inc_live = false;
+  const bool success = page->mark_object(addr, finalizable, inc_live);
+  if (inc_live) {
+    // Update live objects/bytes for page. We use the aligned object
+    // size since that is the actual number of bytes used on the page
+    // and alignment paddings can never be reclaimed.
+    const size_t size = ZUtils::object_size(addr);
+    const size_t aligned_size = align_up(size, page->object_alignment());
+    cache->inc_live(page, aligned_size);
+  }
+
+  return success;
+}
+
+void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
+  // Decode flags
+  const bool finalizable = entry.finalizable();
+  const bool partial_array = entry.partial_array();
+
+  if (partial_array) {
+    follow_partial_array(entry, finalizable);
+    return;
+  }
+
+  // Decode object address
+  const uintptr_t addr = entry.object_address();
+
+  if (!try_mark_object(cache, addr, finalizable)) {
+    // Already marked
+    return;
+  }
+
+  if (is_array(addr)) {
+    follow_array_object(objArrayOop(ZOop::to_oop(addr)), finalizable);
+  } else {
+    follow_object(ZOop::to_oop(addr), finalizable);
+  }
+}
+
+template <typename T>
+bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
+  ZMarkStackEntry entry;
+
+  // Drain stripe stacks
+  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
+    mark_and_follow(cache, entry);
+
+    // Check timeout
+    if (timeout->has_expired()) {
+      // Timeout
+      return false;
+    }
+  }
+
+  // Success
+  return true;
+}
+
+template <typename T>
+bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
+  const bool success = drain(stripe, stacks, cache, timeout);
+
+  // Flush and publish worker stacks
+  stacks->flush(&_allocator, &_stripes);
+
+  return success;
+}
+
+bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
+  // Try to steal a stack from another stripe
+  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
+       victim_stripe != stripe;
+       victim_stripe = _stripes.stripe_next(victim_stripe)) {
+    ZMarkStack* const stack = victim_stripe->steal_stack();
+    if (stack != NULL) {
+      // Success, install the stolen stack
+      stacks->install(&_stripes, stripe, stack);
+      return true;
+    }
+  }
+
+  // Nothing to steal
+  return false;
+}
+
+// Sleep briefly (1 ms) while waiting for other workers to reach
+// termination. NOTE(review): 'os' is used here but "runtime/os.hpp"
+// is not among this file's direct includes -- relies on a transitive
+// include; consider adding it explicitly.
+void ZMark::idle() const {
+  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
+  os::naked_short_sleep(1);
+}
+
+class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
+private:
+  ZMark* const _mark;
+  bool         _flushed;
+
+public:
+  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
+      _mark(mark),
+      _flushed(false) {}
+
+  void do_thread(Thread* thread) {
+    if (_mark->flush_and_free(thread)) {
+      _flushed = true;
+    }
+  }
+
+  bool flushed() const {
+    return _flushed;
+  }
+};
+
+// Flush and free the mark stacks of all threads, either directly (when
+// already inside a safepoint) or via a handshake with each thread.
+// Returns true if more marking work is available afterwards.
+bool ZMark::flush(bool at_safepoint) {
+  ZMarkFlushAndFreeStacksClosure cl(this);
+  if (at_safepoint) {
+    Threads::threads_do(&cl);
+  } else {
+    Handshake::execute(&cl);
+  }
+
+  // Returns true if more work is available
+  return cl.flushed() || !_stripes.is_empty();
+}
+
+// Try to flush out more marking work by handshaking all threads,
+// bumping the given flush counter. Returns true if more work became
+// available, false if flushing is not possible or found nothing.
+bool ZMark::try_flush(volatile size_t* nflush) {
+  // Only flush if handshakes are enabled
+  if (!ThreadLocalHandshakes) {
+    return false;
+  }
+
+  Atomic::inc(nflush);
+
+  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
+  return flush(false /* at_safepoint */);
+}
+
+bool ZMark::try_proactive_flush() {
+  // Only do proactive flushes from worker 0
+  if (ZThread::worker_id() != 0) {
+    return false;
+  }
+
+  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
+      Atomic::load(&_work_nterminateflush) != 0) {
+    // Limit reached or we're trying to terminate
+    return false;
+  }
+
+  return try_flush(&_work_nproactiveflush);
+}
+
+// Try to terminate marking using a two-stage protocol. The last worker
+// to enter stage 0 first attempts a flush (bounded by
+// ZMarkTerminateFlushMax) to surface more work; if anything was flushed
+// the workers back out and keep marking. Only when all workers make it
+// through stage 1 does marking terminate. Returns true when this worker
+// should terminate.
+bool ZMark::try_terminate() {
+  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);
+
+  if (_terminate.enter_stage0()) {
+    // Last thread entered stage 0, flush
+    if (Atomic::load(&_work_terminateflush) &&
+        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
+      // Exit stage 0 to allow other threads to continue marking
+      _terminate.exit_stage0();
+
+      // Flush before termination
+      if (!try_flush(&_work_nterminateflush)) {
+        // No more work available, skip further flush attempts
+        Atomic::store(false, &_work_terminateflush);
+      }
+
+      // Don't terminate, regardless of whether we successfully
+      // flushed out more work or not. We've already exited
+      // termination stage 0, to allow other threads to continue
+      // marking, so this thread has to return false and also
+      // make another round of attempted marking.
+      return false;
+    }
+  }
+
+  for (;;) {
+    if (_terminate.enter_stage1()) {
+      // Last thread entered stage 1, terminate
+      return true;
+    }
+
+    // Idle to give the other threads
+    // a chance to enter termination.
+    idle();
+
+    if (!_terminate.try_exit_stage1()) {
+      // All workers in stage 1, terminate
+      return true;
+    }
+
+    if (_terminate.try_exit_stage0()) {
+      // More work available, don't terminate
+      return false;
+    }
+  }
+}
+
+class ZMarkNoTimeout : public StackObj {
+public:
+  bool has_expired() {
+    return false;
+  }
+};
+
+void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
+  ZStatTimer timer(ZSubPhaseConcurrentMark);
+  ZMarkNoTimeout no_timeout;
+
+  for (;;) {
+    drain_and_flush(stripe, stacks, cache, &no_timeout);
+
+    if (try_steal(stripe, stacks)) {
+      // Stole work
+      continue;
+    }
+
+    if (try_proactive_flush()) {
+      // Work available
+      continue;
+    }
+
+    if (try_terminate()) {
+      // Terminate
+      break;
+    }
+  }
+}
+
+class ZMarkTimeout : public StackObj {
+private:
+  const Ticks    _start;
+  const uint64_t _timeout;
+  const uint64_t _check_interval;
+  uint64_t       _check_at;
+  uint64_t       _check_count;
+  bool           _expired;
+
+public:
+  ZMarkTimeout(uint64_t timeout_in_millis) :
+      _start(Ticks::now()),
+      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
+      _check_interval(200),
+      _check_at(_check_interval),
+      _check_count(0),
+      _expired(false) {}
+
+  ~ZMarkTimeout() {
+    const Tickspan duration = Ticks::now() - _start;
+    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
+                           ZThread::name(), _expired ? "Expired" : "Completed",
+                           _check_count, TimeHelper::counter_to_millis(duration.value()));
+  }
+
+  bool has_expired() {
+    if (++_check_count == _check_at) {
+      _check_at += _check_interval;
+      if ((uint64_t)Ticks::now().value() >= _timeout) {
+        // Timeout
+        _expired = true;
+      }
+    }
+
+    return _expired;
+  }
+};
+
+void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
+  ZStatTimer timer(ZSubPhaseMarkTryComplete);
+  ZMarkTimeout timeout(timeout_in_millis);
+
+  for (;;) {
+    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
+      // Timed out
+      break;
+    }
+
+    if (try_steal(stripe, stacks)) {
+      // Stole work
+      continue;
+    }
+
+    // Terminate
+    break;
+  }
+}
+
+void ZMark::work(uint64_t timeout_in_millis) {
+  ZMarkCache cache(_stripes.nstripes());
+  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
+  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
+
+  if (timeout_in_millis == 0) {
+    work_without_timeout(&cache, stripe, stacks);
+  } else {
+    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
+  }
+
+  // Make sure stacks have been flushed
+  assert(stacks->is_empty(&_stripes), "Should be empty");
+
+  // Free remaining stacks
+  stacks->free(&_allocator);
+}
+
+class ZMarkTask : public ZTask {
+private:
+  ZMark* const   _mark;
+  const uint64_t _timeout_in_millis;
+
+public:
+  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
+      ZTask("ZMarkTask"),
+      _mark(mark),
+      _timeout_in_millis(timeout_in_millis) {
+    _mark->prepare_work();
+  }
+
+  ~ZMarkTask() {
+    _mark->finish_work();
+  }
+
+  virtual void work() {
+    _mark->work(_timeout_in_millis);
+  }
+};
+
+void ZMark::mark() {
+  ZMarkTask task(this);
+  _workers->run_concurrent(&task);
+}
+
+bool ZMark::try_complete() {
+  _ntrycomplete++;
+
+  // Use nconcurrent number of worker threads to maintain the
+  // worker/stripe distribution used during concurrent mark.
+  ZMarkTask task(this, ZMarkCompleteTimeout);
+  _workers->run_concurrent(&task);
+
+  // Successful if all stripes are empty
+  return _stripes.is_empty();
+}
+
+bool ZMark::try_end() {
+  // Flush all mark stacks
+  if (!flush(true /* at_safepoint */)) {
+    // Mark completed
+    return true;
+  }
+
+  // Try complete marking by doing a limited
+  // amount of mark work in this phase.
+  return try_complete();
+}
+
+bool ZMark::end() {
+  // Try end marking
+  if (!try_end()) {
+    // Mark not completed
+    _ncontinue++;
+    return false;
+  }
+
+  // Verification
+  if (ZVerifyMarking) {
+    verify_all_stacks_empty();
+  }
+
+  // Update statistics
+  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);
+
+  // Mark completed
+  return true;
+}
+
+void ZMark::flush_and_free() {
+  Thread* const thread = Thread::current();
+  flush_and_free(thread);
+}
+
+bool ZMark::flush_and_free(Thread* thread) {
+  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
+  const bool flushed = stacks->flush(&_allocator, &_stripes);
+  stacks->free(&_allocator);
+  return flushed;
+}
+
+class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
+private:
+  const ZMarkStripeSet* const _stripes;
+
+public:
+  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
+      _stripes(stripes) {}
+
+  void do_thread(Thread* thread) {
+    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
+    guarantee(stacks->is_empty(_stripes), "Should be empty");
+  }
+};
+
+// Verify that no marking work remains: every thread's local mark stacks
+// must be empty, and the global stripe set must be empty. Used when
+// ZVerifyMarking is enabled (see start()/end()).
+void ZMark::verify_all_stacks_empty() const {
+  // Verify thread stacks
+  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
+  Threads::threads_do(&cl);
+
+  // Verify stripe stacks
+  // Fixed typo in guarantee message: "emtpy" -> "empty"
+  guarantee(_stripes.is_empty(), "Should be empty");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMark.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMARK_HPP
+#define SHARE_GC_Z_ZMARK_HPP
+
+#include "gc/z/zMarkStack.hpp"
+#include "gc/z/zMarkTerminate.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class Thread;
+class ZMarkCache;
+class ZPageTable;
+class ZWorkers;
+
+class ZMark {
+  friend class ZMarkRootsTask;
+  friend class ZMarkTask;
+  friend class ZMarkTryCompleteTask;
+
+private:
+  ZWorkers* const     _workers;
+  ZPageTable* const   _pagetable;
+  ZMarkStackAllocator _allocator;
+  ZMarkStripeSet      _stripes;
+  ZMarkTerminate      _terminate;
+  volatile bool       _work_terminateflush;
+  volatile size_t     _work_nproactiveflush;
+  volatile size_t     _work_nterminateflush;
+  size_t              _nproactiveflush;
+  size_t              _nterminateflush;
+  size_t              _ntrycomplete;
+  size_t              _ncontinue;
+  uint                _nworkers;
+
+  size_t calculate_nstripes(uint nworkers) const;
+  void prepare_mark();
+
+  bool is_array(uintptr_t addr) const;
+  void push_partial_array(uintptr_t addr, size_t size, bool finalizable);
+  void follow_small_array(uintptr_t addr, size_t size, bool finalizable);
+  void follow_large_array(uintptr_t addr, size_t size, bool finalizable);
+  void follow_array(uintptr_t addr, size_t size, bool finalizable);
+  void follow_partial_array(ZMarkStackEntry entry, bool finalizable);
+  void follow_array_object(objArrayOop obj, bool finalizable);
+  void follow_object(oop obj, bool finalizable);
+  bool try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable);
+  void mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry);
+
+  template <typename T> bool drain(ZMarkStripe* stripe,
+                                   ZMarkThreadLocalStacks* stacks,
+                                   ZMarkCache* cache,
+                                   T* timeout);
+  template <typename T> bool drain_and_flush(ZMarkStripe* stripe,
+                                             ZMarkThreadLocalStacks* stacks,
+                                             ZMarkCache* cache,
+                                             T* timeout);
+  bool try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks);
+  void idle() const;
+  bool flush(bool at_safepoint);
+  bool try_proactive_flush();
+  bool try_flush(volatile size_t* nflush);
+  bool try_terminate();
+  bool try_complete();
+  bool try_end();
+
+  void prepare_work();
+  void finish_work();
+
+  void work_without_timeout(ZMarkCache* cache,
+                            ZMarkStripe* stripe,
+                            ZMarkThreadLocalStacks* stacks);
+  void work_with_timeout(ZMarkCache* cache,
+                         ZMarkStripe* stripe,
+                         ZMarkThreadLocalStacks* stacks,
+                         uint64_t timeout_in_millis);
+  void work(uint64_t timeout_in_millis);
+
+  void verify_all_stacks_empty() const;
+
+public:
+  ZMark(ZWorkers* workers, ZPageTable* pagetable);
+
+  template <bool finalizable, bool publish> void mark_object(uintptr_t addr);
+
+  void start();
+  void mark();
+  bool end();
+
+  void flush_and_free();
+  bool flush_and_free(Thread* thread);
+};
+
+#endif // SHARE_GC_Z_ZMARK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMark.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMARK_INLINE_HPP
+#define SHARE_GC_Z_ZMARK_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zMark.hpp"
+#include "gc/z/zMarkStack.inline.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
+
+template <bool finalizable, bool publish>
+inline void ZMark::mark_object(uintptr_t addr) {
+  assert(ZAddress::is_marked(addr), "Should be marked");
+  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
+  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
+  ZMarkStackEntry entry(addr, finalizable);
+
+  stacks->push(&_allocator, &_stripes, stripe, entry, publish);
+}
+
+#endif // SHARE_GC_Z_ZMARK_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMarkCache.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zMarkCache.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+ZMarkCacheEntry::ZMarkCacheEntry() :
+    _page(NULL),
+    _objects(0),
+    _bytes(0) {}
+
+ZMarkCache::ZMarkCache(size_t nstripes) :
+    _shift(ZMarkStripeShift + exact_log2(nstripes)) {}
+
+ZMarkCache::~ZMarkCache() {
+  // Evict all entries
+  for (size_t i = 0; i < ZMarkCacheSize; i++) {
+    _cache[i].evict();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMarkCache.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMARKCACHE_HPP
+#define SHARE_GC_Z_ZMARKCACHE_HPP
+
+#include "gc/z/zGlobals.hpp"
+#include "memory/allocation.hpp"
+
+class ZPage;
+
+// One slot of the per-thread mark cache: accumulates object count and
+// byte count for a single page so that the page's live counters only
+// need to be updated (atomically) on eviction, not on every object.
+class ZMarkCacheEntry {
+private:
+  ZPage*   _page;     // Page the counts below belong to, NULL if slot is empty
+  uint32_t _objects;  // Number of live objects accumulated for _page
+  size_t   _bytes;    // Number of live bytes accumulated for _page
+
+public:
+  ZMarkCacheEntry();
+
+  void inc_live(ZPage* page, size_t bytes);
+  void evict();
+};
+
+// Small direct-mapped cache of live-count updates, used during marking to
+// batch updates to page liveness information and reduce atomic traffic.
+class ZMarkCache : public StackObj {
+private:
+  const size_t    _shift;                  // Address-to-slot shift, derived from nstripes
+  ZMarkCacheEntry _cache[ZMarkCacheSize];  // Direct-mapped entries, evicted on conflict
+
+public:
+  ZMarkCache(size_t nstripes);
+  ~ZMarkCache();
+
+  void inc_live(ZPage* page, size_t bytes);
+};
+
+#endif // SHARE_GC_Z_ZMARKCACHE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMarkCache.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMARKCACHE_INLINE_HPP
+#define SHARE_GC_Z_ZMARKCACHE_INLINE_HPP
+
+#include "gc/z/zMarkCache.hpp"
+#include "gc/z/zPage.inline.hpp"
+
+// Accumulate a live object (of the given size) for a page, deferring the
+// atomic update of the page itself until this entry is evicted.
+inline void ZMarkCacheEntry::inc_live(ZPage* page, size_t bytes) {
+  if (_page == page) {
+    // Cache hit
+    _objects++;
+    _bytes += bytes;
+  } else {
+    // Cache miss
+    // Flush the previously cached page's counts (if any), then start
+    // counting for the new page.
+    evict();
+    _page = page;
+    _objects = 1;
+    _bytes = bytes;
+  }
+}
+
+// Flush any cached counts to the page with a single atomic update and
+// mark the slot empty. Calling evict() on an empty slot is a no-op.
+inline void ZMarkCacheEntry::evict() {
+  if (_page != NULL) {
+    // Write cached data out to page
+    _page->inc_live_atomic(_objects, _bytes);
+    _page = NULL;
+  }
+}
+
+// Select a cache slot from the page's start address and record the live
+// object there. The mask assumes ZMarkCacheSize is a power of two.
+inline void ZMarkCache::inc_live(ZPage* page, size_t bytes) {
+  const size_t mask = ZMarkCacheSize - 1;
+  const size_t index = (page->start() >> _shift) & mask;
+  _cache[index].inc_live(page, bytes);
+}
+
+#endif // SHARE_GC_Z_ZMARKCACHE_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMarkStack.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zMarkStack.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/debug.hpp"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+
+// Reserve (but do not commit) the full virtual address range used for mark
+// stacks. Memory is committed incrementally by expand(). On reservation
+// failure _top stays 0, which is_initialized() treats as "not initialized".
+ZMarkStackSpace::ZMarkStackSpace() :
+    _expand_lock(),
+    _top(0),
+    _end(0) {
+  assert(ZMarkStacksMax >= ZMarkStackSpaceExpandSize, "ZMarkStacksMax too small");
+  assert(ZMarkStacksMax <= ZMarkStackSpaceSize, "ZMarkStacksMax too large");
+
+  // Reserve address space
+  // PROT_NONE + MAP_NORESERVE: address-space-only reservation, no backing
+  // memory is committed here.
+  const void* res = mmap((void*)ZMarkStackSpaceStart, ZMarkStackSpaceSize,
+                         PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  if (res != (void*)ZMarkStackSpaceStart) {
+    log_error(gc, marking)("Failed to reserve address space for marking stacks");
+    return;
+  }
+
+  // Successfully initialized
+  _top = _end = ZMarkStackSpaceStart;
+}
+
+// Initialization succeeded iff the constructor managed to reserve the
+// address range (in which case _top was set to a non-zero address).
+bool ZMarkStackSpace::is_initialized() const {
+  return _top != 0;
+}
+
+// Commit another ZMarkStackSpaceExpandSize bytes at the current end of the
+// reserved range. Returns false if the ZMarkStacksMax limit would be
+// exceeded or the mmap fails. Caller must hold _expand_lock.
+bool ZMarkStackSpace::expand() {
+  const size_t max = ZMarkStackSpaceStart + ZMarkStacksMax;
+  if (_end + ZMarkStackSpaceExpandSize > max) {
+    // Expansion limit reached
+    return false;
+  }
+
+  // MAP_FIXED remaps the already-reserved (PROT_NONE) range as
+  // readable/writable, committing it.
+  void* const res = mmap((void*)_end, ZMarkStackSpaceExpandSize,
+                         PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    log_error(gc, marking)("Failed to map memory for marking stacks (%s)", err.to_string());
+    return false;
+  }
+
+  return true;
+}
+
+// Lock-free bump-pointer allocation within the currently committed space.
+// Returns the start address of the allocated range, or 0 if there is not
+// enough committed space left (caller then expands via the slow path).
+uintptr_t ZMarkStackSpace::alloc_space(size_t size) {
+  uintptr_t top = _top;
+
+  for (;;) {
+    const uintptr_t new_top = top + size;
+    if (new_top > _end) {
+      // Not enough space left
+      return 0;
+    }
+
+    // CAS-bump _top; on failure another thread advanced it, so retry
+    // with the updated value.
+    const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, top);
+    if (prev_top == top) {
+      // Success
+      return top;
+    }
+
+    // Retry
+    top = prev_top;
+  }
+}
+
+// Slow path: serialize expansion under _expand_lock, retry the allocation
+// (another thread may have expanded while we waited for the lock), and
+// otherwise expand the committed space and carve the allocation out of it.
+// Exhausting ZMarkStacksMax is fatal — mark stack overflow is not handled.
+uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) {
+  ZLocker locker(&_expand_lock);
+
+  // Retry allocation before expanding
+  uintptr_t addr = alloc_space(size);
+  if (addr != 0) {
+    return addr;
+  }
+
+  // Expand stack space
+  if (!expand()) {
+    // We currently can't handle the situation where we
+    // are running out of mark stack space.
+    fatal("Mark stack overflow (allocated " SIZE_FORMAT "M, size " SIZE_FORMAT "M, max " SIZE_FORMAT "M),"
+          " use -XX:ZMarkStacksMax=? to increase this limit",
+          (_end - ZMarkStackSpaceStart) / M, size / M, ZMarkStacksMax / M);
+    return 0;
+  }
+
+  log_debug(gc, marking)("Expanding mark stack space: " SIZE_FORMAT "M->" SIZE_FORMAT "M",
+                         (_end - ZMarkStackSpaceStart) / M,
+                         (_end - ZMarkStackSpaceStart + ZMarkStackSpaceExpandSize) / M);
+
+  // Increment top before end to make sure another
+  // thread can't steal out newly expanded space.
+  addr = Atomic::add(size, &_top) - size;
+  _end += ZMarkStackSpaceExpandSize;
+
+  return addr;
+}
+
+// Allocate size bytes of mark stack space: lock-free fast path first,
+// falling back to the locking expand-and-allocate slow path.
+uintptr_t ZMarkStackSpace::alloc(size_t size) {
+  const uintptr_t addr = alloc_space(size);
+  if (addr != 0) {
+    return addr;
+  }
+
+  return expand_and_alloc_space(size);
+}
+
+// The magazine/stack carving scheme relies on both types fitting exactly
+// (or within) a ZMarkStackSize-sized chunk — guaranteed at construction.
+ZMarkStackAllocator::ZMarkStackAllocator() :
+    _freelist(),
+    _space() {
+  guarantee(sizeof(ZMarkStack) == ZMarkStackSize, "Size mismatch");
+  guarantee(sizeof(ZMarkStackMagazine) <= ZMarkStackSize, "Size mismatch");
+
+  // Prime free list to avoid an immediate space
+  // expansion when marking starts.
+  if (_space.is_initialized()) {
+    prime_freelist();
+  }
+}
+
+bool ZMarkStackAllocator::is_initialized() const {
+  return _space.is_initialized();
+}
+
+// Pre-allocate one expansion chunk's worth of magazines and push them on
+// the free list, so marking does not immediately hit the expansion path.
+void ZMarkStackAllocator::prime_freelist() {
+  for (size_t size = 0; size < ZMarkStackSpaceExpandSize; size += ZMarkStackMagazineSize) {
+    const uintptr_t addr = _space.alloc(ZMarkStackMagazineSize);
+    ZMarkStackMagazine* const magazine = create_magazine_from_space(addr, ZMarkStackMagazineSize);
+    free_magazine(magazine);
+  }
+}
+
+// Carve a raw chunk of mark stack space into one magazine plus the stacks
+// it holds. The first ZMarkStackSize-sized slot becomes the magazine itself
+// (placement new); every following slot becomes a stack pushed into it.
+ZMarkStackMagazine* ZMarkStackAllocator::create_magazine_from_space(uintptr_t addr, size_t size) {
+  assert(is_aligned(size, ZMarkStackSize), "Invalid size");
+
+  // Use first stack as magazine
+  ZMarkStackMagazine* const magazine = new ((void*)addr) ZMarkStackMagazine();
+  for (size_t i = ZMarkStackSize; i < size; i += ZMarkStackSize) {
+    ZMarkStack* const stack = new ((void*)(addr + i)) ZMarkStack();
+    const bool success = magazine->push(stack);
+    assert(success, "Magazine should never get full");
+  }
+
+  return magazine;
+}
+
+// Get a magazine of stacks: reuse one from the lock-free free list if
+// possible, otherwise carve a new one out of the mark stack space.
+// Returns NULL if the space is exhausted.
+ZMarkStackMagazine* ZMarkStackAllocator::alloc_magazine() {
+  // Try allocating from the free list first
+  ZMarkStackMagazine* const magazine = _freelist.pop_atomic();
+  if (magazine != NULL) {
+    return magazine;
+  }
+
+  // Allocate new magazine
+  const uintptr_t addr = _space.alloc(ZMarkStackMagazineSize);
+  if (addr == 0) {
+    return NULL;
+  }
+
+  return create_magazine_from_space(addr, ZMarkStackMagazineSize);
+}
+
+// Return a magazine to the lock-free free list for reuse.
+void ZMarkStackAllocator::free_magazine(ZMarkStackMagazine* magazine) {
+  _freelist.push_atomic(magazine);
+}
+
+ZMarkStripe::ZMarkStripe() :
+    _published(),
+    _overflowed() {}
+
+// Starts with zero stripes; set_nstripes() must be called before use.
+ZMarkStripeSet::ZMarkStripeSet() :
+    _nstripes(0),
+    _nstripes_mask(0),
+    _stripes() {}
+
+// Set the number of active stripes. Must be a power of two so that
+// _nstripes_mask can be used for stripe selection by masking.
+void ZMarkStripeSet::set_nstripes(size_t nstripes) {
+  assert(is_power_of_2(nstripes), "Must be a power of two");
+  assert(is_power_of_2(ZMarkStripesMax), "Must be a power of two");
+  assert(nstripes >= 1, "Invalid number of stripes");
+  assert(nstripes <= ZMarkStripesMax, "Invalid number of stripes");
+
+  _nstripes = nstripes;
+  _nstripes_mask = nstripes - 1;
+
+  log_debug(gc, marking)("Using " SIZE_FORMAT " mark stripes", _nstripes);
+}
+
+// The stripe set is empty when every active stripe has no published and
+// no overflowed stacks.
+bool ZMarkStripeSet::is_empty() const {
+  for (size_t i = 0; i < _nstripes; i++) {
+    if (!_stripes[i].is_empty()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Map a worker to a home stripe. Workers up to the largest multiple of
+// _nstripes are assigned round-robin ("natural" stripe); the remaining
+// "spillover" workers are spread evenly across all stripes so no stripe
+// gets disproportionally many workers.
+ZMarkStripe* ZMarkStripeSet::stripe_for_worker(uint nworkers, uint worker_id) {
+  const size_t spillover_limit = (nworkers / _nstripes) * _nstripes;
+  size_t index;
+
+  if (worker_id < spillover_limit) {
+    // Not a spillover worker, use natural stripe
+    index = worker_id & _nstripes_mask;
+  } else {
+    // Distribute spillover workers evenly across stripes
+    const size_t spillover_nworkers = nworkers - spillover_limit;
+    const size_t spillover_worker_id = worker_id - spillover_limit;
+    const double spillover_chunk = (double)_nstripes / (double)spillover_nworkers;
+    // Implicit double->size_t truncation selects the stripe bucket.
+    index = spillover_worker_id * spillover_chunk;
+  }
+
+  assert(index < _nstripes, "Invalid index");
+  return &_stripes[index];
+}
+
+// Per-thread marking state: one current stack per stripe, plus a magazine
+// of pre-allocated stacks to draw from.
+ZMarkThreadLocalStacks::ZMarkThreadLocalStacks() :
+    _magazine(NULL) {
+  for (size_t i = 0; i < ZMarkStripesMax; i++) {
+    _stacks[i] = NULL;
+  }
+}
+
+// Empty when no stripe has an installed (current) stack.
+bool ZMarkThreadLocalStacks::is_empty(const ZMarkStripeSet* stripes) const {
+  for (size_t i = 0; i < stripes->nstripes(); i++) {
+    ZMarkStack* const stack = _stacks[i];
+    if (stack != NULL) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Get a fresh stack, allocating a new magazine from the allocator if
+// needed. When the magazine runs dry, its memory is itself reused as the
+// final stack (sizeof(ZMarkStackMagazine) <= ZMarkStackSize). Returns
+// NULL only if the allocator is out of mark stack space.
+ZMarkStack* ZMarkThreadLocalStacks::allocate_stack(ZMarkStackAllocator* allocator) {
+  if (_magazine == NULL) {
+    // Allocate new magazine
+    _magazine = allocator->alloc_magazine();
+    if (_magazine == NULL) {
+      return NULL;
+    }
+  }
+
+  ZMarkStack* stack = NULL;
+
+  if (!_magazine->pop(stack)) {
+    // Magazine is empty, convert magazine into a new stack
+    _magazine->~ZMarkStackMagazine();
+    stack = new ((void*)_magazine) ZMarkStack();
+    _magazine = NULL;
+  }
+
+  return stack;
+}
+
+// Return an empty stack: if there is no magazine, the stack's memory is
+// converted into one (the inverse of allocate_stack's conversion);
+// otherwise it is pushed into the magazine. A full magazine is handed
+// back to the allocator and the loop retries.
+void ZMarkThreadLocalStacks::free_stack(ZMarkStackAllocator* allocator, ZMarkStack* stack) {
+  for (;;) {
+    if (_magazine == NULL) {
+      // Convert stack into a new magazine
+      stack->~ZMarkStack();
+      _magazine = new ((void*)stack) ZMarkStackMagazine();
+      return;
+    }
+
+    if (_magazine->push(stack)) {
+      // Success
+      return;
+    }
+
+    // Free and uninstall full magazine
+    allocator->free_magazine(_magazine);
+    _magazine = NULL;
+  }
+}
+
+// Slow path for push(): the current stack was missing or full. Publishes
+// the full stack to the stripe and installs a fresh one, retrying until
+// the entry is pushed. Returns false only on mark stack space exhaustion.
+bool ZMarkThreadLocalStacks::push_slow(ZMarkStackAllocator* allocator,
+                                       ZMarkStripe* stripe,
+                                       ZMarkStack** stackp,
+                                       ZMarkStackEntry entry,
+                                       bool publish) {
+  ZMarkStack* stack = *stackp;
+
+  for (;;) {
+    if (stack == NULL) {
+      // Allocate and install new stack
+      *stackp = stack = allocate_stack(allocator);
+      if (stack == NULL) {
+        // Out of mark stack memory
+        return false;
+      }
+    }
+
+    if (stack->push(entry)) {
+      // Success
+      return true;
+    }
+
+    // Publish/Overflow and uninstall stack
+    stripe->publish_stack(stack, publish);
+    *stackp = stack = NULL;
+  }
+}
+
+// Slow path for pop(): the current stack was missing or empty. Steals a
+// published/overflowed stack from the stripe and retries; empty stolen
+// stacks are recycled. Returns false when the stripe has nothing to steal.
+bool ZMarkThreadLocalStacks::pop_slow(ZMarkStackAllocator* allocator,
+                                      ZMarkStripe* stripe,
+                                      ZMarkStack** stackp,
+                                      ZMarkStackEntry& entry) {
+  ZMarkStack* stack = *stackp;
+
+  for (;;) {
+    if (stack == NULL) {
+      // Try steal and install stack
+      *stackp = stack = stripe->steal_stack();
+      if (stack == NULL) {
+        // Nothing to steal
+        return false;
+      }
+    }
+
+    if (stack->pop(entry)) {
+      // Success
+      return true;
+    }
+
+    // Free and uninstall stack
+    free_stack(allocator, stack);
+    *stackp = stack = NULL;
+  }
+}
+
+// Uninstall every thread-local stack: empty ones are recycled, non-empty
+// ones are published to their stripe. Returns true if any work was
+// published (i.e. other workers may have something new to steal).
+bool ZMarkThreadLocalStacks::flush(ZMarkStackAllocator* allocator, ZMarkStripeSet* stripes) {
+  bool flushed = false;
+
+  // Flush all stacks
+  for (size_t i = 0; i < stripes->nstripes(); i++) {
+    ZMarkStripe* const stripe = stripes->stripe_at(i);
+    ZMarkStack** const stackp = &_stacks[i];
+    ZMarkStack* const stack = *stackp;
+    if (stack == NULL) {
+      continue;
+    }
+
+    // Free/Publish and uninstall stack
+    if (stack->is_empty()) {
+      free_stack(allocator, stack);
+    } else {
+      stripe->publish_stack(stack);
+      flushed = true;
+    }
+    *stackp = NULL;
+  }
+
+  return flushed;
+}
+
+// Give back the thread-local magazine (if any) to the shared allocator.
+void ZMarkThreadLocalStacks::free(ZMarkStackAllocator* allocator) {
+  // Free and uninstall magazine
+  if (_magazine != NULL) {
+    allocator->free_magazine(_magazine);
+    _magazine = NULL;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMarkStack.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMARKSTACK_HPP
+#define SHARE_GC_Z_ZMARKSTACK_HPP
+
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLock.hpp"
+#include "gc/z/zMarkStackEntry.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Fixed-capacity LIFO stack of S slots of T, intrusively linkable via
+// _next so stacks can be chained into ZStackList free/publish lists.
+template <typename T, size_t S>
+class ZStack {
+private:
+  size_t        _top;      // Number of occupied slots; next push index
+  ZStack<T, S>* _next;     // Intrusive link used by ZStackList
+  T             _slots[S];
+
+  bool is_full() const;
+
+public:
+  ZStack();
+
+  bool is_empty() const;
+
+  // Both return false instead of failing hard: push when full,
+  // pop when empty.
+  bool push(T value);
+  bool pop(T& value);
+
+  ZStack<T, S>* next() const;
+  ZStack<T, S>** next_addr();
+};
+
+// Lock-free LIFO list of stacks. The head is stored as a "versioned
+// pointer" (compressed stack index plus a version counter) so that the
+// CAS in push/pop is not fooled by concurrent reuse of the same stack.
+template <typename T>
+class ZStackList {
+private:
+  T* volatile _head;  // Versioned pointer, see encode/decode below
+
+  T* encode_versioned_pointer(const T* stack, uint32_t version) const;
+  void decode_versioned_pointer(const T* vstack, T** stack, uint32_t* version) const;
+
+public:
+  ZStackList();
+
+  bool is_empty() const;
+
+  void push_atomic(T* stack);
+  T* pop_atomic();
+};
+
+// Concrete stack/list instantiations used by the marking code. A magazine
+// is itself a stack — one holding pointers to mark stacks.
+typedef ZStack<ZMarkStackEntry, ZMarkStackSlots>     ZMarkStack;
+typedef ZStackList<ZMarkStack>                       ZMarkStackList;
+typedef ZStack<ZMarkStack*, ZMarkStackMagazineSlots> ZMarkStackMagazine;
+typedef ZStackList<ZMarkStackMagazine>               ZMarkStackMagazineList;
+
+// Bump-pointer allocator over a reserved virtual address range that is
+// committed on demand (see zMarkStack.cpp). Allocation is lock-free;
+// expansion of the committed range is serialized by _expand_lock.
+class ZMarkStackSpace {
+private:
+  ZLock              _expand_lock;  // Serializes expand_and_alloc_space()
+  volatile uintptr_t _top;          // Current allocation pointer (0 => not initialized)
+  volatile uintptr_t _end;          // End of committed space
+
+  bool expand();
+
+  uintptr_t alloc_space(size_t size);
+  uintptr_t expand_and_alloc_space(size_t size);
+
+public:
+  ZMarkStackSpace();
+
+  bool is_initialized() const;
+
+  uintptr_t alloc(size_t size);
+};
+
+// Hands out magazines of mark stacks, backed by a ZMarkStackSpace and a
+// lock-free free list. Fields are cache-line aligned to avoid false
+// sharing between the free list and the space's allocation pointer.
+class ZMarkStackAllocator {
+private:
+  ZMarkStackMagazineList _freelist ATTRIBUTE_ALIGNED(ZCacheLineSize);
+  ZMarkStackSpace        _space    ATTRIBUTE_ALIGNED(ZCacheLineSize);
+
+  void prime_freelist();
+  ZMarkStackMagazine* create_magazine_from_space(uintptr_t addr, size_t size);
+
+public:
+  ZMarkStackAllocator();
+
+  bool is_initialized() const;
+
+  ZMarkStackMagazine* alloc_magazine();
+  void free_magazine(ZMarkStackMagazine* magazine);
+};
+
+// One marking stripe: two lock-free lists of stacks. Mutators publish to
+// _published, GC workers that overflow publish to _overflowed, which keeps
+// mutator/worker contention apart (see publish_stack in the inline file).
+class ZMarkStripe {
+private:
+  ZMarkStackList _published  ATTRIBUTE_ALIGNED(ZCacheLineSize);
+  ZMarkStackList _overflowed ATTRIBUTE_ALIGNED(ZCacheLineSize);
+
+public:
+  ZMarkStripe();
+
+  bool is_empty() const;
+
+  void publish_stack(ZMarkStack* stack, bool publish = true);
+  ZMarkStack* steal_stack();
+};
+
+// Fixed array of stripes of which the first _nstripes (a power of two)
+// are active. Stripe selection is done by masking with _nstripes_mask.
+class ZMarkStripeSet {
+private:
+  size_t      _nstripes;       // Number of active stripes
+  size_t      _nstripes_mask;  // _nstripes - 1, for masking
+  ZMarkStripe _stripes[ZMarkStripesMax];
+
+public:
+  ZMarkStripeSet();
+
+  size_t nstripes() const;
+  void set_nstripes(size_t nstripes);
+
+  bool is_empty() const;
+
+  size_t stripe_id(const ZMarkStripe* stripe) const;
+  ZMarkStripe* stripe_at(size_t index);
+  ZMarkStripe* stripe_next(ZMarkStripe* stripe);
+  ZMarkStripe* stripe_for_worker(uint nworkers, uint worker_id);
+  ZMarkStripe* stripe_for_addr(uintptr_t addr);
+};
+
+// Per-thread marking stacks: one current stack per stripe plus a magazine
+// to allocate fresh stacks from. push()/pop() have inline fast paths on
+// the current stack and *_slow() fallbacks that publish/steal/allocate.
+class ZMarkThreadLocalStacks {
+private:
+  ZMarkStackMagazine* _magazine;                 // Local supply of fresh stacks, may be NULL
+  ZMarkStack*         _stacks[ZMarkStripesMax];  // Current stack per stripe, NULL if none
+
+  ZMarkStack* allocate_stack(ZMarkStackAllocator* allocator);
+  void free_stack(ZMarkStackAllocator* allocator, ZMarkStack* stack);
+
+  bool push_slow(ZMarkStackAllocator* allocator,
+                 ZMarkStripe* stripe,
+                 ZMarkStack** stackp,
+                 ZMarkStackEntry entry,
+                 bool publish);
+
+  bool pop_slow(ZMarkStackAllocator* allocator,
+                ZMarkStripe* stripe,
+                ZMarkStack** stackp,
+                ZMarkStackEntry& entry);
+
+public:
+  ZMarkThreadLocalStacks();
+
+  bool is_empty(const ZMarkStripeSet* stripes) const;
+
+  // Install a stack as the current stack for a stripe (slot must be empty).
+  void install(ZMarkStripeSet* stripes,
+               ZMarkStripe* stripe,
+               ZMarkStack* stack);
+
+  // Returns false only on mark stack space exhaustion.
+  bool push(ZMarkStackAllocator* allocator,
+            ZMarkStripeSet* stripes,
+            ZMarkStripe* stripe,
+            ZMarkStackEntry entry,
+            bool publish);
+
+  // Returns false when neither the current stack nor the stripe has work.
+  bool pop(ZMarkStackAllocator* allocator,
+           ZMarkStripeSet* stripes,
+           ZMarkStripe* stripe,
+           ZMarkStackEntry& entry);
+
+  // Publish/recycle all current stacks; true if anything was published.
+  bool flush(ZMarkStackAllocator* allocator,
+             ZMarkStripeSet* stripes);
+
+  void free(ZMarkStackAllocator* allocator);
+};
+
+#endif // SHARE_GC_Z_ZMARKSTACK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMarkStack.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMARKSTACK_INLINE_HPP
+#define SHARE_GC_Z_ZMARKSTACK_INLINE_HPP
+
+#include "gc/z/zMarkStack.hpp"
+#include "utilities/debug.hpp"
+#include "runtime/atomic.hpp"
+
+template <typename T, size_t S>
+inline ZStack<T, S>::ZStack() :
+    _top(0),
+    _next(NULL) {}
+
+template <typename T, size_t S>
+inline bool ZStack<T, S>::is_empty() const {
+  return _top == 0;
+}
+
+template <typename T, size_t S>
+inline bool ZStack<T, S>::is_full() const {
+  return _top == S;
+}
+
+// Push a value; returns false (without modifying the stack) when full.
+template <typename T, size_t S>
+inline bool ZStack<T, S>::push(T value) {
+  if (is_full()) {
+    return false;
+  }
+
+  _slots[_top++] = value;
+  return true;
+}
+
+// Pop the most recently pushed value; returns false when empty.
+template <typename T, size_t S>
+inline bool ZStack<T, S>::pop(T& value) {
+  if (is_empty()) {
+    return false;
+  }
+
+  value = _slots[--_top];
+  return true;
+}
+
+template <typename T, size_t S>
+inline ZStack<T, S>* ZStack<T, S>::next() const {
+  return _next;
+}
+
+template <typename T, size_t S>
+inline ZStack<T, S>** ZStack<T, S>::next_addr() {
+  return &_next;
+}
+
+template <typename T>
+inline ZStackList<T>::ZStackList() :
+    _head(encode_versioned_pointer(NULL, 0)) {}
+
+// Pack a stack pointer and a version counter into a single pointer-sized
+// word: high 32 bits hold the stack's index within the mark stack space
+// (all stacks live in that range and are ZMarkStackSize-aligned), low
+// 32 bits hold the version. NULL is encoded as index (uint32_t)-1.
+template <typename T>
+inline T* ZStackList<T>::encode_versioned_pointer(const T* stack, uint32_t version) const {
+  uint64_t addr;
+
+  if (stack == NULL) {
+    addr = (uint32_t)-1;
+  } else {
+    addr = ((uint64_t)stack - ZMarkStackSpaceStart) >> ZMarkStackSizeShift;
+  }
+
+  return (T*)((addr << 32) | (uint64_t)version);
+}
+
+// Inverse of encode_versioned_pointer().
+template <typename T>
+inline void ZStackList<T>::decode_versioned_pointer(const T* vstack, T** stack, uint32_t* version) const {
+  const uint64_t addr = (uint64_t)vstack >> 32;
+
+  if (addr == (uint32_t)-1) {
+    *stack = NULL;
+  } else {
+    *stack = (T*)((addr << ZMarkStackSizeShift) + ZMarkStackSpaceStart);
+  }
+
+  *version = (uint32_t)(uint64_t)vstack;
+}
+
+template <typename T>
+inline bool ZStackList<T>::is_empty() const {
+  const T* vstack = _head;
+  T* stack = NULL;
+  uint32_t version = 0;
+
+  decode_versioned_pointer(vstack, &stack, &version);
+  return stack == NULL;
+}
+
+// Lock-free push. The version is bumped on every successful CAS, which
+// guards the head update against ABA when stacks are recycled.
+template <typename T>
+inline void ZStackList<T>::push_atomic(T* stack) {
+  T* vstack = _head;
+  uint32_t version = 0;
+
+  for (;;) {
+    decode_versioned_pointer(vstack, stack->next_addr(), &version);
+    T* const new_vstack = encode_versioned_pointer(stack, version + 1);
+    T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
+    if (prev_vstack == vstack) {
+      // Success
+      break;
+    }
+
+    // Retry
+    vstack = prev_vstack;
+  }
+}
+
+// Lock-free pop; returns NULL when the list is empty. Same versioned-CAS
+// scheme as push_atomic().
+template <typename T>
+inline T* ZStackList<T>::pop_atomic() {
+  T* vstack = _head;
+  T* stack = NULL;
+  uint32_t version = 0;
+
+  for (;;) {
+    decode_versioned_pointer(vstack, &stack, &version);
+    if (stack == NULL) {
+      return NULL;
+    }
+
+    T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1);
+    T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
+    if (prev_vstack == vstack) {
+      // Success
+      return stack;
+    }
+
+    // Retry
+    vstack = prev_vstack;
+  }
+}
+
+inline bool ZMarkStripe::is_empty() const {
+  return _published.is_empty() && _overflowed.is_empty();
+}
+
+inline void ZMarkStripe::publish_stack(ZMarkStack* stack, bool publish) {
+  // A stack is published either on the published list or the overflowed
+  // list. The published list is used by mutators publishing stacks for GC
+  // workers to work on, while the overflowed list is used by GC workers
+  // to publish stacks that overflowed. The intention here is to avoid
+  // contention between mutators and GC workers as much as possible, while
+  // still allowing GC workers to help out and steal work from each other.
+  if (publish) {
+    _published.push_atomic(stack);
+  } else {
+    _overflowed.push_atomic(stack);
+  }
+}
+
+// Take a stack off this stripe for processing, preferring GC-worker
+// overflow work over mutator-published work. Returns NULL if both lists
+// are empty.
+inline ZMarkStack* ZMarkStripe::steal_stack() {
+  // Steal overflowed stacks first, then published stacks
+  ZMarkStack* const stack = _overflowed.pop_atomic();
+  if (stack != NULL) {
+    return stack;
+  }
+
+  return _published.pop_atomic();
+}
+
+inline size_t ZMarkStripeSet::nstripes() const {
+  return _nstripes;
+}
+
+// Recover a stripe's index from its address within the _stripes array.
+inline size_t ZMarkStripeSet::stripe_id(const ZMarkStripe* stripe) const {
+  const size_t index = ((uintptr_t)stripe - (uintptr_t)_stripes) / sizeof(ZMarkStripe);
+  assert(index < _nstripes, "Invalid index");
+  return index;
+}
+
+inline ZMarkStripe* ZMarkStripeSet::stripe_at(size_t index) {
+  assert(index < _nstripes, "Invalid index");
+  return &_stripes[index];
+}
+
+// Next active stripe in round-robin order, wrapping via the mask.
+inline ZMarkStripe* ZMarkStripeSet::stripe_next(ZMarkStripe* stripe) {
+  const size_t index = (stripe_id(stripe) + 1) & _nstripes_mask;
+  assert(index < _nstripes, "Invalid index");
+  return &_stripes[index];
+}
+
+// Select the stripe responsible for an object address: address bits above
+// ZMarkStripeShift, masked down to the active stripe count.
+inline ZMarkStripe* ZMarkStripeSet::stripe_for_addr(uintptr_t addr) {
+  const size_t index = (addr >> ZMarkStripeShift) & _nstripes_mask;
+  assert(index < _nstripes, "Invalid index");
+  return &_stripes[index];
+}
+
+// Install a stack as the current stack for the given stripe. The slot
+// must be empty.
+inline void ZMarkThreadLocalStacks::install(ZMarkStripeSet* stripes,
+                                            ZMarkStripe* stripe,
+                                            ZMarkStack* stack) {
+  ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
+  assert(*stackp == NULL, "Should be empty");
+  *stackp = stack;
+}
+
+// Fast path: push onto the stripe's current stack; fall back to
+// push_slow() when there is no current stack or it is full.
+inline bool ZMarkThreadLocalStacks::push(ZMarkStackAllocator* allocator,
+                                         ZMarkStripeSet* stripes,
+                                         ZMarkStripe* stripe,
+                                         ZMarkStackEntry entry,
+                                         bool publish) {
+  ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
+  ZMarkStack* const stack = *stackp;
+  if (stack != NULL && stack->push(entry)) {
+    return true;
+  }
+
+  return push_slow(allocator, stripe, stackp, entry, publish);
+}
+
+// Fast path: pop from the stripe's current stack; fall back to pop_slow()
+// when there is no current stack or it is empty.
+inline bool ZMarkThreadLocalStacks::pop(ZMarkStackAllocator* allocator,
+                                        ZMarkStripeSet* stripes,
+                                        ZMarkStripe* stripe,
+                                        ZMarkStackEntry& entry) {
+  ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
+  ZMarkStack* const stack = *stackp;
+  if (stack != NULL && stack->pop(entry)) {
+    return true;
+  }
+
+  return pop_slow(allocator, stripe, stackp, entry);
+}
+
+#endif // SHARE_GC_Z_ZMARKSTACK_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMarkStackEntry.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMARKSTACKENTRY_HPP
+#define SHARE_GC_Z_ZMARKSTACKENTRY_HPP
+
+#include "gc/z/zBitField.hpp"
+#include "memory/allocation.hpp"
+
+//
+// Mark stack entry layout
+// -----------------------
+//
+//  Object entry
+//  ------------
+//
+//   6
+//   3                                                                   2 1 0
+//  +---------------------------------------------------------------------+-+-+
+//  |11111111 11111111 11111111 11111111 11111111 11111111 11111111 111111|1|1|
+//  +---------------------------------------------------------------------+-+-+
+//  |                                                                     | |
+//  |                                      1-1 Partial Array Flag (1-bit) * |
+//  |                                                                       |
+//  |                                                0-0 Final Flag (1-bit) *
+//  |
+//  * 63-2 Object Address (62-bits)
+//
+//
+//  Partial array entry
+//  -------------------
+//
+//   6                                 3  3
+//   3                                 2  1                               2 1 0
+//  +------------------------------------+---------------------------------+-+-+
+//  |11111111 11111111 11111111 11111111 |11111111 11111111 11111111 111111|1|1|
+//  +------------------------------------+---------------------------------+-+-+
+//  |                                    |                                 | |
+//  |                                    |  1-1 Partial Array Flag (1-bit) * |
+//  |                                    |                                   |
+//  |                                    |            0-0 Final Flag (1-bit) *
+//  |                                    |
+//  |                                    * 31-2 Partial Array Length (30-bits)
+//  |
+//  * 63-32 Partial Array Address Offset (32-bits)
+//
+
+// A single 64-bit mark stack entry, either an object entry or a partial
+// array entry (layout documented in the diagram above). The low two bits
+// (finalizable, partial-array flag) are common to both forms.
+class ZMarkStackEntry  {
+private:
+  typedef ZBitField<uint64_t, bool,      0,  1>  field_finalizable;
+  typedef ZBitField<uint64_t, bool,      1,  1>  field_partial_array;
+  typedef ZBitField<uint64_t, uintptr_t, 2,  62> field_object_address;
+  typedef ZBitField<uint64_t, size_t,    2,  30> field_partial_array_length;
+  typedef ZBitField<uint64_t, size_t,    32, 32> field_partial_array_offset;
+
+  uint64_t _entry;
+
+public:
+  ZMarkStackEntry() {
+    // This constructor is intentionally left empty and does not initialize
+    // _entry to allow it to be optimized out when instantiating ZMarkStack,
+    // which has a long array of ZMarkStackEntry elements, but doesn't care
+    // what _entry is initialized to.
+  }
+
+  // Object entry: address of the object plus the finalizable flag.
+  ZMarkStackEntry(uintptr_t object_address, bool finalizable) :
+      _entry(field_object_address::encode(object_address) |
+             field_partial_array::encode(false) |
+             field_finalizable::encode(finalizable)) {}
+
+  // Partial array entry: offset/length of the array chunk still to be
+  // scanned, plus the finalizable flag.
+  ZMarkStackEntry(size_t partial_array_offset, size_t partial_array_length, bool finalizable) :
+      _entry(field_partial_array_offset::encode(partial_array_offset) |
+             field_partial_array_length::encode(partial_array_length) |
+             field_partial_array::encode(true) |
+             field_finalizable::encode(finalizable)) {}
+
+  bool finalizable() const {
+    return field_finalizable::decode(_entry);
+  }
+
+  // Discriminator: true => use the partial_array_* accessors below,
+  // false => use object_address().
+  bool partial_array() const {
+    return field_partial_array::decode(_entry);
+  }
+
+  size_t partial_array_offset() const {
+    return field_partial_array_offset::decode(_entry);
+  }
+
+  size_t partial_array_length() const {
+    return field_partial_array_length::decode(_entry);
+  }
+
+  uintptr_t object_address() const {
+    return field_object_address::decode(_entry);
+  }
+};
+
+#endif // SHARE_GC_Z_ZMARKSTACKENTRY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMarkTerminate.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMARKTERMINATE_HPP
+#define SHARE_GC_Z_ZMARKTERMINATE_HPP
+
+#include "gc/z/zGlobals.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Two-stage termination protocol for the marking worker threads. Each
+// stage tracks how many workers are still working; a worker that runs out
+// of work "enters" a stage (decrementing the count) and the one that
+// drives the count to zero detects termination. See the inline file for
+// the atomic implementation.
+class ZMarkTerminate {
+private:
+  uint          _nworkers;
+  // Cache-line aligned to keep this frequently-updated counter away from
+  // the surrounding fields
+  volatile uint _nworking_stage0 ATTRIBUTE_ALIGNED(ZCacheLineSize);
+  volatile uint _nworking_stage1;
+
+  bool enter_stage(volatile uint* nworking_stage);
+  void exit_stage(volatile uint* nworking_stage);
+  bool try_exit_stage(volatile uint* nworking_stage);
+
+public:
+  ZMarkTerminate();
+
+  // Prepare for a new round with nworkers active in both stages
+  void reset(uint nworkers);
+
+  // enter_stage* returns true when the caller was the last worker still
+  // working (the stage's count reached zero)
+  bool enter_stage0();
+  void exit_stage0();
+  bool try_exit_stage0();
+
+  bool enter_stage1();
+  bool try_exit_stage1();
+};
+
+#endif // SHARE_GC_Z_ZMARKTERMINATE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMARKTERMINATE_INLINE_HPP
+#define SHARE_GC_Z_ZMARKTERMINATE_INLINE_HPP
+
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+
+inline ZMarkTerminate::ZMarkTerminate() :
+    _nworkers(0),
+    _nworking_stage0(0),
+    _nworking_stage1(0) {}
+
+// Decrement the stage's working-thread count. Returns true when the count
+// reached zero, i.e. the caller was the last thread still working.
+inline bool ZMarkTerminate::enter_stage(volatile uint* nworking_stage) {
+  return Atomic::sub(1u, nworking_stage) == 0;
+}
+
+// Unconditionally re-join the stage (the caller found more work to do)
+inline void ZMarkTerminate::exit_stage(volatile uint* nworking_stage) {
+  Atomic::add(1u, nworking_stage);
+}
+
+// Try to re-join the stage. Fails (returns false) if the working-thread
+// count has already reached zero, in which case termination has been
+// detected and the stage must not be resurrected.
+inline bool ZMarkTerminate::try_exit_stage(volatile uint* nworking_stage) {
+  uint nworking = Atomic::load(nworking_stage);
+
+  for (;;) {
+    if (nworking == 0) {
+      return false;
+    }
+
+    const uint new_nworking = nworking + 1;
+    const uint prev_nworking = Atomic::cmpxchg(new_nworking, nworking_stage, nworking);
+    if (prev_nworking == nworking) {
+      // Success
+      return true;
+    }
+
+    // Retry, using the value observed by the failed CAS
+    nworking = prev_nworking;
+  }
+}
+
+// Start a new round: all nworkers count as working in both stages.
+// NOTE(review): chained assignment through the volatile counters — relies
+// on reset() being called while no workers are running; confirm callers.
+inline void ZMarkTerminate::reset(uint nworkers) {
+  _nworkers = _nworking_stage0 = _nworking_stage1 = nworkers;
+}
+
+inline bool ZMarkTerminate::enter_stage0() {
+  return enter_stage(&_nworking_stage0);
+}
+
+inline void ZMarkTerminate::exit_stage0() {
+  exit_stage(&_nworking_stage0);
+}
+
+inline bool ZMarkTerminate::try_exit_stage0() {
+  return try_exit_stage(&_nworking_stage0);
+}
+
+inline bool ZMarkTerminate::enter_stage1() {
+  return enter_stage(&_nworking_stage1);
+}
+
+inline bool ZMarkTerminate::try_exit_stage1() {
+  return try_exit_stage(&_nworking_stage1);
+}
+
+#endif // SHARE_GC_Z_ZMARKTERMINATE_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMemory.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zList.inline.hpp"
+#include "gc/z/zMemory.inline.hpp"
+#include "memory/allocation.inline.hpp"
+
+// Allocate 'size' bytes from the lowest-addressed free area that fits
+// (first fit over the address-ordered freelist). Returns the start
+// address of the allocated range, or UINTPTR_MAX if no area is large
+// enough.
+uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
+  ZListIterator<ZMemory> iter(&_freelist);
+  for (ZMemory* area; iter.next(&area);) {
+    if (area->size() >= size) {
+      if (area->size() == size) {
+        // Exact match, remove area
+        const uintptr_t start = area->start();
+        _freelist.remove(area);
+        delete area;
+        return start;
+      } else {
+        // Larger than requested, shrink area from its low end
+        const uintptr_t start = area->start();
+        area->shrink_from_front(size);
+        return start;
+      }
+    }
+  }
+
+  // Out of memory
+  return UINTPTR_MAX;
+}
+
+// Allocate 'size' bytes from the highest-addressed free area that fits
+// (first fit, scanning the freelist in reverse). Returns the start of the
+// allocated range, or UINTPTR_MAX if no area is large enough.
+uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
+  ZListReverseIterator<ZMemory> iter(&_freelist);
+  for (ZMemory* area; iter.next(&area);) {
+    if (area->size() >= size) {
+      if (area->size() == size) {
+        // Exact match, remove area
+        const uintptr_t start = area->start();
+        _freelist.remove(area);
+        delete area;
+        return start;
+      } else {
+        // Larger than requested, shrink area from its high end. After the
+        // shrink, area->end() is the start of the carved-out range.
+        area->shrink_from_back(size);
+        return area->end();
+      }
+    }
+  }
+
+  // Out of memory
+  return UINTPTR_MAX;
+}
+
+// Return the range [start, start + size) to the freelist, keeping the
+// list sorted by address and merging with adjacent free areas where
+// possible.
+void ZMemoryManager::free(uintptr_t start, size_t size) {
+  assert(start != UINTPTR_MAX, "Invalid address");
+  const uintptr_t end = start + size;
+
+  // Find the first area with a higher start address; the freed range is
+  // inserted (or merged) just before it.
+  ZListIterator<ZMemory> iter(&_freelist);
+  for (ZMemory* area; iter.next(&area);) {
+    if (start < area->start()) {
+      ZMemory* const prev = _freelist.prev(area);
+      if (prev != NULL && start == prev->end()) {
+        if (end == area->start()) {
+          // Merge with prev and current area
+          prev->grow_from_back(size + area->size());
+          _freelist.remove(area);
+          delete area;
+        } else {
+          // Merge with prev area
+          prev->grow_from_back(size);
+        }
+      } else if (end == area->start()) {
+        // Merge with current area
+        area->grow_from_front(size);
+      } else {
+        // Insert new area before current area
+        assert(end < area->start(), "Areas must not overlap");
+        ZMemory* new_area = new ZMemory(start, size);
+        _freelist.insert_before(area, new_area);
+      }
+
+      // Done
+      return;
+    }
+  }
+
+  // Freed range lies beyond all existing areas; insert last
+  ZMemory* const last = _freelist.last();
+  if (last != NULL && start == last->end()) {
+    // Merge with last area
+    last->grow_from_back(size);
+  } else {
+    // Insert new area last
+    ZMemory* new_area = new ZMemory(start, size);
+    _freelist.insert_last(new_area);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMemory.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMEMORY_HPP
+#define SHARE_GC_Z_ZMEMORY_HPP
+
+#include "gc/z/zList.hpp"
+#include "memory/allocation.hpp"
+
+// A contiguous range of memory [start, end), kept as a node in a
+// ZMemoryManager freelist.
+class ZMemory : public CHeapObj<mtGC> {
+  friend class ZList<ZMemory>;
+
+private:
+  uintptr_t          _start;  // Inclusive lower bound
+  uintptr_t          _end;    // Exclusive upper bound
+  ZListNode<ZMemory> _node;
+
+public:
+  ZMemory(uintptr_t start, size_t size);
+
+  uintptr_t start() const;
+  uintptr_t end() const;
+  size_t size() const;
+
+  // Adjust the range in place; see the inline file for the asserts
+  void shrink_from_front(size_t size);
+  void shrink_from_back(size_t size);
+  void grow_from_front(size_t size);
+  void grow_from_back(size_t size);
+};
+
+// Manages an address-ordered freelist of ZMemory areas, supporting
+// allocation from either end of the managed range.
+// NOTE(review): no internal locking visible here — presumably callers
+// serialize access; confirm.
+class ZMemoryManager {
+private:
+  ZList<ZMemory> _freelist;
+
+public:
+  // Both return UINTPTR_MAX when no free area is large enough
+  uintptr_t alloc_from_front(size_t size);
+  uintptr_t alloc_from_back(size_t size);
+  void free(uintptr_t start, size_t size);
+};
+
+#endif // SHARE_GC_Z_ZMEMORY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMemory.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMEMORY_INLINE_HPP
+#define SHARE_GC_Z_ZMEMORY_INLINE_HPP
+
+#include "gc/z/zMemory.hpp"
+#include "utilities/debug.hpp"
+
+inline ZMemory::ZMemory(uintptr_t start, size_t size) :
+    _start(start),
+    _end(start + size) {}
+
+inline uintptr_t ZMemory::start() const {
+  return _start;
+}
+
+inline uintptr_t ZMemory::end() const {
+  return _end;
+}
+
+inline size_t ZMemory::size() const {
+  return end() - start();
+}
+
+// Strictly greater: shrinking to zero is not allowed — an exact-size
+// allocation removes the whole area instead (see ZMemoryManager).
+inline void ZMemory::shrink_from_front(size_t size) {
+  assert(this->size() > size, "Too small");
+  _start += size;
+}
+
+inline void ZMemory::shrink_from_back(size_t size) {
+  assert(this->size() > size, "Too small");
+  _end -= size;
+}
+
+inline void ZMemory::grow_from_front(size_t size) {
+  assert(start() >= size, "Too big");  // Would underflow below zero
+  _start -= size;
+}
+
+// NOTE(review): no overflow assert on _end + size, unlike
+// grow_from_front — presumably guaranteed by the caller; confirm.
+inline void ZMemory::grow_from_back(size_t size) {
+  _end += size;
+}
+
+#endif // SHARE_GC_Z_ZMEMORY_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMessagePort.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMESSAGEPORT_HPP
+#define SHARE_GC_Z_ZMESSAGEPORT_HPP
+
+#include "gc/z/zFuture.hpp"
+#include "gc/z/zList.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/mutex.hpp"
+
+template <typename T> class ZMessageRequest;
+
+// A port for passing messages of type T to a receiver thread. Synchronous
+// sends block until the receiver acknowledges the message; asynchronous
+// sends post the message and return immediately. _seqnum is used to match
+// queued synchronous requests against the receive that consumed them.
+template <typename T>
+class ZMessagePort {
+private:
+  typedef ZMessageRequest<T> Request;
+
+  Monitor        _monitor;      // Protects all fields below
+  bool           _has_message;  // True when _message holds a posted message
+  T              _message;
+  uint64_t       _seqnum;
+  ZList<Request> _queue;        // Pending synchronous requests
+
+public:
+  ZMessagePort();
+
+  void send_sync(T message);
+  void send_async(T message);
+
+  T receive();
+  void ack();
+};
+
+// A rendezvous built on a ZMessagePort: signal() blocks until the waiting
+// side has wait()ed and ack()ed. The bool message payload is ignored.
+class ZRendezvousPort {
+private:
+  ZMessagePort<bool> _port;
+
+public:
+  void signal();
+  void wait();
+  void ack();
+};
+
+#endif // SHARE_GC_Z_ZMESSAGEPORT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMessagePort.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMESSAGEPORT_INLINE_HPP
+#define SHARE_GC_Z_ZMESSAGEPORT_INLINE_HPP
+
+#include "gc/z/zMessagePort.hpp"
+#include "gc/z/zFuture.inline.hpp"
+#include "gc/z/zList.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+
+// A synchronous send request, allocated on the sender's stack (StackObj)
+// and linked into the port's queue. The sender blocks in wait() until the
+// receiver satisfies the embedded future.
+template <typename T>
+class ZMessageRequest : public StackObj {
+  friend class ZList<ZMessageRequest>;
+
+private:
+  T                          _message;
+  uint64_t                   _seqnum;  // Port seqnum at enqueue time
+  ZFuture<T>                 _result;
+  ZListNode<ZMessageRequest> _node;
+
+public:
+  void initialize(T message, uint64_t seqnum) {
+    _message = message;
+    _seqnum = seqnum;
+  }
+
+  T message() const {
+    return _message;
+  }
+
+  uint64_t seqnum() const {
+    return _seqnum;
+  }
+
+  // Block until the request has been satisfied
+  void wait() {
+    const T message = _result.get();
+    assert(message == _message, "Message mismatch");
+  }
+
+  void satisfy(T message) {
+    _result.set(message);
+  }
+};
+
+// Note: _message is deliberately left default-constructed; it is only
+// read after _has_message has been set or a queued request assigned it.
+template <typename T>
+inline ZMessagePort<T>::ZMessagePort() :
+    _monitor(Monitor::leaf,
+             "ZMessagePort",
+             Monitor::_allow_vm_block_flag,
+             Monitor::_safepoint_check_never),
+    _has_message(false),
+    _seqnum(0),
+    _queue() {}
+
+// Send a message and block until the receiver has processed and ack'ed
+// it. The request lives on this thread's stack while queued.
+template <typename T>
+inline void ZMessagePort<T>::send_sync(T message) {
+  Request request;
+
+  {
+    // Enqueue message
+    MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
+    request.initialize(message, _seqnum);
+    _queue.insert_last(&request);
+    ml.notify();
+  }
+
+  // Wait for completion (blocks on the request's future)
+  request.wait();
+}
+
+// Post a message without waiting. If a message is already pending the new
+// one is dropped, i.e. asynchronous messages coalesce.
+// NOTE(review): assumes all async messages sent to a given port are
+// equivalent — confirm with callers.
+template <typename T>
+inline void ZMessagePort<T>::send_async(T message) {
+  MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
+  if (!_has_message) {
+    // Post message
+    _message = message;
+    _has_message = true;
+    ml.notify();
+  }
+}
+
+// Block until a message is available (either posted asynchronously or as
+// a queued synchronous request) and return it. Bumps _seqnum so that a
+// later ack() can tell which queued requests were enqueued before this
+// receive.
+template <typename T>
+inline T ZMessagePort<T>::receive() {
+  MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
+
+  // Wait for message
+  while (!_has_message && _queue.is_empty()) {
+    ml.wait(Monitor::_no_safepoint_check_flag);
+  }
+
+  // Increment request sequence number
+  _seqnum++;
+
+  if (!_has_message) {
+    // Message available in the queue
+    _message = _queue.first()->message();
+    _has_message = true;
+  }
+
+  return _message;
+}
+
+// Acknowledge the current message: wake every queued sender whose request
+// matches it and was enqueued before the corresponding receive
+// (seqnum < _seqnum), then post the next queued message, if any.
+template <typename T>
+inline void ZMessagePort<T>::ack() {
+  MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
+
+  if (!_has_message) {
+    // Nothing to ack
+    return;
+  }
+
+  // Satisfy requests (and duplicates) in queue
+  ZListIterator<Request> iter(&_queue);
+  for (Request* request; iter.next(&request);) {
+    if (request->message() == _message && request->seqnum() < _seqnum) {
+      // Dequeue and satisfy request. Note that the dequeue operation must
+      // happen first, since the request will immediately be deallocated
+      // once it has been satisfied (it lives on the sender's stack).
+      _queue.remove(request);
+      request->satisfy(_message);
+    }
+  }
+
+  if (_queue.is_empty()) {
+    // Queue is empty
+    _has_message = false;
+  } else {
+    // Post first message in queue
+    _message = _queue.first()->message();
+  }
+}
+
+// Blocks until the other side has wait()ed and ack()ed
+inline void ZRendezvousPort::signal() {
+  _port.send_sync(true /* ignored */);
+}
+
+inline void ZRendezvousPort::wait() {
+  _port.receive();
+}
+
+inline void ZRendezvousPort::ack() {
+  _port.ack();
+}
+
+#endif // SHARE_GC_Z_ZMESSAGEPORT_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMetronome.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zMetronome.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/timer.hpp"
+#include "utilities/ticks.hpp"
+
+// hz is the tick rate; the interval is truncated to whole milliseconds.
+// NOTE(review): hz == 0 would divide by zero here — presumably callers
+// always pass a positive rate; confirm.
+ZMetronome::ZMetronome(uint64_t hz) :
+    _monitor(Monitor::leaf, "ZMetronome", false, Monitor::_safepoint_check_never),
+    _interval_ms(MILLIUNITS / hz),
+    _start_ms(0),
+    _nticks(0),
+    _stopped(false) {}
+
+// Number of ticks elapsed so far.
+// NOTE(review): read without holding _monitor — approximate if called
+// concurrently with wait_for_tick(); confirm intended usage.
+uint64_t ZMetronome::nticks() const {
+  return _nticks;
+}
+
+// Sleep until the next tick is due. The deadline is computed from the
+// absolute start time (_start_ms + interval * tick count), so a late
+// wakeup does not accumulate drift. Returns false once stop() has been
+// called.
+bool ZMetronome::wait_for_tick() {
+  if (_nticks++ == 0) {
+    // First tick, set start time
+    const Ticks now = Ticks::now();
+    _start_ms = TimeHelper::counter_to_millis(now.value());
+  }
+
+  for (;;) {
+    // We might wake up spuriously from wait, so always recalculate
+    // the timeout after a wakeup to see if we need to wait again.
+    const Ticks now = Ticks::now();
+    const uint64_t now_ms = TimeHelper::counter_to_millis(now.value());
+    const uint64_t next_ms = _start_ms + (_interval_ms * _nticks);
+    const int64_t timeout_ms = next_ms - now_ms;
+
+    MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
+    if (!_stopped && timeout_ms > 0) {
+      // Wait
+      ml.wait(Monitor::_no_safepoint_check_flag, timeout_ms);
+    } else {
+      // Tick
+      return !_stopped;
+    }
+  }
+}
+
+// Stop the metronome and wake any thread blocked in wait_for_tick()
+void ZMetronome::stop() {
+  MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
+  _stopped = true;
+  ml.notify();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zMetronome.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZMETRONOME_HPP
+#define SHARE_GC_Z_ZMETRONOME_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/mutex.hpp"
+
+// A fixed-rate timer: wait_for_tick() blocks until the next tick is due,
+// using an absolute schedule so ticks do not drift.
+class ZMetronome : public StackObj {
+private:
+  Monitor        _monitor;
+  const uint64_t _interval_ms;  // Milliseconds between ticks
+  uint64_t       _start_ms;     // Time of the first tick
+  uint64_t       _nticks;       // Ticks elapsed so far
+  bool           _stopped;
+
+public:
+  ZMetronome(uint64_t hz);
+
+  uint64_t nticks() const;
+  bool wait_for_tick();
+  void stop();
+};
+
+#endif // SHARE_GC_Z_ZMETRONOME_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethodTable.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "code/relocInfo.hpp"
+#include "code/nativeInst.hpp"
+#include "code/nmethod.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHash.inline.hpp"
+#include "gc/z/zNMethodTable.hpp"
+#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/debug.hpp"
+
+// An nmethod together with the locations of its immediate oops (oop
+// slots embedded directly in the code). The oop* array is stored in the
+// same C-heap allocation, immediately after this header (see create()).
+class ZNMethodWithImmediateOops {
+private:
+  nmethod* const _nm;
+  const size_t   _nimmediate_oops;  // Length of the trailing oop* array
+
+  static size_t header_size();
+
+  ZNMethodWithImmediateOops(nmethod* nm, const GrowableArray<oop*>& immediate_oops);
+
+public:
+  // Instances are created/destroyed via these factories, never directly,
+  // because of the trailing variable-length array
+  static ZNMethodWithImmediateOops* create(nmethod* nm, const GrowableArray<oop*>& immediate_oops);
+  static void destroy(ZNMethodWithImmediateOops* nmi);
+
+  nmethod* method() const;
+  size_t immediate_oops_count() const;
+  oop** immediate_oops_begin() const;
+  oop** immediate_oops_begin_safe() const;
+  oop** immediate_oops_end() const;
+};
+
+// Size of the fixed header that the trailing oop* array follows; must be
+// oop*-aligned so the array elements are properly aligned.
+size_t ZNMethodWithImmediateOops::header_size() {
+  const size_t size = sizeof(ZNMethodWithImmediateOops);
+  assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
+  return size;
+}
+
+ZNMethodWithImmediateOops::ZNMethodWithImmediateOops(nmethod* nm, const GrowableArray<oop*>& immediate_oops) :
+    _nm(nm),
+    _nimmediate_oops(immediate_oops.length()) {
+  // Save all immediate oops into the trailing array
+  for (size_t i = 0; i < _nimmediate_oops; i++) {
+    immediate_oops_begin()[i] = immediate_oops.at(i);
+  }
+}
+
+ZNMethodWithImmediateOops* ZNMethodWithImmediateOops::create(nmethod* nm, const GrowableArray<oop*>& immediate_oops) {
+  // Allocate memory for the ZNMethodWithImmediateOops object
+  // plus the immediate oop* array that follows right after.
+  const size_t size = header_size() + (sizeof(oop*) * immediate_oops.length());
+  void* const method_with_immediate_oops = NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
+  return ::new (method_with_immediate_oops) ZNMethodWithImmediateOops(nm, immediate_oops);
+}
+
+// No destructor is invoked; the type holds only trivially-destructible
+// members, so freeing the raw allocation is sufficient.
+void ZNMethodWithImmediateOops::destroy(ZNMethodWithImmediateOops* nmi) {
+  FREE_C_HEAP_ARRAY(uint8_t, nmi);
+}
+
+nmethod* ZNMethodWithImmediateOops::method() const {
+  return _nm;
+}
+
+size_t ZNMethodWithImmediateOops::immediate_oops_count() const {
+  return _nimmediate_oops;
+}
+
+oop** ZNMethodWithImmediateOops::immediate_oops_begin() const {
+  // The immediate oop* array starts immediately after this object
+  return (oop**)((uintptr_t)this + header_size());
+}
+
+// Like immediate_oops_begin(), but safe to iterate from even for
+// non-entrant nmethods whose entry point has been patched over.
+oop** ZNMethodWithImmediateOops::immediate_oops_begin_safe() const {
+  // Non-entrant nmethods have a jump instruction patched into the beginning
+  // of the verified entry point, which could have overwritten an immediate
+  // oop. If so, make sure we skip over that oop.
+  if (_nm->is_not_entrant()) {
+    oop* const first_immediate_oop = *immediate_oops_begin();
+    oop* const safe_begin = (oop*)(_nm->verified_entry_point() + NativeJump::instruction_size);
+    if (first_immediate_oop < safe_begin) {
+      // First immediate oop overwritten, skip it
+      return immediate_oops_begin() + 1;
+    }
+  }
+
+  // First immediate oop not overwritten
+  return immediate_oops_begin();
+}
+
+
+oop** ZNMethodWithImmediateOops::immediate_oops_end() const {
+  return immediate_oops_begin() + immediate_oops_count();
+}
+
+// Global nmethod table state: open-addressing hash table plus occupancy
+// counters; _claimed is the parallel-iteration cursor.
+ZNMethodTableEntry* ZNMethodTable::_table = NULL;
+size_t ZNMethodTable::_size = 0;
+size_t ZNMethodTable::_nregistered = 0;
+size_t ZNMethodTable::_nunregistered = 0;
+volatile size_t ZNMethodTable::_claimed = 0;
+
+// Scan the nmethod's oop relocations and build a table entry recording
+// whether it has non-immediate oops (in the oop table) and/or immediate
+// oops (embedded in the code, whose addresses must be saved).
+ZNMethodTableEntry ZNMethodTable::create_entry(nmethod* nm) {
+  GrowableArray<oop*> immediate_oops;
+  bool non_immediate_oops = false;
+
+  // Find all oops relocations
+  RelocIterator iter(nm);
+  while (iter.next()) {
+    if (iter.type() != relocInfo::oop_type) {
+      // Not an oop
+      continue;
+    }
+
+    oop_Relocation* r = iter.oop_reloc();
+
+    if (!r->oop_is_immediate()) {
+      // Non-immediate oop found
+      non_immediate_oops = true;
+      continue;
+    }
+
+    if (r->oop_value() != NULL) {
+      // Non-NULL immediate oop found. NULL oops can safely be
+      // ignored since the method will be re-registered if they
+      // are later patched to be non-NULL.
+      immediate_oops.push(r->oop_addr());
+    }
+  }
+
+  // oops_count() returns the number of oops in the oop table plus one
+  if (immediate_oops.is_empty() && nm->oops_count() == 1) {
+    // No oops found, return empty entry
+    return ZNMethodTableEntry();
+  }
+
+  if (immediate_oops.is_empty()) {
+    // No immediate oops found, return entry without immediate oops
+    return ZNMethodTableEntry(nm, non_immediate_oops);
+  }
+
+  // Return entry with immediate oops saved in a side structure
+  return ZNMethodTableEntry(ZNMethodWithImmediateOops::create(nm, immediate_oops), non_immediate_oops);
+}
+
+// Free the entry's side structure, if it has one
+void ZNMethodTable::destroy_entry(ZNMethodTableEntry entry) {
+  if (entry.immediate_oops()) {
+    ZNMethodWithImmediateOops::destroy(entry.method_with_immediate_oops());
+  }
+}
+
+// The nmethod an entry refers to, regardless of entry representation
+nmethod* ZNMethodTable::method(ZNMethodTableEntry entry) {
+  return entry.immediate_oops() ? entry.method_with_immediate_oops()->method() : entry.method();
+}
+
+// Initial probe index for an nmethod: 32-bit hash of its address, masked
+// to the (power-of-two) table size
+size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
+  assert(is_power_of_2(size), "Invalid size");
+  const size_t mask = size - 1;
+  const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
+  return hash & mask;
+}
+
+// Next index in the linear probe sequence, wrapping at the table size
+size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
+  assert(is_power_of_2(size), "Invalid size");
+  const size_t mask = size - 1;
+  return (prev_index + 1) & mask;
+}
+
+// Insert or replace an entry using linear probing. Returns true if a new
+// entry was inserted, false if an existing entry for the same nmethod was
+// replaced (the old entry's side structure is destroyed).
+bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry) {
+  const nmethod* const nm = method(entry);
+  size_t index = first_index(nm, size);
+
+  for (;;) {
+    const ZNMethodTableEntry table_entry = table[index];
+
+    if (!table_entry.registered() && !table_entry.unregistered()) {
+      // Empty slot (not even a tombstone), insert new entry
+      table[index] = entry;
+      return true;
+    }
+
+    if (table_entry.registered() && method(table_entry) == nm) {
+      // Replace existing entry
+      destroy_entry(table_entry);
+      table[index] = entry;
+      return false;
+    }
+
+    // Slot occupied or tombstoned, keep probing
+    index = next_index(index, size);
+  }
+}
+
+// Remove the entry for nm, leaving an "unregistered" tombstone so that
+// probe chains for other entries remain unbroken. Returns true if the
+// entry was found and removed.
+bool ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, const nmethod* nm) {
+  if (size == 0) {
+    // Table is empty
+    return false;
+  }
+
+  size_t index = first_index(nm, size);
+
+  for (;;) {
+    const ZNMethodTableEntry table_entry = table[index];
+
+    if (!table_entry.registered() && !table_entry.unregistered()) {
+      // Reached an empty (never used) slot, entry not found
+      return false;
+    }
+
+    if (table_entry.registered() && method(table_entry) == nm) {
+      // Remove entry, leaving a tombstone in its place
+      destroy_entry(table_entry);
+      table[index] = ZNMethodTableEntry(true /* unregistered */);
+      return true;
+    }
+
+    index = next_index(index, size);
+  }
+}
+
// Replace the table with a freshly allocated one of new_size slots,
// re-inserting all registered entries. Tombstones are not transferred,
// so rebuilding with new_size == _size acts as pruning.
void ZNMethodTable::rebuild(size_t new_size) {
  assert(is_power_of_2(new_size), "Invalid size");

  log_debug(gc, nmethod)("Rebuilding NMethod Table: "
                         SIZE_FORMAT "->" SIZE_FORMAT " entries, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
                         _size, new_size,
                         _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
                         _nunregistered, percent_of(_nunregistered, _size), 0.0);

  // Allocate new table
  ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];

  // Transfer all registered entries
  for (size_t i = 0; i < _size; i++) {
    const ZNMethodTableEntry entry = _table[i];
    if (entry.registered()) {
      register_entry(new_table, new_size, entry);
    }
  }

  // Delete old table
  delete [] _table;

  // Install new table
  _table = new_table;
  _size = new_size;
  _nunregistered = 0;
}
+
// Grow, shrink, or prune the table to keep occupancy in the 30%-70% band.
// Called before each insertion (see register_nmethod).
void ZNMethodTable::rebuild_if_needed() {
  // The hash table uses linear probing. To avoid wasting memory while
  // at the same time maintaining good hash collision behavior we want
  // to keep the table occupancy between 30% and 70%. The table always
  // grows/shrinks by doubling/halving its size. Pruning of unregistered
  // entries is done by rebuilding the table with or without resizing it.
  const size_t min_size = 1024;
  const size_t shrink_threshold = _size * 0.30;
  const size_t prune_threshold = _size * 0.65;
  const size_t grow_threshold = _size * 0.70;

  if (_size == 0) {
    // Initialize table
    rebuild(min_size);
  } else if (_nregistered < shrink_threshold && _size > min_size) {
    // Shrink table
    rebuild(_size / 2);
  } else if (_nregistered + _nunregistered > grow_threshold) {
    // Prune or grow table
    if (_nregistered < prune_threshold) {
      // Prune table (same size, tombstones dropped by rebuild)
      rebuild(_size);
    } else {
      // Grow table
      rebuild(_size * 2);
    }
  }
}
+
// Trace-log a registration: a summary line, and (at the more verbose
// gc+nmethod+oops level) a dump of the nmethod's oops table and any
// immediate oops recorded in the entry.
void ZNMethodTable::log_register(const nmethod* nm, ZNMethodTableEntry entry) {
  LogTarget(Trace, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name(),
            nm->oops_count() - 1,
            entry.immediate_oops() ? entry.method_with_immediate_oops()->immediate_oops_count() : 0,
            BOOL_TO_STR(entry.non_immediate_oops()));

  LogTarget(Trace, gc, nmethod, oops) log_oops;
  if (!log_oops.is_enabled()) {
    return;
  }

  // Print nmethod oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
                   (p - begin), p2i(*p), (*p)->klass()->external_name());
  }

  if (entry.immediate_oops()) {
    // Print nmethod immediate oops
    const ZNMethodWithImmediateOops* const nmi = entry.method_with_immediate_oops();
    oop** const begin = nmi->immediate_oops_begin();
    oop** const end = nmi->immediate_oops_end();
    for (oop** p = begin; p < end; p++) {
      log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
                     (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
    }
  }
}
+
+void ZNMethodTable::log_unregister(const nmethod* nm) {
+  LogTarget(Debug, gc, nmethod) log;
+  if (!log.is_enabled()) {
+    return;
+  }
+
+  log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
+            nm->method()->method_holder()->external_name(),
+            nm->method()->name()->as_C_string(),
+            p2i(nm));
+}
+
// Number of nmethods currently registered in the table.
size_t ZNMethodTable::registered_nmethods() {
  return _nregistered;
}
+
// Number of tombstone (unregistered) slots currently in the table.
size_t ZNMethodTable::unregistered_nmethods() {
  return _nunregistered;
}
+
// Register an nmethod with the table so the GC can visit its oops.
// nmethods without any oops are logged but not inserted.
void ZNMethodTable::register_nmethod(nmethod* nm) {
  ResourceMark rm;

  // Create entry
  const ZNMethodTableEntry entry = create_entry(nm);

  log_register(nm, entry);

  if (!entry.registered()) {
    // Method doesn't have any oops, ignore it
    return;
  }

  // Grow/Shrink/Prune table if needed
  rebuild_if_needed();

  // Insert new entry
  if (register_entry(_table, _size, entry)) {
    // New entry registered. When register_entry() instead returns
    // false the nmethod was already in the table so we do not want
    // to increase number of registered entries in that case.
    _nregistered++;
  }
}
+
// Unregister an nmethod, leaving a tombstone in its slot. Counters are
// only adjusted when the nmethod was actually present.
void ZNMethodTable::unregister_nmethod(nmethod* nm) {
  ResourceMark rm;

  log_unregister(nm);

  // Remove entry
  if (unregister_entry(_table, _size, nm)) {
    // Entry was unregistered. When unregister_entry() instead returns
    // false the nmethod was not in the table (because it didn't have
    // any oops) so we do not want to decrease the number of registered
    // entries in that case.
    _nregistered--;
    _nunregistered++;
  }
}
+
// Reset the parallel claim cursor used by oops_do() at the start of a GC.
void ZNMethodTable::gc_prologue() {
  _claimed = 0;
}
+
// Verify that oops_do() claimed and processed the whole table.
void ZNMethodTable::gc_epilogue() {
  assert(_claimed >= _size, "Failed to claim all table entries");
}
+
// Apply the closure to all oops reachable from a single table entry:
// the nmethod's oops table, any recorded immediate oops, and — via
// fix_oop_relocations() — any non-immediate oops in the code itself.
// Dead nmethods are skipped.
void ZNMethodTable::entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl) {
  nmethod* const nm = method(entry);
  if (!nm->is_alive()) {
    // No need to visit oops
    return;
  }

  // Process oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      cl->do_oop(p);
    }
  }

  if (entry.immediate_oops()) {
    // Process immediate oops
    const ZNMethodWithImmediateOops* const nmi = entry.method_with_immediate_oops();
    oop** const begin = nmi->immediate_oops_begin_safe();
    oop** const end = nmi->immediate_oops_end();
    for (oop** p = begin; p < end; p++) {
      cl->do_oop(*p);
    }
  }

  if (entry.non_immediate_oops()) {
    // Process non-immediate oops
    nm->fix_oop_relocations();
  }
}
+
// Apply the closure to the oops of all registered nmethods. Safe to call
// from multiple GC worker threads concurrently: table partitions are
// claimed with an atomic cursor (_claimed, reset in gc_prologue()).
void ZNMethodTable::oops_do(OopClosure* cl) {
  for (;;) {
    // Claim table partition. Each partition is currently sized to span
    // two cache lines. This number is just a guess, but seems to work well.
    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _size);
    const size_t partition_end = MIN2(partition_start + partition_size, _size);
    if (partition_start == partition_end) {
      // End of table
      break;
    }

    // Process table partition
    for (size_t i = partition_start; i < partition_end; i++) {
      const ZNMethodTableEntry entry = _table[i];
      if (entry.registered()) {
        entry_oops_do(entry, cl);
      }
    }
  }
}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethodTable.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZNMETHODTABLE_HPP
+#define SHARE_GC_Z_ZNMETHODTABLE_HPP
+
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zNMethodTableEntry.hpp"
+#include "memory/allocation.hpp"
+
// Hash table mapping nmethods to their GC metadata, used to visit the
// oops embedded in compiled code. Open addressing with linear probing
// and tombstones; all state is static (one table per VM).
class ZNMethodTable : public AllStatic {
private:
  static ZNMethodTableEntry* _table;          // Slot array, power-of-two sized
  static size_t              _size;           // Number of slots in _table
  static size_t              _nregistered;    // Live (registered) entries
  static size_t              _nunregistered;  // Tombstone (unregistered) slots
  // Parallel claim cursor for oops_do(), cache-line aligned to avoid
  // false sharing with the fields above.
  static volatile size_t     _claimed ATTRIBUTE_ALIGNED(ZCacheLineSize);

  static ZNMethodTableEntry create_entry(nmethod* nm);
  static void destroy_entry(ZNMethodTableEntry entry);

  // Extract the nmethod from either entry representation
  static nmethod* method(ZNMethodTableEntry entry);

  // Linear-probing index helpers
  static size_t first_index(const nmethod* nm, size_t size);
  static size_t next_index(size_t prev_index, size_t size);

  // Raw insert/remove on an explicit table (also used during rebuild)
  static bool register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry);
  static bool unregister_entry(ZNMethodTableEntry* table, size_t size, const nmethod* nm);

  static void rebuild(size_t new_size);
  static void rebuild_if_needed();

  static void log_register(const nmethod* nm, ZNMethodTableEntry entry);
  static void log_unregister(const nmethod* nm);

  static void entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl);

public:
  static size_t registered_nmethods();
  static size_t unregistered_nmethods();

  static void register_nmethod(nmethod* nm);
  static void unregister_nmethod(nmethod* nm);

  // Bracket a GC pause; oops_do() may be called in between by workers
  static void gc_prologue();
  static void gc_epilogue();

  static void oops_do(OopClosure* cl);
};
+
+#endif // SHARE_GC_Z_ZNMETHODTABLE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethodTableEntry.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZNMETHODTABLEENTRY_HPP
+#define SHARE_GC_Z_ZNMETHODTABLEENTRY_HPP
+
+#include "gc/z/zBitField.hpp"
+#include "memory/allocation.hpp"
+
+//
+// NMethod table entry layout
+// --------------------------
+//
+//   6
+//   3                                                                  3 2 1 0
+//  +--------------------------------------------------------------------+-+-+-+
+//  |11111111 11111111 11111111 11111111 11111111 11111111 11111111 11111|1|1|1|
+//  +--------------------------------------------------------------------+-+-+-+
+//  |                                                                    | | |
+//  |                               2-2 Non-immediate Oops Flag (1-bits) * | |
+//  |                                                                      | |
+//  |                        1-1 Immediate Oops/Unregistered Flag (1-bits) * |
+//  |                                                                        |
+//  |                                           0-0 Registered Flag (1-bits) *
+//  |
+//  * 63-3 NMethod/ZNMethodWithImmediateOops Address (61-bits)
+//
+
+class nmethod;
+class ZNMethodWithImmediateOops;
+
// A single table slot, packed into one 64-bit word (see layout diagram
// above). Note that bit 1 is deliberately shared between the
// "unregistered" and "immediate oops" flags: its meaning is
// disambiguated by the "registered" flag in bit 0.
class ZNMethodTableEntry : public CHeapObj<mtGC> {
private:
  typedef ZBitField<uint64_t, bool,                       0,  1>    field_registered;
  // Bit 1: "unregistered" tombstone flag when not registered...
  typedef ZBitField<uint64_t, bool,                       1,  1>    field_unregistered;
  // ...and "has immediate oops" flag when registered (same bit)
  typedef ZBitField<uint64_t, bool,                       1,  1>    field_immediate_oops;
  typedef ZBitField<uint64_t, bool,                       2,  1>    field_non_immediate_oops;
  // Bits 63-3: 8-byte aligned pointer, interpretation selected by
  // the immediate-oops flag
  typedef ZBitField<uint64_t, nmethod*,                   3, 61, 3> field_method;
  typedef ZBitField<uint64_t, ZNMethodWithImmediateOops*, 3, 61, 3> field_method_with_immediate_oops;

  uint64_t _entry;

public:
  // Free slot (default) or tombstone (unregistered == true)
  ZNMethodTableEntry(bool unregistered = false) :
      _entry(field_unregistered::encode(unregistered) |
             field_registered::encode(false)) {}

  // Registered entry storing the nmethod directly (no immediate oops)
  ZNMethodTableEntry(nmethod* method, bool non_immediate_oops) :
      _entry(field_method::encode(method) |
             field_non_immediate_oops::encode(non_immediate_oops) |
             field_immediate_oops::encode(false) |
             field_registered::encode(true)) {}

  // Registered entry storing an nmethod-with-immediate-oops wrapper
  ZNMethodTableEntry(ZNMethodWithImmediateOops* method_with_immediate_oops, bool non_immediate_oops) :
      _entry(field_method_with_immediate_oops::encode(method_with_immediate_oops) |
             field_non_immediate_oops::encode(non_immediate_oops) |
             field_immediate_oops::encode(true) |
             field_registered::encode(true)) {}

  bool registered() const {
    return field_registered::decode(_entry);
  }

  // Only meaningful when !registered()
  bool unregistered() const {
    return field_unregistered::decode(_entry);
  }

  // Only meaningful when registered()
  bool immediate_oops() const {
    return field_immediate_oops::decode(_entry);
  }

  bool non_immediate_oops() const {
    return field_non_immediate_oops::decode(_entry);
  }

  // Valid only when !immediate_oops()
  nmethod* method() const {
    return field_method::decode(_entry);
  }

  // Valid only when immediate_oops()
  ZNMethodWithImmediateOops* method_with_immediate_oops() const {
    return field_method_with_immediate_oops::decode(_entry);
  }
};
+
+#endif // SHARE_GC_Z_ZNMETHODTABLEENTRY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNUMA.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "gc/z/zNUMA.hpp"
+#include "logging/log.hpp"
+#include "runtime/os.hpp"
+
// Whether NUMA support was detected/enabled, set by initialize_platform()
bool ZNUMA::_enabled;

// Probe platform NUMA support and log the outcome at gc+init level.
void ZNUMA::initialize() {
  initialize_platform();

  log_info(gc, init)("NUMA Support: %s", to_string());
  if (is_enabled()) {
    log_info(gc, init)("NUMA Nodes: %u", count());
  }
}
+
// Returns true if NUMA support was enabled during initialization.
bool ZNUMA::is_enabled() {
  return _enabled;
}
+
+void ZNUMA::memory_interleave(uintptr_t addr, size_t size) {
+  if (!_enabled) {
+    // NUMA support not enabled
+    return;
+  }
+
+  os::numa_make_global((char*)addr, size);
+}
+
+const char* ZNUMA::to_string() {
+  return _enabled ? "Enabled" : "Disabled";
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNUMA.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZNUMA_HPP
+#define SHARE_GC_Z_ZNUMA_HPP
+
+#include "memory/allocation.hpp"
+
// Static facade over platform NUMA support. The platform-specific part
// (initialize_platform, count, id, memory_id) is implemented per
// os_cpu; this header only declares the common interface.
class ZNUMA : public AllStatic {
private:
  static bool _enabled;  // Set during initialize()

  static void initialize_platform();

public:
  static void initialize();
  static bool is_enabled();

  // Number of NUMA nodes, and the node id of the current CPU
  static uint32_t count();
  static uint32_t id();

  // Node id backing the given address, and range interleaving
  static uint32_t memory_id(uintptr_t addr);
  static void memory_interleave(uintptr_t addr, size_t size);

  static const char* to_string();
};
+
+#endif // SHARE_GC_Z_ZNUMA_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zObjectAllocator.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zThread.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/threadSMR.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
// Statistics counters for allocation undo, and sub-phase timers for the
// TLAB retire/remap pauses.
static const ZStatCounter ZCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", ZStatUnitOpsPerSecond);
static const ZStatSubPhase ZSubPhasePauseRetireTLABS("Pause Retire TLABS");
static const ZStatSubPhase ZSubPhasePauseRemapTLABS("Pause Remap TLABS");
+
// nworkers is the number of GC worker threads.
// NOTE(review): _nworkers is stored but not read in this file —
// presumably consumed by the ZPerWorker member; confirm.
ZObjectAllocator::ZObjectAllocator(uint nworkers) :
    _nworkers(nworkers),
    _used(0),
    _shared_medium_page(NULL),
    _shared_small_page(NULL),
    _worker_small_page(NULL) {}
+
// Allocate a page from the heap and, on success, account its size in
// the per-CPU used counter. Returns NULL on allocation failure.
ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
  if (page != NULL) {
    // Increment used bytes
    Atomic::add(size, _used.addr());
  }

  return page;
}
+
// Allocate an object in the shared page pointed to by *shared_page,
// installing a new page with CAS when the current one is full or absent.
// May race with other allocating threads and with page retirement
// (which nulls *shared_page). Returns 0 on allocation failure.
uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
                                                        uint8_t page_type,
                                                        size_t page_size,
                                                        size_t size,
                                                        ZAllocationFlags flags) {
  uintptr_t addr = 0;
  ZPage* page = *shared_page;

  if (page != NULL) {
    // Fast path: bump-allocate in the currently installed page
    addr = page->alloc_object_atomic(size);
  }

  if (addr == 0) {
    // Allocate new page
    ZPage* const new_page = alloc_page(page_type, page_size, flags);
    if (new_page != NULL) {
      // Allocate object before installing the new page
      addr = new_page->alloc_object(size);

    retry:
      // Install new page; 'page' holds the value we expect to replace
      ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
      if (prev_page != page) {
        if (prev_page == NULL) {
          // Previous page was retired, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Another page already installed, try allocation there first
        const uintptr_t prev_addr = prev_page->alloc_object_atomic(size);
        if (prev_addr == 0) {
          // Allocation failed, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Allocation succeeded in already installed page
        addr = prev_addr;

        // Undo new page allocation
        ZHeap::heap()->undo_alloc_page(new_page);
      }
    }
  }

  return addr;
}
+
+uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) {
+  assert(ZThread::is_java(), "Should be a Java thread");
+
+  uintptr_t addr = 0;
+
+  // Allocate new large page
+  const size_t page_size = align_up(size, ZPageSizeMin);
+  ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags);
+  if (page != NULL) {
+    // Allocate the object
+    addr = page->alloc_object(size);
+  }
+
+  return addr;
+}
+
// Allocate a medium object in the shared medium page.
uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) {
  return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageTypeMedium, ZPageSizeMedium, size, flags);
}
+
// Allocate a small object from a Java or VM thread. Relocation requests
// from Java threads first try the thread's TLAB; otherwise the per-CPU
// shared small page is used. Returns 0 on failure.
uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java() || ZThread::is_vm(), "Should be a Java or VM thread");

  if (flags.relocation() && flags.java_thread() && UseTLAB) {
    // For relocations from Java threads, try TLAB allocation first
    const uintptr_t addr = (uintptr_t)Thread::current()->tlab().allocate(ZUtils::bytes_to_words(size));
    if (addr != 0) {
      return addr;
    }
  }

  // Non-worker small page allocation can never use the reserve
  flags.set_no_reserve();

  return alloc_object_in_shared_page(_shared_small_page.addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags);
}
+
// Allocate a small object from a GC worker thread, using the worker's
// private small page (no atomics needed). A fresh page is installed when
// the current one is full or absent; NULL is installed on page
// allocation failure. Returns 0 on failure.
uintptr_t ZObjectAllocator::alloc_small_object_from_worker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_worker(), "Should be a worker thread");

  ZPage* page = _worker_small_page.get();
  uintptr_t addr = 0;

  if (page != NULL) {
    addr = page->alloc_object(size);
  }

  if (addr == 0) {
    // Allocate new page
    page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
    if (page != NULL) {
      addr = page->alloc_object(size);
    }
    // Install the new page (possibly NULL on allocation failure)
    _worker_small_page.set(page);
  }

  return addr;
}
+
+uintptr_t ZObjectAllocator::alloc_small_object(size_t size, ZAllocationFlags flags) {
+  if (flags.worker_thread()) {
+    return alloc_small_object_from_worker(size, flags);
+  } else {
+    return alloc_small_object_from_nonworker(size, flags);
+  }
+}
+
+uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) {
+  if (size <= ZObjectSizeLimitSmall) {
+    // Small
+    return alloc_small_object(size, flags);
+  } else if (size <= ZObjectSizeLimitMedium) {
+    // Medium
+    return alloc_medium_object(size, flags);
+  } else {
+    // Large
+    return alloc_large_object(size, flags);
+  }
+}
+
// Allocate an object on behalf of a Java thread (the normal mutator
// allocation path). Non-blocking unless ZStallOnOutOfMemory is set.
uintptr_t ZObjectAllocator::alloc_object(size_t size) {
  assert(ZThread::is_java(), "Must be a Java thread");

  ZAllocationFlags flags;
  flags.set_java_thread();
  flags.set_no_reserve();

  if (!ZStallOnOutOfMemory) {
    flags.set_non_blocking();
  }

  return alloc_object(size, flags);
}
+
// Allocate the destination for a relocated object. Always non-blocking;
// flags record the calling thread type so the right small-page path and
// undo path are used.
uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
  assert(ZThread::is_java() || ZThread::is_worker() || ZThread::is_vm(), "Unknown thread");

  ZAllocationFlags flags;
  flags.set_relocation();
  flags.set_non_blocking();

  if (ZThread::is_worker()) {
    flags.set_worker_thread();
  } else if (ZThread::is_java()) {
    flags.set_java_thread();
  }

  return alloc_object(size, flags);
}
+
// Undo a large object allocation by returning its dedicated page.
// Always succeeds.
bool ZObjectAllocator::undo_alloc_large_object(ZPage* page) {
  assert(page->type() == ZPageTypeLarge, "Invalid page type");

  // Undo page allocation
  ZHeap::heap()->undo_alloc_page(page);
  return true;
}
+
// Undo a medium object allocation. Only succeeds if no other allocation
// has happened in the shared page since (atomic top rollback).
bool ZObjectAllocator::undo_alloc_medium_object(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeMedium, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}
+
// Undo a small object allocation made by a Java or VM thread: first try
// the thread's TLAB (the allocation may have come from there), then the
// shared page.
bool ZObjectAllocator::undo_alloc_small_object_from_nonworker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");

  if (ZThread::is_java()) {
    // Try undo allocation in TLAB
    if (Thread::current()->tlab().undo_allocate((HeapWord*)addr, ZUtils::bytes_to_words(size))) {
      return true;
    }
  }

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}
+
// Undo a small object allocation made by a worker thread. The page is
// worker-private, so the allocation must still be the topmost one and
// the non-atomic undo always succeeds.
bool ZObjectAllocator::undo_alloc_small_object_from_worker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");
  assert(page == _worker_small_page.get(), "Invalid page");

  // Non-atomic undo on worker-local page
  const bool success = page->undo_alloc_object(addr, size);
  assert(success, "Should always succeed");
  return success;
}
+
+bool ZObjectAllocator::undo_alloc_small_object(ZPage* page, uintptr_t addr, size_t size) {
+  if (ZThread::is_worker()) {
+    return undo_alloc_small_object_from_worker(page, addr, size);
+  } else {
+    return undo_alloc_small_object_from_nonworker(page, addr, size);
+  }
+}
+
+bool ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
+  const uint8_t type = page->type();
+
+  if (type == ZPageTypeSmall) {
+    return undo_alloc_small_object(page, addr, size);
+  } else if (type == ZPageTypeMedium) {
+    return undo_alloc_medium_object(page, addr, size);
+  } else {
+    return undo_alloc_large_object(page);
+  }
+}
+
// Undo a relocation destination allocation, recording success/failure
// statistics. A failed undo simply leaves dead space in the page.
void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) {
  if (undo_alloc_object(page, addr, size)) {
    ZStatInc(ZCounterUndoObjectAllocationSucceeded);
  } else {
    ZStatInc(ZCounterUndoObjectAllocationFailed);
    log_trace(gc)("Failed to undo object allocation: " PTR_FORMAT ", Size: " SIZE_FORMAT ", Thread: " PTR_FORMAT " (%s)",
                  addr, size, ZThread::id(), ZThread::name());
  }
}
+
// Total used bytes, summed over all per-CPU counters.
size_t ZObjectAllocator::used() const {
  size_t total_used = 0;

  ZPerCPUConstIterator<size_t> iter(&_used);
  for (const size_t* cpu_used; iter.next(&cpu_used);) {
    total_used += *cpu_used;
  }

  return total_used;
}
+
+size_t ZObjectAllocator::remaining() const {
+  assert(ZThread::is_java(), "Should be a Java thread");
+
+  ZPage* page = _shared_small_page.get();
+  if (page != NULL) {
+    return page->remaining();
+  }
+
+  return 0;
+}
+
// Retire all TLABs and reset allocator state. Must run at a safepoint;
// resets the used counters and drops all shared/worker allocation pages
// so new ones are installed lazily.
void ZObjectAllocator::retire_tlabs() {
  ZStatTimer timer(ZSubPhasePauseRetireTLABS);
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Retire TLABs
  if (UseTLAB) {
    ZCollectedHeap* heap = ZCollectedHeap::heap();
    heap->accumulate_statistics_all_tlabs();
    heap->ensure_parsability(true /* retire_tlabs */);
    heap->resize_all_tlabs();
  }

  // Reset used
  _used.set_all(0);

  // Reset allocation pages
  _shared_medium_page.set(NULL);
  _shared_small_page.set_all(NULL);
  _worker_small_page.set_all(NULL);
}
+
// Rewrite a single TLAB-internal pointer to its good-colored address
// (or NULL), used as a callback by remap_tlabs() below.
static void remap_tlab_address(HeapWord** p) {
  *p = (HeapWord*)ZAddress::good_or_null((uintptr_t)*p);
}
+
// Remap the internal addresses of every Java thread's TLAB to good
// colors. Must run at a safepoint.
void ZObjectAllocator::remap_tlabs() {
  ZStatTimer timer(ZSubPhasePauseRemapTLABS);
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  if (UseTLAB) {
    for (JavaThreadIteratorWithHandle iter; JavaThread* thread = iter.next(); ) {
      thread->tlab().addresses_do(remap_tlab_address);
    }
  }
}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zObjectAllocator.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZOBJECTALLOCATOR_HPP
+#define SHARE_GC_Z_ZOBJECTALLOCATOR_HPP
+
+#include "gc/z/zAllocationFlags.hpp"
+#include "gc/z/zPage.hpp"
+#include "gc/z/zValue.hpp"
+#include "memory/allocation.hpp"
+
// Size-class based object allocator for ZGC: small objects go to per-CPU
// (or per-worker) small pages, medium objects to a single shared medium
// page, and large objects each get their own page.
class ZObjectAllocator {
private:
  const uint         _nworkers;            // Number of GC worker threads
  ZPerCPU<size_t>    _used;                // Used bytes, per CPU
  ZContended<ZPage*> _shared_medium_page;  // Shared medium page (contention-padded)
  ZPerCPU<ZPage*>    _shared_small_page;   // Shared small page, per CPU
  ZPerWorker<ZPage*> _worker_small_page;   // Private small page, per worker

  ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);

  // Allocate an object in a shared page. Allocate and
  // atomically install a new page if necessary.
  uintptr_t alloc_object_in_shared_page(ZPage** shared_page,
                                        uint8_t page_type,
                                        size_t page_size,
                                        size_t size,
                                        ZAllocationFlags flags);

  // Size-class specific allocation paths; all return 0 on failure
  uintptr_t alloc_large_object(size_t size, ZAllocationFlags flags);
  uintptr_t alloc_medium_object(size_t size, ZAllocationFlags flags);
  uintptr_t alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags);
  uintptr_t alloc_small_object_from_worker(size_t size, ZAllocationFlags flags);
  uintptr_t alloc_small_object(size_t size, ZAllocationFlags flags);
  uintptr_t alloc_object(size_t size, ZAllocationFlags flags);

  // Best-effort undo paths; return true if the undo succeeded
  bool undo_alloc_large_object(ZPage* page);
  bool undo_alloc_medium_object(ZPage* page, uintptr_t addr, size_t size);
  bool undo_alloc_small_object_from_nonworker(ZPage* page, uintptr_t addr, size_t size);
  bool undo_alloc_small_object_from_worker(ZPage* page, uintptr_t addr, size_t size);
  bool undo_alloc_small_object(ZPage* page, uintptr_t addr, size_t size);
  bool undo_alloc_object(ZPage* page, uintptr_t addr, size_t size);

public:
  ZObjectAllocator(uint nworkers);

  // Mutator allocation path (Java threads)
  uintptr_t alloc_object(size_t size);

  // Relocation destination allocation and undo
  uintptr_t alloc_object_for_relocation(size_t size);
  void undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size);

  size_t used() const;
  size_t remaining() const;

  // Safepoint operations on TLABs
  void retire_tlabs();
  void remap_tlabs();
};
+
+#endif // SHARE_GC_Z_ZOBJECTALLOCATOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zOop.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZOOP_HPP
+#define SHARE_GC_Z_ZOOP_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+
+// All-static helpers for converting between oop and raw uintptr_t address
+// representations, and for checking/applying ZGC address metadata
+// ("good" color bits, see ZAddress).
+class ZOop : public AllStatic {
+public:
+  // Conversions between oop and its untyped address
+  static oop to_oop(uintptr_t value);
+  static uintptr_t to_address(oop o);
+
+  // True if the oop's address carries the currently good metadata bits
+  static bool is_good(oop o);
+  static bool is_good_or_null(oop o);
+
+  // Returns the oop with the currently good metadata bits applied
+  static oop good(oop);
+};
+
+#endif // SHARE_GC_Z_ZOOP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zOop.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZOOP_INLINE_HPP
+#define SHARE_GC_Z_ZOOP_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zOop.hpp"
+#include "oops/oopsHierarchy.hpp"
+
+// Convert a raw address to an oop (no validity checks performed)
+inline oop ZOop::to_oop(uintptr_t value) {
+  return cast_to_oop(value);
+}
+
+// Convert an oop to its raw address
+inline uintptr_t ZOop::to_address(oop o) {
+  return cast_from_oop<uintptr_t>(o);
+}
+
+// True if the oop's address has the currently good metadata bits
+inline bool ZOop::is_good(oop o) {
+  return ZAddress::is_good(to_address(o));
+}
+
+// Like is_good(), but NULL is also accepted
+inline bool ZOop::is_good_or_null(oop o) {
+  return ZAddress::is_good_or_null(to_address(o));
+}
+
+// Return the oop rewritten with the currently good metadata bits
+inline oop ZOop::good(oop o) {
+  return to_oop(ZAddress::good(to_address(o)));
+}
+
+#endif // SHARE_GC_Z_ZOOP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zOopClosures.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zHeap.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Shared verification helper: checks that a loaded oop carries good
+// color bits and refers to a valid object (or is NULL). The location
+// p is only used for error reporting.
+static void z_verify_loaded_object(const oop* p, const oop obj) {
+  guarantee(ZOop::is_good_or_null(obj),
+            "Bad oop " PTR_FORMAT " found at " PTR_FORMAT ", expected " PTR_FORMAT,
+            p2i(obj), p2i(p), p2i(ZOop::good(obj)));
+  guarantee(oopDesc::is_oop_or_null(obj),
+            "Bad object " PTR_FORMAT " found at " PTR_FORMAT,
+            p2i(obj), p2i(p));
+}
+
+// base is the heap object whose fields are being verified
+ZVerifyHeapOopClosure::ZVerifyHeapOopClosure(oop base)
+    : _base(base) {}
+
+void ZVerifyHeapOopClosure::do_oop(oop* p) {
+  // Heap object fields must themselves be located in the heap
+  guarantee(ZHeap::heap()->is_in((uintptr_t)p), "oop* " PTR_FORMAT " not in heap", p2i(p));
+
+  // Load through the Access API (relative to _base) and verify the result
+  const oop obj = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
+  z_verify_loaded_object(p, obj);
+}
+
+void ZVerifyHeapOopClosure::do_oop(narrowOop* p) {
+  // ZGC does not use compressed oops
+  ShouldNotReachHere();
+}
+
+void ZVerifyRootOopClosure::do_oop(oop* p) {
+  // Root slots live outside the heap, in contrast to heap fields
+  guarantee(!ZHeap::heap()->is_in((uintptr_t)p), "oop* " PTR_FORMAT " in heap", p2i(p));
+
+  const oop obj = RootAccess<>::oop_load(p);
+  z_verify_loaded_object(p, obj);
+}
+
+void ZVerifyRootOopClosure::do_oop(narrowOop* p) {
+  // ZGC does not use compressed oops
+  ShouldNotReachHere();
+}
+
+// Verify all oop fields of the given object
+void ZVerifyObjectClosure::do_object(oop o) {
+  ZVerifyHeapOopClosure cl(o);
+  o->oop_iterate(&cl);
+}
+
+// Generate Z specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_Z(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zOopClosures.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZOOPCLOSURES_HPP
+#define SHARE_GC_Z_ZOOPCLOSURES_HPP
+
+#include "memory/iterator.hpp"
+
+// Applies the load barrier to each visited oop field
+class ZLoadBarrierOopClosure : public ExtendedOopClosure {
+public:
+  // Non-virtual variants, used by the specialized oop_oop_iterate functions
+  void do_oop_nv(oop* p);
+  void do_oop_nv(narrowOop* p);
+
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+
+#ifdef ASSERT
+  // Skip default oop verification; visited fields may hold
+  // not-yet-healed oops, which the barrier itself handles.
+  virtual bool should_verify_oops() {
+    return false;
+  }
+#endif
+};
+
+// Applies the mark barrier to each visited root oop field
+class ZMarkRootOopClosure : public OopClosure {
+public:
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+};
+
+// Applies the relocate barrier to each visited root oop field
+class ZRelocateRootOopClosure : public OopClosure {
+public:
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+};
+
+// Applies the mark barrier to each visited oop field. The template
+// parameter selects marking through finalizable reachability.
+template <bool finalizable>
+class ZMarkBarrierOopClosure : public ExtendedOopClosure {
+public:
+  ZMarkBarrierOopClosure();
+
+  // Non-virtual variants, used by the specialized oop_oop_iterate functions
+  void do_oop_nv(oop* p);
+  void do_oop_nv(narrowOop* p);
+
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+
+#ifdef ASSERT
+  // Skip default oop verification; fields may hold not-yet-marked oops
+  virtual bool should_verify_oops() {
+    return false;
+  }
+#endif
+};
+
+// Liveness check for objects referenced through phantom oop fields
+class ZPhantomIsAliveObjectClosure : public BoolObjectClosure {
+public:
+  virtual bool do_object_b(oop o);
+};
+
+// Keeps the objects referenced by visited phantom oop fields alive
+class ZPhantomKeepAliveOopClosure : public OopClosure {
+public:
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+};
+
+// Heals live phantom oop fields, and clears (NULLs) dead ones
+class ZPhantomCleanOopClosure : public OopClosure {
+public:
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+};
+
+// Verifies oop fields of a heap object; loads go through the
+// Access API relative to the base object.
+class ZVerifyHeapOopClosure : public ExtendedOopClosure {
+private:
+  const oop _base;  // Object whose fields are being verified
+
+public:
+  ZVerifyHeapOopClosure(oop base);
+
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+
+#ifdef ASSERT
+  // Verification handled by the closure itself.
+  virtual bool should_verify_oops() {
+    return false;
+  }
+#endif
+};
+
+// Verifies root oop slots located outside the heap
+class ZVerifyRootOopClosure : public OopClosure {
+public:
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+};
+
+// Verifies all oop fields of each visited object
+class ZVerifyObjectClosure : public ObjectClosure {
+public:
+  virtual void do_object(oop o);
+};
+
+#endif // SHARE_GC_Z_ZOOPCLOSURES_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zOopClosures.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZOOPCLOSURES_INLINE_HPP
+#define SHARE_GC_Z_ZOOPCLOSURES_INLINE_HPP
+
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zOopClosures.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/debug.hpp"
+
+inline void ZLoadBarrierOopClosure::do_oop_nv(oop* p) {
+  // Heal the field by applying the load barrier
+  ZBarrier::load_barrier_on_oop_field(p);
+}
+
+inline void ZLoadBarrierOopClosure::do_oop_nv(narrowOop* p) {
+  // ZGC does not use compressed oops
+  ShouldNotReachHere();
+}
+
+inline void ZLoadBarrierOopClosure::do_oop(oop* p) {
+  do_oop_nv(p);
+}
+
+inline void ZLoadBarrierOopClosure::do_oop(narrowOop* p) {
+  do_oop_nv(p);
+}
+
+inline void ZMarkRootOopClosure::do_oop(oop* p) {
+  ZBarrier::mark_barrier_on_root_oop_field(p);
+}
+
+inline void ZMarkRootOopClosure::do_oop(narrowOop* p) {
+  // ZGC does not use compressed oops
+  ShouldNotReachHere();
+}
+
+inline void ZRelocateRootOopClosure::do_oop(oop* p) {
+  ZBarrier::relocate_barrier_on_root_oop_field(p);
+}
+
+inline void ZRelocateRootOopClosure::do_oop(narrowOop* p) {
+  // ZGC does not use compressed oops
+  ShouldNotReachHere();
+}
+
+template <bool finalizable>
+inline ZMarkBarrierOopClosure<finalizable>::ZMarkBarrierOopClosure() :
+    // Strong marking participates in reference discovery;
+    // finalizable marking does not (NULL reference discoverer).
+    ExtendedOopClosure(finalizable ? NULL : ZHeap::heap()->reference_discoverer()) {}
+
+template <bool finalizable>
+inline void ZMarkBarrierOopClosure<finalizable>::do_oop_nv(oop* p) {
+  ZBarrier::mark_barrier_on_oop_field(p, finalizable);
+}
+
+template <bool finalizable>
+inline void ZMarkBarrierOopClosure<finalizable>::do_oop_nv(narrowOop* p) {
+  // ZGC does not use compressed oops
+  ShouldNotReachHere();
+}
+
+template <bool finalizable>
+inline void ZMarkBarrierOopClosure<finalizable>::do_oop(oop* p) {
+  do_oop_nv(p);
+}
+
+template <bool finalizable>
+inline void ZMarkBarrierOopClosure<finalizable>::do_oop(narrowOop* p) {
+  do_oop_nv(p);
+}
+
+inline bool ZPhantomIsAliveObjectClosure::do_object_b(oop o) {
+  return ZBarrier::is_alive_barrier_on_phantom_oop(o);
+}
+
+inline void ZPhantomKeepAliveOopClosure::do_oop(oop* p) {
+  ZBarrier::keep_alive_barrier_on_phantom_oop_field(p);
+}
+
+inline void ZPhantomKeepAliveOopClosure::do_oop(narrowOop* p) {
+  // ZGC does not use compressed oops
+  ShouldNotReachHere();
+}
+
+inline void ZPhantomCleanOopClosure::do_oop(oop* p) {
+  // Read the oop once, to make sure the liveness check
+  // and the later clearing uses the same value.
+  const oop obj = *(volatile oop*)p;
+  if (ZBarrier::is_alive_barrier_on_phantom_oop(obj)) {
+    // Object is live; heal/keep the field alive
+    ZBarrier::keep_alive_barrier_on_phantom_oop_field(p);
+  } else {
+    // The destination could have been modified/reused, in which case
+    // we don't want to clear it. However, no one could write the same
+    // oop here again (the object would be strongly live and we would
+    // not consider clearing such oops), so therefore we don't have an
+    // ABA problem here.
+    Atomic::cmpxchg(oop(NULL), p, obj);
+  }
+}
+
+inline void ZPhantomCleanOopClosure::do_oop(narrowOop* p) {
+  // ZGC does not use compressed oops
+  ShouldNotReachHere();
+}
+
+#endif // SHARE_GC_Z_ZOOPCLOSURES_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPage.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zForwardingTable.inline.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zLiveMap.inline.hpp"
+#include "gc/z/zMark.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zThread.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/orderAccess.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+static const ZStatCounter ZCounterRelocationContention("Contention", "Relocation Contention", ZStatUnitOpsPerSecond);
+
+// Create a page of the given type, backed by the given virtual and
+// physical memory ranges. The page starts out inactive (_refcount 0)
+// and unmapped (_seqnum 0).
+ZPage::ZPage(uint8_t type, ZVirtualMemory vmem, ZPhysicalMemory pmem) :
+    _type(type),
+    _pinned(0),
+    _numa_id((uint8_t)-1),  // Lazily initialized by numa_id()
+    _seqnum(0),
+    _virtual(vmem),
+    _top(start()),
+    _livemap(object_max_count()),
+    _refcount(0),
+    _forwarding(),
+    _physical(pmem) {
+  assert(!_physical.is_null(), "Should not be null");
+  assert(!_virtual.is_null(), "Should not be null");
+  assert((type == ZPageTypeSmall && size() == ZPageSizeSmall) ||
+         (type == ZPageTypeMedium && size() == ZPageSizeMedium) ||
+         (type == ZPageTypeLarge && is_aligned(size(), ZPageSizeMin)),
+         "Page type/size mismatch");
+}
+
+ZPage::~ZPage() {
+  // A page may only be destroyed once inactive and its
+  // physical memory has been detached
+  assert(!is_active(), "Should not be active");
+  assert(is_detached(), "Should be detached");
+}
+
+// Reset the page for reuse in a new allocation cycle
+void ZPage::reset() {
+  assert(!is_active(), "Should not be active");
+  assert(!is_pinned(), "Should not be pinned");
+  assert(!is_detached(), "Should not be detached");
+
+  _seqnum = ZGlobalSeqNum;
+  _top = start();
+  _livemap.reset();
+
+  // Make sure we don't make the page active before
+  // the resets of the above fields are visible.
+  OrderAccess::storestore();
+
+  _refcount = 1;
+}
+
+// Relocate, or in-place forward, the object identified by
+// (from_index, from_offset). Returns the offset of the object's new
+// location; a return value equal to from_offset means the object was
+// forwarded in place. Safe to race with other relocating threads --
+// the forwarding table insert decides the winner.
+uintptr_t ZPage::relocate_object_inner(uintptr_t from_index, uintptr_t from_offset) {
+  ZForwardingTableCursor cursor;
+
+  // Lookup address in forwarding table
+  const ZForwardingTableEntry entry = _forwarding.find(from_index, &cursor);
+  if (entry.from_index() == from_index) {
+    // Already relocated, return new address
+    return entry.to_offset();
+  }
+
+  // Not found in forwarding table, relocate object
+  assert(is_object_marked(from_offset), "Should be marked");
+
+  if (is_pinned()) {
+    // In-place forward
+    return _forwarding.insert(from_index, from_offset, &cursor);
+  }
+
+  // Allocate object
+  const uintptr_t from_good = ZAddress::good(from_offset);
+  const size_t size = ZUtils::object_size(from_good);
+  const uintptr_t to_good = ZHeap::heap()->alloc_object_for_relocation(size);
+  if (to_good == 0) {
+    // Failed, in-place forward
+    return _forwarding.insert(from_index, from_offset, &cursor);
+  }
+
+  // Copy object
+  ZUtils::object_copy(from_good, to_good, size);
+
+  // Update forwarding table
+  const uintptr_t to_offset = ZAddress::offset(to_good);
+  const uintptr_t to_offset_final = _forwarding.insert(from_index, to_offset, &cursor);
+  if (to_offset_final == to_offset) {
+    // Relocation succeeded
+    return to_offset;
+  }
+
+  // Relocation contention -- another thread won the race to insert
+  // a forwarding entry for this object; discard our copy.
+  ZStatInc(ZCounterRelocationContention);
+  log_trace(gc)("Relocation contention, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT
+                ", entry: " SIZE_FORMAT ", oop: " PTR_FORMAT ", size: " SIZE_FORMAT,
+                ZThread::id(), ZThread::name(), p2i(this), cursor, from_good, size);
+
+  // Try undo allocation
+  ZHeap::heap()->undo_alloc_object_for_relocation(to_good, size);
+
+  return to_offset_final;
+}
+
+// Relocate the object at the given address and return the good
+// address of its new location. Pins the page when the object had
+// to be forwarded in place.
+uintptr_t ZPage::relocate_object(uintptr_t from) {
+  assert(ZHeap::heap()->is_relocating(from), "Should be relocating");
+
+  const uintptr_t from_offset = ZAddress::offset(from);
+  const uintptr_t from_index = (from_offset - start()) >> object_alignment_shift();
+  const uintptr_t to_offset = relocate_object_inner(from_index, from_offset);
+  if (from_offset == to_offset) {
+    // In-place forwarding, pin page
+    set_pinned();
+  }
+
+  return ZAddress::good(to_offset);
+}
+
+// Return the good address of an already relocated object. The
+// forwarding table entry must exist.
+uintptr_t ZPage::forward_object(uintptr_t from) {
+  assert(ZHeap::heap()->is_relocating(from), "Should be relocated");
+
+  // Lookup address in forwarding table
+  const uintptr_t from_offset = ZAddress::offset(from);
+  const uintptr_t from_index = (from_offset - start()) >> object_alignment_shift();
+  const ZForwardingTableEntry entry = _forwarding.find(from_index);
+  assert(entry.from_index() == from_index, "Should be forwarded");
+
+  return ZAddress::good(entry.to_offset());
+}
+
+// Print a one-line summary of the page: type, address range and state flags
+void ZPage::print_on(outputStream* out) const {
+  out->print_cr(" %-6s  " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s%s%s%s%s%s",
+                type_to_string(), start(), top(), end(),
+                is_allocating()  ? " Allocating"  : "",
+                is_relocatable() ? " Relocatable" : "",
+                is_forwarding()  ? " Forwarding"  : "",
+                is_pinned()      ? " Pinned"      : "",
+                is_detached()    ? " Detached"    : "",
+                !is_active()     ? " Inactive"    : "");
+}
+
+void ZPage::print() const {
+  print_on(tty);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPage.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPAGE_HPP
+#define SHARE_GC_Z_ZPAGE_HPP
+
+#include "gc/z/zForwardingTable.hpp"
+#include "gc/z/zList.hpp"
+#include "gc/z/zLiveMap.hpp"
+#include "gc/z/zPhysicalMemory.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+#include "memory/allocation.hpp"
+
+// A ZPage is the unit of heap memory management in ZGC: a contiguous
+// range of virtual memory, backed by physical memory, in which objects
+// are allocated. Pages come in three types (small/medium/large, see
+// _type) and carry the per-page live map and forwarding table used
+// during marking and relocation.
+class ZPage : public CHeapObj<mtGC> {
+  friend class VMStructs;
+  friend class ZList<ZPage>;
+
+private:
+  // Always hot
+  const uint8_t        _type;             // Page type
+  volatile uint8_t     _pinned;           // Pinned flag
+  uint8_t              _numa_id;          // NUMA node affinity
+  uint32_t             _seqnum;           // Allocation sequence number
+  const ZVirtualMemory _virtual;          // Virtual start/end address
+  volatile uintptr_t   _top;              // Virtual top address
+  ZLiveMap             _livemap;          // Live map
+
+  // Hot when relocated and cached
+  volatile uint32_t    _refcount;         // Page reference count
+  ZForwardingTable     _forwarding;       // Forwarding table
+  ZPhysicalMemory      _physical;         // Physical memory for page
+  ZListNode<ZPage>     _node;             // Page list node
+
+  const char* type_to_string() const;
+  uint32_t object_max_count() const;
+  uintptr_t relocate_object_inner(uintptr_t from_index, uintptr_t from_offset);
+
+  bool is_object_marked(uintptr_t addr) const;
+  bool is_object_strongly_marked(uintptr_t addr) const;
+
+public:
+  ZPage(uint8_t type, ZVirtualMemory vmem, ZPhysicalMemory pmem);
+  ~ZPage();
+
+  // Object alignment for this page's type
+  size_t object_alignment_shift() const;
+  size_t object_alignment() const;
+
+  // Address range accessors
+  uint8_t type() const;
+  uintptr_t start() const;
+  uintptr_t end() const;
+  size_t size() const;
+  uintptr_t top() const;
+  size_t remaining() const;
+
+  uint8_t numa_id();
+
+  ZPhysicalMemory& physical_memory();
+  const ZVirtualMemory& virtual_memory() const;
+
+  void reset();
+
+  // Reference counting; the page is active while _refcount > 0
+  bool inc_refcount();
+  bool dec_refcount();
+
+  bool is_in(uintptr_t addr) const;
+
+  // BlockOffsetTable-style queries used by the CollectedHeap interface
+  uintptr_t block_start(uintptr_t addr) const;
+  size_t block_size(uintptr_t addr) const;
+  bool block_is_obj(uintptr_t addr) const;
+
+  // Page state predicates
+  bool is_active() const;
+  bool is_allocating() const;
+  bool is_relocatable() const;
+  bool is_detached() const;
+
+  bool is_mapped() const;
+  void set_pre_mapped();
+
+  // Pinned pages have their objects forwarded in place
+  bool is_pinned() const;
+  void set_pinned();
+
+  // Forwarding table lifecycle (used during relocation)
+  bool is_forwarding() const;
+  void set_forwarding();
+  void reset_forwarding();
+  void verify_forwarding() const;
+
+  // Marking and liveness information
+  bool is_marked() const;
+  bool is_object_live(uintptr_t addr) const;
+  bool is_object_strongly_live(uintptr_t addr) const;
+  bool mark_object(uintptr_t addr, bool finalizable, bool& inc_live);
+
+  void inc_live_atomic(uint32_t objects, size_t bytes);
+  size_t live_bytes() const;
+
+  void object_iterate(ObjectClosure* cl);
+
+  // Bump-pointer object allocation (atomic variants for shared pages)
+  uintptr_t alloc_object(size_t size);
+  uintptr_t alloc_object_atomic(size_t size);
+
+  bool undo_alloc_object(uintptr_t addr, size_t size);
+  bool undo_alloc_object_atomic(uintptr_t addr, size_t size);
+
+  uintptr_t relocate_object(uintptr_t from);
+  uintptr_t forward_object(uintptr_t from);
+
+  void print_on(outputStream* out) const;
+  void print() const;
+};
+
+#endif // SHARE_GC_Z_ZPAGE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPage.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPAGE_INLINE_HPP
+#define SHARE_GC_Z_ZPAGE_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zForwardingTable.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLiveMap.inline.hpp"
+#include "gc/z/zMark.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zPage.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "gc/z/zVirtualMemory.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+// Human-readable page type name, for logging/printing
+inline const char* ZPage::type_to_string() const {
+  switch (type()) {
+  case ZPageTypeSmall:
+    return "Small";
+
+  case ZPageTypeMedium:
+    return "Medium";
+
+  default:
+    assert(type() == ZPageTypeLarge, "Invalid page type");
+    return "Large";
+  }
+}
+
+// Upper bound on the number of objects this page can hold,
+// used to size the live map.
+inline uint32_t ZPage::object_max_count() const {
+  switch (type()) {
+  case ZPageTypeLarge:
+    // A large page can only contain a single
+    // object aligned to the start of the page.
+    return 1;
+
+  default:
+    return (uint32_t)(size() >> object_alignment_shift());
+  }
+}
+
+// Object alignment (as a shift) for this page's type
+inline size_t ZPage::object_alignment_shift() const {
+  switch (type()) {
+  case ZPageTypeSmall:
+    return ZObjectAlignmentSmallShift;
+
+  case ZPageTypeMedium:
+    return ZObjectAlignmentMediumShift;
+
+  default:
+    assert(type() == ZPageTypeLarge, "Invalid page type");
+    return ZObjectAlignmentLargeShift;
+  }
+}
+
+// Object alignment (in bytes) for this page's type
+inline size_t ZPage::object_alignment() const {
+  switch (type()) {
+  case ZPageTypeSmall:
+    return ZObjectAlignmentSmall;
+
+  case ZPageTypeMedium:
+    return ZObjectAlignmentMedium;
+
+  default:
+    assert(type() == ZPageTypeLarge, "Invalid page type");
+    return ZObjectAlignmentLarge;
+  }
+}
+
+inline uint8_t ZPage::type() const {
+  return _type;
+}
+
+// Start offset of the page's virtual memory range
+inline uintptr_t ZPage::start() const {
+  return _virtual.start();
+}
+
+// End offset of the page's virtual memory range
+inline uintptr_t ZPage::end() const {
+  return _virtual.end();
+}
+
+inline size_t ZPage::size() const {
+  return _virtual.size();
+}
+
+// Current allocation top (bump pointer)
+inline uintptr_t ZPage::top() const {
+  return _top;
+}
+
+// Bytes still available for allocation in this page
+inline size_t ZPage::remaining() const {
+  return end() - top();
+}
+
+inline ZPhysicalMemory& ZPage::physical_memory() {
+  return _physical;
+}
+
+inline const ZVirtualMemory& ZPage::virtual_memory() const {
+  return _virtual;
+}
+
+// Lazily query and cache the NUMA node backing this page's memory
+inline uint8_t ZPage::numa_id() {
+  if (_numa_id == (uint8_t)-1) {
+    _numa_id = (uint8_t)ZNUMA::memory_id(ZAddress::good(start()));
+  }
+
+  return _numa_id;
+}
+
+// Increment the reference count, but only if the page is still
+// active (refcount > 0). Returns false if the count has already
+// dropped to zero, in which case the page must not be resurrected.
+inline bool ZPage::inc_refcount() {
+  for (uint32_t prev_refcount = _refcount; prev_refcount > 0; prev_refcount = _refcount) {
+    if (Atomic::cmpxchg(prev_refcount + 1, &_refcount, prev_refcount) == prev_refcount) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Decrement the reference count. Returns true when this was the
+// last reference (count reached zero).
+inline bool ZPage::dec_refcount() {
+  assert(is_active(), "Should be active");
+  return Atomic::sub(1u, &_refcount) == 0;
+}
+
+// True if the address (after stripping metadata bits) falls within
+// the allocated part [start, top) of this page
+inline bool ZPage::is_in(uintptr_t addr) const {
+  const uintptr_t offset = ZAddress::offset(addr);
+  return offset >= start() && offset < top();
+}
+
+// Block queries for the CollectedHeap interface. Addresses below top
+// are treated as objects; the region [top, end) is one non-object block.
+inline uintptr_t ZPage::block_start(uintptr_t addr) const {
+  if (block_is_obj(addr)) {
+    return addr;
+  } else {
+    return ZAddress::good(top());
+  }
+}
+
+inline size_t ZPage::block_size(uintptr_t addr) const {
+  if (block_is_obj(addr)) {
+    return ZUtils::object_size(addr);
+  } else {
+    return end() - top();
+  }
+}
+
+inline bool ZPage::block_is_obj(uintptr_t addr) const {
+  return ZAddress::offset(addr) < top();
+}
+
+inline bool ZPage::is_active() const {
+  return _refcount > 0;
+}
+
+// Allocating: created in the current GC cycle (seqnum matches the
+// global sequence number)
+inline bool ZPage::is_allocating() const {
+  return is_active() && _seqnum == ZGlobalSeqNum;
+}
+
+// Relocatable: created in an earlier GC cycle
+inline bool ZPage::is_relocatable() const {
+  return is_active() && _seqnum < ZGlobalSeqNum;
+}
+
+// Detached: the physical memory has been taken away from the page
+inline bool ZPage::is_detached() const {
+  return _physical.is_null();
+}
+
+inline bool ZPage::is_mapped() const {
+  return _seqnum > 0;
+}
+
+inline void ZPage::set_pre_mapped() {
+  // The _seqnum variable is also used to signal that the virtual and physical
+  // memory has been mapped. So, we need to set it to non-zero when the memory
+  // has been pre-mapped.
+  _seqnum = 1;
+}
+
+// Pinned pages are not evacuated; their objects are forwarded in place
+inline bool ZPage::is_pinned() const {
+  return _pinned;
+}
+
+inline void ZPage::set_pinned() {
+  _pinned = 1;
+}
+
+inline bool ZPage::is_forwarding() const {
+  return !_forwarding.is_null();
+}
+
+// Set up the forwarding table, sized by the number of live objects
+inline void ZPage::set_forwarding() {
+  assert(is_marked(), "Should be marked");
+  _forwarding.setup(_livemap.live_objects());
+}
+
+// Tear down the forwarding table and clear the pinned flag
+inline void ZPage::reset_forwarding() {
+  _forwarding.reset();
+  _pinned = 0;
+}
+
+inline void ZPage::verify_forwarding() const {
+  _forwarding.verify(object_max_count(), _livemap.live_objects());
+}
+
+inline bool ZPage::is_marked() const {
+  assert(is_relocatable(), "Invalid page state");
+  return _livemap.is_marked();
+}
+
+// The live map holds two bits per object slot: bit 0 = marked,
+// bit 1 = strongly marked. Hence the index scaling by 2 below.
+inline bool ZPage::is_object_marked(uintptr_t addr) const {
+  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
+  return _livemap.get(index);
+}
+
+inline bool ZPage::is_object_strongly_marked(uintptr_t addr) const {
+  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
+  return _livemap.get(index + 1);
+}
+
+// Objects in pages from the current cycle are live by definition;
+// older objects are live only if marked.
+inline bool ZPage::is_object_live(uintptr_t addr) const {
+  return is_allocating() || is_object_marked(addr);
+}
+
+inline bool ZPage::is_object_strongly_live(uintptr_t addr) const {
+  return is_allocating() || is_object_strongly_marked(addr);
+}
+
+// Atomically mark the object at addr. Returns whether the mark bit
+// was newly set; inc_live is set when live counters should be updated.
+inline bool ZPage::mark_object(uintptr_t addr, bool finalizable, bool& inc_live) {
+  assert(ZAddress::is_marked(addr), "Invalid address");
+  assert(is_relocatable(), "Invalid page state");
+  assert(is_in(addr), "Invalid address");
+
+  // Set mark bit
+  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
+  return _livemap.set_atomic(index, finalizable, inc_live);
+}
+
+inline void ZPage::inc_live_atomic(uint32_t objects, size_t bytes) {
+  _livemap.inc_live_atomic(objects, bytes);
+}
+
+inline size_t ZPage::live_bytes() const {
+  assert(is_marked(), "Should be marked");
+  return _livemap.live_bytes();
+}
+
+// Apply cl to every live object in the page
+inline void ZPage::object_iterate(ObjectClosure* cl) {
+  _livemap.iterate(cl, ZAddress::good(start()), object_alignment_shift());
+}
+
+// Bump-pointer allocation (non-atomic variant, for pages owned by a
+// single thread). Returns the good address of the new object, or 0
+// when the page has insufficient space.
+inline uintptr_t ZPage::alloc_object(size_t size) {
+  assert(is_allocating(), "Invalid state");
+
+  const size_t aligned_size = align_up(size, object_alignment());
+  const uintptr_t addr = top();
+  const uintptr_t new_top = addr + aligned_size;
+
+  if (new_top > end()) {
+    // Not enough space left
+    return 0;
+  }
+
+  _top = new_top;
+
+  // Fill alignment padding if needed
+  if (aligned_size != size) {
+    ZUtils::insert_filler_object(addr + size, aligned_size - size);
+  }
+
+  return ZAddress::good(addr);
+}
+
+// Bump-pointer allocation (atomic variant, for pages shared between
+// threads). CAS loop on _top; returns 0 when out of space.
+inline uintptr_t ZPage::alloc_object_atomic(size_t size) {
+  assert(is_allocating(), "Invalid state");
+
+  const size_t aligned_size = align_up(size, object_alignment());
+  uintptr_t addr = top();
+
+  for (;;) {
+    const uintptr_t new_top = addr + aligned_size;
+    if (new_top > end()) {
+      // Not enough space left
+      return 0;
+    }
+
+    const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, addr);
+    if (prev_top == addr) {
+      // Fill alignment padding if needed
+      if (aligned_size != size) {
+        ZUtils::insert_filler_object(addr + size, aligned_size - size);
+      }
+
+      // Success
+      return ZAddress::good(addr);
+    }
+
+    // Retry
+    addr = prev_top;
+  }
+}
+
+// Undo an allocation by moving the bump pointer back. Only possible
+// when the object is the most recently allocated one; returns false
+// otherwise.
+inline bool ZPage::undo_alloc_object(uintptr_t addr, size_t size) {
+  assert(is_allocating(), "Invalid state");
+
+  const uintptr_t offset = ZAddress::offset(addr);
+  const size_t aligned_size = align_up(size, object_alignment());
+  const uintptr_t old_top = top();
+  const uintptr_t new_top = old_top - aligned_size;
+
+  if (new_top != offset) {
+    // Failed to undo allocation, not the last allocated object
+    return false;
+  }
+
+  _top = new_top;
+
+  // Success
+  return true;
+}
+
+// Atomic variant of undo_alloc_object, for pages shared between
+// threads. CAS loop on _top.
+inline bool ZPage::undo_alloc_object_atomic(uintptr_t addr, size_t size) {
+  assert(is_allocating(), "Invalid state");
+
+  const uintptr_t offset = ZAddress::offset(addr);
+  const size_t aligned_size = align_up(size, object_alignment());
+  uintptr_t old_top = top();
+
+  for (;;) {
+    const uintptr_t new_top = old_top - aligned_size;
+    if (new_top != offset) {
+      // Failed to undo allocation, not the last allocated object
+      return false;
+    }
+
+    const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, old_top);
+    if (prev_top == old_top) {
+      // Success
+      return true;
+    }
+
+    // Retry
+    old_top = prev_top;
+  }
+}
+
+#endif // SHARE_GC_Z_ZPAGE_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zFuture.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPageAllocator.hpp"
+#include "gc/z/zPageCache.inline.hpp"
+#include "gc/z/zPreMappedMemory.inline.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zTracer.inline.hpp"
+#include "runtime/init.hpp"
+
+static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
+static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");
+
+// A page allocation request from a thread that may need to block.
+// Lives on the requesting thread's stack (StackObj) while linked into
+// the allocator's request queue; the result is delivered through the
+// embedded ZFuture. The request must not be touched by other threads
+// once it has been satisfied, since the stack frame may then be gone
+// (see satisfy_alloc_queue()).
+class ZPageAllocRequest : public StackObj {
+  friend class ZList<ZPageAllocRequest>;
+
+private:
+  const uint8_t                _type;               // Requested page type
+  const size_t                 _size;               // Requested page size in bytes
+  const ZAllocationFlags       _flags;
+  const unsigned int           _total_collections;  // Collection count at enqueue time
+  ZListNode<ZPageAllocRequest> _node;               // Link in the allocator's queue
+  ZFuture<ZPage*>              _result;             // Set by satisfy(), read by wait()
+
+public:
+  ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
+      _type(type),
+      _size(size),
+      _flags(flags),
+      _total_collections(total_collections) {}
+
+  uint8_t type() const {
+    return _type;
+  }
+
+  size_t size() const {
+    return _size;
+  }
+
+  ZAllocationFlags flags() const {
+    return _flags;
+  }
+
+  unsigned int total_collections() const {
+    return _total_collections;
+  }
+
+  // Block until the request is satisfied. Returns the allocated page,
+  // NULL on out-of-memory, or ZPageAllocator::gc_marker, which means a
+  // new GC cycle was started and the caller should wait again.
+  ZPage* wait() {
+    return _result.get();
+  }
+
+  void satisfy(ZPage* page) {
+    _result.set(page);
+  }
+};
+
+// Sentinel "page" used to wake a blocked allocator without satisfying
+// its request, signalling that a new GC cycle has started and it
+// should continue waiting (see alloc_page_blocking()).
+ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;
+
+// min_capacity sizes the pre-mapped memory set up at startup;
+// max_capacity bounds the physical memory manager; max_reserve bytes
+// are excluded from what no_reserve allocations consider available
+// (see available()).
+ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve) :
+    _virtual(),
+    _physical(max_capacity, ZPageSizeMin),
+    _cache(),
+    _pre_mapped(_virtual, _physical, min_capacity),
+    _max_reserve(max_reserve),
+    _used_high(0),
+    _used_low(0),
+    _used(0),
+    _allocated(0),
+    _reclaimed(0),
+    _queue(),
+    _detached() {}
+
+// True if all sub-managers initialized successfully
+bool ZPageAllocator::is_initialized() const {
+  return _physical.is_initialized() &&
+         _virtual.is_initialized() &&
+         _pre_mapped.is_initialized();
+}
+
+size_t ZPageAllocator::max_capacity() const {
+  return _physical.max_capacity();
+}
+
+size_t ZPageAllocator::capacity() const {
+  return _physical.capacity();
+}
+
+size_t ZPageAllocator::max_reserve() const {
+  return _max_reserve;
+}
+
+size_t ZPageAllocator::used_high() const {
+  return _used_high;
+}
+
+size_t ZPageAllocator::used_low() const {
+  return _used_low;
+}
+
+size_t ZPageAllocator::used() const {
+  return _used;
+}
+
+size_t ZPageAllocator::allocated() const {
+  return _allocated;
+}
+
+// _reclaimed can go negative when pages are allocated for relocation
+// (see increase_used()); report that as zero.
+size_t ZPageAllocator::reclaimed() const {
+  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
+}
+
+// Reset per-cycle statistics. Runs at a safepoint, so no concurrent
+// allocations can mutate the counters while they are reset.
+void ZPageAllocator::reset_statistics() {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+  _allocated = 0;
+  _reclaimed = 0;
+  _used_high = _used_low = _used;
+}
+
+void ZPageAllocator::increase_used(size_t size, bool relocation) {
+  if (relocation) {
+    // Allocating a page for the purpose of relocation has a
+    // negative contribution to the number of reclaimed bytes.
+    _reclaimed -= size;
+  }
+  _allocated += size;
+  _used += size;
+  if (_used > _used_high) {
+    _used_high = _used;
+  }
+}
+
+void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
+  if (reclaimed) {
+    // Only pages explicitly released with the reclaimed flag set
+    // counts as reclaimed bytes. This flag is typically true when
+    // a worker releases a page after relocation, and is typically
+    // false when we release a page to undo an allocation.
+    _reclaimed += size;
+  }
+  _used -= size;
+  if (_used < _used_low) {
+    _used_low = _used;
+  }
+}
+
+// Number of bytes currently available for allocation. Allocations with
+// the no_reserve flag set must additionally leave max_reserve() bytes
+// untouched.
+size_t ZPageAllocator::available(ZAllocationFlags flags) const {
+  size_t available = max_capacity() - used();
+  assert(_physical.available() + _pre_mapped.available() + _cache.available()  == available, "Should be equal");
+
+  if (flags.no_reserve()) {
+    // The memory reserve should not be considered free
+    available -= MIN2(available, max_reserve());
+  }
+
+  return available;
+}
+
+// Create a new page of the given type and size, backed by newly
+// allocated physical and virtual memory. Returns NULL if either
+// resource is exhausted. The memory is not yet mapped here (see
+// map_page()).
+ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
+  // Allocate physical memory
+  const ZPhysicalMemory pmem = _physical.alloc(size);
+  if (pmem.is_null()) {
+    // Out of memory
+    return NULL;
+  }
+
+  // Allocate virtual memory
+  const ZVirtualMemory vmem = _virtual.alloc(size);
+  if (vmem.is_null()) {
+    // Out of address space, roll back the physical allocation
+    _physical.free(pmem);
+    return NULL;
+  }
+
+  // Allocate page
+  return new ZPage(type, vmem, pmem);
+}
+
+// Release any remaining pre-mapped memory so its physical memory can
+// be reused for regular page allocations.
+void ZPageAllocator::flush_pre_mapped() {
+  if (_pre_mapped.available() == 0) {
+    return;
+  }
+
+  // Detach the memory mapping.
+  detach_memory(_pre_mapped.virtual_memory(), _pre_mapped.physical_memory());
+
+  _pre_mapped.clear();
+}
+
+void ZPageAllocator::map_page(ZPage* page) {
+  // Map physical memory
+  _physical.map(page->physical_memory(), page->start());
+}
+
+// Unmap and free the page's physical memory, and queue the page on the
+// detached list for later destruction (see flush_detached_pages() and
+// destroy_page()).
+void ZPageAllocator::detach_page(ZPage* page) {
+  // Detach the memory mapping.
+  detach_memory(page->virtual_memory(), page->physical_memory());
+
+  // Add to list of detached pages
+  _detached.insert_last(page);
+}
+
+// Final destruction of a previously detached page. Acquires _lock
+// itself, so must not be called with the lock already held.
+void ZPageAllocator::destroy_page(ZPage* page) {
+  assert(page->is_detached(), "Invalid page state");
+
+  // Free virtual memory
+  {
+    ZLocker locker(&_lock);
+    _virtual.free(page->virtual_memory());
+  }
+
+  delete page;
+}
+
+// Transfer all detached pages to the caller's list.
+void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
+  ZLocker locker(&_lock);
+  list->transfer(&_detached);
+}
+
+// Evict cached pages and detach them, freeing up (at least) 'size'
+// bytes of physical memory if the cache holds that much.
+void ZPageAllocator::flush_cache(size_t size) {
+  ZList<ZPage> list;
+
+  _cache.flush(&list, size);
+
+  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
+    detach_page(page);
+  }
+}
+
+// Blocking for memory is not possible until the VM is fully
+// initialized; treat allocation failure during startup as a fatal
+// "Java heap too small" error instead.
+void ZPageAllocator::check_out_of_memory_during_initialization() {
+  if (!is_init_completed()) {
+    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
+  }
+}
+
+// Allocation slow path, called with _lock held. Tries, in order: the
+// page cache, the pre-mapped memory, and finally creating a new page
+// (flushing pre-mapped memory and, if needed, the cache first to free
+// up physical memory).
+ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags) {
+  const size_t available_total = available(flags);
+  if (available_total < size) {
+    // Not enough free memory
+    return NULL;
+  }
+
+  // Try allocating from the page cache
+  ZPage* const cached_page = _cache.alloc_page(type, size);
+  if (cached_page != NULL) {
+    return cached_page;
+  }
+
+  // Try allocate from the pre-mapped memory
+  ZPage* const pre_mapped_page = _pre_mapped.alloc_page(type, size);
+  if (pre_mapped_page != NULL) {
+    return pre_mapped_page;
+  }
+
+  // Flush any remaining pre-mapped memory so that
+  // subsequent allocations can use the physical memory.
+  flush_pre_mapped();
+
+  // Check if physical memory is available
+  const size_t available_physical = _physical.available();
+  if (available_physical < size) {
+    // Flush cache to free up more physical memory
+    flush_cache(size - available_physical);
+  }
+
+  // Create new page and allocate physical memory
+  return create_page(type, size);
+}
+
+// Allocate a page and update accounting and tracing. Called with
+// _lock held.
+ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
+  ZPage* const page = alloc_page_common_inner(type, size, flags);
+  if (page == NULL) {
+    // Out of memory
+    return NULL;
+  }
+
+  // Update used statistics
+  increase_used(size, flags.relocation());
+
+  // Send trace event
+  ZTracer::tracer()->report_page_alloc(size, used(), available(flags), _cache.available(), flags);
+
+  return page;
+}
+
+// Blocking allocation path. If memory is not immediately available the
+// request is queued and this thread blocks until another thread either
+// satisfies it with a page (satisfy_alloc_queue()), fails it with NULL
+// (check_out_of_memory()), or wakes it with gc_marker, meaning a new
+// GC cycle was started and it should keep waiting.
+ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
+  // Prepare to block. The request lives on this thread's stack.
+  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());
+
+  _lock.lock();
+
+  // Try non-blocking allocation
+  ZPage* page = alloc_page_common(type, size, flags);
+  if (page == NULL) {
+    // Allocation failed, enqueue request
+    _queue.insert_last(&request);
+  }
+
+  _lock.unlock();
+
+  if (page == NULL) {
+    // Allocation failed
+    ZStatTimer timer(ZCriticalPhaseAllocationStall);
+
+    // We can only block if VM is fully initialized
+    check_out_of_memory_during_initialization();
+
+    do {
+      // Start asynchronous GC
+      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);
+
+      // Wait for allocation to complete or fail
+      page = request.wait();
+    } while (page == gc_marker);
+  }
+
+  return page;
+}
+
+// Non-blocking allocation path; returns NULL immediately on failure.
+ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
+  ZLocker locker(&_lock);
+  return alloc_page_common(type, size, flags);
+}
+
+// Public entry point for page allocation. On success the page is
+// mapped, reset, and allocation-rate statistics are updated.
+ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
+  ZPage* const page = flags.non_blocking()
+                      ? alloc_page_nonblocking(type, size, flags)
+                      : alloc_page_blocking(type, size, flags);
+  if (page == NULL) {
+    // Out of memory
+    return NULL;
+  }
+
+  // Map page if needed (cached and pre-mapped pages are already mapped)
+  if (!page->is_mapped()) {
+    map_page(page);
+  }
+
+  // Reset page. This updates the page's sequence number and must
+  // be done after page allocation, which potentially blocked in
+  // a safepoint where the global sequence number was updated.
+  page->reset();
+
+  // Update allocation statistics. Exclude worker threads to avoid
+  // artificial inflation of the allocation rate due to relocation.
+  if (!flags.worker_thread()) {
+    // Note that there are two allocation rate counters, which have
+    // different purposes and are sampled at different frequencies.
+    const size_t bytes = page->size();
+    ZStatInc(ZCounterAllocationRate, bytes);
+    ZStatInc(ZStatAllocRate::counter(), bytes);
+  }
+
+  return page;
+}
+
+// Satisfy as many queued (blocked) allocation requests as possible,
+// in FIFO order. Called with _lock held (see free_page()).
+void ZPageAllocator::satisfy_alloc_queue() {
+  for (;;) {
+    ZPageAllocRequest* const request = _queue.first();
+    if (request == NULL) {
+      // Allocation queue is empty
+      return;
+    }
+
+    ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags());
+    if (page == NULL) {
+      // Allocation could not be satisfied, give up
+      return;
+    }
+
+    // Allocation succeeded, dequeue and satisfy request. Note that
+    // the dequeue operation must happen first, since the request
+    // will immediately be deallocated once it has been satisfied.
+    _queue.remove(request);
+    request->satisfy(page);
+  }
+}
+
+// Unmap, free and clear the given physical memory. The virtual range
+// is intentionally not freed here (see destroy_page()).
+void ZPageAllocator::detach_memory(const ZVirtualMemory& vmem, ZPhysicalMemory& pmem) {
+  const uintptr_t addr = vmem.start();
+
+  // Unmap physical memory
+  _physical.unmap(pmem, addr);
+
+  // Free physical memory
+  _physical.free(pmem);
+
+  // Clear physical mapping
+  pmem.clear();
+}
+
+void ZPageAllocator::flip_page(ZPage* page) {
+  const ZPhysicalMemory& pmem = page->physical_memory();
+  const uintptr_t addr = page->start();
+
+  // Flip physical mapping
+  _physical.flip(pmem, addr);
+}
+
+void ZPageAllocator::flip_pre_mapped() {
+  if (_pre_mapped.available() == 0) {
+    // Nothing to flip
+    return;
+  }
+
+  const ZPhysicalMemory& pmem = _pre_mapped.physical_memory();
+  const ZVirtualMemory& vmem = _pre_mapped.virtual_memory();
+
+  // Flip physical mapping
+  _physical.flip(pmem, vmem.start());
+}
+
+// Return a page to the cache and use the freed-up memory to satisfy
+// blocked allocation requests. 'reclaimed' is true when the page was
+// freed as a result of relocation (see decrease_used()).
+void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
+  ZLocker locker(&_lock);
+
+  // Update used statistics
+  decrease_used(page->size(), reclaimed);
+
+  // Cache page
+  _cache.free_page(page);
+
+  // Try satisfy blocked allocations
+  satisfy_alloc_queue();
+}
+
+// Decide whether blocked allocation requests should fail with OOM.
+// A request fails only if a GC cycle completed after it was enqueued
+// and it still could not be satisfied; otherwise the oldest waiter is
+// woken with gc_marker so another GC cycle gets started.
+void ZPageAllocator::check_out_of_memory() {
+  ZLocker locker(&_lock);
+
+  ZPageAllocRequest* const first = _queue.first();
+  if (first == NULL) {
+    // Allocation queue is empty
+    return;
+  }
+
+  // Fail the allocation request if it was enqueued before the
+  // last GC cycle started, otherwise start a new GC cycle.
+  if (first->total_collections() < ZCollectedHeap::heap()->total_collections()) {
+    // Out of memory, fail all enqueued requests
+    for (ZPageAllocRequest* request = _queue.remove_first(); request != NULL; request = _queue.remove_first()) {
+      request->satisfy(NULL);
+    }
+  } else {
+    // Start another GC cycle, keep all enqueued requests
+    first->satisfy(gc_marker);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPageAllocator.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPAGEALLOCATOR_HPP
+#define SHARE_GC_Z_ZPAGEALLOCATOR_HPP
+
+#include "gc/z/zAllocationFlags.hpp"
+#include "gc/z/zList.hpp"
+#include "gc/z/zLock.hpp"
+#include "gc/z/zPageCache.hpp"
+#include "gc/z/zPhysicalMemory.hpp"
+#include "gc/z/zPreMappedMemory.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+#include "memory/allocation.hpp"
+
+class ZPageAllocRequest;
+
+// Allocator for ZPages. Owns the virtual and physical memory managers,
+// a cache of unused (still mapped) pages, the pre-mapped startup
+// memory, usage statistics, and a queue of blocked allocation requests.
+class ZPageAllocator {
+  friend class VMStructs;
+
+private:
+  ZLock                    _lock;        // Protects the mutable state below
+  ZVirtualMemoryManager    _virtual;
+  ZPhysicalMemoryManager   _physical;
+  ZPageCache               _cache;       // Cache of freed, still-mapped pages
+  ZPreMappedMemory         _pre_mapped;  // Memory mapped up-front at startup
+  const size_t             _max_reserve; // Bytes hidden from no_reserve allocations
+  size_t                   _used_high;   // High-water mark of _used since last reset
+  size_t                   _used_low;    // Low-water mark of _used since last reset
+  size_t                   _used;        // Currently allocated bytes
+  size_t                   _allocated;   // Bytes allocated since last reset
+  ssize_t                  _reclaimed;   // Bytes reclaimed since last reset (may go negative)
+  ZList<ZPageAllocRequest> _queue;       // Blocked allocation requests (FIFO)
+  ZList<ZPage>             _detached;    // Detached pages awaiting destruction
+
+  // Sentinel used to wake blocked allocators when a new GC cycle starts
+  static ZPage* const      gc_marker;
+
+  void increase_used(size_t size, bool relocation);
+  void decrease_used(size_t size, bool reclaimed);
+
+  size_t available(ZAllocationFlags flags) const;
+
+  ZPage* create_page(uint8_t type, size_t size);
+  void map_page(ZPage* page);
+  void detach_page(ZPage* page);
+  void flush_pre_mapped();
+  void flush_cache(size_t size);
+
+  void check_out_of_memory_during_initialization();
+
+  ZPage* alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags);
+  ZPage* alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags);
+  ZPage* alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags);
+  ZPage* alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags);
+
+  void satisfy_alloc_queue();
+
+  void detach_memory(const ZVirtualMemory& vmem, ZPhysicalMemory& pmem);
+
+public:
+  ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve);
+
+  bool is_initialized() const;
+
+  size_t max_capacity() const;
+  size_t capacity() const;
+  size_t max_reserve() const;
+  size_t used_high() const;
+  size_t used_low() const;
+  size_t used() const;
+  size_t allocated() const;
+  size_t reclaimed() const;
+
+  void reset_statistics();
+
+  ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
+  void flip_page(ZPage* page);
+  void free_page(ZPage* page, bool reclaimed);
+  void destroy_page(ZPage* page);
+
+  void flush_detached_pages(ZList<ZPage>* list);
+
+  void flip_pre_mapped();
+
+  void check_out_of_memory();
+};
+
+#endif // SHARE_GC_Z_ZPAGEALLOCATOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPageCache.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zList.inline.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPageCache.hpp"
+#include "gc/z/zStat.hpp"
+#include "logging/log.hpp"
+
+static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
+static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
+static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);
+static const ZStatCounter ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
+
+ZPageCache::ZPageCache() :
+    _available(0),
+    _small(),
+    _medium(),
+    _large() {}
+
+// Allocate a cached small page. Prefer the NUMA-local list (L1 hit),
+// then scan the other NUMA nodes' lists round-robin, starting at the
+// next node (L2 hit).
+ZPage* ZPageCache::alloc_small_page() {
+  const uint32_t numa_id = ZNUMA::id();
+  const uint32_t numa_count = ZNUMA::count();
+
+  // Try NUMA local page cache
+  ZPage* const l1_page = _small.get(numa_id).remove_first();
+  if (l1_page != NULL) {
+    ZStatInc(ZCounterPageCacheHitL1);
+    return l1_page;
+  }
+
+  // Try NUMA remote page cache(s)
+  uint32_t remote_numa_id = numa_id + 1;
+  const uint32_t remote_numa_count = numa_count - 1;
+  for (uint32_t i = 0; i < remote_numa_count; i++) {
+    if (remote_numa_id == numa_count) {
+      // Wrap around
+      remote_numa_id = 0;
+    }
+
+    ZPage* const l2_page = _small.get(remote_numa_id).remove_first();
+    if (l2_page != NULL) {
+      ZStatInc(ZCounterPageCacheHitL2);
+      return l2_page;
+    }
+
+    remote_numa_id++;
+  }
+
+  ZStatInc(ZCounterPageCacheMiss);
+  return NULL;
+}
+
+ZPage* ZPageCache::alloc_medium_page() {
+  ZPage* const l1_page = _medium.remove_first();
+  if (l1_page != NULL) {
+    ZStatInc(ZCounterPageCacheHitL1);
+    return l1_page;
+  }
+
+  ZStatInc(ZCounterPageCacheMiss);
+  return NULL;
+}
+
+// Large pages have varying sizes, so a linear search for an exact
+// size match is needed.
+ZPage* ZPageCache::alloc_large_page(size_t size) {
+  // Find a page with the right size
+  ZListIterator<ZPage> iter(&_large);
+  for (ZPage* l1_page; iter.next(&l1_page);) {
+    if (l1_page->size() == size) {
+      // Page found
+      _large.remove(l1_page);
+      ZStatInc(ZCounterPageCacheHitL1);
+      return l1_page;
+    }
+  }
+
+  ZStatInc(ZCounterPageCacheMiss);
+  return NULL;
+}
+
+// Allocate a page of the given type/size from the cache, or return
+// NULL on a cache miss. Updates the available-bytes accounting on a hit.
+ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
+  ZPage* page;
+
+  if (type == ZPageTypeSmall) {
+    page = alloc_small_page();
+  } else if (type == ZPageTypeMedium) {
+    page = alloc_medium_page();
+  } else {
+    page = alloc_large_page(size);
+  }
+
+  if (page != NULL) {
+    _available -= page->size();
+  }
+
+  return page;
+}
+
+// Insert a freed page at the head of the appropriate list (most
+// recently used first; flushing evicts from the tail).
+void ZPageCache::free_page(ZPage* page) {
+  assert(!page->is_active(), "Invalid page state");
+  assert(!page->is_pinned(), "Invalid page state");
+  assert(!page->is_detached(), "Invalid page state");
+
+  const uint8_t type = page->type();
+  if (type == ZPageTypeSmall) {
+    _small.get(page->numa_id()).insert_first(page);
+  } else if (type == ZPageTypeMedium) {
+    _medium.insert_first(page);
+  } else {
+    _large.insert_first(page);
+  }
+
+  _available += page->size();
+}
+
+// Move pages from the tail (least recently used end) of 'from' to
+// 'to' until *flushed reaches 'requested' or 'from' is empty.
+void ZPageCache::flush_list(ZList<ZPage>* from, size_t requested, ZList<ZPage>* to, size_t* flushed) {
+  while (*flushed < requested) {
+    // Flush least recently used
+    ZPage* const page = from->remove_last();
+    if (page == NULL) {
+      break;
+    }
+
+    *flushed += page->size();
+    to->insert_last(page);
+  }
+}
+
+// Like flush_list(), but drains the per-NUMA lists round-robin so the
+// eviction load is spread across the NUMA nodes. Stops when the
+// requested amount is reached or all lists are seen empty in a row.
+void ZPageCache::flush_per_numa_lists(ZPerNUMA<ZList<ZPage> >* from, size_t requested, ZList<ZPage>* to, size_t* flushed) {
+  const uint32_t numa_count = ZNUMA::count();
+  uint32_t numa_empty = 0;
+  uint32_t numa_next = 0;
+
+  // Flush lists round-robin
+  while (*flushed < requested) {
+    ZPage* const page = from->get(numa_next).remove_last();
+
+    if (++numa_next == numa_count) {
+      numa_next = 0;
+    }
+
+    if (page == NULL) {
+      // List is empty
+      if (++numa_empty == numa_count) {
+        // All lists are empty
+        break;
+      }
+
+      // Try next list
+      continue;
+    }
+
+    // Flush page
+    numa_empty = 0;
+    *flushed += page->size();
+    to->insert_last(page);
+  }
+}
+
+// Flush (up to) 'requested' bytes worth of pages from the cache into
+// 'to'. Large pages are flushed first, then medium, then small. May
+// flush less than requested if the cache does not hold that much.
+void ZPageCache::flush(ZList<ZPage>* to, size_t requested) {
+  size_t flushed = 0;
+
+  // Prefer flushing large, then medium and last small pages
+  flush_list(&_large, requested, to, &flushed);
+  flush_list(&_medium, requested, to, &flushed);
+  flush_per_numa_lists(&_small, requested, to, &flushed);
+
+  ZStatInc(ZCounterPageCacheFlush, flushed);
+
+  // Note: logged before _available is updated below
+  log_info(gc, heap)("Page Cache Flushed: "
+                     SIZE_FORMAT "M requested, "
+                     SIZE_FORMAT "M(" SIZE_FORMAT "M->" SIZE_FORMAT "M) flushed",
+                     requested / M, flushed / M , _available / M, (_available - flushed) / M);
+
+  _available -= flushed;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPageCache.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPAGECACHE_HPP
+#define SHARE_GC_Z_ZPAGECACHE_HPP
+
+#include "gc/z/zList.hpp"
+#include "gc/z/zPage.hpp"
+#include "gc/z/zValue.hpp"
+#include "memory/allocation.hpp"
+
+// Cache of free, still-mapped pages, kept in per-type lists. Small
+// pages are additionally kept per NUMA node for locality.
+class ZPageCache {
+private:
+  size_t                  _available;  // Total bytes held in the cache
+
+  ZPerNUMA<ZList<ZPage> > _small;      // Per-NUMA-node lists of small pages
+  ZList<ZPage>            _medium;
+  ZList<ZPage>            _large;
+
+  ZPage* alloc_small_page();
+  ZPage* alloc_medium_page();
+  ZPage* alloc_large_page(size_t size);
+
+  void flush_list(ZList<ZPage>* from, size_t requested, ZList<ZPage>* to, size_t* flushed);
+  void flush_per_numa_lists(ZPerNUMA<ZList<ZPage> >* from, size_t requested, ZList<ZPage>* to, size_t* flushed);
+
+public:
+  ZPageCache();
+
+  size_t available() const;
+
+  ZPage* alloc_page(uint8_t type, size_t size);
+  void free_page(ZPage* page);
+
+  void flush(ZList<ZPage>* to, size_t requested);
+};
+
+#endif // SHARE_GC_Z_ZPAGECACHE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPageCache.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
+#define SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
+
+#include "gc/z/zPageCache.hpp"
+
+// Total number of bytes held in the cache
+inline size_t ZPageCache::available() const {
+  return _available;
+}
+
+#endif // SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPageTable.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPageTable.inline.hpp"
+#include "runtime/orderAccess.hpp"
+#include "utilities/debug.hpp"
+
+ZPageTable::ZPageTable() :
+    _map() {}
+
+// Read the table entry covering the page's first granule
+ZPageTableEntry ZPageTable::get_entry(ZPage* page) const {
+  const uintptr_t addr = ZAddress::good(page->start());
+  return _map.get(addr);
+}
+
+// Store 'entry' into every ZPageSizeMin-sized slot spanned by the
+// page, so a lookup of any address inside the page finds it.
+void ZPageTable::put_entry(ZPage* page, ZPageTableEntry entry) {
+  // Make sure a newly created page is globally visible before
+  // updating the pagetable.
+  OrderAccess::storestore();
+
+  const uintptr_t start = ZAddress::good(page->start());
+  const uintptr_t end = start + page->size();
+  for (uintptr_t addr = start; addr < end; addr += ZPageSizeMin) {
+    _map.put(addr, entry);
+  }
+}
+
+// Insert a page into the table. Cached pages keep their table entry
+// across free/re-allocate, in which case this is a no-op.
+void ZPageTable::insert(ZPage* page) {
+  assert(get_entry(page).page() == NULL ||
+         get_entry(page).page() == page, "Invalid entry");
+
+  // Cached pages stays in the pagetable and we must not re-insert
+  // those when they get re-allocated because they might also be
+  // relocating and we don't want to clear their relocating bit.
+  if (get_entry(page).page() == NULL) {
+    ZPageTableEntry entry(page, false /* relocating */);
+    put_entry(page, entry);
+  }
+
+  assert(get_entry(page).page() == page, "Invalid entry");
+}
+
+// Remove a page by overwriting its address range with a
+// default-constructed (empty) entry
+void ZPageTable::remove(ZPage* page) {
+  assert(get_entry(page).page() == page, "Invalid entry");
+
+  ZPageTableEntry entry;
+  put_entry(page, entry);
+
+  assert(get_entry(page).page() == NULL, "Invalid entry");
+}
+
+// Set the relocating bit for all of the page's table entries
+void ZPageTable::set_relocating(ZPage* page) {
+  assert(get_entry(page).page() == page, "Invalid entry");
+  assert(!get_entry(page).relocating(), "Invalid entry");
+
+  ZPageTableEntry entry(page, true /* relocating */);
+  put_entry(page, entry);
+
+  assert(get_entry(page).page() == page, "Invalid entry");
+  assert(get_entry(page).relocating(), "Invalid entry");
+}
+
+// Clear the relocating bit for all of the page's table entries
+void ZPageTable::clear_relocating(ZPage* page) {
+  assert(get_entry(page).page() == page, "Invalid entry");
+  assert(get_entry(page).relocating(), "Invalid entry");
+
+  ZPageTableEntry entry(page, false /* relocating */);
+  put_entry(page, entry);
+
+  assert(get_entry(page).page() == page, "Invalid entry");
+  assert(!get_entry(page).relocating(), "Invalid entry");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPageTable.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPAGETABLE_HPP
+#define SHARE_GC_Z_ZPAGETABLE_HPP
+
+#include "gc/z/zAddressRangeMap.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zPageTableEntry.hpp"
+#include "memory/allocation.hpp"
+
+class ZPage;
+
+// Maps (good) heap addresses to the ZPage covering them, at
+// ZPageSizeMin granularity, along with a per-granule relocating bit.
+class ZPageTable {
+  friend class VMStructs;
+  friend class ZPageTableIterator;
+
+private:
+  ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> _map;
+
+  ZPageTableEntry get_entry(ZPage* page) const;
+  void put_entry(ZPage* page, ZPageTableEntry entry);
+
+public:
+  ZPageTable();
+
+  ZPage* get(uintptr_t addr) const;
+  void insert(ZPage* page);
+  void remove(ZPage* page);
+
+  bool is_relocating(uintptr_t addr) const;
+  void set_relocating(ZPage* page);
+  void clear_relocating(ZPage* page);
+};
+
+// Iterates over the distinct pages in a ZPageTable. Consecutive map
+// slots referring to the same (multi-granule) page are collapsed by
+// remembering the previously returned page.
+class ZPageTableIterator : public StackObj {
+private:
+  ZAddressRangeMapIterator<ZPageTableEntry, ZPageSizeMinShift> _iter;
+  ZPage*                                                       _prev;
+
+public:
+  ZPageTableIterator(const ZPageTable* pagetable);
+
+  bool next(ZPage** page);
+};
+
+#endif // SHARE_GC_Z_ZPAGETABLE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPageTable.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPAGETABLE_INLINE_HPP
+#define SHARE_GC_Z_ZPAGETABLE_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zAddressRangeMap.inline.hpp"
+#include "gc/z/zPageTable.hpp"
+
+// Returns the page covering addr (NULL if the slot holds no page).
+inline ZPage* ZPageTable::get(uintptr_t addr) const {
+  return _map.get(addr).page();
+}
+
+// Returns true if the page covering addr is flagged as relocating.
+inline bool ZPageTable::is_relocating(uintptr_t addr) const {
+  return _map.get(addr).relocating();
+}
+
+// Wraps an iterator over the table's underlying address range map.
+inline ZPageTableIterator::ZPageTableIterator(const ZPageTable* pagetable) :
+    _iter(&pagetable->_map),
+    _prev(NULL) {}
+
+// Advances to the next distinct page. A page larger than the map
+// granularity occupies consecutive slots, so entries equal to the
+// previously returned page are skipped.
+inline bool ZPageTableIterator::next(ZPage** page) {
+  ZPageTableEntry entry;
+
+  while (_iter.next(&entry)) {
+    ZPage* const next = entry.page();
+    if (next != NULL && next != _prev) {
+      // Next page found
+      *page = _prev = next;
+      return true;
+    }
+  }
+
+  // No more pages
+  return false;
+}
+
+#endif // SHARE_GC_Z_ZPAGETABLE_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPageTableEntry.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPAGETABLEENTRY_HPP
+#define SHARE_GC_Z_ZPAGETABLEENTRY_HPP
+
+#include "gc/z/zBitField.hpp"
+#include "memory/allocation.hpp"
+
+//
+// Page table entry layout
+// -----------------------
+//
+//   6
+//   3                                                                    1 0
+//  +----------------------------------------------------------------------+-+
+//  |11111111 11111111 11111111 11111111 11111111 11111111 11111111 1111111|1|
+//  +----------------------------------------------------------------------+-+
+//  |                                                                      |
+//  |                                          0-0 Relocating Flag (1-bit) *
+//  |
+//  |
+//  |
+//  * 63-1 Page address (63-bits)
+//
+
+class ZPage;
+
+// A single page table slot, packing a ZPage pointer and a relocating
+// flag into one 64-bit word (layout pictured above). The page field
+// uses a value shift of 1, relying on ZPage* being at least 2-byte
+// aligned so no pointer bits are lost.
+class ZPageTableEntry {
+private:
+  typedef ZBitField<uint64_t, bool,   0, 1>     field_relocating;  // bit 0
+  typedef ZBitField<uint64_t, ZPage*, 1, 63, 1> field_page;        // bits 1-63
+
+  uint64_t _entry;
+
+public:
+  // Empty entry: no page, not relocating.
+  ZPageTableEntry() :
+      _entry(0) {}
+
+  ZPageTableEntry(ZPage* page, bool relocating) :
+      _entry(field_page::encode(page) |
+             field_relocating::encode(relocating)) {}
+
+  bool relocating() const {
+    return field_relocating::decode(_entry);
+  }
+
+  ZPage* page() const {
+    return field_page::decode(_entry);
+  }
+};
+
+#endif // SHARE_GC_Z_ZPAGETABLEENTRY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/debug.hpp"
+
+// Empty (null) physical memory descriptor.
+ZPhysicalMemory::ZPhysicalMemory() :
+    _nsegments(0),
+    _segments(NULL) {}
+
+// Single segment starting at offset 0 with the given size.
+ZPhysicalMemory::ZPhysicalMemory(size_t size) :
+    _nsegments(0),
+    _segments(NULL) {
+  add_segment(ZPhysicalMemorySegment(0, size));
+}
+
+// Single segment copied from the given descriptor.
+ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
+    _nsegments(0),
+    _segments(NULL) {
+  add_segment(segment);
+}
+
+// Total size in bytes, summed over all segments.
+size_t ZPhysicalMemory::size() const {
+  size_t size = 0;
+
+  for (size_t i = 0; i < _nsegments; i++) {
+    size += _segments[i].size();
+  }
+
+  return size;
+}
+
+// Appends a segment, merging it into the last segment when the two are
+// contiguous. Segments must be added in ascending address order.
+// Grows the backing array by one element per non-merged add
+// (ReallocateHeap); fine for the small segment counts expected here.
+void ZPhysicalMemory::add_segment(ZPhysicalMemorySegment segment) {
+  // Try merge with last segment
+  if (_nsegments > 0) {
+    ZPhysicalMemorySegment& last = _segments[_nsegments - 1];
+    assert(last.end() <= segment.start(), "Segments added out of order");
+    if (last.end() == segment.start()) {
+      // Merge
+      last.expand(segment.size());
+      return;
+    }
+  }
+
+  // Make room for a new segment
+  const size_t size = sizeof(ZPhysicalMemorySegment) * (_nsegments + 1);
+  _segments = (ZPhysicalMemorySegment*)ReallocateHeap((char*)_segments, size, mtGC);
+
+  // Add new segment
+  _segments[_nsegments] = segment;
+  _nsegments++;
+}
+
+// Carves off the first split_size bytes into a new descriptor, shrinking
+// this one accordingly. Only implemented for single-segment instances.
+ZPhysicalMemory ZPhysicalMemory::split(size_t split_size) {
+  // Only splitting of single-segment instances have been implemented.
+  assert(nsegments() == 1, "Can only have one segment");
+  assert(split_size <= size(), "Invalid size");
+  return ZPhysicalMemory(_segments[0].split(split_size));
+}
+
+// Releases the segment metadata array and resets to the null state.
+// Frees only this descriptor's bookkeeping, not the memory it described.
+void ZPhysicalMemory::clear() {
+  if (_segments != NULL) {
+    FreeHeap(_segments);
+    _segments = NULL;
+    _nsegments = 0;
+  }
+}
+
+// Manager starts with zero committed capacity; it expands lazily on
+// allocation, up to max_capacity, via the OS/CPU-specific backing.
+ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity, size_t granule_size) :
+    _backing(max_capacity, granule_size),
+    _max_capacity(max_capacity),
+    _capacity(0),
+    _used(0) {}
+
+bool ZPhysicalMemoryManager::is_initialized() const {
+  return _backing.is_initialized();
+}
+
+// Ensures at least size bytes of unused capacity exist, expanding the
+// backing if needed. Returns false if expansion would exceed
+// max_capacity or the backing itself fails to expand.
+bool ZPhysicalMemoryManager::ensure_available(size_t size) {
+  const size_t unused_capacity = _capacity - _used;
+  if (unused_capacity >= size) {
+    // Enough unused capacity available
+    return true;
+  }
+
+  const size_t expand_with = size - unused_capacity;
+  const size_t new_capacity = _capacity + expand_with;
+  if (new_capacity > _max_capacity) {
+    // Can not expand beyond max capacity
+    return false;
+  }
+
+  // Expand
+  if (!_backing.expand(_capacity, new_capacity)) {
+    log_error(gc)("Failed to expand Java heap with " SIZE_FORMAT "%s",
+                  byte_size_in_proper_unit(expand_with),
+                  proper_unit_for_byte_size(expand_with));
+    return false;
+  }
+
+  _capacity = new_capacity;
+
+  return true;
+}
+
+// Reports a mapped range to Native Memory Tracking. The NMT address is
+// derived from the heap offset by the backing, since ZGC's multi-mapped
+// views would otherwise be counted multiple times.
+void ZPhysicalMemoryManager::nmt_commit(ZPhysicalMemory pmem, uintptr_t offset) {
+  const uintptr_t addr = _backing.nmt_address(offset);
+  const size_t size = pmem.size();
+  MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
+}
+
+// Reports an unmapped range to NMT. Unlike commit, uncommit requires a
+// Tracker transaction and is skipped entirely at minimal tracking level
+// (matching the MemTracker API contract for uncommit records).
+void ZPhysicalMemoryManager::nmt_uncommit(ZPhysicalMemory pmem, uintptr_t offset) {
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    const uintptr_t addr = _backing.nmt_address(offset);
+    const size_t size = pmem.size();
+
+    Tracker tracker(Tracker::uncommit);
+    tracker.record((address)addr, size);
+  }
+}
+
+// Allocates size bytes of physical memory, expanding capacity if
+// possible. Returns a null descriptor (is_null()) on failure.
+ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
+  if (!ensure_available(size)) {
+    // Not enough memory available
+    return ZPhysicalMemory();
+  }
+
+  _used += size;
+  return _backing.alloc(size);
+}
+
+// Returns physical memory to the backing and releases its accounting.
+void ZPhysicalMemoryManager::free(ZPhysicalMemory pmem) {
+  _backing.free(pmem);
+  _used -= pmem.size();
+}
+
+// Maps physical memory at the given heap offset and records it with NMT.
+void ZPhysicalMemoryManager::map(ZPhysicalMemory pmem, uintptr_t offset) {
+  // Map page
+  _backing.map(pmem, offset);
+
+  // Update native memory tracker
+  nmt_commit(pmem, offset);
+}
+
+// Unmaps physical memory from the given heap offset; NMT is updated
+// first, mirroring map() in reverse order.
+void ZPhysicalMemoryManager::unmap(ZPhysicalMemory pmem, uintptr_t offset) {
+  // Update native memory tracker
+  nmt_uncommit(pmem, offset);
+
+  // Unmap page
+  _backing.unmap(pmem, offset);
+}
+
+// Delegates view flipping (remap under a new color bit) to the backing.
+void ZPhysicalMemoryManager::flip(ZPhysicalMemory pmem, uintptr_t offset) {
+  _backing.flip(pmem, offset);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPHYSICALMEMORY_HPP
+#define SHARE_GC_Z_ZPHYSICALMEMORY_HPP
+
+#include "memory/allocation.hpp"
+#include OS_CPU_HEADER(gc/z/zPhysicalMemoryBacking)
+
+// A contiguous [start, end) range of physical memory offsets.
+class ZPhysicalMemorySegment {
+private:
+  uintptr_t _start;
+  uintptr_t _end;
+
+public:
+  ZPhysicalMemorySegment(uintptr_t start, size_t size);
+
+  uintptr_t start() const;
+  uintptr_t end() const;
+  size_t size() const;
+
+  // Grow the segment by size bytes (moves _end).
+  void expand(size_t size);
+  // Carve off the first size bytes as a new segment (moves _start).
+  ZPhysicalMemorySegment split(size_t size);
+};
+
+// A set of physical memory segments, kept sorted and merged where
+// contiguous. NOTE(review): _segments is a raw heap pointer and the
+// class relies on the compiler-generated copy constructor, so
+// pass-by-value copies share the segment array; callers must observe a
+// single-owner discipline (only the owner calls clear()).
+class ZPhysicalMemory {
+private:
+  size_t                  _nsegments;
+  ZPhysicalMemorySegment* _segments;
+
+public:
+  ZPhysicalMemory();
+  ZPhysicalMemory(size_t size);
+  ZPhysicalMemory(const ZPhysicalMemorySegment& segment);
+
+  // True when the descriptor holds no segments (e.g. failed alloc).
+  bool is_null() const;
+  size_t size() const;
+
+  size_t nsegments() const;
+  ZPhysicalMemorySegment segment(size_t index) const;
+  void add_segment(ZPhysicalMemorySegment segment);
+
+  ZPhysicalMemory split(size_t size);
+  void clear();
+};
+
+// Allocates, maps and tracks physical memory for the Z heap, delegating
+// OS-specific work to ZPhysicalMemoryBacking (selected via
+// OS_CPU_HEADER). Capacity grows lazily up to max_capacity.
+class ZPhysicalMemoryManager {
+  friend class VMStructs;
+
+private:
+  ZPhysicalMemoryBacking _backing;
+  const size_t           _max_capacity;  // hard upper bound
+  size_t                 _capacity;      // currently committed
+  size_t                 _used;          // currently allocated
+
+  bool ensure_available(size_t size);
+
+  // Native Memory Tracking bookkeeping for map()/unmap().
+  void nmt_commit(ZPhysicalMemory pmem, uintptr_t offset);
+  void nmt_uncommit(ZPhysicalMemory pmem, uintptr_t offset);
+
+public:
+  ZPhysicalMemoryManager(size_t max_capacity, size_t granule_size);
+
+  bool is_initialized() const;
+
+  size_t max_capacity() const;
+  size_t capacity() const;
+  size_t used() const;
+  size_t available() const;
+
+  ZPhysicalMemory alloc(size_t size);
+  void free(ZPhysicalMemory pmem);
+
+  void map(ZPhysicalMemory pmem, uintptr_t offset);
+  void unmap(ZPhysicalMemory pmem, uintptr_t offset);
+  void flip(ZPhysicalMemory pmem, uintptr_t offset);
+};
+
+#endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP
+#define SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP
+
+#include "gc/z/zPhysicalMemory.hpp"
+#include "utilities/debug.hpp"
+
+// Segment covering [start, start + size).
+inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size) :
+    _start(start),
+    _end(start + size) {}
+
+inline uintptr_t ZPhysicalMemorySegment::start() const {
+  return _start;
+}
+
+inline uintptr_t ZPhysicalMemorySegment::end() const {
+  return _end;
+}
+
+inline size_t ZPhysicalMemorySegment::size() const {
+  return end() - start();
+}
+
+// Grows the segment in place by moving its end forward.
+inline void ZPhysicalMemorySegment::expand(size_t size) {
+  _end += size;
+}
+
+// Carves the first split_size bytes off the front of this segment,
+// returning them as a new segment and shrinking this one.
+inline ZPhysicalMemorySegment ZPhysicalMemorySegment::split(size_t split_size) {
+  assert(split_size <= size(), "Invalid size");
+  ZPhysicalMemorySegment segment(_start, split_size);
+  _start += split_size;
+  return segment;
+}
+
+inline bool ZPhysicalMemory::is_null() const {
+  return _nsegments == 0;
+}
+
+inline size_t ZPhysicalMemory::nsegments() const {
+  return _nsegments;
+}
+
+inline ZPhysicalMemorySegment ZPhysicalMemory::segment(size_t index) const {
+  assert(index < _nsegments, "Invalid segment index");
+  return _segments[index];
+}
+
+inline size_t ZPhysicalMemoryManager::max_capacity() const {
+  return _max_capacity;
+}
+
+inline size_t ZPhysicalMemoryManager::capacity() const {
+  return _capacity;
+}
+
+inline size_t ZPhysicalMemoryManager::used() const {
+  return _used;
+}
+
+// Bytes still allocatable before hitting max_capacity (includes both
+// unused committed capacity and capacity not yet committed).
+inline size_t ZPhysicalMemoryManager::available() const {
+  return _max_capacity - _used;
+}
+
+#endif // SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPreMappedMemory.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zPreMappedMemory.inline.hpp"
+#include "gc/z/zVirtualMemory.inline.hpp"
+#include "logging/log.hpp"
+
+ZPreMappedMemory::ZPreMappedMemory(ZVirtualMemoryManager &vmm, ZPhysicalMemoryManager &pmm, size_t size) :
+    _vmem(),
+    _pmem(),
+    _initialized(false) {
+  if (!vmm.is_initialized() || !pmm.is_initialized()) {
+    // Not initialized
+    return;
+  }
+
+  // Pre-mapping and pre-touching memory can take a long time. Log a message
+  // to help the user understand why the JVM might seem slow to start.
+  log_info(gc, init)("Pre-touching: %s", AlwaysPreTouch ? "Enabled" : "Disabled");
+  log_info(gc, init)("Pre-mapping: " SIZE_FORMAT "M", size / M);
+
+  _pmem = pmm.alloc(size);
+  if (_pmem.is_null()) {
+    // Out of memory
+    return;
+  }
+
+  _vmem = vmm.alloc(size, true /* alloc_from_front */);
+  if (_vmem.is_null()) {
+    // Out of address space
+    pmm.free(_pmem);
+    return;
+  }
+
+  // Map physical memory
+  pmm.map(_pmem, _vmem.start());
+
+  _initialized = true;
+}
+
+// Carves a page of the given type and size out of the remaining
+// pre-mapped memory. Returns NULL when not enough is left; the caller
+// then falls back to regular allocation.
+ZPage* ZPreMappedMemory::alloc_page(uint8_t type, size_t size) {
+  if (size > available()) {
+    // Not enough pre-mapped memory
+    return NULL;
+  }
+
+  // Take a chunk of the pre-mapped memory
+  const ZPhysicalMemory pmem = _pmem.split(size);
+  const ZVirtualMemory  vmem = _vmem.split(size);
+
+  // Mark the page pre-mapped so it is not mapped a second time.
+  ZPage* const page = new ZPage(type, vmem, pmem);
+  page->set_pre_mapped();
+
+  return page;
+}
+
+// Releases leftover virtual memory bookkeeping once all physical memory
+// has been handed out (asserted via the detached/null _pmem).
+void ZPreMappedMemory::clear() {
+  assert(_pmem.is_null(), "Should be detached");
+  _vmem.clear();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPreMappedMemory.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPREMAPPEDMEMORY_HPP
+#define SHARE_GC_Z_ZPREMAPPEDMEMORY_HPP
+
+#include "gc/z/zPhysicalMemory.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+#include "memory/allocation.hpp"
+
+class ZPage;
+
+// A chunk of heap memory allocated and mapped during startup, from
+// which early page allocations are carved without further OS calls.
+class ZPreMappedMemory {
+private:
+  ZVirtualMemory  _vmem;         // remaining pre-mapped virtual range
+  ZPhysicalMemory _pmem;         // remaining pre-mapped physical memory
+  bool            _initialized;  // false if construction failed
+
+public:
+  ZPreMappedMemory(ZVirtualMemoryManager &vmm, ZPhysicalMemoryManager &pmm, size_t size);
+
+  bool is_initialized() const;
+
+  ZPhysicalMemory& physical_memory();
+  const ZVirtualMemory& virtual_memory() const;
+
+  // Bytes of pre-mapped memory not yet handed out.
+  size_t available() const;
+
+  // Carve a page out of the pre-mapped memory, or NULL if exhausted.
+  ZPage* alloc_page(uint8_t type, size_t size);
+
+  void clear();
+};
+
+#endif // SHARE_GC_Z_ZPREMAPPEDMEMORY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zPreMappedMemory.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPREMAPPEDMEMORY_INLINE_HPP
+#define SHARE_GC_Z_ZPREMAPPEDMEMORY_INLINE_HPP
+
+#include "gc/z/zPreMappedMemory.hpp"
+
+inline bool ZPreMappedMemory::is_initialized() const {
+  return _initialized;
+}
+
+inline ZPhysicalMemory& ZPreMappedMemory::physical_memory() {
+  return _pmem;
+}
+
+inline const ZVirtualMemory& ZPreMappedMemory::virtual_memory() const {
+  return _vmem;
+}
+
+// Remaining bytes; _vmem shrinks as alloc_page() splits ranges off it,
+// so its current size is exactly what is still available.
+inline size_t ZPreMappedMemory::available() const {
+  return _vmem.size();
+}
+
+#endif // SHARE_GC_Z_ZPREMAPPEDMEMORY_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zReferenceProcessor.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.inline.hpp"
+#include "gc/shared/referencePolicy.hpp"
+#include "gc/shared/referenceProcessorStats.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zReferenceProcessor.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zTask.hpp"
+#include "gc/z/zTracer.inline.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "memory/universe.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
+
+static const ZStatSubPhase ZSubPhaseConcurrentReferencesProcess("Concurrent References Process");
+static const ZStatSubPhase ZSubPhaseConcurrentReferencesEnqueue("Concurrent References Enqueue");
+
+// Per-worker discovered lists and counters; a single shared pending
+// list onto which worker results are concatenated, with its tail
+// tracked for O(1) handoff to the Java pending list.
+ZReferenceProcessor::ZReferenceProcessor(ZWorkers* workers) :
+    _workers(workers),
+    _soft_reference_policy(NULL),
+    _encountered_count(),
+    _discovered_count(),
+    _enqueued_count(),
+    _discovered_list(NULL),
+    _pending_list(NULL),
+    _pending_list_tail(_pending_list.addr()) {}
+
+// Selects the soft reference policy for the coming cycle: clear all
+// (e.g. on allocation stall) or the default LRU max-heap policy.
+void ZReferenceProcessor::set_soft_reference_policy(bool clear) {
+  static AlwaysClearPolicy always_clear_policy;
+  static LRUMaxHeapPolicy lru_max_heap_policy;
+
+  if (clear) {
+    log_info(gc, ref)("Clearing All Soft References");
+    _soft_reference_policy = &always_clear_policy;
+  } else {
+    _soft_reference_policy = &lru_max_heap_policy;
+  }
+
+  _soft_reference_policy->setup();
+}
+
+// Advances SoftReference.clock to the current time (milliseconds),
+// which the LRU policy compares against per-reference timestamps.
+void ZReferenceProcessor::update_soft_reference_clock() const {
+  const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  java_lang_ref_SoftReference::set_clock(now);
+}
+
+// A Reference whose next field is non-null has already been enqueued
+// (made inactive) and must not be discovered again.
+bool ZReferenceProcessor::is_reference_inactive(oop obj) const {
+  // A non-null next field means the reference is inactive
+  return java_lang_ref_Reference::next(obj) != NULL;
+}
+
+ReferenceType ZReferenceProcessor::reference_type(oop obj) const {
+  return InstanceKlass::cast(obj->klass())->reference_type();
+}
+
+// Human-readable name for logging; only the four java.lang.ref
+// reference types are expected here.
+const char* ZReferenceProcessor::reference_type_name(ReferenceType type) const {
+  switch (type) {
+  case REF_SOFT:
+    return "Soft";
+
+  case REF_WEAK:
+    return "Weak";
+
+  case REF_FINAL:
+    return "Final";
+
+  case REF_PHANTOM:
+    return "Phantom";
+
+  default:
+    ShouldNotReachHere();
+    return NULL;
+  }
+}
+
+// Raw (unhealed) referent field address; reads through it must go via
+// the appropriate load barrier.
+volatile oop* ZReferenceProcessor::reference_referent_addr(oop obj) const {
+  return (volatile oop*)java_lang_ref_Reference::referent_addr_raw(obj);
+}
+
+// Raw referent load, deliberately bypassing the load barrier.
+oop ZReferenceProcessor::reference_referent(oop obj) const {
+  return *reference_referent_addr(obj);
+}
+
+bool ZReferenceProcessor::is_referent_alive_or_null(oop obj, ReferenceType type) const {
+  volatile oop* const p = reference_referent_addr(obj);
+
+  // Check if the referent is alive or null, in which case we don't want to discover
+  // the reference. It can only be null if the application called Reference.enqueue()
+  // or Reference.clear().
+  // Phantom references use phantom (any-liveness) barriers/checks, all
+  // other types use weak barriers with strong-liveness checks.
+  if (type == REF_PHANTOM) {
+    const oop o = ZBarrier::weak_load_barrier_on_phantom_oop_field(p);
+    return o == NULL || ZHeap::heap()->is_object_live(ZOop::to_address(o));
+  } else {
+    const oop o = ZBarrier::weak_load_barrier_on_weak_oop_field(p);
+    return o == NULL || ZHeap::heap()->is_object_strongly_live(ZOop::to_address(o));
+  }
+}
+
+// True if the soft reference policy says this referent should be kept
+// alive this cycle; always false for non-soft references.
+bool ZReferenceProcessor::is_referent_softly_alive(oop obj, ReferenceType type) const {
+  if (type != REF_SOFT) {
+    // Not a soft reference
+    return false;
+  }
+
+  // Ask soft reference policy
+  const jlong clock = java_lang_ref_SoftReference::clock();
+  assert(clock != 0, "Clock not initialized");
+  assert(_soft_reference_policy != NULL, "Policy not initialized");
+  return !_soft_reference_policy->should_clear_reference(obj, clock);
+}
+
+// Decides, during processing, whether a discovered reference should be
+// dropped from the list (referent cleared by the application, or
+// referent found alive after marking completed).
+bool ZReferenceProcessor::should_drop_reference(oop obj, ReferenceType type) const {
+  // This check is racing with a call to Reference.clear() from the application.
+  // If the application clears the reference after this check it will still end
+  // up on the pending list, and there's nothing we can do about that without
+  // changing the Reference.clear() API. This check is also racing with a call
+  // to Reference.enqueue() from the application, which is unproblematic, since
+  // the application wants the reference to be enqueued anyway.
+  const oop o = reference_referent(obj);
+  if (o == NULL) {
+    // Reference has been cleared, by a call to Reference.enqueue()
+    // or Reference.clear() from the application, which means we
+    // should drop the reference.
+    return true;
+  }
+
+  // Check if the referent is still alive, in which case we should
+  // drop the reference.
+  if (type == REF_PHANTOM) {
+    return ZBarrier::is_alive_barrier_on_phantom_oop(o);
+  } else {
+    return ZBarrier::is_alive_barrier_on_weak_oop(o);
+  }
+}
+
+bool ZReferenceProcessor::should_mark_referent(ReferenceType type) const {
+  // Referents of final references (and its reachable sub graph) are
+  // always marked finalizable during discovery. This avoids the problem
+  // of later having to mark those objects if the referent is still final
+  // reachable during processing.
+  return type == REF_FINAL;
+}
+
+bool ZReferenceProcessor::should_clear_referent(ReferenceType type) const {
+  // Referents that were not marked must be cleared
+  return !should_mark_referent(type);
+}
+
+// Heals/keeps the referent reachable for a reference being dropped, so
+// the (live) referent is safe to access afterwards. Barrier strength
+// matches the reference type, as in is_referent_alive_or_null().
+void ZReferenceProcessor::keep_referent_alive(oop obj, ReferenceType type) const {
+  volatile oop* const p = reference_referent_addr(obj);
+  if (type == REF_PHANTOM) {
+    ZBarrier::keep_alive_barrier_on_phantom_oop_field(p);
+  } else {
+    ZBarrier::keep_alive_barrier_on_weak_oop_field(p);
+  }
+}
+
+// Called from marking when a Reference object is encountered. Returns
+// true if the reference was added to this worker's discovered list
+// (in which case marking does not follow the referent normally).
+bool ZReferenceProcessor::discover_reference(oop obj, ReferenceType type) {
+  if (!RegisterReferences) {
+    // Reference processing disabled
+    return false;
+  }
+
+  log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(obj), reference_type_name(type));
+
+  // Update statistics
+  _encountered_count.get()[type]++;
+
+  // Skip references that are already inactive, whose referent is
+  // alive (or cleared), or that the soft policy wants to keep.
+  if (is_reference_inactive(obj) ||
+      is_referent_alive_or_null(obj, type) ||
+      is_referent_softly_alive(obj, type)) {
+    // Not discovered
+    return false;
+  }
+
+  discover(obj, type);
+
+  // Discovered
+  return true;
+}
+
+// Adds obj to this worker's discovered list (linked through the
+// Reference.discovered field), marking final referents finalizable.
+void ZReferenceProcessor::discover(oop obj, ReferenceType type) {
+  log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(obj), reference_type_name(type));
+
+  // Update statistics
+  _discovered_count.get()[type]++;
+
+  // Mark referent finalizable
+  if (should_mark_referent(type)) {
+    oop* const referent_addr = (oop*)java_lang_ref_Reference::referent_addr_raw(obj);
+    ZBarrier::mark_barrier_on_oop_field(referent_addr, true /* finalizable */);
+  }
+
+  // Add reference to discovered list
+  assert(java_lang_ref_Reference::discovered(obj) == NULL, "Already discovered");
+  oop* const list = _discovered_list.addr();
+  java_lang_ref_Reference::set_discovered(obj, *list);
+  *list = obj;
+}
+
+// Removes obj from the discovered list (its referent turned out to be
+// live or cleared) and returns the next list element.
+oop ZReferenceProcessor::drop(oop obj, ReferenceType type) {
+  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(obj), reference_type_name(type));
+
+  // Keep referent alive
+  keep_referent_alive(obj, type);
+
+  // Unlink and return next in list
+  const oop next = java_lang_ref_Reference::discovered(obj);
+  java_lang_ref_Reference::set_discovered(obj, NULL);
+  return next;
+}
+
+// Keeps obj on the list for enqueueing: clears the referent (unless
+// final), makes the reference inactive, and returns the address of its
+// discovered field (the link to the next element).
+oop* ZReferenceProcessor::keep(oop obj, ReferenceType type) {
+  log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(obj), reference_type_name(type));
+
+  // Update statistics
+  _enqueued_count.get()[type]++;
+
+  // Clear referent
+  if (should_clear_referent(type)) {
+    java_lang_ref_Reference::set_referent(obj, NULL);
+  }
+
+  // Make reference inactive by self-looping the next field. We could be racing with a
+  // call to Reference.enqueue() from the application, which is why we are using a CAS
+  // to make sure we change the next field only if it is NULL. A failing CAS means the
+  // reference has already been enqueued. However, we don't check the result of the CAS,
+  // since we still have no option other than keeping the reference on the pending list.
+  // It's ok to have the reference both on the pending list and enqueued at the same
+  // time (the pending list is linked through the discovered field, while the reference
+  // queue is linked through the next field). When the ReferenceHandler thread later
+  // calls Reference.enqueue() we detect that it has already been enqueued and drop it.
+  oop* const next_addr = (oop*)java_lang_ref_Reference::next_addr_raw(obj);
+  Atomic::cmpxchg(obj, next_addr, oop(NULL));
+
+  // Return next in list
+  return (oop*)java_lang_ref_Reference::discovered_addr_raw(obj);
+}
+
+// Per-worker processing: walks this worker's discovered list, dropping or
+// keeping each reference, then prepends the surviving list onto the shared
+// internal pending list.
+void ZReferenceProcessor::work() {
+  // Process discovered references
+  oop* const list = _discovered_list.addr();
+  oop* p = list;
+
+  while (*p != NULL) {
+    const oop obj = *p;
+    const ReferenceType type = reference_type(obj);
+
+    if (should_drop_reference(obj, type)) {
+      *p = drop(obj, type);
+    } else {
+      p = keep(obj, type);
+    }
+  }
+
+  // Prepend discovered references to internal pending list.
+  // After the loop, p points at the NULL tail slot of the kept list,
+  // so the xchg links the old pending list onto our tail.
+  if (*list != NULL) {
+    *p = Atomic::xchg(*list, _pending_list.addr());
+    if (*p == NULL) {
+      // First to prepend to list, record tail
+      _pending_list_tail = p;
+    }
+
+    // Clear discovered list
+    *list = NULL;
+  }
+}
+
+// Returns true iff all per-worker discovered lists and the internal
+// pending list are empty.
+bool ZReferenceProcessor::is_empty() const {
+  ZPerWorkerConstIterator<oop> iter(&_discovered_list);
+  for (const oop* list; iter.next(&list);) {
+    if (*list != NULL) {
+      return false;
+    }
+  }
+
+  if (_pending_list.get() != NULL) {
+    return false;
+  }
+
+  return true;
+}
+
+// Zeroes all per-worker counters. The index range REF_SOFT..REF_PHANTOM
+// covers every tracked reference type (reference_type_count = REF_PHANTOM + 1).
+void ZReferenceProcessor::reset_statistics() {
+  assert(is_empty(), "Should be empty");
+
+  // Reset encountered
+  ZPerWorkerIterator<Counters> iter_encountered(&_encountered_count);
+  for (Counters* counters; iter_encountered.next(&counters);) {
+    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
+      (*counters)[i] = 0;
+    }
+  }
+
+  // Reset discovered
+  ZPerWorkerIterator<Counters> iter_discovered(&_discovered_count);
+  for (Counters* counters; iter_discovered.next(&counters);) {
+    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
+      (*counters)[i] = 0;
+    }
+  }
+
+  // Reset enqueued
+  ZPerWorkerIterator<Counters> iter_enqueued(&_enqueued_count);
+  for (Counters* counters; iter_enqueued.next(&counters);) {
+    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
+      (*counters)[i] = 0;
+    }
+  }
+}
+
+// Sums the per-worker counters into per-type totals, publishes them to
+// ZStatReferences, and reports discovery counts to the GC tracer.
+void ZReferenceProcessor::collect_statistics() {
+  Counters encountered = {};
+  Counters discovered = {};
+  Counters enqueued = {};
+
+  // Sum encountered
+  ZPerWorkerConstIterator<Counters> iter_encountered(&_encountered_count);
+  for (const Counters* counters; iter_encountered.next(&counters);) {
+    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
+      encountered[i] += (*counters)[i];
+    }
+  }
+
+  // Sum discovered
+  ZPerWorkerConstIterator<Counters> iter_discovered(&_discovered_count);
+  for (const Counters* counters; iter_discovered.next(&counters);) {
+    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
+      discovered[i] += (*counters)[i];
+    }
+  }
+
+  // Sum enqueued
+  ZPerWorkerConstIterator<Counters> iter_enqueued(&_enqueued_count);
+  for (const Counters* counters; iter_enqueued.next(&counters);) {
+    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
+      enqueued[i] += (*counters)[i];
+    }
+  }
+
+  // Update statistics
+  ZStatReferences::set_soft(encountered[REF_SOFT], discovered[REF_SOFT], enqueued[REF_SOFT]);
+  ZStatReferences::set_weak(encountered[REF_WEAK], discovered[REF_WEAK], enqueued[REF_WEAK]);
+  ZStatReferences::set_final(encountered[REF_FINAL], discovered[REF_FINAL], enqueued[REF_FINAL]);
+  ZStatReferences::set_phantom(encountered[REF_PHANTOM], discovered[REF_PHANTOM], enqueued[REF_PHANTOM]);
+
+  // Trace statistics
+  const ReferenceProcessorStats stats(discovered[REF_SOFT],
+                                      discovered[REF_WEAK],
+                                      discovered[REF_FINAL],
+                                      discovered[REF_PHANTOM]);
+  ZTracer::tracer()->report_gc_reference_stats(stats);
+}
+
+// Task wrapper that runs ZReferenceProcessor::work() on each worker thread.
+class ZReferenceProcessorTask : public ZTask {
+private:
+  ZReferenceProcessor* const _reference_processor;
+
+public:
+  ZReferenceProcessorTask(ZReferenceProcessor* reference_processor) :
+      ZTask("ZReferenceProcessorTask"),
+      _reference_processor(reference_processor) {}
+
+  virtual void work() {
+    _reference_processor->work();
+  }
+};
+
+// Processes all discovered references using the worker threads (run
+// concurrently with the application), then updates the soft reference
+// clock and collects statistics.
+void ZReferenceProcessor::process_references() {
+  ZStatTimer timer(ZSubPhaseConcurrentReferencesProcess);
+
+  // Process discovered lists
+  ZReferenceProcessorTask task(this);
+  _workers->run_concurrent(&task);
+
+  // Update soft reference clock
+  update_soft_reference_clock();
+
+  // Collect, log and trace statistics
+  collect_statistics();
+}
+
+// Hands the internal pending list over to the Java-visible pending list and
+// wakes up the ReferenceHandler thread.
+// NOTE(review): assumes no worker is concurrently running work() at this
+// point, since _pending_list/_pending_list_tail are read and reset without
+// synchronization — confirm against the caller's phase ordering.
+void ZReferenceProcessor::enqueue_references() {
+  ZStatTimer timer(ZSubPhaseConcurrentReferencesEnqueue);
+
+  if (_pending_list.get() == NULL) {
+    // Nothing to enqueue
+    return;
+  }
+
+  {
+    // Heap_lock protects external pending list
+    MonitorLockerEx ml(Heap_lock);
+
+    // Prepend internal pending list to external pending list
+    *_pending_list_tail = Universe::swap_reference_pending_list(_pending_list.get());
+
+    // Notify ReferenceHandler thread
+    ml.notify_all();
+  }
+
+  // Reset internal pending list
+  _pending_list.set(NULL);
+  _pending_list_tail = _pending_list.addr();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zReferenceProcessor.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZREFERENCEPROCESSOR_HPP
+#define SHARE_GC_Z_ZREFERENCEPROCESSOR_HPP
+
+#include "gc/shared/referenceDiscoverer.hpp"
+#include "gc/z/zValue.hpp"
+
+class ReferencePolicy;
+class ZWorkers;
+
+// ZGC's reference discoverer/processor. References are discovered into
+// per-worker lists during marking, processed concurrently by the worker
+// pool, and finally handed to the Java pending list.
+class ZReferenceProcessor : public ReferenceDiscoverer {
+  friend class ZReferenceProcessorTask;
+
+private:
+  // One counter slot per reference type, REF_SOFT..REF_PHANTOM
+  static const size_t reference_type_count = REF_PHANTOM + 1;
+  typedef size_t Counters[reference_type_count];
+
+  ZWorkers* const      _workers;
+  ReferencePolicy*     _soft_reference_policy;
+  ZPerWorker<Counters> _encountered_count;
+  ZPerWorker<Counters> _discovered_count;
+  ZPerWorker<Counters> _enqueued_count;
+  ZPerWorker<oop>      _discovered_list;   // per-worker, linked via the discovered field
+  ZContended<oop>      _pending_list;      // internal pending list, shared across workers
+  oop*                 _pending_list_tail; // tail slot of _pending_list
+
+  void update_soft_reference_clock() const;
+
+  ReferenceType reference_type(oop obj) const;
+  const char* reference_type_name(ReferenceType type) const;
+  volatile oop* reference_referent_addr(oop obj) const;
+  oop reference_referent(oop obj) const;
+  bool is_reference_inactive(oop obj) const;
+  bool is_referent_alive_or_null(oop obj, ReferenceType type) const;
+  bool is_referent_softly_alive(oop obj, ReferenceType type) const;
+  bool should_drop_reference(oop obj, ReferenceType type) const;
+  bool should_mark_referent(ReferenceType type) const;
+  bool should_clear_referent(ReferenceType type) const;
+  void keep_referent_alive(oop obj, ReferenceType type) const;
+
+  void discover(oop obj, ReferenceType type);
+  oop drop(oop obj, ReferenceType type);
+  oop* keep(oop obj, ReferenceType type);
+
+  bool is_empty() const;
+
+  void work();
+  void collect_statistics();
+
+public:
+  ZReferenceProcessor(ZWorkers* workers);
+
+  void set_soft_reference_policy(bool clear);
+  void reset_statistics();
+
+  virtual bool discover_reference(oop reference, ReferenceType type);
+  void process_references();
+  void enqueue_references();
+};
+
+#endif // SHARE_GC_Z_ZREFERENCEPROCESSOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRelocate.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zHeap.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zPage.hpp"
+#include "gc/z/zRelocate.hpp"
+#include "gc/z/zRelocationSet.inline.hpp"
+#include "gc/z/zRootsIterator.hpp"
+#include "gc/z/zTask.hpp"
+#include "gc/z/zWorkers.hpp"
+
+// The worker pool is used for parallel root processing (start) and
+// concurrent page relocation (relocate).
+ZRelocate::ZRelocate(ZWorkers* workers) :
+    _workers(workers) {}
+
+// Task that visits all roots with the relocate-root closure.
+class ZRelocateRootsTask : public ZTask {
+private:
+  ZRootsIterator _roots;
+
+public:
+  ZRelocateRootsTask() :
+      ZTask("ZRelocateRootsTask"),
+      _roots() {}
+
+  virtual void work() {
+    // During relocation we need to visit the JVMTI
+    // export weak roots to rehash the JVMTI tag map
+    ZRelocateRootOopClosure cl;
+    _roots.oops_do(&cl, true /* visit_jvmti_weak_export */);
+  }
+};
+
+// Relocates/remaps all roots, run in parallel on the worker threads.
+void ZRelocate::start() {
+  ZRelocateRootsTask task;
+  _workers->run_parallel(&task);
+}
+
+// Relocates every object visited in a given page.
+class ZRelocateObjectClosure : public ObjectClosure {
+private:
+  ZPage* const _page;
+
+public:
+  ZRelocateObjectClosure(ZPage* page) :
+      _page(page) {}
+
+  virtual void do_object(oop o) {
+    _page->relocate_object(ZOop::to_address(o));
+  }
+};
+
+// Per-worker relocation loop. Returns false if any page in the relocation
+// set could not be fully relocated (i.e. ended up pinned).
+bool ZRelocate::work(ZRelocationSetParallelIterator* iter) {
+  bool success = true;
+
+  // Relocate pages in the relocation set
+  for (ZPage* page; iter->next(&page);) {
+    // Relocate objects in page
+    ZRelocateObjectClosure cl(page);
+    page->object_iterate(&cl);
+
+    if (ZVerifyForwarding) {
+      page->verify_forwarding();
+    }
+
+    if (page->is_pinned()) {
+      // Relocation failed, page is now pinned
+      success = false;
+    } else {
+      // Relocation succeeded, release page
+      ZHeap::heap()->release_page(page, true /* reclaimed */);
+    }
+  }
+
+  return success;
+}
+
+// Task distributing relocation-set pages across the worker threads.
+class ZRelocateTask : public ZTask {
+private:
+  ZRelocate* const               _relocate;
+  ZRelocationSetParallelIterator _iter;
+  // NOTE(review): _failed is plain bool written by multiple workers without
+  // atomics. The race looks benign since it only ever transitions
+  // false -> true and is read after the task completes — confirm.
+  bool                           _failed;
+
+public:
+  ZRelocateTask(ZRelocate* relocate, ZRelocationSet* relocation_set) :
+      ZTask("ZRelocateTask"),
+      _relocate(relocate),
+      _iter(relocation_set),
+      _failed(false) {}
+
+  virtual void work() {
+    if (!_relocate->work(&_iter)) {
+      _failed = true;
+    }
+  }
+
+  bool failed() const {
+    return _failed;
+  }
+};
+
+// Relocates all pages in the relocation set concurrently.
+// Returns true iff every page was successfully relocated.
+bool ZRelocate::relocate(ZRelocationSet* relocation_set) {
+  ZRelocateTask task(this, relocation_set);
+  _workers->run_concurrent(&task);
+  return !task.failed();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRelocate.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZRELOCATE_HPP
+#define SHARE_GC_Z_ZRELOCATE_HPP
+
+#include "gc/z/zRelocationSet.hpp"
+#include "gc/z/zWorkers.hpp"
+#include "memory/allocation.hpp"
+
+// Drives the relocation phase: root processing (start) followed by
+// concurrent relocation of the selected page set (relocate).
+class ZRelocate {
+  friend class ZRelocateTask;
+
+private:
+  ZWorkers* const _workers;
+
+  bool work(ZRelocationSetParallelIterator* iter);
+
+public:
+  ZRelocate(ZWorkers* workers);
+
+  void start();
+  bool relocate(ZRelocationSet* relocation_set);
+};
+
+#endif // SHARE_GC_Z_ZRELOCATE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRelocationSet.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zRelocationSet.hpp"
+#include "memory/allocation.inline.hpp"
+
+// Starts out empty; populate() allocates and fills the page array.
+ZRelocationSet::ZRelocationSet() :
+    _pages(NULL),
+    _npages(0) {}
+
+// Fills the relocation set with two page groups, group0 first (the caller
+// passes medium pages as group0 so they are relocated before small pages).
+// NOTE(review): REALLOC_C_HEAP_ARRAY normally aborts the VM on allocation
+// failure, which would make the NULL check defensive-only — confirm.
+void ZRelocationSet::populate(const ZPage* const* group0, size_t ngroup0,
+                              const ZPage* const* group1, size_t ngroup1) {
+  _npages = ngroup0 + ngroup1;
+  _pages = REALLOC_C_HEAP_ARRAY(ZPage*, _pages, _npages, mtGC);
+
+  if (_pages != NULL) {
+    if (group0 != NULL) {
+      memcpy(_pages, group0, ngroup0 * sizeof(ZPage*));
+    }
+    if (group1 != NULL) {
+      memcpy(_pages + ngroup0, group1, ngroup1 * sizeof(ZPage*));
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRelocationSet.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZRELOCATIONSET_HPP
+#define SHARE_GC_Z_ZRELOCATIONSET_HPP
+
+#include "memory/allocation.hpp"
+
+class ZPage;
+
+// Flat array of pages selected for relocation, iterated either serially
+// or in parallel via ZRelocationSetIteratorImpl.
+class ZRelocationSet {
+  template <bool> friend class ZRelocationSetIteratorImpl;
+
+private:
+  ZPage** _pages;
+  size_t  _npages;
+
+public:
+  ZRelocationSet();
+
+  void populate(const ZPage* const* group0, size_t ngroup0,
+                const ZPage* const* group1, size_t ngroup1);
+};
+
+// Iterator over a relocation set; the bool template parameter selects
+// between a serial and a parallel (atomic claim) implementation.
+template <bool parallel>
+class ZRelocationSetIteratorImpl : public StackObj {
+private:
+  ZRelocationSet* const _relocation_set;
+  size_t                _next;
+
+public:
+  ZRelocationSetIteratorImpl(ZRelocationSet* relocation_set);
+
+  bool next(ZPage** page);
+};
+
+// Iterator types
+#define ZRELOCATIONSET_SERIAL      false
+#define ZRELOCATIONSET_PARALLEL    true
+
+// Single-threaded iteration (no atomics)
+class ZRelocationSetIterator : public ZRelocationSetIteratorImpl<ZRELOCATIONSET_SERIAL> {
+public:
+  ZRelocationSetIterator(ZRelocationSet* relocation_set) :
+      ZRelocationSetIteratorImpl<ZRELOCATIONSET_SERIAL>(relocation_set) {}
+};
+
+// Safe for concurrent use by multiple worker threads
+class ZRelocationSetParallelIterator : public ZRelocationSetIteratorImpl<ZRELOCATIONSET_PARALLEL> {
+public:
+  ZRelocationSetParallelIterator(ZRelocationSet* relocation_set) :
+      ZRelocationSetIteratorImpl<ZRELOCATIONSET_PARALLEL>(relocation_set) {}
+};
+
+#endif // SHARE_GC_Z_ZRELOCATIONSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRelocationSet.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZRELOCATIONSET_INLINE_HPP
+#define SHARE_GC_Z_ZRELOCATIONSET_INLINE_HPP
+
+#include "gc/z/zRelocationSet.hpp"
+#include "runtime/atomic.hpp"
+
+// Iteration always starts at the first page of the set.
+template <bool parallel>
+inline ZRelocationSetIteratorImpl<parallel>::ZRelocationSetIteratorImpl(ZRelocationSet* relocation_set) :
+    _relocation_set(relocation_set),
+    _next(0) {}
+
+// Claims and returns the next page, or returns false when exhausted.
+// In the parallel variant the cheap non-atomic pre-check avoids bumping
+// _next with an atomic add once the set is exhausted; the post-add check
+// rejects claims that lost the race past the end.
+template <bool parallel>
+inline bool ZRelocationSetIteratorImpl<parallel>::next(ZPage** page) {
+  const size_t npages = _relocation_set->_npages;
+
+  if (parallel) {
+    if (_next < npages) {
+      const size_t next = Atomic::add(1u, &_next) - 1u;
+      if (next < npages) {
+        *page = _relocation_set->_pages[next];
+        return true;
+      }
+    }
+  } else {
+    // Serial variant: caller guarantees single-threaded use
+    if (_next < npages) {
+      *page = _relocation_set->_pages[_next++];
+      return true;
+    }
+  }
+
+  return false;
+}
+
+#endif // SHARE_GC_Z_ZRELOCATIONSET_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRelocationSetSelector.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zRelocationSet.hpp"
+#include "gc/z/zRelocationSetSelector.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/debug.hpp"
+
+// A selector group collects candidate pages of one size class ("Small" or
+// "Medium") and later selects the subset worth relocating. Pages whose
+// garbage is at most _fragmentation_limit are never candidates.
+ZRelocationSetSelectorGroup::ZRelocationSetSelectorGroup(const char* name,
+                                                         size_t page_size,
+                                                         size_t object_size_limit) :
+    _name(name),
+    _page_size(page_size),
+    _object_size_limit(object_size_limit),
+    _fragmentation_limit(page_size * (ZFragmentationLimit / 100)),
+    _registered_pages(),
+    _sorted_pages(NULL),
+    _nselected(0),
+    _relocating(0),
+    _fragmentation(0) {}
+
+// _sorted_pages is C-heap allocated by semi_sort(); FREE_C_HEAP_ARRAY(NULL) is a no-op.
+ZRelocationSetSelectorGroup::~ZRelocationSetSelectorGroup() {
+  FREE_C_HEAP_ARRAY(const ZPage*, _sorted_pages);
+}
+
+// Registers a page as a relocation candidate if its garbage exceeds the
+// fragmentation limit; otherwise its garbage is just accounted as
+// accepted fragmentation.
+void ZRelocationSetSelectorGroup::register_live_page(const ZPage* page, size_t garbage) {
+  if (garbage > _fragmentation_limit) {
+    _registered_pages.add(page);
+  } else {
+    _fragmentation += garbage;
+  }
+}
+
+// Bucket-sorts registered pages by live bytes (ascending) into _sorted_pages.
+// Pages are binned into 2048 partitions of equal live-byte ranges, so the
+// order within a partition is arbitrary ("semi"-sorted).
+// NOTE(review): partition_slots/partition_finger put 2 * 2048 * sizeof(size_t)
+// (32KB on LP64) on the stack — confirm this is acceptable for the calling
+// thread. exact_log2(partition_size) assumes _page_size is a power of two
+// and >= 2048 — confirm. The bucket index stays < npartitions because
+// registered pages have garbage > _fragmentation_limit, i.e. live < _page_size.
+void ZRelocationSetSelectorGroup::semi_sort() {
+  // Semi-sort registered pages by live bytes in ascending order
+  const size_t npartitions_shift = 11;
+  const size_t npartitions = (size_t)1 << npartitions_shift;
+  const size_t partition_size = _page_size >> npartitions_shift;
+  const size_t partition_size_shift = exact_log2(partition_size);
+  const size_t npages = _registered_pages.size();
+
+  size_t partition_slots[npartitions];
+  size_t partition_finger[npartitions];
+
+  // Allocate destination array
+  _sorted_pages = REALLOC_C_HEAP_ARRAY(const ZPage*, _sorted_pages, npages, mtGC);
+  debug_only(memset(_sorted_pages, 0, npages * sizeof(ZPage*)));
+
+  // Calculate partition slots
+  memset(partition_slots, 0, sizeof(partition_slots));
+  ZArrayIterator<const ZPage*> iter1(&_registered_pages);
+  for (const ZPage* page; iter1.next(&page);) {
+    const size_t index = page->live_bytes() >> partition_size_shift;
+    partition_slots[index]++;
+  }
+
+  // Calculate accumulated partition slots and fingers
+  size_t prev_partition_slots = 0;
+  for (size_t i = 0; i < npartitions; i++) {
+    partition_slots[i] += prev_partition_slots;
+    partition_finger[i] = prev_partition_slots;
+    prev_partition_slots = partition_slots[i];
+  }
+
+  // Sort pages into partitions
+  ZArrayIterator<const ZPage*> iter2(&_registered_pages);
+  for (const ZPage* page; iter2.next(&page);) {
+    const size_t index = page->live_bytes() >> partition_size_shift;
+    const size_t finger = partition_finger[index]++;
+    assert(_sorted_pages[finger] == NULL, "Invalid finger");
+    _sorted_pages[finger] = page;
+  }
+}
+
+// Decides how many of the (semi-sorted) registered pages to relocate by
+// growing a candidate set one page at a time and accepting the candidate
+// whenever its relative reclaimable space exceeds ZFragmentationLimit.
+void ZRelocationSetSelectorGroup::select() {
+  // Calculate the number of pages to relocate by successively including pages in
+  // a candidate relocation set and calculate the maximum space requirement for
+  // their live objects.
+  const size_t npages = _registered_pages.size();
+  size_t selected_from = 0;
+  size_t selected_to = 0;
+  size_t from_size = 0;
+
+  semi_sort();
+
+  for (size_t from = 1; from <= npages; from++) {
+    // Add page to the candidate relocation set
+    from_size += _sorted_pages[from - 1]->live_bytes();
+
+    // Calculate the maximum number of pages needed by the candidate relocation set.
+    // By subtracting the object size limit from the pages size we get the maximum
+    // number of pages that the relocation set is guaranteed to fit in, regardless
+    // of in which order the objects are relocated.
+    const size_t to = ceil((double)(from_size) / (double)(_page_size - _object_size_limit));
+
+    // Calculate the relative difference in reclaimable space compared to our
+    // currently selected final relocation set. If this number is larger than the
+    // acceptable fragmentation limit, then the current candidate relocation set
+    // becomes our new final relocation set.
+    const size_t diff_from = from - selected_from;
+    const size_t diff_to = to - selected_to;
+    const double diff_reclaimable = 100 - percent_of(diff_to, diff_from);
+    if (diff_reclaimable > ZFragmentationLimit) {
+      selected_from = from;
+      selected_to = to;
+    }
+
+    log_trace(gc, reloc)("Candidate Relocation Set (%s Pages): "
+                         SIZE_FORMAT "->" SIZE_FORMAT ", %.1f%% relative defragmentation, %s",
+                         _name, from, to, diff_reclaimable, (selected_from == from) ? "Selected" : "Rejected");
+  }
+
+  // Finalize selection
+  _nselected = selected_from;
+
+  // Update statistics
+  // NOTE(review): from_size here is the accumulated live bytes of ALL
+  // registered pages, not only the _nselected ones — so _relocating may
+  // over-report the amount to relocate. Confirm whether this is intended.
+  _relocating = from_size;
+  for (size_t i = _nselected; i < npages; i++) {
+    const ZPage* const page = _sorted_pages[i];
+    _fragmentation += page->size() - page->live_bytes();
+  }
+
+  log_debug(gc, reloc)("Relocation Set (%s Pages): " SIZE_FORMAT "->" SIZE_FORMAT ", " SIZE_FORMAT " skipped",
+                       _name, selected_from, selected_to, npages - _nselected);
+}
+
+// The first nselected() entries of this array are the pages chosen by select().
+const ZPage* const* ZRelocationSetSelectorGroup::selected() const {
+  return _sorted_pages;
+}
+
+size_t ZRelocationSetSelectorGroup::nselected() const {
+  return _nselected;
+}
+
+size_t ZRelocationSetSelectorGroup::relocating() const {
+  return _relocating;
+}
+
+size_t ZRelocationSetSelectorGroup::fragmentation() const {
+  return _fragmentation;
+}
+
+// One selector group per relocatable page size class; large pages are
+// never relocated and only contribute to the fragmentation statistic.
+ZRelocationSetSelector::ZRelocationSetSelector() :
+    _small("Small", ZPageSizeSmall, ZObjectSizeLimitSmall),
+    _medium("Medium", ZPageSizeMedium, ZObjectSizeLimitMedium),
+    _live(0),
+    _garbage(0),
+    _fragmentation(0) {}
+
+// Routes a live page to its size-class group and updates heap-wide
+// live/garbage totals. Pages of other types (e.g. large) are not
+// relocation candidates; their garbage counts as fragmentation.
+void ZRelocationSetSelector::register_live_page(const ZPage* page) {
+  const uint8_t type = page->type();
+  const size_t live = page->live_bytes();
+  const size_t garbage = page->size() - live;
+
+  if (type == ZPageTypeSmall) {
+    _small.register_live_page(page, garbage);
+  } else if (type == ZPageTypeMedium) {
+    _medium.register_live_page(page, garbage);
+  } else {
+    _fragmentation += garbage;
+  }
+
+  _live += live;
+  _garbage += garbage;
+}
+
+// A garbage page has no live objects; its whole size is garbage.
+void ZRelocationSetSelector::register_garbage_page(const ZPage* page) {
+  _garbage += page->size();
+}
+
+void ZRelocationSetSelector::select(ZRelocationSet* relocation_set) {
+  // Select pages to relocate. The resulting relocation set will be
+  // sorted such that medium pages comes first, followed by small
+  // pages. Pages within each page group will be semi-sorted by live
+  // bytes in ascending order. Relocating pages in this order allows
+  // us to start reclaiming memory more quickly.
+
+  // Select pages from each group
+  _medium.select();
+  _small.select();
+
+  // Populate relocation set (medium pages as group0, so they come first)
+  relocation_set->populate(_medium.selected(), _medium.nselected(),
+                           _small.selected(), _small.nselected());
+}
+
+// Aggregated statistics across both page groups plus non-candidate pages.
+size_t ZRelocationSetSelector::live() const {
+  return _live;
+}
+
+size_t ZRelocationSetSelector::garbage() const {
+  return _garbage;
+}
+
+size_t ZRelocationSetSelector::relocating() const {
+  return _small.relocating() + _medium.relocating();
+}
+
+size_t ZRelocationSetSelector::fragmentation() const {
+  return _fragmentation + _small.fragmentation() + _medium.fragmentation();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRelocationSetSelector.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZRELOCATIONSETSELECTOR_HPP
+#define SHARE_GC_Z_ZRELOCATIONSETSELECTOR_HPP
+
+#include "gc/z/zArray.hpp"
+#include "memory/allocation.hpp"
+
+class ZPage;
+class ZRelocationSet;
+
+// Collects relocation candidates of one page size class and selects the
+// subset to relocate (see select()/semi_sort() in the .cpp file).
+class ZRelocationSetSelectorGroup {
+private:
+  const char* const    _name;
+  const size_t         _page_size;
+  const size_t         _object_size_limit;
+  const size_t         _fragmentation_limit; // min garbage for a page to be a candidate
+
+  ZArray<const ZPage*> _registered_pages;
+  const ZPage**        _sorted_pages;        // C-heap array, semi-sorted by live bytes
+  size_t               _nselected;           // leading entries of _sorted_pages chosen
+  size_t               _relocating;
+  size_t               _fragmentation;
+
+  void semi_sort();
+
+public:
+  ZRelocationSetSelectorGroup(const char* name,
+                              size_t page_size,
+                              size_t object_size_limit);
+  ~ZRelocationSetSelectorGroup();
+
+  void register_live_page(const ZPage* page, size_t garbage);
+  void select();
+
+  const ZPage* const* selected() const;
+  size_t nselected() const;
+  size_t relocating() const;
+  size_t fragmentation() const;
+};
+
+// Registers all pages during mark end and selects which small/medium pages
+// to relocate, populating a ZRelocationSet with the result.
+class ZRelocationSetSelector : public StackObj {
+private:
+  ZRelocationSetSelectorGroup _small;
+  ZRelocationSetSelectorGroup _medium;
+  size_t                      _live;
+  size_t                      _garbage;
+  size_t                      _fragmentation;
+
+public:
+  ZRelocationSetSelector();
+
+  void register_live_page(const ZPage* page);
+  void register_garbage_page(const ZPage* page);
+  void select(ZRelocationSet* relocation_set);
+
+  size_t live() const;
+  size_t garbage() const;
+  size_t relocating() const;
+  size_t fragmentation() const;
+};
+
+#endif // SHARE_GC_Z_ZRELOCATIONSETSELECTOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zResurrection.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zResurrection.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/safepoint.hpp"
+#include "utilities/debug.hpp"
+
+// Global flag guarding object resurrection; read concurrently via
+// ZResurrection::is_blocked() (see zResurrection.inline.hpp).
+volatile bool ZResurrection::_blocked = false;
+
+void ZResurrection::block() {
+  // No barrier needed here: blocking only happens inside a safepoint,
+  // which publishes the store before mutator threads resume.
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+  _blocked = true;
+}
+
+void ZResurrection::unblock() {
+  // We use a storestore barrier to make sure all healed
+  // oops are visible before we unblock resurrection.
+  OrderAccess::storestore();
+  _blocked = false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zResurrection.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZRESURRECTION_HPP
+#define SHARE_GC_Z_ZRESURRECTION_HPP
+
+#include "memory/allocation.hpp"
+
+// Tracks whether resurrection of (otherwise unreachable) objects is
+// currently blocked. Blocking starts at a safepoint; unblocking and
+// reading are lock-free (see zResurrection.cpp / .inline.hpp).
+class ZResurrection : public AllStatic {
+private:
+  static volatile bool _blocked;
+
+public:
+  static bool is_blocked();  // Lock-free read with a loadload barrier
+  static void block();       // Must be called at a safepoint
+  static void unblock();     // Publishes healed oops before clearing the flag
+};
+
+#endif // SHARE_GC_Z_ZRESURRECTION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zResurrection.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZRESURRECTION_INLINE_HPP
+#define SHARE_GC_Z_ZRESURRECTION_INLINE_HPP
+
+#include "gc/z/zResurrection.hpp"
+#include "runtime/orderAccess.hpp"
+
+// Lock-free query of the resurrection-blocked flag.
+inline bool ZResurrection::is_blocked() {
+  // We use a loadload barrier to make sure we are not
+  // seeing oops from a time when resurrection was blocked.
+  const bool blocked = _blocked;
+  OrderAccess::loadload();
+  return blocked;
+}
+
+#endif // SHARE_GC_Z_ZRESURRECTION_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
+#include "compiler/oopMap.hpp"
+#include "gc/shared/oopStorageParState.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zRootsIterator.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/jniHandles.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/synchronizer.hpp"
+#include "services/management.hpp"
+#include "utilities/debug.hpp"
+#if INCLUDE_JFR
+#include "jfr/jfr.hpp"
+#endif
+
+// Timing sub-phases for root processing. The string is the label shown in
+// GC logging/statistics output.
+static const ZStatSubPhase ZSubPhasePauseRootsSetup("Pause Roots Setup");
+static const ZStatSubPhase ZSubPhasePauseRoots("Pause Roots");
+static const ZStatSubPhase ZSubPhasePauseRootsTeardown("Pause Roots Teardown");
+static const ZStatSubPhase ZSubPhasePauseRootsUniverse("Pause Roots Universe");
+static const ZStatSubPhase ZSubPhasePauseRootsVMWeakHandles("Pause Roots VMWeakHandles");
+static const ZStatSubPhase ZSubPhasePauseRootsJNIHandles("Pause Roots JNIHandles");
+static const ZStatSubPhase ZSubPhasePauseRootsJNIWeakHandles("Pause Roots JNIWeakHandles");
+static const ZStatSubPhase ZSubPhasePauseRootsObjectSynchronizer("Pause Roots ObjectSynchronizer");
+static const ZStatSubPhase ZSubPhasePauseRootsManagement("Pause Roots Management");
+static const ZStatSubPhase ZSubPhasePauseRootsJVMTIExport("Pause Roots JVMTIExport");
+static const ZStatSubPhase ZSubPhasePauseRootsJVMTIWeakExport("Pause Roots JVMTIWeakExport");
+// Fixed label typo: was "Pause Roots JRFWeak" (JRF -> JFR), now consistent
+// with the constant name and with the weak-root phase label below.
+static const ZStatSubPhase ZSubPhasePauseRootsJFRWeak("Pause Roots JFRWeak");
+static const ZStatSubPhase ZSubPhasePauseRootsSystemDictionary("Pause Roots SystemDictionary");
+static const ZStatSubPhase ZSubPhasePauseRootsClassLoaderDataGraph("Pause Roots ClassLoaderDataGraph");
+static const ZStatSubPhase ZSubPhasePauseRootsThreads("Pause Roots Threads");
+static const ZStatSubPhase ZSubPhasePauseRootsCodeCache("Pause Roots CodeCache");
+static const ZStatSubPhase ZSubPhasePauseRootsStringTable("Pause Roots StringTable");
+
+static const ZStatSubPhase ZSubPhasePauseWeakRootsSetup("Pause Weak Roots Setup");
+static const ZStatSubPhase ZSubPhasePauseWeakRoots("Pause Weak Roots");
+static const ZStatSubPhase ZSubPhasePauseWeakRootsTeardown("Pause Weak Roots Teardown");
+static const ZStatSubPhase ZSubPhasePauseWeakRootsVMWeakHandles("Pause Weak Roots VMWeakHandles");
+static const ZStatSubPhase ZSubPhasePauseWeakRootsJNIWeakHandles("Pause Weak Roots JNIWeakHandles");
+static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTIWeakExport("Pause Weak Roots JVMTIWeakExport");
+static const ZStatSubPhase ZSubPhasePauseWeakRootsJFRWeak("Pause Weak Roots JFRWeak");
+static const ZStatSubPhase ZSubPhasePauseWeakRootsSymbolTable("Pause Weak Roots SymbolTable");
+static const ZStatSubPhase ZSubPhasePauseWeakRootsStringTable("Pause Weak Roots StringTable");
+
+static const ZStatSubPhase ZSubPhaseConcurrentWeakRoots("Concurrent Weak Roots");
+static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsVMWeakHandles("Concurrent Weak Roots VMWeakHandles");
+static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsJNIWeakHandles("Concurrent Weak Roots JNIWeakHandles");
+static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsStringTable("Concurrent Weak Roots StringTable");
+
+template <typename T, void (T::*F)(OopClosure*)>
+ZSerialOopsDo<T, F>::ZSerialOopsDo(T* iter) :
+    _iter(iter),
+    _claimed(false) {}
+
+// Serial root set: exactly one worker executes F. The unsynchronized
+// _claimed pre-check is a fast path; the cmpxchg decides the single winner.
+template <typename T, void (T::*F)(OopClosure*)>
+void ZSerialOopsDo<T, F>::oops_do(OopClosure* cl) {
+  if (!_claimed && Atomic::cmpxchg(true, &_claimed, false) == false) {
+    (_iter->*F)(cl);
+  }
+}
+
+template <typename T, void (T::*F)(OopClosure*)>
+ZParallelOopsDo<T, F>::ZParallelOopsDo(T* iter) :
+    _iter(iter),
+    _completed(false) {}
+
+// Parallel root set: every worker may call F (F must itself be
+// parallel-safe). _completed is a racy cut-off so late workers skip the
+// call once one worker has finished; the inner re-check avoids a
+// redundant store when the flag is already set.
+template <typename T, void (T::*F)(OopClosure*)>
+void ZParallelOopsDo<T, F>::oops_do(OopClosure* cl) {
+  if (!_completed) {
+    (_iter->*F)(cl);
+    if (!_completed) {
+      _completed = true;
+    }
+  }
+}
+
+template <typename T, void (T::*F)(BoolObjectClosure*, OopClosure*)>
+ZSerialWeakOopsDo<T, F>::ZSerialWeakOopsDo(T* iter) :
+    _iter(iter),
+    _claimed(false) {}
+
+// Weak-root variant of ZSerialOopsDo: single winner via cmpxchg.
+template <typename T, void (T::*F)(BoolObjectClosure*, OopClosure*)>
+void ZSerialWeakOopsDo<T, F>::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* cl) {
+  if (!_claimed && Atomic::cmpxchg(true, &_claimed, false) == false) {
+    (_iter->*F)(is_alive, cl);
+  }
+}
+
+template <typename T, void (T::*F)(BoolObjectClosure*, OopClosure*)>
+ZParallelWeakOopsDo<T, F>::ZParallelWeakOopsDo(T* iter) :
+    _iter(iter),
+    _completed(false) {}
+
+// Weak-root variant of ZParallelOopsDo: all workers participate, with a
+// racy _completed cut-off for late arrivals.
+template <typename T, void (T::*F)(BoolObjectClosure*, OopClosure*)>
+void ZParallelWeakOopsDo<T, F>::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* cl) {
+  if (!_completed) {
+    (_iter->*F)(is_alive, cl);
+    if (!_completed) {
+      _completed = true;
+    }
+  }
+}
+
+// Prepares strong root iteration: resets thread claim parity and CLD
+// claimed marks so each root is visited once, and runs the code cache /
+// nmethod table GC prologues. Must be constructed inside a safepoint.
+ZRootsIterator::ZRootsIterator() :
+    _vm_weak_handles_iter(SystemDictionary::vm_weak_oop_storage()),
+    _jni_handles_iter(JNIHandles::global_handles()),
+    _jni_weak_handles_iter(JNIHandles::weak_global_handles()),
+    _string_table_iter(StringTable::weak_storage()),
+    _universe(this),
+    _object_synchronizer(this),
+    _management(this),
+    _jvmti_export(this),
+    _jvmti_weak_export(this),
+    _jfr_weak(this),
+    _system_dictionary(this),
+    _vm_weak_handles(this),
+    _jni_handles(this),
+    _jni_weak_handles(this),
+    _class_loader_data_graph(this),
+    _threads(this),
+    _code_cache(this),
+    _string_table(this) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+  ZStatTimer timer(ZSubPhasePauseRootsSetup);
+  Threads::change_thread_claim_parity();
+  ClassLoaderDataGraph::clear_claimed_marks();
+  // DerivedPointerTable only exists with C2
+  COMPILER2_PRESENT(DerivedPointerTable::clear());
+  CodeCache::gc_prologue();
+  ZNMethodTable::gc_prologue();
+}
+
+// Runs the matching epilogues and verifies that every thread was claimed
+// (i.e. its roots were processed) during the iteration.
+ZRootsIterator::~ZRootsIterator() {
+  ZStatTimer timer(ZSubPhasePauseRootsTeardown);
+  ResourceMark rm;
+  ZNMethodTable::gc_epilogue();
+  CodeCache::gc_epilogue();
+  JvmtiExport::gc_epilogue();
+  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+  Threads::assert_all_threads_claimed();
+}
+
+// The do_* methods below each visit one root subset under its own
+// statistics timer. They are invoked through the ZSerialOopsDo /
+// ZParallelOopsDo members, which control how many workers execute them.
+void ZRootsIterator::do_universe(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsUniverse);
+  Universe::oops_do(cl);
+}
+
+void ZRootsIterator::do_vm_weak_handles(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsVMWeakHandles);
+  _vm_weak_handles_iter.oops_do(cl);
+}
+
+void ZRootsIterator::do_jni_handles(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsJNIHandles);
+  _jni_handles_iter.oops_do(cl);
+}
+
+void ZRootsIterator::do_jni_weak_handles(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsJNIWeakHandles);
+  _jni_weak_handles_iter.oops_do(cl);
+}
+
+void ZRootsIterator::do_object_synchronizer(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsObjectSynchronizer);
+  ObjectSynchronizer::oops_do(cl);
+}
+
+void ZRootsIterator::do_management(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsManagement);
+  Management::oops_do(cl);
+}
+
+void ZRootsIterator::do_jvmti_export(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsJVMTIExport);
+  JvmtiExport::oops_do(cl);
+}
+
+void ZRootsIterator::do_jvmti_weak_export(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsJVMTIWeakExport);
+  // Treat the weak exports as strong here: every referent is kept alive.
+  AlwaysTrueClosure always_alive;
+  JvmtiExport::weak_oops_do(&always_alive, cl);
+}
+
+void ZRootsIterator::do_jfr_weak(OopClosure* cl) {
+#if INCLUDE_JFR
+  // JFR weak oops, visited as strong roots; no-op when JFR is excluded.
+  ZStatTimer timer(ZSubPhasePauseRootsJFRWeak);
+  AlwaysTrueClosure always_alive;
+  Jfr::weak_oops_do(&always_alive, cl);
+#endif
+}
+
+void ZRootsIterator::do_system_dictionary(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsSystemDictionary);
+  SystemDictionary::oops_do(cl);
+}
+
+void ZRootsIterator::do_class_loader_data_graph(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsClassLoaderDataGraph);
+  CLDToOopClosure cld_cl(cl);
+  ClassLoaderDataGraph::cld_do(&cld_cl);
+}
+
+// Per-thread closure used by ZRootsIterator::do_threads(): refreshes the
+// thread-local bad mask on Java threads, then visits the thread's oops.
+class ZRootsIteratorThreadClosure : public ThreadClosure {
+private:
+  OopClosure* const _cl;
+
+public:
+  ZRootsIteratorThreadClosure(OopClosure* cl) :
+      _cl(cl) {}
+
+  virtual void do_thread(Thread* thread) {
+    if (thread->is_Java_thread()) {
+      // Update thread local address bad mask
+      ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
+    }
+
+    // Process thread oops
+    thread->oops_do(_cl, NULL);
+  }
+};
+
+void ZRootsIterator::do_threads(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsThreads);
+  ResourceMark rm;
+  ZRootsIteratorThreadClosure thread_cl(cl);
+  // true == this is a parallel (multi-worker) traversal
+  Threads::possibly_parallel_threads_do(true, &thread_cl);
+}
+
+void ZRootsIterator::do_code_cache(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsCodeCache);
+  ZNMethodTable::oops_do(cl);
+}
+
+void ZRootsIterator::do_string_table(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsStringTable);
+  _string_table_iter.oops_do(cl);
+}
+
+// Visit all strong roots. When ZWeakRoots is disabled, the weak root sets
+// are treated as strong and visited here as well. Otherwise only the
+// JVMTI weak exports are (optionally) visited, when the caller asks for
+// them via visit_jvmti_weak_export.
+void ZRootsIterator::oops_do(OopClosure* cl, bool visit_jvmti_weak_export) {
+  ZStatTimer timer(ZSubPhasePauseRoots);
+  _universe.oops_do(cl);
+  _object_synchronizer.oops_do(cl);
+  _management.oops_do(cl);
+  _jvmti_export.oops_do(cl);
+  _system_dictionary.oops_do(cl);
+  _jni_handles.oops_do(cl);
+  _class_loader_data_graph.oops_do(cl);
+  _threads.oops_do(cl);
+  _code_cache.oops_do(cl);
+  if (!ZWeakRoots) {
+    _jvmti_weak_export.oops_do(cl);
+    _jfr_weak.oops_do(cl);
+    _vm_weak_handles.oops_do(cl);
+    _jni_weak_handles.oops_do(cl);
+    _string_table.oops_do(cl);
+  } else {
+    if (visit_jvmti_weak_export) {
+      _jvmti_weak_export.oops_do(cl);
+    }
+  }
+}
+
+// Iterates weak roots during a pause. Must be constructed at a safepoint;
+// resets the symbol table's parallel claim index so workers can share the
+// unlink work.
+ZWeakRootsIterator::ZWeakRootsIterator() :
+    _vm_weak_handles_iter(SystemDictionary::vm_weak_oop_storage()),
+    _jni_weak_handles_iter(JNIHandles::weak_global_handles()),
+    _string_table_iter(StringTable::weak_storage()),
+    _jvmti_weak_export(this),
+    _jfr_weak(this),
+    _vm_weak_handles(this),
+    _jni_weak_handles(this),
+    _symbol_table(this),
+    _string_table(this) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+  ZStatTimer timer(ZSubPhasePauseWeakRootsSetup);
+  SymbolTable::clear_parallel_claimed_index();
+}
+
+ZWeakRootsIterator::~ZWeakRootsIterator() {
+  ZStatTimer timer(ZSubPhasePauseWeakRootsTeardown);
+}
+
+void ZWeakRootsIterator::do_vm_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseWeakRootsVMWeakHandles);
+  _vm_weak_handles_iter.weak_oops_do(is_alive, cl);
+}
+
+void ZWeakRootsIterator::do_jni_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseWeakRootsJNIWeakHandles);
+  _jni_weak_handles_iter.weak_oops_do(is_alive, cl);
+}
+
+void ZWeakRootsIterator::do_jvmti_weak_export(BoolObjectClosure* is_alive, OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseWeakRootsJVMTIWeakExport);
+  JvmtiExport::weak_oops_do(is_alive, cl);
+}
+
+void ZWeakRootsIterator::do_jfr_weak(BoolObjectClosure* is_alive, OopClosure* cl) {
+#if INCLUDE_JFR
+  ZStatTimer timer(ZSubPhasePauseWeakRootsJFRWeak);
+  Jfr::weak_oops_do(is_alive, cl);
+#endif
+}
+
+void ZWeakRootsIterator::do_symbol_table(BoolObjectClosure* is_alive, OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseWeakRootsSymbolTable);
+  // Symbols are not oops; is_alive/cl are unused and the out-parameters of
+  // the unlink call (processed/removed counts) are deliberately discarded.
+  int dummy;
+  SymbolTable::possibly_parallel_unlink(&dummy, &dummy);
+}
+
+void ZWeakRootsIterator::do_string_table(BoolObjectClosure* is_alive, OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseWeakRootsStringTable);
+  _string_table_iter.weak_oops_do(is_alive, cl);
+}
+
+// Visit the weak roots that must be processed in the pause. Root sets
+// whose ZConcurrent* flag is set are skipped here because they are
+// handled later by ZConcurrentWeakRootsIterator.
+void ZWeakRootsIterator::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseWeakRoots);
+  if (ZSymbolTableUnloading) {
+    _symbol_table.weak_oops_do(is_alive, cl);
+  }
+  if (ZWeakRoots) {
+    _jvmti_weak_export.weak_oops_do(is_alive, cl);
+    _jfr_weak.weak_oops_do(is_alive, cl);
+    if (!ZConcurrentVMWeakHandles) {
+      _vm_weak_handles.weak_oops_do(is_alive, cl);
+    }
+    if (!ZConcurrentJNIWeakGlobalHandles) {
+      _jni_weak_handles.weak_oops_do(is_alive, cl);
+    }
+    if (!ZConcurrentStringTable) {
+      _string_table.weak_oops_do(is_alive, cl);
+    }
+  }
+}
+
+// Strong variant: visits the same roots but keeps every referent alive.
+void ZWeakRootsIterator::oops_do(OopClosure* cl) {
+  AlwaysTrueClosure always_alive;
+  weak_oops_do(&always_alive, cl);
+}
+
+// Iterates the weak root sets that are processed concurrently (outside a
+// safepoint), using the concurrent OopStorage iteration state.
+ZConcurrentWeakRootsIterator::ZConcurrentWeakRootsIterator() :
+    _vm_weak_handles_iter(SystemDictionary::vm_weak_oop_storage()),
+    _jni_weak_handles_iter(JNIHandles::weak_global_handles()),
+    _string_table_iter(StringTable::weak_storage()),
+    _vm_weak_handles(this),
+    _jni_weak_handles(this),
+    _string_table(this) {}
+
+void ZConcurrentWeakRootsIterator::do_vm_weak_handles(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhaseConcurrentWeakRootsVMWeakHandles);
+  _vm_weak_handles_iter.oops_do(cl);
+}
+
+void ZConcurrentWeakRootsIterator::do_jni_weak_handles(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhaseConcurrentWeakRootsJNIWeakHandles);
+  _jni_weak_handles_iter.oops_do(cl);
+}
+
+void ZConcurrentWeakRootsIterator::do_string_table(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhaseConcurrentWeakRootsStringTable);
+  _string_table_iter.oops_do(cl);
+}
+
+// Visit only the root sets whose ZConcurrent* flag is enabled; the others
+// were already handled in the pause by ZWeakRootsIterator.
+void ZConcurrentWeakRootsIterator::oops_do(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhaseConcurrentWeakRoots);
+  if (ZWeakRoots) {
+    if (ZConcurrentVMWeakHandles) {
+      _vm_weak_handles.oops_do(cl);
+    }
+    if (ZConcurrentJNIWeakGlobalHandles) {
+      _jni_weak_handles.oops_do(cl);
+    }
+    if (ZConcurrentStringTable) {
+      _string_table.oops_do(cl);
+    }
+  }
+}
+
+// Iterates only the thread stacks/handles. Must be constructed at a
+// safepoint; uses the thread claim parity so each thread is visited once.
+ZThreadRootsIterator::ZThreadRootsIterator() :
+    _threads(this) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+  ZStatTimer timer(ZSubPhasePauseRootsSetup);
+  Threads::change_thread_claim_parity();
+}
+
+ZThreadRootsIterator::~ZThreadRootsIterator() {
+  ZStatTimer timer(ZSubPhasePauseRootsTeardown);
+  Threads::assert_all_threads_claimed();
+}
+
+void ZThreadRootsIterator::do_threads(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRootsThreads);
+  ResourceMark rm;
+  // true == parallel traversal; NULL == no CodeBlobClosure for frames
+  Threads::possibly_parallel_oops_do(true, cl, NULL);
+}
+
+void ZThreadRootsIterator::oops_do(OopClosure* cl) {
+  ZStatTimer timer(ZSubPhasePauseRoots);
+  _threads.oops_do(cl);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRootsIterator.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZROOTSITERATOR_HPP
+#define SHARE_GC_Z_ZROOTSITERATOR_HPP
+
+#include "gc/shared/oopStorageParState.hpp"
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Iteration state over an OopStorage, parameterized on whether the
+// iteration runs concurrently with mutators.
+typedef OopStorage::ParState<false /* concurrent */, false /* is_const */> ZOopStorageIterator;
+typedef OopStorage::ParState<true /* concurrent */, false /* is_const */>  ZConcurrentOopStorageIterator;
+
+// Executes member function F on iterator T exactly once, no matter how
+// many workers call oops_do (first claimer wins).
+template <typename T, void (T::*F)(OopClosure*)>
+class ZSerialOopsDo {
+private:
+  T* const      _iter;
+  volatile bool _claimed;
+
+public:
+  ZSerialOopsDo(T* iter);
+  void oops_do(OopClosure* cl);
+};
+
+// Executes member function F on iterator T from all calling workers;
+// F must support parallel execution.
+template <typename T, void (T::*F)(OopClosure*)>
+class ZParallelOopsDo {
+private:
+  T* const      _iter;
+  volatile bool _completed;
+
+public:
+  ZParallelOopsDo(T* iter);
+  void oops_do(OopClosure* cl);
+};
+
+// Weak-root counterpart of ZSerialOopsDo (F also takes an is_alive closure).
+template <typename T, void (T::*F)(BoolObjectClosure*, OopClosure*)>
+class ZSerialWeakOopsDo {
+private:
+  T* const      _iter;
+  volatile bool _claimed;
+
+public:
+  ZSerialWeakOopsDo(T* iter);
+  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* cl);
+};
+
+// Weak-root counterpart of ZParallelOopsDo.
+template <typename T, void (T::*F)(BoolObjectClosure*, OopClosure*)>
+class ZParallelWeakOopsDo {
+private:
+  T* const      _iter;
+  volatile bool _completed;
+
+public:
+  ZParallelWeakOopsDo(T* iter);
+  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* cl);
+};
+
+// Iterates all strong roots during a GC pause. Each root subset is
+// wrapped in a ZSerialOopsDo (one worker) or ZParallelOopsDo (all
+// workers), so oops_do() can be called from multiple GC worker threads.
+class ZRootsIterator {
+private:
+  ZOopStorageIterator _vm_weak_handles_iter;
+  ZOopStorageIterator _jni_handles_iter;
+  ZOopStorageIterator _jni_weak_handles_iter;
+  ZOopStorageIterator _string_table_iter;
+
+  // One visitor per root subset; see zRootsIterator.cpp for details.
+  void do_universe(OopClosure* cl);
+  void do_vm_weak_handles(OopClosure* cl);
+  void do_jni_handles(OopClosure* cl);
+  void do_jni_weak_handles(OopClosure* cl);
+  void do_object_synchronizer(OopClosure* cl);
+  void do_management(OopClosure* cl);
+  void do_jvmti_export(OopClosure* cl);
+  void do_jvmti_weak_export(OopClosure* cl);
+  void do_jfr_weak(OopClosure* cl);
+  void do_system_dictionary(OopClosure* cl);
+  void do_class_loader_data_graph(OopClosure* cl);
+  void do_threads(OopClosure* cl);
+  void do_code_cache(OopClosure* cl);
+  void do_string_table(OopClosure* cl);
+
+  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_universe>                  _universe;
+  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_object_synchronizer>       _object_synchronizer;
+  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_management>                _management;
+  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jvmti_export>              _jvmti_export;
+  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jvmti_weak_export>         _jvmti_weak_export;
+  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jfr_weak>                  _jfr_weak;
+  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_system_dictionary>         _system_dictionary;
+  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_vm_weak_handles>         _vm_weak_handles;
+  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_jni_handles>             _jni_handles;
+  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_jni_weak_handles>        _jni_weak_handles;
+  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_class_loader_data_graph> _class_loader_data_graph;
+  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_threads>                 _threads;
+  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_code_cache>              _code_cache;
+  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_string_table>            _string_table;
+
+public:
+  // Ctor/dtor run the root-processing prologue/epilogue; construct only
+  // inside a safepoint.
+  ZRootsIterator();
+  ~ZRootsIterator();
+
+  void oops_do(OopClosure* cl, bool visit_jvmti_weak_export = false);
+};
+
+// Iterates weak roots during a GC pause; root sets that can be processed
+// concurrently are skipped here (see ZConcurrentWeakRootsIterator).
+class ZWeakRootsIterator {
+private:
+  ZOopStorageIterator _vm_weak_handles_iter;
+  ZOopStorageIterator _jni_weak_handles_iter;
+  ZOopStorageIterator _string_table_iter;
+
+  void do_vm_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl);
+  void do_jni_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl);
+  void do_jvmti_weak_export(BoolObjectClosure* is_alive, OopClosure* cl);
+  void do_jfr_weak(BoolObjectClosure* is_alive, OopClosure* cl);
+  void do_symbol_table(BoolObjectClosure* is_alive, OopClosure* cl);
+  void do_string_table(BoolObjectClosure* is_alive, OopClosure* cl);
+
+  ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jvmti_weak_export>  _jvmti_weak_export;
+  ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jfr_weak>           _jfr_weak;
+  ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_vm_weak_handles>  _vm_weak_handles;
+  ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jni_weak_handles> _jni_weak_handles;
+  ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_symbol_table>     _symbol_table;
+  ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_string_table>     _string_table;
+
+public:
+  // Construct only inside a safepoint.
+  ZWeakRootsIterator();
+  ~ZWeakRootsIterator();
+
+  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* cl);
+  // Strong variant: visits the same roots with all referents kept alive.
+  void oops_do(OopClosure* cl);
+};
+
+// Iterates the weak root sets that are processed concurrently with the
+// application (no safepoint required).
+class ZConcurrentWeakRootsIterator {
+private:
+  ZConcurrentOopStorageIterator _vm_weak_handles_iter;
+  ZConcurrentOopStorageIterator _jni_weak_handles_iter;
+  ZConcurrentOopStorageIterator _string_table_iter;
+
+  void do_vm_weak_handles(OopClosure* cl);
+  void do_jni_weak_handles(OopClosure* cl);
+  void do_string_table(OopClosure* cl);
+
+  ZParallelOopsDo<ZConcurrentWeakRootsIterator, &ZConcurrentWeakRootsIterator::do_vm_weak_handles>  _vm_weak_handles;
+  ZParallelOopsDo<ZConcurrentWeakRootsIterator, &ZConcurrentWeakRootsIterator::do_jni_weak_handles> _jni_weak_handles;
+  ZParallelOopsDo<ZConcurrentWeakRootsIterator, &ZConcurrentWeakRootsIterator::do_string_table>     _string_table;
+
+public:
+  ZConcurrentWeakRootsIterator();
+
+  void oops_do(OopClosure* cl);
+};
+
+// Iterates only the thread roots (stacks/handles) during a pause.
+class ZThreadRootsIterator {
+private:
+  void do_threads(OopClosure* cl);
+
+  ZParallelOopsDo<ZThreadRootsIterator, &ZThreadRootsIterator::do_threads> _threads;
+
+public:
+  // Construct only inside a safepoint.
+  ZThreadRootsIterator();
+  ~ZThreadRootsIterator();
+
+  void oops_do(OopClosure* cl);
+};
+
+#endif // SHARE_GC_Z_ZROOTSITERATOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRuntimeWorkers.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zRuntimeWorkers.hpp"
+
+// Worker gang used for non-GC (runtime) parallel work. The threads are
+// neither GC task threads nor concurrent GC threads, so they are treated
+// as ordinary runtime threads.
+ZRuntimeWorkers::ZRuntimeWorkers() :
+    _workers("RuntimeWorker",
+             nworkers(),
+             false /* are_GC_task_threads */,
+             false /* are_ConcurrentGC_threads */) {
+
+  log_info(gc, init)("Runtime Workers: %u parallel", nworkers());
+
+  // Initialize worker threads
+  _workers.initialize_workers();
+  _workers.update_active_workers(nworkers());
+}
+
+// Sized by ParallelGCThreads (the worker count is fixed at startup).
+uint ZRuntimeWorkers::nworkers() const {
+  return ParallelGCThreads;
+}
+
+WorkGang* ZRuntimeWorkers::workers() {
+  return &_workers;
+}
+
+void ZRuntimeWorkers::threads_do(ThreadClosure* tc) const {
+  _workers.threads_do(tc);
+}
+
+void ZRuntimeWorkers::print_threads_on(outputStream* st) const {
+  _workers.print_worker_threads_on(st);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zRuntimeWorkers.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZRUNTIMEWORKERS_HPP
+#define SHARE_GC_Z_ZRUNTIMEWORKERS_HPP
+
+#include "gc/shared/workgroup.hpp"
+
+// Owns the gang of runtime (non-GC) worker threads and exposes it to
+// code that needs to run parallel runtime tasks.
+class ZRuntimeWorkers {
+private:
+  WorkGang _workers;
+
+  uint nworkers() const;  // Worker count (ParallelGCThreads)
+
+public:
+  ZRuntimeWorkers();
+
+  WorkGang* workers();
+
+  // Thread enumeration / diagnostic printing.
+  void threads_do(ThreadClosure* tc) const;
+  void print_threads_on(outputStream* st) const;
+};
+
+#endif // SHARE_GC_Z_ZRUNTIMEWORKERS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zServiceability.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/generationCounters.hpp"
+#include "gc/shared/hSpaceCounters.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zServiceability.hpp"
+#include "memory/metaspaceCounters.hpp"
+#include "runtime/perfData.hpp"
+
+// GenerationCounters specialization whose current size is refreshed
+// from the ZHeap capacity on each update_all() call.
+class ZOldGenerationCounters : public GenerationCounters {
+public:
+  ZOldGenerationCounters(const char* name, size_t min_capacity, size_t max_capacity) :
+    // The "1, 1" parameters are for the n-th generation (=1) with 1 space.
+    GenerationCounters(name,
+                       1 /* ordinal */,
+                       1 /* spaces */,
+                       min_capacity /* min_capacity */,
+                       max_capacity /* max_capacity */,
+                       min_capacity /* curr_capacity */) {}
+
+  virtual void update_all() {
+    // Report the currently committed heap capacity as the generation size
+    size_t committed = ZHeap::heap()->capacity();
+    _current_size->set_value(committed);
+  }
+};
+
+// Class to expose perf counters used by jstat.
+class ZServiceabilityCounters : public CHeapObj<mtGC> {
+private:
+  ZOldGenerationCounters _old_collection_counters;
+  HSpaceCounters         _old_space_counters;
+
+public:
+  ZServiceabilityCounters(size_t min_capacity, size_t max_capacity);
+
+  // Refreshes all size-related perf counters.
+  void update_sizes();
+};
+
+ZServiceabilityCounters::ZServiceabilityCounters(size_t min_capacity, size_t max_capacity) :
+    // generation.1
+    _old_collection_counters("old",
+                             min_capacity,
+                             max_capacity),
+    // generation.1.space.0
+    _old_space_counters(_old_collection_counters.name_space(),
+                        "space",
+                        0 /* ordinal */,
+                        max_capacity /* max_capacity */,
+                        min_capacity /* init_capacity */) {}
+
+void ZServiceabilityCounters::update_sizes() {
+  if (UsePerfData) {
+    size_t capacity = ZHeap::heap()->capacity();
+    // Clamp used to capacity so the counters never report used > capacity
+    size_t used = MIN2(ZHeap::heap()->used(), capacity);
+
+    _old_space_counters.update_capacity(capacity);
+    _old_space_counters.update_used(used);
+
+    _old_collection_counters.update_all();
+
+    MetaspaceCounters::update_performance_counters();
+    CompressedClassSpaceCounters::update_performance_counters();
+  }
+}
+
+// Memory pool representing the entire ZHeap, for the memory service/JMX.
+ZServiceabilityMemoryPool::ZServiceabilityMemoryPool(size_t min_capacity, size_t max_capacity) :
+    CollectedMemoryPool("ZHeap",
+                        min_capacity,
+                        max_capacity,
+                        true /* support_usage_threshold */) {}
+
+size_t ZServiceabilityMemoryPool::used_in_bytes() {
+  return ZHeap::heap()->used();
+}
+
+MemoryUsage ZServiceabilityMemoryPool::get_memory_usage() {
+  const size_t committed = ZHeap::heap()->capacity();
+  // Clamp used to committed so MemoryUsage invariants hold
+  const size_t used      = MIN2(ZHeap::heap()->used(), committed);
+
+  return MemoryUsage(initial_size(), used, committed, max_size());
+}
+
+// Memory manager ("ZGC") owning the single ZHeap memory pool.
+ZServiceabilityMemoryManager::ZServiceabilityMemoryManager(ZServiceabilityMemoryPool* pool) :
+    GCMemoryManager("ZGC", "end of major GC") {
+  add_pool(pool);
+}
+
+ZServiceability::ZServiceability(size_t min_capacity, size_t max_capacity) :
+    _min_capacity(min_capacity),
+    _max_capacity(max_capacity),
+    _memory_pool(_min_capacity, _max_capacity),
+    _memory_manager(&_memory_pool),
+    _counters(NULL) {}
+
+// Creates the jstat perf counters; called separately from the constructor.
+void ZServiceability::initialize() {
+  _counters = new ZServiceabilityCounters(_min_capacity, _max_capacity);
+}
+
+MemoryPool* ZServiceability::memory_pool() {
+  return &_memory_pool;
+}
+
+GCMemoryManager* ZServiceability::memory_manager() {
+  return &_memory_manager;
+}
+
+ZServiceabilityCounters* ZServiceability::counters() {
+  return _counters;
+}
+
+// Scope object: records memory usage with the memory service on destruction.
+ZServiceabilityMemoryUsageTracker::~ZServiceabilityMemoryUsageTracker() {
+  MemoryService::track_memory_usage();
+}
+
+// Scope object: feeds GC begin/end events into TraceMemoryManagerStats.
+ZServiceabilityManagerStatsTracer::ZServiceabilityManagerStatsTracer(bool is_gc_begin, bool is_gc_end) :
+    _stats(ZHeap::heap()->serviceability_memory_manager(),
+           ZCollectedHeap::heap()->gc_cause() /* cause */,
+           is_gc_begin /* recordGCBeginTime */,
+           is_gc_begin /* recordPreGCUsage */,
+           true        /* recordPeakUsage */,
+           is_gc_end   /* recordPostGCusage */,
+           true        /* recordAccumulatedGCTime */,
+           is_gc_end   /* recordGCEndTime */,
+           is_gc_end   /* countCollection */) {}
+
+ZServiceabilityCountersTracer::ZServiceabilityCountersTracer() {
+  // Nothing to trace with TraceCollectorStats, since ZGC has
+  // neither a young collector nor a full collector.
+}
+
+// Updates the serviceability size counters when the traced scope ends.
+ZServiceabilityCountersTracer::~ZServiceabilityCountersTracer() {
+  ZHeap::heap()->serviceability_counters()->update_sizes();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zServiceability.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZSERVICEABILITY_HPP
+#define SHARE_GC_Z_ZSERVICEABILITY_HPP
+
+#include "memory/allocation.hpp"
+#include "services/memoryManager.hpp"
+#include "services/memoryPool.hpp"
+#include "services/memoryService.hpp"
+
+class ZServiceabilityCounters;
+
+// Memory pool representing the whole ZHeap for the memory service.
+class ZServiceabilityMemoryPool : public CollectedMemoryPool {
+public:
+  ZServiceabilityMemoryPool(size_t min_capacity, size_t max_capacity);
+
+  virtual size_t used_in_bytes();
+  virtual MemoryUsage get_memory_usage();
+};
+
+// GC memory manager owning the ZHeap memory pool.
+class ZServiceabilityMemoryManager : public GCMemoryManager {
+public:
+  ZServiceabilityMemoryManager(ZServiceabilityMemoryPool* pool);
+};
+
+// Aggregates ZGC's serviceability state: memory pool, memory manager
+// and (lazily created, see initialize()) jstat perf counters.
+class ZServiceability {
+private:
+  const size_t                 _min_capacity;
+  const size_t                 _max_capacity;
+  ZServiceabilityMemoryPool    _memory_pool;
+  ZServiceabilityMemoryManager _memory_manager;
+  ZServiceabilityCounters*     _counters;
+
+public:
+  ZServiceability(size_t min_capacity, size_t max_capacity);
+
+  void initialize();
+
+  MemoryPool* memory_pool();
+  GCMemoryManager* memory_manager();
+  ZServiceabilityCounters* counters();
+};
+
+// Scope object tracking memory usage for the duration of its lifetime.
+class ZServiceabilityMemoryUsageTracker {
+public:
+  ~ZServiceabilityMemoryUsageTracker();
+};
+
+// Scope object recording manager stats for a GC begin/end event.
+class ZServiceabilityManagerStatsTracer {
+private:
+  TraceMemoryManagerStats _stats;
+
+public:
+  ZServiceabilityManagerStatsTracer(bool is_gc_begin, bool is_gc_end);
+};
+
+// Scope object updating the size perf counters on scope exit.
+class ZServiceabilityCountersTracer {
+public:
+  ZServiceabilityCountersTracer();
+  ~ZServiceabilityCountersTracer();
+};
+
+// Combines the three tracers above; instantiated per GC phase via the
+// typedefs below (IsGCStart/IsGCEnd select which events are recorded).
+template <bool IsGCStart, bool IsGCEnd>
+class ZServiceabilityTracer : public StackObj {
+private:
+  ZServiceabilityMemoryUsageTracker _memory_usage_tracker;
+  ZServiceabilityManagerStatsTracer _manager_stats_tracer;
+  ZServiceabilityCountersTracer     _counters_tracer;
+
+public:
+  ZServiceabilityTracer() :
+      _memory_usage_tracker(),
+      _manager_stats_tracer(IsGCStart, IsGCEnd),
+      _counters_tracer() {}
+};
+
+typedef ZServiceabilityTracer<true,  false> ZServiceabilityMarkStartTracer;
+typedef ZServiceabilityTracer<false, false> ZServiceabilityMarkEndTracer;
+typedef ZServiceabilityTracer<false, true>  ZServiceabilityRelocateStartTracer;
+
+#endif // SHARE_GC_Z_ZSERVICEABILITY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zStat.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,1366 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zCPU.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zTracer.inline.hpp"
+#include "gc/z/zUtils.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/os.hpp"
+#include "runtime/timer.hpp"
+#include "utilities/align.hpp"
+#include "utilities/compilerWarnings.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/ticks.hpp"
+
+//
+// Stat sampler/counter data
+//
+// Aggregated sample data (count, sum, max), kept per CPU.
+struct ZStatSamplerData {
+  uint64_t _nsamples;
+  uint64_t _sum;
+  uint64_t _max;
+
+  ZStatSamplerData() :
+    _nsamples(0),
+    _sum(0),
+    _max(0) {}
+
+  // Merges another sample aggregate into this one.
+  void add(const ZStatSamplerData& new_sample) {
+    _nsamples += new_sample._nsamples;
+    // Accumulate the sum (was incorrectly adding _nsamples, corrupting _sum)
+    _sum += new_sample._sum;
+    _max = MAX2(_max, new_sample._max);
+  }
+};
+
+// Per-CPU counter value.
+struct ZStatCounterData {
+  uint64_t _counter;
+
+  ZStatCounterData() :
+    _counter(0) {}
+};
+
+//
+// Stat sampler history
+//
+// Ring buffer of the last 'size' sample aggregates, maintaining both a
+// rolling total over the buffer and an accumulated sum since the last wrap.
+template <size_t size>
+class ZStatSamplerHistoryInterval {
+private:
+  size_t           _next;
+  ZStatSamplerData _samples[size];
+  ZStatSamplerData _accumulated;
+  ZStatSamplerData _total;
+
+public:
+  // Initializer list kept in member declaration order (avoids -Wreorder)
+  ZStatSamplerHistoryInterval() :
+      _next(0),
+      _samples(),
+      _accumulated(),
+      _total() {}
+
+  // Inserts a sample, replacing the oldest slot. Returns true when the
+  // buffer wrapped (interval complete); total() then covers the interval.
+  bool add(const ZStatSamplerData& new_sample) {
+    // Insert sample
+    const ZStatSamplerData old_sample = _samples[_next];
+    _samples[_next] = new_sample;
+
+    // Adjust accumulated
+    _accumulated._nsamples += new_sample._nsamples;
+    _accumulated._sum += new_sample._sum;
+    _accumulated._max = MAX2(_accumulated._max, new_sample._max);
+
+    // Adjust total
+    _total._nsamples -= old_sample._nsamples;
+    _total._sum -= old_sample._sum;
+    _total._nsamples += new_sample._nsamples;
+    _total._sum += new_sample._sum;
+    if (_total._max < new_sample._max) {
+      // Found new max
+      _total._max = new_sample._max;
+    } else if (_total._max == old_sample._max) {
+      // Removed old max, reset and find new max
+      _total._max = 0;
+      for (size_t i = 0; i < size; i++) {
+        if (_total._max < _samples[i]._max) {
+          _total._max = _samples[i]._max;
+        }
+      }
+    }
+
+    // Adjust next
+    if (++_next == size) {
+      _next = 0;
+
+      // Clear accumulated
+      const ZStatSamplerData zero;
+      _accumulated = zero;
+
+      // Became full
+      return true;
+    }
+
+    // Not yet full
+    return false;
+  }
+
+  const ZStatSamplerData& total() const {
+    return _total;
+  }
+
+  const ZStatSamplerData& accumulated() const {
+    return _accumulated;
+  }
+};
+
+// Cascading sample history: 10x1s slots feed 60x10s slots feed 60x10m
+// slots, which feed an all-time total. avg_*/max_* combine the partial
+// accumulations of the finer intervals with the coarser interval's total.
+class ZStatSamplerHistory : public CHeapObj<mtGC> {
+private:
+  ZStatSamplerHistoryInterval<10> _10seconds;
+  ZStatSamplerHistoryInterval<60> _10minutes;
+  ZStatSamplerHistoryInterval<60> _10hours;
+  ZStatSamplerData                _total;
+
+  // Average, guarding against division by zero when no samples exist.
+  uint64_t avg(uint64_t sum, uint64_t nsamples) const {
+    return (nsamples > 0) ? sum / nsamples : 0;
+  }
+
+public:
+  ZStatSamplerHistory() :
+      _10seconds(),
+      _10minutes(),
+      _10hours(),
+      _total() {}
+
+  // Each interval that wraps pushes its total one level up the cascade.
+  void add(const ZStatSamplerData& new_sample) {
+    if (_10seconds.add(new_sample)) {
+      if (_10minutes.add(_10seconds.total())) {
+        if (_10hours.add(_10minutes.total())) {
+          _total.add(_10hours.total());
+        }
+      }
+    }
+  }
+
+  uint64_t avg_10_seconds() const {
+    const uint64_t sum      = _10seconds.total()._sum;
+    const uint64_t nsamples = _10seconds.total()._nsamples;
+    return avg(sum, nsamples);
+  }
+
+  uint64_t avg_10_minutes() const {
+    const uint64_t sum      = _10seconds.accumulated()._sum +
+                              _10minutes.total()._sum;
+    const uint64_t nsamples = _10seconds.accumulated()._nsamples +
+                              _10minutes.total()._nsamples;
+    return avg(sum, nsamples);
+  }
+
+  uint64_t avg_10_hours() const {
+    const uint64_t sum      = _10seconds.accumulated()._sum +
+                              _10minutes.accumulated()._sum +
+                              _10hours.total()._sum;
+    const uint64_t nsamples = _10seconds.accumulated()._nsamples +
+                              _10minutes.accumulated()._nsamples +
+                              _10hours.total()._nsamples;
+    return avg(sum, nsamples);
+  }
+
+  uint64_t avg_total() const {
+    const uint64_t sum      = _10seconds.accumulated()._sum +
+                              _10minutes.accumulated()._sum +
+                              _10hours.accumulated()._sum +
+                              _total._sum;
+    const uint64_t nsamples = _10seconds.accumulated()._nsamples +
+                              _10minutes.accumulated()._nsamples +
+                              _10hours.accumulated()._nsamples +
+                              _total._nsamples;
+    return avg(sum, nsamples);
+  }
+
+  uint64_t max_10_seconds() const {
+    return _10seconds.total()._max;
+  }
+
+  uint64_t max_10_minutes() const {
+    return MAX2(_10seconds.accumulated()._max,
+                _10minutes.total()._max);
+  }
+
+  uint64_t max_10_hours() const {
+    return MAX3(_10seconds.accumulated()._max,
+                _10minutes.accumulated()._max,
+                _10hours.total()._max);
+  }
+
+  uint64_t max_total() const {
+    return MAX4(_10seconds.accumulated()._max,
+                _10minutes.accumulated()._max,
+                _10hours.accumulated()._max,
+                _total._max);
+  }
+};
+
+//
+// Stat unit printers
+//
+// Prints a sampler's avg/max history in milliseconds.
+void ZStatUnitTime(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) {
+  log.print(" %10s: %-40s  "
+            "%9.3f / %-9.3f "
+            "%9.3f / %-9.3f "
+            "%9.3f / %-9.3f "
+            "%9.3f / %-9.3f   ms",
+            sampler.group(),
+            sampler.name(),
+            TimeHelper::counter_to_millis(history.avg_10_seconds()),
+            TimeHelper::counter_to_millis(history.max_10_seconds()),
+            TimeHelper::counter_to_millis(history.avg_10_minutes()),
+            TimeHelper::counter_to_millis(history.max_10_minutes()),
+            TimeHelper::counter_to_millis(history.avg_10_hours()),
+            TimeHelper::counter_to_millis(history.max_10_hours()),
+            TimeHelper::counter_to_millis(history.avg_total()),
+            TimeHelper::counter_to_millis(history.max_total()));
+}
+
+// Prints a sampler's avg/max history in megabytes.
+void ZStatUnitBytes(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) {
+  log.print(" %10s: %-40s  "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) "   MB",
+            sampler.group(),
+            sampler.name(),
+            history.avg_10_seconds() / M,
+            history.max_10_seconds() / M,
+            history.avg_10_minutes() / M,
+            history.max_10_minutes() / M,
+            history.avg_10_hours() / M,
+            history.max_10_hours() / M,
+            history.avg_total() / M,
+            history.max_total() / M);
+}
+
+// Prints a sampler's avg/max history as thread counts.
+void ZStatUnitThreads(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) {
+  log.print(" %10s: %-40s  "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) "   threads",
+            sampler.group(),
+            sampler.name(),
+            history.avg_10_seconds(),
+            history.max_10_seconds(),
+            history.avg_10_minutes(),
+            history.max_10_minutes(),
+            history.avg_10_hours(),
+            history.max_10_hours(),
+            history.avg_total(),
+            history.max_total());
+}
+
+// Prints a sampler's avg/max history in megabytes per second.
+void ZStatUnitBytesPerSecond(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) {
+  log.print(" %10s: %-40s  "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) "   MB/s",
+            sampler.group(),
+            sampler.name(),
+            history.avg_10_seconds() / M,
+            history.max_10_seconds() / M,
+            history.avg_10_minutes() / M,
+            history.max_10_minutes() / M,
+            history.avg_10_hours() / M,
+            history.max_10_hours() / M,
+            history.avg_total() / M,
+            history.max_total() / M);
+}
+
+// Prints a sampler's avg/max history in operations per second.
+void ZStatUnitOpsPerSecond(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) {
+  log.print(" %10s: %-40s  "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " "
+            UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) "   ops/s",
+            sampler.group(),
+            sampler.name(),
+            history.avg_10_seconds(),
+            history.max_10_seconds(),
+            history.avg_10_minutes(),
+            history.max_10_minutes(),
+            history.avg_10_hours(),
+            history.max_10_hours(),
+            history.avg_total(),
+            history.max_total());
+}
+
+//
+// Stat value
+//
+uintptr_t ZStatValue::_base = 0;
+uint32_t  ZStatValue::_cpu_offset = 0;
+
+// Registers a statistics value in a named group, reserving 'size' bytes at
+// _offset within each CPU's local area. Must run before ZStatValue::initialize()
+// finalizes _cpu_offset and allocates _base (asserted below).
+// (Continuation parameters re-aligned to the opening parenthesis, matching
+// the indentation convention used elsewhere in this file.)
+ZStatValue::ZStatValue(const char* group,
+                       const char* name,
+                       uint32_t id,
+                       uint32_t size) :
+    _group(group),
+    _name(name),
+    _id(id),
+    _offset(_cpu_offset) {
+  assert(_base == 0, "Already initialized");
+  _cpu_offset += size;
+}
+
+// Returns a pointer to this value's slot in the given CPU's local area.
+template <typename T>
+T* ZStatValue::get_cpu_local(uint32_t cpu) const {
+  assert(_base != 0, "Not initialized");
+  const uintptr_t cpu_base = _base + (_cpu_offset * cpu);
+  const uintptr_t value_addr = cpu_base + _offset;
+  return (T*)value_addr;
+}
+
+// Finalizes the per-CPU layout and allocates the zeroed backing memory.
+void ZStatValue::initialize() {
+  // Finalize and align CPU offset
+  _cpu_offset = align_up(_cpu_offset, ZCacheLineSize);
+
+  // Allocate cache-line-aligned memory for all CPUs
+  const size_t size = _cpu_offset * ZCPU::count();
+  _base = ZUtils::alloc_aligned(ZCacheLineSize, size);
+  memset((void*)_base, 0, size);
+}
+
+const char* ZStatValue::group() const {
+  return _group;
+}
+
+const char* ZStatValue::name() const {
+  return _name;
+}
+
+uint32_t ZStatValue::id() const {
+  return _id;
+}
+
+//
+// Stat iterable value
+//
+template <typename T> uint32_t ZStatIterableValue<T>::_count = 0;
+template <typename T> T*       ZStatIterableValue<T>::_first = NULL;
+
+template <typename T>
+ZStatIterableValue<T>::ZStatIterableValue(const char* group,
+                                          const char* name,
+                                          uint32_t size) :
+    ZStatValue(group, name, _count++, size),
+    _next(insert()) {}
+
+// Inserts this value into the intrusive _first list, kept sorted by
+// (group, name). Returns the element that follows the insertion point.
+template <typename T>
+T* ZStatIterableValue<T>::insert() const {
+  T** current = &_first;
+
+  while (*current != NULL) {
+    // First sort by group, then by name
+    const int group_cmp = strcmp((*current)->group(), group());
+    const int name_cmp = strcmp((*current)->name(), name());
+    if ((group_cmp > 0) || (group_cmp == 0 && name_cmp > 0)) {
+      break;
+    }
+
+    current = &(*current)->_next;
+  }
+
+  T* const next = *current;
+  *current = (T*)this;
+  return next;
+}
+
+//
+// Stat sampler
+//
+ZStatSampler::ZStatSampler(const char* group, const char* name, ZStatUnitPrinter printer) :
+    ZStatIterableValue(group, name, sizeof(ZStatSamplerData)),
+    _printer(printer) {}
+
+// Returns the current CPU's local sample data.
+ZStatSamplerData* ZStatSampler::get() const {
+  return get_cpu_local<ZStatSamplerData>(ZCPU::id());
+}
+
+// Atomically drains every CPU's sample data (resetting each field to zero
+// via xchg) and returns the merged aggregate.
+ZStatSamplerData ZStatSampler::collect_and_reset() const {
+  ZStatSamplerData all;
+
+  const uint32_t ncpus = ZCPU::count();
+  for (uint32_t i = 0; i < ncpus; i++) {
+    ZStatSamplerData* const cpu_data = get_cpu_local<ZStatSamplerData>(i);
+    if (cpu_data->_nsamples > 0) {
+      const uint64_t nsamples = Atomic::xchg((uint64_t)0, &cpu_data->_nsamples);
+      const uint64_t sum = Atomic::xchg((uint64_t)0, &cpu_data->_sum);
+      const uint64_t max = Atomic::xchg((uint64_t)0, &cpu_data->_max);
+      all._nsamples += nsamples;
+      all._sum += sum;
+      if (all._max < max) {
+        all._max = max;
+      }
+    }
+  }
+
+  return all;
+}
+
+ZStatUnitPrinter ZStatSampler::printer() const {
+  return _printer;
+}
+
+//
+// Stat counter
+//
+ZStatCounter::ZStatCounter(const char* group, const char* name, ZStatUnitPrinter printer) :
+    ZStatIterableValue(group, name, sizeof(ZStatCounterData)),
+    _sampler(group, name, printer) {}
+
+// Returns the current CPU's local counter data.
+ZStatCounterData* ZStatCounter::get() const {
+  return get_cpu_local<ZStatCounterData>(ZCPU::id());
+}
+
+// Atomically drains all CPU-local counters and records the total as one
+// sample in the associated sampler.
+void ZStatCounter::sample_and_reset() const {
+  uint64_t counter = 0;
+
+  const uint32_t ncpus = ZCPU::count();
+  for (uint32_t i = 0; i < ncpus; i++) {
+    ZStatCounterData* const cpu_data = get_cpu_local<ZStatCounterData>(i);
+    counter += Atomic::xchg((uint64_t)0, &cpu_data->_counter);
+  }
+
+  ZStatSample(_sampler, counter);
+}
+
+//
+// Stat unsampled counter
+//
+ZStatUnsampledCounter::ZStatUnsampledCounter(const char* name) :
+    ZStatIterableValue("Unsampled", name, sizeof(ZStatCounterData)) {}
+
+ZStatCounterData* ZStatUnsampledCounter::get() const {
+  return get_cpu_local<ZStatCounterData>(ZCPU::id());
+}
+
+// Atomically drains all CPU-local counters and returns the merged total.
+ZStatCounterData ZStatUnsampledCounter::collect_and_reset() const {
+  ZStatCounterData all;
+
+  const uint32_t ncpus = ZCPU::count();
+  for (uint32_t i = 0; i < ncpus; i++) {
+    ZStatCounterData* const cpu_data = get_cpu_local<ZStatCounterData>(i);
+    all._counter += Atomic::xchg((uint64_t)0, &cpu_data->_counter);
+  }
+
+  return all;
+}
+
+//
+// Stat MMU (Minimum Mutator Utilization)
+//
+ZStatMMUPause::ZStatMMUPause() :
+    _start(0.0),
+    _end(0.0) {}
+
+// Records a pause interval, converting tick values to milliseconds.
+ZStatMMUPause::ZStatMMUPause(const Ticks& start, const Ticks& end) :
+    _start(TimeHelper::counter_to_millis(start.value())),
+    _end(TimeHelper::counter_to_millis(end.value())) {}
+
+double ZStatMMUPause::end() const {
+  return _end;
+}
+
+// Returns how much of this pause (in ms) falls inside [start, end].
+double ZStatMMUPause::overlap(double start, double end) const {
+  const double start_max = MAX2(start, _start);
+  const double end_min = MIN2(end, _end);
+
+  if (end_min > start_max) {
+    // Overlap found
+    return end_min - start_max;
+  }
+
+  // No overlap
+  return 0.0;
+}
+
+size_t ZStatMMU::_next = 0;
+size_t ZStatMMU::_npauses = 0;
+ZStatMMUPause ZStatMMU::_pauses[200];
+double ZStatMMU::_mmu_2ms = 100.0;
+double ZStatMMU::_mmu_5ms = 100.0;
+double ZStatMMU::_mmu_10ms = 100.0;
+double ZStatMMU::_mmu_20ms = 100.0;
+double ZStatMMU::_mmu_50ms = 100.0;
+double ZStatMMU::_mmu_100ms = 100.0;
+
+// Returns the index-th most recent pause (index 0 is the latest).
+const ZStatMMUPause& ZStatMMU::pause(size_t index) {
+  return _pauses[(_next - index - 1) % ARRAY_SIZE(_pauses)];
+}
+
+// Computes mutator utilization (percent) over the time_slice (ms) that
+// ends at the most recent pause's end.
+double ZStatMMU::calculate_mmu(double time_slice) {
+  const double end = pause(0).end();
+  const double start = end - time_slice;
+  double time_paused = 0.0;
+
+  // Find all overlapping pauses
+  for (size_t i = 0; i < _npauses; i++) {
+    const double overlap = pause(i).overlap(start, end);
+    if (overlap == 0.0) {
+      // No overlap
+      break;
+    }
+
+    time_paused += overlap;
+  }
+
+  // Calculate MMU
+  const double time_mutator = time_slice - time_paused;
+  return percent_of(time_mutator, time_slice);
+}
+
+// Records a pause and updates the worst-case MMU for each time slice.
+void ZStatMMU::register_pause(const Ticks& start, const Ticks& end) {
+  // Add pause
+  const size_t index = _next++ % ARRAY_SIZE(_pauses);
+  _pauses[index] = ZStatMMUPause(start, end);
+  _npauses = MIN2(_npauses + 1, ARRAY_SIZE(_pauses));
+
+  // Recalculate MMUs
+  _mmu_2ms    = MIN2(_mmu_2ms,   calculate_mmu(2));
+  _mmu_5ms    = MIN2(_mmu_5ms,   calculate_mmu(5));
+  _mmu_10ms   = MIN2(_mmu_10ms,  calculate_mmu(10));
+  _mmu_20ms   = MIN2(_mmu_20ms,  calculate_mmu(20));
+  _mmu_50ms   = MIN2(_mmu_50ms,  calculate_mmu(50));
+  _mmu_100ms  = MIN2(_mmu_100ms, calculate_mmu(100));
+}
+
+void ZStatMMU::print() {
+  log_info(gc, mmu)(
+     "MMU: 2ms/%.1f%%, 5ms/%.1f%%, 10ms/%.1f%%, 20ms/%.1f%%, 50ms/%.1f%%, 100ms/%.1f%%",
+     _mmu_2ms, _mmu_5ms, _mmu_10ms, _mmu_20ms, _mmu_50ms, _mmu_100ms);
+}
+
+//
+// Stat phases
+//
+ConcurrentGCTimer ZStatPhase::_timer;
+
+ZStatPhase::ZStatPhase(const char* group, const char* name) :
+    _sampler(group, name, ZStatUnitTime) {}
+
+// Logs the phase name at phase start; optionally tags the current thread.
+void ZStatPhase::log_start(LogTargetHandle log, bool thread) const {
+  if (!log.is_enabled()) {
+    return;
+  }
+
+  if (thread) {
+    ResourceMark rm;
+    log.print("%s (%s)", name(), Thread::current()->name());
+  } else {
+    log.print("%s", name());
+  }
+}
+
+// Logs the phase name and duration (ms) at phase end.
+void ZStatPhase::log_end(LogTargetHandle log, const Tickspan& duration, bool thread) const {
+  if (!log.is_enabled()) {
+    return;
+  }
+
+  if (thread) {
+    ResourceMark rm;
+    log.print("%s (%s) %.3fms", name(), Thread::current()->name(), TimeHelper::counter_to_millis(duration.value()));
+  } else {
+    log.print("%s %.3fms", name(), TimeHelper::counter_to_millis(duration.value()));
+  }
+}
+
+ConcurrentGCTimer* ZStatPhase::timer() {
+  return &_timer;
+}
+
+const char* ZStatPhase::name() const {
+  return _sampler.name();
+}
+
+ZStatPhaseCycle::ZStatPhaseCycle(const char* name) :
+    ZStatPhase("Collector", name) {}
+
+// Marks the start of a GC cycle: GC timer, JFR tracer, pre-GC heap printing.
+void ZStatPhaseCycle::register_start(const Ticks& start) const {
+  timer()->register_gc_start(start);
+
+  ZTracer::tracer()->report_gc_start(ZCollectedHeap::heap()->gc_cause(), start);
+
+  ZCollectedHeap::heap()->print_heap_before_gc();
+  ZCollectedHeap::heap()->trace_heap_before_gc(ZTracer::tracer());
+
+  log_info(gc, start)("Garbage Collection (%s)",
+                       GCCause::to_string(ZCollectedHeap::heap()->gc_cause()));
+}
+
+// Helpers for logging heap usage as "<MB>M(<percent of max>%)"
+#define ZUSED_FMT                       SIZE_FORMAT "M(%.0lf%%)"
+#define ZUSED_ARGS(size, max_capacity)  ((size) / M), (percent_of<size_t>(size, max_capacity))
+
+// Marks the end of a GC cycle: post-GC heap printing/tracing, cycle-duration
+// sample, and a summary dump of all stat tables.
+void ZStatPhaseCycle::register_end(const Ticks& start, const Ticks& end) const {
+  timer()->register_gc_end(end);
+
+  ZCollectedHeap::heap()->print_heap_after_gc();
+  ZCollectedHeap::heap()->trace_heap_after_gc(ZTracer::tracer());
+
+  ZTracer::tracer()->report_gc_end(end, timer()->time_partitions());
+
+  const Tickspan duration = end - start;
+  ZStatSample(_sampler, duration.value());
+
+  ZStatLoad::print();
+  ZStatMMU::print();
+  ZStatMark::print();
+  ZStatRelocation::print();
+  ZStatNMethods::print();
+  ZStatReferences::print();
+  ZStatHeap::print();
+
+  log_info(gc)("Garbage Collection (%s) " ZUSED_FMT "->" ZUSED_FMT,
+               GCCause::to_string(ZCollectedHeap::heap()->gc_cause()),
+               ZUSED_ARGS(ZStatHeap::used_at_mark_start(), ZStatHeap::max_capacity()),
+               ZUSED_ARGS(ZStatHeap::used_at_relocate_end(), ZStatHeap::max_capacity()));
+}
+
+Tickspan ZStatPhasePause::_max;
+
+ZStatPhasePause::ZStatPhasePause(const char* name) :
+    ZStatPhase("Phase", name) {}
+
+// Longest pause observed so far.
+const Tickspan& ZStatPhasePause::max() {
+  return _max;
+}
+
+void ZStatPhasePause::register_start(const Ticks& start) const {
+  timer()->register_gc_pause_start(name(), start);
+
+  LogTarget(Debug, gc, phases, start) log;
+  log_start(log);
+}
+
+// Records pause duration, tracks the max pause, and feeds the MMU stats.
+void ZStatPhasePause::register_end(const Ticks& start, const Ticks& end) const {
+  timer()->register_gc_pause_end(end);
+
+  const Tickspan duration = end - start;
+  ZStatSample(_sampler, duration.value());
+
+  // Track max pause time
+  if (_max < duration) {
+    _max = duration;
+  }
+
+  // Track minimum mutator utilization
+  ZStatMMU::register_pause(start, end);
+
+  LogTarget(Info, gc, phases) log;
+  log_end(log, duration);
+}
+
+ZStatPhaseConcurrent::ZStatPhaseConcurrent(const char* name) :
+    ZStatPhase("Phase", name) {}
+
+void ZStatPhaseConcurrent::register_start(const Ticks& start) const {
+  timer()->register_gc_concurrent_start(name(), start);
+
+  LogTarget(Debug, gc, phases, start) log;
+  log_start(log);
+}
+
+// Records the concurrent phase duration as a sample and logs it.
+void ZStatPhaseConcurrent::register_end(const Ticks& start, const Ticks& end) const {
+  timer()->register_gc_concurrent_end(end);
+
+  const Tickspan duration = end - start;
+  ZStatSample(_sampler, duration.value());
+
+  LogTarget(Info, gc, phases) log;
+  log_end(log, duration);
+}
+
+ZStatSubPhase::ZStatSubPhase(const char* name) :
+    ZStatPhase("Subphase", name) {}
+
+void ZStatSubPhase::register_start(const Ticks& start) const {
+  LogTarget(Debug, gc, phases, start) log;
+  log_start(log, true /* thread */);
+}
+
+// Records subphase duration, reporting the per-thread phase to the tracer.
+void ZStatSubPhase::register_end(const Ticks& start, const Ticks& end) const {
+  ZTracer::tracer()->report_thread_phase(*this, start, end);
+
+  const Tickspan duration = end - start;
+  ZStatSample(_sampler, duration.value());
+
+  LogTarget(Debug, gc, phases) log;
+  log_end(log, duration, true /* thread */);
+}
+
+ZStatCriticalPhase::ZStatCriticalPhase(const char* name, bool verbose) :
+    ZStatPhase("Critical", name),
+    _counter("Critical", name, ZStatUnitOpsPerSecond),
+    _verbose(verbose) {}
+
+void ZStatCriticalPhase::register_start(const Ticks& start) const {
+  LogTarget(Debug, gc, start) log;
+  log_start(log, true /* thread */);
+}
+
+// Records duration and occurrence count; _verbose selects Info vs Debug logging.
+void ZStatCriticalPhase::register_end(const Ticks& start, const Ticks& end) const {
+  ZTracer::tracer()->report_thread_phase(*this, start, end);
+
+  const Tickspan duration = end - start;
+  ZStatSample(_sampler, duration.value());
+  ZStatInc(_counter);
+
+  if (_verbose) {
+    LogTarget(Info, gc) log;
+    log_end(log, duration, true /* thread */);
+  } else {
+    LogTarget(Debug, gc) log;
+    log_end(log, duration, true /* thread */);
+  }
+}
+
+//
+// Stat sample/inc
+//
+// Add a sample to a sampler's per-CPU data: bumps the sample count, adds the
+// value to the running sum, and updates the max via a lock-free CAS loop.
+// Optionally reports the sample to the tracer.
+void ZStatSample(const ZStatSampler& sampler, uint64_t value, bool trace) {
+  ZStatSamplerData* const cpu_data = sampler.get();
+  Atomic::add(1u, &cpu_data->_nsamples);
+  Atomic::add(value, &cpu_data->_sum);
+
+  // Lock-free max update: retry until the observed max is already >= value,
+  // or the CAS successfully installs value as the new max.
+  uint64_t max = cpu_data->_max;
+  for (;;) {
+    if (max >= value) {
+      // Not max
+      break;
+    }
+
+    const uint64_t new_max = value;
+    const uint64_t prev_max = Atomic::cmpxchg(new_max, &cpu_data->_max, max);
+    if (prev_max == max) {
+      // Success
+      break;
+    }
+
+    // Retry
+    max = prev_max;
+  }
+
+  if (trace) {
+    ZTracer::tracer()->report_stat_sampler(sampler, value);
+  }
+}
+
+// Atomically increment a sampled counter, optionally reporting the new
+// value to the tracer.
+void ZStatInc(const ZStatCounter& counter, uint64_t increment, bool trace) {
+  ZStatCounterData* const cpu_data = counter.get();
+  const uint64_t value = Atomic::add(increment, &cpu_data->_counter);
+
+  if (trace) {
+    ZTracer::tracer()->report_stat_counter(counter, increment, value);
+  }
+}
+
+// Atomically increment an unsampled counter (never traced).
+void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) {
+  ZStatCounterData* const cpu_data = counter.get();
+  Atomic::add(increment, &cpu_data->_counter);
+}
+
+//
+// Stat allocation rate
+//
+const ZStatUnsampledCounter ZStatAllocRate::_counter("Allocation Rate");
+TruncatedSeq                ZStatAllocRate::_rate(ZStatAllocRate::sample_window_sec * ZStatAllocRate::sample_hz);
+TruncatedSeq                ZStatAllocRate::_rate_avg(ZStatAllocRate::sample_window_sec * ZStatAllocRate::sample_hz);
+
+const ZStatUnsampledCounter& ZStatAllocRate::counter() {
+  return _counter;
+}
+
+// Convert the bytes allocated since the last sample into a bytes/second
+// rate, and feed both the rate and its windowed average into the sequences.
+uint64_t ZStatAllocRate::sample_and_reset() {
+  const ZStatCounterData bytes_per_sample = _counter.collect_and_reset();
+  const uint64_t bytes_per_second = bytes_per_sample._counter * sample_hz;
+
+  _rate.add(bytes_per_second);
+  _rate_avg.add(_rate.avg());
+
+  return bytes_per_second;
+}
+
+// Average allocation rate over the sample window (B/s)
+double ZStatAllocRate::avg() {
+  return _rate.avg();
+}
+
+// Standard deviation of the windowed average rate (B/s)
+double ZStatAllocRate::avg_sd() {
+  return _rate_avg.sd();
+}
+
+//
+// Stat thread
+//
+// ZStat is a concurrent GC thread that ticks sample_hz times per second,
+// samples/collects all registered counters and samplers, and prints the
+// statistics table every ZStatisticsInterval ticks when gc+stats logging
+// is enabled.
+ZStat::ZStat() :
+    _metronome(sample_hz) {
+  set_name("ZStat");
+  create_and_start();
+}
+
+void ZStat::sample_and_collect(ZStatSamplerHistory* history) const {
+  // Sample counters
+  for (const ZStatCounter* counter = ZStatCounter::first(); counter != NULL; counter = counter->next()) {
+    counter->sample_and_reset();
+  }
+
+  // Collect samples
+  for (const ZStatSampler* sampler = ZStatSampler::first(); sampler != NULL; sampler = sampler->next()) {
+    ZStatSamplerHistory& sampler_history = history[sampler->id()];
+    sampler_history.add(sampler->collect_and_reset());
+  }
+}
+
+bool ZStat::should_print(LogTargetHandle log) const {
+  // Print only on every ZStatisticsInterval:th tick, and only if enabled
+  return log.is_enabled() && (_metronome.nticks() % ZStatisticsInterval == 0);
+}
+
+void ZStat::print(LogTargetHandle log, const ZStatSamplerHistory* history) const {
+  // Print
+  log.print("=== Garbage Collection Statistics =======================================================================================================================");
+  log.print("                                                             Last 10s              Last 10m              Last 10h                Total");
+  log.print("                                                             Avg / Max             Avg / Max             Avg / Max             Avg / Max");
+
+  // Each sampler is printed by its unit-specific printer function
+  for (const ZStatSampler* sampler = ZStatSampler::first(); sampler != NULL; sampler = sampler->next()) {
+    const ZStatSamplerHistory& sampler_history = history[sampler->id()];
+    const ZStatUnitPrinter printer = sampler->printer();
+    printer(log, *sampler, sampler_history);
+  }
+
+  log.print("=========================================================================================================================================================");
+}
+
+void ZStat::run_service() {
+  // One history slot per registered sampler
+  ZStatSamplerHistory* const history = new ZStatSamplerHistory[ZStatSampler::count()];
+  LogTarget(Info, gc, stats) log;
+
+  // Main loop
+  while (_metronome.wait_for_tick()) {
+    sample_and_collect(history);
+    if (should_print(log)) {
+      print(log, history);
+    }
+  }
+
+  delete [] history;
+}
+
+void ZStat::stop_service() {
+  // Stopping the metronome terminates the main loop in run_service()
+  _metronome.stop();
+}
+
+//
+// Stat table
+//
+// Helper for formatting fixed-width, column-aligned table rows into a single
+// line buffer. Each call to operator() starts a new row; ZColumn's left()/
+// right()/center()/fill() format one cell each and return the next column,
+// and end() terminates and returns the assembled line.
+class ZStatTablePrinter {
+private:
+  static const size_t _buffer_size = 256;
+
+  const size_t _column0_width;
+  const size_t _columnN_width;
+  char         _buffer[_buffer_size];
+
+public:
+  class ZColumn {
+  private:
+    char* const  _buffer;
+    const size_t _position;
+    const size_t _width;
+    const size_t _width_next;
+
+    // NOTE(review): writes at _position + _width without checking against
+    // _buffer_size — callers are expected to keep the sum of column widths
+    // below the buffer size; confirm with table layouts used in this file.
+    ZColumn next() const {
+      // Insert space between columns
+      _buffer[_position + _width] = ' ';
+      return ZColumn(_buffer, _position + _width + 1, _width_next, _width_next);
+    }
+
+    // Format into the buffer at the given position; returns the number of
+    // characters written, or 0 on formatting error.
+    size_t print(size_t position, const char* fmt, va_list va) {
+      const int res = jio_vsnprintf(_buffer + position, _buffer_size - position, fmt, va);
+      if (res < 0) {
+        return 0;
+      }
+
+      return (size_t)res;
+    }
+
+  public:
+    ZColumn(char* buffer, size_t position, size_t width, size_t width_next) :
+        _buffer(buffer),
+        _position(position),
+        _width(width),
+        _width_next(width_next) {}
+
+    // Left-align the formatted text within the column
+    ZColumn left(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) {
+      va_list va;
+
+      va_start(va, fmt);
+      const size_t written = print(_position, fmt, va);
+      va_end(va);
+
+      if (written < _width) {
+        // Fill empty space
+        memset(_buffer + _position + written, ' ', _width - written);
+      }
+
+      return next();
+    }
+
+    // Right-align the formatted text within the column; overlong text is
+    // replaced by '?' fill characters
+    ZColumn right(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) {
+      va_list va;
+
+      va_start(va, fmt);
+      const size_t written = print(_position, fmt, va);
+      va_end(va);
+
+      if (written > _width) {
+        // Line too long
+        return fill('?');
+      }
+
+      if (written < _width) {
+        // Short line, move all to right
+        memmove(_buffer + _position + _width - written, _buffer + _position, written);
+
+        // Fill empty space
+        memset(_buffer + _position, ' ', _width - written);
+      }
+
+      return next();
+    }
+
+    // Center the formatted text within the column; overlong text is
+    // replaced by '?' fill characters
+    ZColumn center(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) {
+      va_list va;
+
+      va_start(va, fmt);
+      const size_t written = print(_position, fmt, va);
+      va_end(va);
+
+      if (written > _width) {
+        // Line too long
+        return fill('?');
+      }
+
+      if (written < _width) {
+        // Short line, move all to center
+        const size_t start_space = (_width - written) / 2;
+        const size_t end_space = _width - written - start_space;
+        memmove(_buffer + _position + start_space, _buffer + _position, written);
+
+        // Fill empty spaces
+        memset(_buffer + _position, ' ', start_space);
+        memset(_buffer + _position + start_space + written, ' ', end_space);
+      }
+
+      return next();
+    }
+
+    // Fill the entire column with the given character
+    ZColumn fill(char filler = ' ') {
+      memset(_buffer + _position, filler, _width);
+      return next();
+    }
+
+    // Terminate the row and return the assembled line
+    const char* end() {
+      _buffer[_position] = '\0';
+      return _buffer;
+    }
+  };
+
+public:
+  ZStatTablePrinter(size_t column0_width, size_t columnN_width) :
+      _column0_width(column0_width),
+      _columnN_width(columnN_width) {}
+
+  // Start a new row; the first column uses column0_width, the rest columnN_width
+  ZColumn operator()() {
+    return ZColumn(_buffer, 0, _column0_width, _columnN_width);
+  }
+};
+
+//
+// Stat cycle
+//
+// Tracks GC cycle counts, start/end times, and an exponentially decaying
+// sequence of normalized cycle durations, used for GC heuristics.
+uint64_t  ZStatCycle::_ncycles = 0;
+Ticks     ZStatCycle::_start_of_last;
+Ticks     ZStatCycle::_end_of_last;
+NumberSeq ZStatCycle::_normalized_duration(0.3 /* alpha */);
+
+void ZStatCycle::at_start() {
+  _start_of_last = Ticks::now();
+}
+
+void ZStatCycle::at_end(double boost_factor) {
+  _end_of_last = Ticks::now();
+  _ncycles++;
+
+  // Calculate normalized cycle duration. The measured duration is
+  // normalized using the boost factor to avoid artificial deflation
+  // of the duration when boost mode is enabled.
+  const double duration = (_end_of_last - _start_of_last).seconds();
+  const double normalized_duration = duration * boost_factor;
+  _normalized_duration.add(normalized_duration);
+}
+
+uint64_t ZStatCycle::ncycles() {
+  return _ncycles;
+}
+
+const AbsSeq& ZStatCycle::normalized_duration() {
+  return _normalized_duration;
+}
+
+// Seconds since the last completed cycle ended, or since VM start-up if
+// no cycle has completed yet.
+double ZStatCycle::time_since_last() {
+  if (_ncycles == 0) {
+    // Return time since VM start-up
+    return os::elapsedTime();
+  }
+
+  const Ticks now = Ticks::now();
+  const Tickspan time_since_last = now - _end_of_last;
+  return time_since_last.seconds();
+}
+
+//
+// Stat load
+//
+// Log the OS 1/5/15-minute load averages
+void ZStatLoad::print() {
+  double loadavg[3] = {};
+  os::loadavg(loadavg, ARRAY_SIZE(loadavg));
+  log_info(gc, load)("Load: %.2f/%.2f/%.2f", loadavg[0], loadavg[1], loadavg[2]);
+}
+
+//
+// Stat mark
+//
+// Snapshot of marking statistics, captured at mark start/end and logged
+// after the cycle.
+size_t ZStatMark::_nstripes;
+size_t ZStatMark::_nproactiveflush;
+size_t ZStatMark::_nterminateflush;
+size_t ZStatMark::_ncontinue;
+size_t ZStatMark::_ntrycomplete;
+
+// Record the number of mark stripes in use, captured at mark start
+void ZStatMark::set_at_mark_start(size_t nstripes) {
+  _nstripes = nstripes;
+}
+
+// Record flush/termination statistics, captured at mark end
+void ZStatMark::set_at_mark_end(size_t nproactiveflush,
+                                size_t nterminateflush,
+                                size_t ntrycomplete,
+                                size_t ncontinue) {
+  _nproactiveflush = nproactiveflush;
+  _nterminateflush = nterminateflush;
+  _ntrycomplete = ntrycomplete;
+  _ncontinue = ncontinue;
+}
+
+// Log the recorded marking statistics
+void ZStatMark::print() {
+  log_info(gc, marking)("Mark: "
+                        SIZE_FORMAT " stripe(s), "
+                        SIZE_FORMAT " proactive flush(es), "
+                        SIZE_FORMAT " terminate flush(es), "
+                        SIZE_FORMAT " completion(s), "
+                        SIZE_FORMAT " continuation(s)", // Fix: removed stray trailing space from log message
+                        _nstripes,
+                        _nproactiveflush,
+                        _nterminateflush,
+                        _ntrycomplete,
+                        _ncontinue);
+}
+
+//
+// Stat relocation
+//
+// Snapshot of relocation statistics: bytes selected for relocation and
+// whether relocation completed successfully.
+size_t ZStatRelocation::_relocating;
+bool ZStatRelocation::_success;
+
+void ZStatRelocation::set_at_select_relocation_set(size_t relocating) {
+  _relocating = relocating;
+}
+
+void ZStatRelocation::set_at_relocate_end(bool success) {
+  _success = success;
+}
+
+void ZStatRelocation::print() {
+  if (_success) {
+    log_info(gc, reloc)("Relocation: Successful, " SIZE_FORMAT "M relocated", _relocating / M);
+  } else {
+    log_info(gc, reloc)("Relocation: Incomplete");
+  }
+}
+
+//
+// Stat nmethods
+//
+// Log the number of nmethods registered/unregistered in the ZNMethodTable
+void ZStatNMethods::print() {
+  log_info(gc, nmethod)("NMethods: " SIZE_FORMAT " registered, " SIZE_FORMAT " unregistered",
+                        ZNMethodTable::registered_nmethods(),
+                        ZNMethodTable::unregistered_nmethods());
+}
+
+//
+// Stat references
+//
+// Per-strength (soft/weak/final/phantom) reference processing counts:
+// encountered, discovered and enqueued.
+ZStatReferences::ZCount ZStatReferences::_soft;
+ZStatReferences::ZCount ZStatReferences::_weak;
+ZStatReferences::ZCount ZStatReferences::_final;
+ZStatReferences::ZCount ZStatReferences::_phantom;
+
+void ZStatReferences::set(ZCount* count, size_t encountered, size_t discovered, size_t enqueued) {
+  count->encountered = encountered;
+  count->discovered = discovered;
+  count->enqueued = enqueued;
+}
+
+void ZStatReferences::set_soft(size_t encountered, size_t discovered, size_t enqueued) {
+  set(&_soft, encountered, discovered, enqueued);
+}
+
+void ZStatReferences::set_weak(size_t encountered, size_t discovered, size_t enqueued) {
+  set(&_weak, encountered, discovered, enqueued);
+}
+
+void ZStatReferences::set_final(size_t encountered, size_t discovered, size_t enqueued) {
+  set(&_final, encountered, discovered, enqueued);
+}
+
+void ZStatReferences::set_phantom(size_t encountered, size_t discovered, size_t enqueued) {
+  set(&_phantom, encountered, discovered, enqueued);
+}
+
+// Log one line for the given reference strength
+void ZStatReferences::print(const char* name, const ZStatReferences::ZCount& ref) {
+  log_info(gc, ref)("%s: "
+                    SIZE_FORMAT " encountered, "
+                    SIZE_FORMAT " discovered, "
+                    SIZE_FORMAT " enqueued",
+                    name,
+                    ref.encountered,
+                    ref.discovered,
+                    ref.enqueued);
+}
+
+void ZStatReferences::print() {
+  print("Soft", _soft);
+  print("Weak", _weak);
+  print("Final", _final);
+  print("Phantom", _phantom);
+}
+
+//
+// Stat heap
+//
+// Snapshots of heap sizing at the significant points of a GC cycle
+// (initialize, mark start/end, relocate start/end), printed as a table.
+ZStatHeap::ZAtInitialize ZStatHeap::_at_initialize;
+ZStatHeap::ZAtMarkStart ZStatHeap::_at_mark_start;
+ZStatHeap::ZAtMarkEnd ZStatHeap::_at_mark_end;
+ZStatHeap::ZAtRelocateStart ZStatHeap::_at_relocate_start;
+ZStatHeap::ZAtRelocateEnd ZStatHeap::_at_relocate_end;
+
+// Table cell formats: "-" for not-applicable, or size in M with percent
+// of max capacity
+#define ZSIZE_NA               "%9s", "-"
+#define ZSIZE_ARGS(size)       SIZE_FORMAT_W(8) "M (%.0lf%%)", \
+                               ((size) / M), (percent_of<size_t>(size, _at_initialize.max_capacity))
+
+// Capacity not yet used
+size_t ZStatHeap::available(size_t used) {
+  return _at_initialize.max_capacity - used;
+}
+
+// Reserve is capped by what is actually available
+size_t ZStatHeap::reserve(size_t used) {
+  return MIN2(_at_initialize.max_reserve, available(used));
+}
+
+// Free is what is available minus the reserve
+size_t ZStatHeap::free(size_t used) {
+  return available(used) - reserve(used);
+}
+
+void ZStatHeap::set_at_initialize(size_t max_capacity,
+                                  size_t max_reserve) {
+  _at_initialize.max_capacity = max_capacity;
+  _at_initialize.max_reserve = max_reserve;
+}
+
+void ZStatHeap::set_at_mark_start(size_t capacity,
+                                  size_t used) {
+  _at_mark_start.capacity = capacity;
+  _at_mark_start.reserve = reserve(used);
+  _at_mark_start.used = used;
+  _at_mark_start.free = free(used);
+}
+
+void ZStatHeap::set_at_mark_end(size_t capacity,
+                                size_t allocated,
+                                size_t used) {
+  _at_mark_end.capacity = capacity;
+  _at_mark_end.reserve = reserve(used);
+  _at_mark_end.allocated = allocated;
+  _at_mark_end.used = used;
+  _at_mark_end.free = free(used);
+}
+
+void ZStatHeap::set_at_select_relocation_set(size_t live,
+                                             size_t garbage,
+                                             size_t reclaimed) {
+  _at_mark_end.live = live;
+  _at_mark_end.garbage = garbage;
+
+  _at_relocate_start.garbage = garbage - reclaimed;
+  _at_relocate_start.reclaimed = reclaimed;
+}
+
+void ZStatHeap::set_at_relocate_start(size_t capacity,
+                                      size_t allocated,
+                                      size_t used) {
+  _at_relocate_start.capacity = capacity;
+  _at_relocate_start.reserve = reserve(used);
+  _at_relocate_start.allocated = allocated;
+  _at_relocate_start.used = used;
+  _at_relocate_start.free = free(used);
+}
+
+void ZStatHeap::set_at_relocate_end(size_t capacity,
+                                    size_t allocated,
+                                    size_t reclaimed,
+                                    size_t used,
+                                    size_t used_high,
+                                    size_t used_low) {
+  _at_relocate_end.capacity = capacity;
+  _at_relocate_end.capacity_high = capacity;
+  _at_relocate_end.capacity_low = _at_mark_start.capacity;
+  _at_relocate_end.reserve = reserve(used);
+  // Note the inversion: reserve/free are largest when used is smallest,
+  // so *_high is computed from used_low and *_low from used_high
+  _at_relocate_end.reserve_high = reserve(used_low);
+  _at_relocate_end.reserve_low = reserve(used_high);
+  _at_relocate_end.garbage = _at_mark_end.garbage - reclaimed;
+  _at_relocate_end.allocated = allocated;
+  _at_relocate_end.reclaimed = reclaimed;
+  _at_relocate_end.used = used;
+  _at_relocate_end.used_high = used_high;
+  _at_relocate_end.used_low = used_low;
+  _at_relocate_end.free = free(used);
+  _at_relocate_end.free_high = free(used_low);
+  _at_relocate_end.free_low = free(used_high);
+}
+
+size_t ZStatHeap::max_capacity() {
+  return _at_initialize.max_capacity;
+}
+
+size_t ZStatHeap::used_at_mark_start() {
+  return _at_mark_start.used;
+}
+
+size_t ZStatHeap::used_at_relocate_end() {
+  return _at_relocate_end.used;
+}
+
+// Print the heap statistics table: one row per metric, one column per
+// snapshot point (plus High/Low columns for the relocate-end snapshot)
+void ZStatHeap::print() {
+  ZStatTablePrinter table(10, 18);
+  log_info(gc, heap)("%s", table()
+                     .fill()
+                     .center("Mark Start")
+                     .center("Mark End")
+                     .center("Relocate Start")
+                     .center("Relocate End")
+                     .center("High")
+                     .center("Low")
+                     .end());
+  log_info(gc, heap)("%s", table()
+                     .right("Capacity:")
+                     .left(ZSIZE_ARGS(_at_mark_start.capacity))
+                     .left(ZSIZE_ARGS(_at_mark_end.capacity))
+                     .left(ZSIZE_ARGS(_at_relocate_start.capacity))
+                     .left(ZSIZE_ARGS(_at_relocate_end.capacity))
+                     .left(ZSIZE_ARGS(_at_relocate_end.capacity_high))
+                     .left(ZSIZE_ARGS(_at_relocate_end.capacity_low))
+                     .end());
+  log_info(gc, heap)("%s", table()
+                     .right("Reserve:")
+                     .left(ZSIZE_ARGS(_at_mark_start.reserve))
+                     .left(ZSIZE_ARGS(_at_mark_end.reserve))
+                     .left(ZSIZE_ARGS(_at_relocate_start.reserve))
+                     .left(ZSIZE_ARGS(_at_relocate_end.reserve))
+                     .left(ZSIZE_ARGS(_at_relocate_end.reserve_high))
+                     .left(ZSIZE_ARGS(_at_relocate_end.reserve_low))
+                     .end());
+  log_info(gc, heap)("%s", table()
+                     .right("Free:")
+                     .left(ZSIZE_ARGS(_at_mark_start.free))
+                     .left(ZSIZE_ARGS(_at_mark_end.free))
+                     .left(ZSIZE_ARGS(_at_relocate_start.free))
+                     .left(ZSIZE_ARGS(_at_relocate_end.free))
+                     .left(ZSIZE_ARGS(_at_relocate_end.free_high))
+                     .left(ZSIZE_ARGS(_at_relocate_end.free_low))
+                     .end());
+  log_info(gc, heap)("%s", table()
+                     .right("Used:")
+                     .left(ZSIZE_ARGS(_at_mark_start.used))
+                     .left(ZSIZE_ARGS(_at_mark_end.used))
+                     .left(ZSIZE_ARGS(_at_relocate_start.used))
+                     .left(ZSIZE_ARGS(_at_relocate_end.used))
+                     .left(ZSIZE_ARGS(_at_relocate_end.used_high))
+                     .left(ZSIZE_ARGS(_at_relocate_end.used_low))
+                     .end());
+  log_info(gc, heap)("%s", table()
+                     .right("Live:")
+                     .left(ZSIZE_NA)
+                     .left(ZSIZE_ARGS(_at_mark_end.live))
+                     .left(ZSIZE_ARGS(_at_mark_end.live /* Same as at mark end */))
+                     .left(ZSIZE_ARGS(_at_mark_end.live /* Same as at mark end */))
+                     .left(ZSIZE_NA)
+                     .left(ZSIZE_NA)
+                     .end());
+  log_info(gc, heap)("%s", table()
+                     .right("Allocated:")
+                     .left(ZSIZE_NA)
+                     .left(ZSIZE_ARGS(_at_mark_end.allocated))
+                     .left(ZSIZE_ARGS(_at_relocate_start.allocated))
+                     .left(ZSIZE_ARGS(_at_relocate_end.allocated))
+                     .left(ZSIZE_NA)
+                     .left(ZSIZE_NA)
+                     .end());
+  log_info(gc, heap)("%s", table()
+                     .right("Garbage:")
+                     .left(ZSIZE_NA)
+                     .left(ZSIZE_ARGS(_at_mark_end.garbage))
+                     .left(ZSIZE_ARGS(_at_relocate_start.garbage))
+                     .left(ZSIZE_ARGS(_at_relocate_end.garbage))
+                     .left(ZSIZE_NA)
+                     .left(ZSIZE_NA)
+                     .end());
+  log_info(gc, heap)("%s", table()
+                     .right("Reclaimed:")
+                     .left(ZSIZE_NA)
+                     .left(ZSIZE_NA)
+                     .left(ZSIZE_ARGS(_at_relocate_start.reclaimed))
+                     .left(ZSIZE_ARGS(_at_relocate_end.reclaimed))
+                     .left(ZSIZE_NA)
+                     .left(ZSIZE_NA)
+                     .end());
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zStat.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZSTAT_HPP
+#define SHARE_GC_Z_ZSTAT_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/z/zMetronome.hpp"
+#include "logging/logHandle.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/numberSeq.hpp"
+#include "utilities/ticks.hpp"
+
+class ZPage;
+class ZStatSampler;
+class ZStatSamplerHistory;
+struct ZStatCounterData;
+struct ZStatSamplerData;
+
+//
+// Stat unit printers
+//
+// A printer formats one sampler's history line using a specific unit
+// (time, bytes, threads, bytes/second or operations/second).
+typedef void (*ZStatUnitPrinter)(LogTargetHandle log, const ZStatSampler&, const ZStatSamplerHistory&);
+
+void ZStatUnitTime(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history);
+void ZStatUnitBytes(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history);
+void ZStatUnitThreads(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history);
+void ZStatUnitBytesPerSecond(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history);
+void ZStatUnitOpsPerSecond(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history);
+
+//
+// Stat value
+//
+// Base class for a named statistics value with per-CPU storage. Each value
+// has a group, a name, a unique id and an offset into the shared storage
+// area (presumably laid out per CPU via _base/_cpu_offset — see zStat.cpp).
+class ZStatValue {
+private:
+  static uintptr_t _base;
+  static uint32_t  _cpu_offset;
+
+  const char* const _group;
+  const char* const _name;
+  const uint32_t    _id;
+  const uint32_t    _offset;
+
+protected:
+  ZStatValue(const char* group,
+             const char* name,
+             uint32_t id,
+             uint32_t size);
+
+  // Typed access to this value's storage for the given CPU
+  template <typename T> T* get_cpu_local(uint32_t cpu) const;
+
+public:
+  static void initialize();
+
+  const char* group() const;
+  const char* name() const;
+  uint32_t id() const;
+};
+
+//
+// Stat iterable value
+//
+// Adds intrusive linked-list iteration over all instances of a concrete
+// value type T (samplers, counters), so the ZStat thread can walk them.
+template <typename T>
+class ZStatIterableValue : public ZStatValue {
+private:
+  static uint32_t _count;
+  static T*       _first;
+
+  T* _next;
+
+  T* insert() const;
+
+protected:
+  ZStatIterableValue(const char* group,
+                     const char* name,
+                     uint32_t size);
+
+public:
+  // Number of registered instances of T
+  static uint32_t count() {
+    return _count;
+  }
+
+  // Head of the instance list
+  static T* first() {
+    return _first;
+  }
+
+  T* next() const {
+    return _next;
+  }
+};
+
+//
+// Stat sampler
+//
+// A value whose samples (count/sum/max) are aggregated and printed with a
+// unit-specific printer.
+class ZStatSampler : public ZStatIterableValue<ZStatSampler> {
+private:
+  const ZStatUnitPrinter _printer;
+
+public:
+  ZStatSampler(const char* group,
+               const char* name,
+               ZStatUnitPrinter printer);
+
+  ZStatSamplerData* get() const;
+  ZStatSamplerData collect_and_reset() const;
+
+  ZStatUnitPrinter printer() const;
+};
+
+//
+// Stat counter
+//
+// A counter that is periodically sampled into its associated sampler.
+class ZStatCounter : public ZStatIterableValue<ZStatCounter> {
+private:
+  const ZStatSampler _sampler;
+
+public:
+  ZStatCounter(const char* group,
+               const char* name,
+               ZStatUnitPrinter printer);
+
+  ZStatCounterData* get() const;
+  void sample_and_reset() const;
+};
+
+//
+// Stat unsampled counter
+//
+// A plain counter that is collected on demand rather than sampled.
+class ZStatUnsampledCounter : public ZStatIterableValue<ZStatUnsampledCounter> {
+public:
+  ZStatUnsampledCounter(const char* name);
+
+  ZStatCounterData* get() const;
+  ZStatCounterData collect_and_reset() const;
+};
+
+//
+// Stat MMU (Minimum Mutator Utilization)
+//
+// A single recorded pause interval, in seconds
+class ZStatMMUPause {
+private:
+  double _start;
+  double _end;
+
+public:
+  ZStatMMUPause();
+  ZStatMMUPause(const Ticks& start, const Ticks& end);
+
+  double end() const;
+  // How much of [start, end) this pause overlaps
+  double overlap(double start, double end) const;
+};
+
+// Tracks the minimum mutator utilization over a set of time-slice sizes,
+// computed from a ring buffer of the most recent pauses.
+class ZStatMMU {
+private:
+  static size_t        _next;
+  static size_t        _npauses;
+  static ZStatMMUPause _pauses[200]; // Record the last 200 pauses
+
+  static double _mmu_2ms;
+  static double _mmu_5ms;
+  static double _mmu_10ms;
+  static double _mmu_20ms;
+  static double _mmu_50ms;
+  static double _mmu_100ms;
+
+  static const ZStatMMUPause& pause(size_t index);
+  static double calculate_mmu(double time_slice);
+
+public:
+  static void register_pause(const Ticks& start, const Ticks& end);
+
+  static void print();
+};
+
+//
+// Stat phases
+//
+// Base class for a named GC phase. Concrete subclasses decide how a phase
+// is registered with the shared GC timer and at what level it is logged.
+class ZStatPhase {
+private:
+  static ConcurrentGCTimer _timer;
+
+protected:
+  const ZStatSampler _sampler;
+
+  ZStatPhase(const char* group, const char* name);
+
+  void log_start(LogTargetHandle log, bool thread = false) const;
+  void log_end(LogTargetHandle log, const Tickspan& duration, bool thread = false) const;
+
+public:
+  static ConcurrentGCTimer* timer();
+
+  const char* name() const;
+
+  virtual void register_start(const Ticks& start) const = 0;
+  virtual void register_end(const Ticks& start, const Ticks& end) const = 0;
+};
+
+// A full GC cycle
+class ZStatPhaseCycle : public ZStatPhase {
+public:
+  ZStatPhaseCycle(const char* name);
+
+  virtual void register_start(const Ticks& start) const;
+  virtual void register_end(const Ticks& start, const Ticks& end) const;
+};
+
+// A stop-the-world pause; also tracks the max pause time seen
+class ZStatPhasePause : public ZStatPhase {
+private:
+  static Tickspan _max; // Max pause time
+
+public:
+  ZStatPhasePause(const char* name);
+
+  static const Tickspan& max();
+
+  virtual void register_start(const Ticks& start) const;
+  virtual void register_end(const Ticks& start, const Ticks& end) const;
+};
+
+// A phase that runs concurrently with the mutator
+class ZStatPhaseConcurrent : public ZStatPhase {
+public:
+  ZStatPhaseConcurrent(const char* name);
+
+  virtual void register_start(const Ticks& start) const;
+  virtual void register_end(const Ticks& start, const Ticks& end) const;
+};
+
+// A per-thread subphase within a larger phase
+class ZStatSubPhase : public ZStatPhase {
+public:
+  ZStatSubPhase(const char* name);
+
+  virtual void register_start(const Ticks& start) const;
+  virtual void register_end(const Ticks& start, const Ticks& end) const;
+};
+
+// A critical section (e.g. allocation stall); also counts occurrences
+// per second, and logs at Info when verbose, Debug otherwise
+class ZStatCriticalPhase : public ZStatPhase {
+private:
+  const ZStatCounter _counter;
+  const bool         _verbose;
+
+public:
+  ZStatCriticalPhase(const char* name, bool verbose = true);
+
+  virtual void register_start(const Ticks& start) const;
+  virtual void register_end(const Ticks& start, const Ticks& end) const;
+};
+
+//
+// Stat timer
+//
+// RAII scope timer: registers the phase start on construction and the
+// phase end (with the measured interval) on destruction.
+class ZStatTimer : public StackObj {
+private:
+  const ZStatPhase& _phase;
+  const Ticks       _start;
+
+public:
+  ZStatTimer(const ZStatPhase& phase) :
+      _phase(phase),
+      _start(Ticks::now()) {
+    _phase.register_start(_start);
+  }
+
+  ~ZStatTimer() {
+    const Ticks end = Ticks::now();
+    _phase.register_end(_start, end);
+  }
+};
+
+//
+// Stat sample/increment
+//
+// Tracing defaults to the ZStatisticsForceTrace flag; pass trace explicitly
+// to override.
+void ZStatSample(const ZStatSampler& sampler, uint64_t value, bool trace = ZStatisticsForceTrace);
+void ZStatInc(const ZStatCounter& counter, uint64_t increment = 1, bool trace = ZStatisticsForceTrace);
+void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment = 1);
+
+//
+// Stat allocation rate
+//
+// Tracks the mutator allocation rate as bytes/second over a sliding
+// window, sampled sample_hz times per second.
+class ZStatAllocRate : public AllStatic {
+private:
+  static const ZStatUnsampledCounter _counter;
+  static TruncatedSeq                _rate;     // B/s
+  static TruncatedSeq                _rate_avg; // B/s
+
+public:
+  static const uint64_t sample_window_sec = 1; // seconds
+  static const uint64_t sample_hz         = 10;
+
+  static const ZStatUnsampledCounter& counter();
+  static uint64_t sample_and_reset();
+
+  static double avg();
+  static double avg_sd();
+};
+
+//
+// Stat thread
+//
+// Concurrent GC thread that periodically samples all registered counters
+// and prints the statistics table (see zStat.cpp).
+class ZStat : public ConcurrentGCThread {
+private:
+  static const uint64_t sample_hz = 1;
+
+  ZMetronome _metronome;
+
+  void sample_and_collect(ZStatSamplerHistory* history) const;
+  bool should_print(LogTargetHandle log) const;
+  void print(LogTargetHandle log, const ZStatSamplerHistory* history) const;
+
+protected:
+  virtual void run_service();
+  virtual void stop_service();
+
+public:
+  ZStat();
+};
+
+//
+// Stat cycle
+//
+// Tracks GC cycle counts, timing of the last cycle, and a decaying average
+// of normalized cycle durations.
+class ZStatCycle : public AllStatic {
+private:
+  static uint64_t  _ncycles;
+  static Ticks     _start_of_last;
+  static Ticks     _end_of_last;
+  static NumberSeq _normalized_duration;
+
+public:
+  static void at_start();
+  static void at_end(double boost_factor);
+
+  static uint64_t ncycles();
+  static const AbsSeq& normalized_duration();
+  // Seconds since the last cycle ended (or since VM start if none yet)
+  static double time_since_last();
+};
+
+//
+// Stat load
+//
+// Logs the OS load averages
+class ZStatLoad : public AllStatic {
+public:
+  static void print();
+};
+
+//
+// Stat mark
+//
+// Marking statistics captured at mark start/end and printed per cycle
+class ZStatMark : public AllStatic {
+private:
+  static size_t _nstripes;
+  static size_t _nproactiveflush;
+  static size_t _nterminateflush;
+  static size_t _ntrycomplete;
+  static size_t _ncontinue;
+
+public:
+  static void set_at_mark_start(size_t nstripes);
+  static void set_at_mark_end(size_t nproactiveflush,
+                              size_t nterminateflush,
+                              size_t ntrycomplete,
+                              size_t ncontinue);
+
+  static void print();
+};
+
+//
+// Stat relocation
+//
+// Relocation statistics: bytes selected for relocation and success status
+class ZStatRelocation : public AllStatic {
+private:
+  static size_t _relocating;
+  static bool   _success;
+
+public:
+  static void set_at_select_relocation_set(size_t relocating);
+  static void set_at_relocate_end(bool success);
+
+  static void print();
+};
+
+//
+// Stat nmethods
+//
+// Logs registered/unregistered nmethod counts
+class ZStatNMethods : public AllStatic {
+public:
+  static void print();
+};
+
+//
+// Stat references
+//
+// Per-strength reference processing statistics
+class ZStatReferences : public AllStatic {
+private:
+  static struct ZCount {
+    size_t encountered;
+    size_t discovered;
+    size_t enqueued;
+  } _soft, _weak, _final, _phantom;
+
+  static void set(ZCount* count, size_t encountered, size_t discovered, size_t enqueued);
+  static void print(const char* name, const ZCount& ref);
+
+public:
+  static void set_soft(size_t encountered, size_t discovered, size_t enqueued);
+  static void set_weak(size_t encountered, size_t discovered, size_t enqueued);
+  static void set_final(size_t encountered, size_t discovered, size_t enqueued);
+  static void set_phantom(size_t encountered, size_t discovered, size_t enqueued);
+
+  static void print();
+};
+
+//
+// Stat heap
+//
+// Heap sizing snapshots taken at the significant points of a GC cycle,
+// printed as a table at the end of the cycle (see zStat.cpp).
+class ZStatHeap : public AllStatic {
+private:
+  // Fixed at initialization
+  static struct ZAtInitialize {
+    size_t max_capacity;
+    size_t max_reserve;
+  } _at_initialize;
+
+  // Snapshot at mark start
+  static struct ZAtMarkStart {
+    size_t capacity;
+    size_t reserve;
+    size_t used;
+    size_t free;
+  } _at_mark_start;
+
+  // Snapshot at mark end; live/garbage filled in at relocation-set selection
+  static struct ZAtMarkEnd {
+    size_t capacity;
+    size_t reserve;
+    size_t allocated;
+    size_t used;
+    size_t free;
+    size_t live;
+    size_t garbage;
+  } _at_mark_end;
+
+  // Snapshot at relocate start
+  static struct ZAtRelocateStart {
+    size_t capacity;
+    size_t reserve;
+    size_t garbage;
+    size_t allocated;
+    size_t reclaimed;
+    size_t used;
+    size_t free;
+  } _at_relocate_start;
+
+  // Snapshot at relocate end; *_high/*_low track the extremes seen
+  static struct ZAtRelocateEnd {
+    size_t capacity;
+    size_t capacity_high;
+    size_t capacity_low;
+    size_t reserve;
+    size_t reserve_high;
+    size_t reserve_low;
+    size_t garbage;
+    size_t allocated;
+    size_t reclaimed;
+    size_t used;
+    size_t used_high;
+    size_t used_low;
+    size_t free;
+    size_t free_high;
+    size_t free_low;
+  } _at_relocate_end;
+
+  static size_t available(size_t used);
+  static size_t reserve(size_t used);
+  static size_t free(size_t used);
+
+public:
+  static void set_at_initialize(size_t max_capacity,
+                                size_t max_reserve);
+  static void set_at_mark_start(size_t capacity,
+                                size_t used);
+  static void set_at_mark_end(size_t capacity,
+                              size_t allocated,
+                              size_t used);
+  static void set_at_select_relocation_set(size_t live,
+                                           size_t garbage,
+                                           size_t reclaimed);
+  static void set_at_relocate_start(size_t capacity,
+                                    size_t allocated,
+                                    size_t used);
+  static void set_at_relocate_end(size_t capacity,
+                                  size_t allocated,
+                                  size_t reclaimed,
+                                  size_t used,
+                                  size_t used_high,
+                                  size_t used_low);
+
+  static size_t max_capacity();
+  static size_t used_at_mark_start();
+  static size_t used_at_relocate_end();
+
+  static void print();
+};
+
+#endif // SHARE_GC_Z_ZSTAT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zTask.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zTask.hpp"
+#include "gc/z/zThread.hpp"
+
+// Adapter that lets a ZTask run on a WorkGang: delegates AbstractGangTask
+// naming to the gang task and forwards work() to the owning ZTask.
+ZTask::GangTask::GangTask(ZTask* ztask, const char* name) :
+    AbstractGangTask(name),
+    _ztask(ztask) {}
+
+// Tag the calling gang worker with its worker id for the duration of the
+// task's work, so ZThread::worker_id() is valid inside ZTask::work().
+void ZTask::GangTask::work(uint worker_id) {
+  ZThread::set_worker_id(worker_id);
+  _ztask->work();
+  ZThread::clear_worker_id();
+}
+
+ZTask::ZTask(const char* name) :
+    _gang_task(this, name) {}
+
+const char* ZTask::name() const {
+  return _gang_task.name();
+}
+
+// Exposes the embedded AbstractGangTask so a WorkGang can run this ZTask.
+AbstractGangTask* ZTask::gang_task() {
+  return &_gang_task;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zTask.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZTASK_HPP
+#define SHARE_GC_Z_ZTASK_HPP
+
+#include "gc/shared/workgroup.hpp"
+#include "memory/allocation.hpp"
+
+// Base class for ZGC parallel tasks. Subclasses implement work(), which is
+// executed once per gang worker; the worker id is published via ZThread for
+// the duration of the call (see zTask.cpp).
+class ZTask : public StackObj {
+private:
+  // Internal adapter bridging ZTask to the shared WorkGang framework.
+  class GangTask : public AbstractGangTask {
+  private:
+    ZTask* const _ztask;
+
+  public:
+    GangTask(ZTask* ztask, const char* name);
+
+    virtual void work(uint worker_id);
+  };
+
+  GangTask _gang_task;
+
+public:
+  ZTask(const char* name);
+
+  const char* name() const;
+  // Gang task to hand to a WorkGang for execution.
+  AbstractGangTask* gang_task();
+
+  // Executed by each worker thread participating in the task.
+  virtual void work() = 0;
+};
+
+#endif // SHARE_GC_Z_ZTASK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zThread.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zThread.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
+
+// All ZThread state is thread-local and lazily initialized on first query
+// via ensure_initialized() (see zThread.hpp).
+__thread bool      ZThread::_initialized;
+__thread uintptr_t ZThread::_id;
+__thread bool      ZThread::_is_vm;
+__thread bool      ZThread::_is_java;
+__thread bool      ZThread::_is_worker;
+__thread uint      ZThread::_worker_id;
+
+// Caches the current thread's identity flags. Called at most once per thread.
+void ZThread::initialize() {
+  assert(!_initialized, "Already initialized");
+  const Thread* const thread = Thread::current();
+  _initialized = true;
+  _id = (uintptr_t)thread;
+  _is_vm = thread->is_VM_thread();
+  _is_java = thread->is_Java_thread();
+  _is_worker = thread->is_Worker_thread();
+  _worker_id = (uint)-1;  // (uint)-1 is the "no worker id" sentinel
+}
+
+// Best-effort human-readable name of the current thread, for logging.
+const char* ZThread::name() {
+  const Thread* const thread = Thread::current();
+  if (thread->is_Named_thread()) {
+    const NamedThread* const named = (const NamedThread*)thread;
+    return named->name();
+  } else if (thread->is_Java_thread()) {
+    return "Java";
+  }
+
+  return "Unknown";
+}
+
+bool ZThread::has_worker_id() {
+  return _initialized &&
+         _is_worker &&
+         _worker_id != (uint)-1;
+}
+
+// Called by ZTask::GangTask::work() before dispatching to the task body.
+void ZThread::set_worker_id(uint worker_id) {
+  ensure_initialized();
+  assert(!has_worker_id(), "Worker id already initialized");
+  _worker_id = worker_id;
+}
+
+// Resets the sentinel; paired with set_worker_id() around each work() call.
+void ZThread::clear_worker_id() {
+  assert(has_worker_id(), "Worker id not initialized");
+  _worker_id = (uint)-1;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zThread.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZTHREAD_HPP
+#define SHARE_GC_Z_ZTHREAD_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+// Thread-local cache of the current thread's identity (id, VM/Java/worker
+// flags, worker id). State is lazily initialized on first access; worker ids
+// are set/cleared only by ZTask (hence the friendship).
+class ZThread : public AllStatic {
+  friend class ZTask;
+
+private:
+  static __thread bool      _initialized;
+  static __thread uintptr_t _id;
+  static __thread bool      _is_vm;
+  static __thread bool      _is_java;
+  static __thread bool      _is_worker;
+  static __thread uint      _worker_id;
+
+  static void initialize();
+
+  // Lazy one-time initialization; cheap check on the fast path.
+  static void ensure_initialized() {
+    if (!_initialized) {
+      initialize();
+    }
+  }
+
+  static bool has_worker_id();
+  static void set_worker_id(uint worker_id);
+  static void clear_worker_id();
+
+public:
+  static const char* name();
+
+  // Identity of the current thread (the Thread* cast to an integer).
+  static uintptr_t id() {
+    ensure_initialized();
+    return _id;
+  }
+
+  static bool is_vm() {
+    ensure_initialized();
+    return _is_vm;
+  }
+
+  static bool is_java() {
+    ensure_initialized();
+    return _is_java;
+  }
+
+  static bool is_worker() {
+    ensure_initialized();
+    return _is_worker;
+  }
+
+  // Only valid while a ZTask is running on this worker thread.
+  static uint worker_id() {
+    assert(has_worker_id(), "Worker id not initialized");
+    return _worker_id;
+  }
+};
+
+#endif // SHARE_GC_Z_ZTHREAD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zThreadLocalData.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZTHREADLOCALDATA_HPP
+#define SHARE_GC_Z_ZTHREADLOCALDATA_HPP
+
+#include "gc/z/zMarkStack.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/sizes.hpp"
+
+// Per-thread GC data stored inside Thread's reserved gc_data area:
+// the load-barrier bad mask and the thread-local mark stacks.
+class ZThreadLocalData {
+private:
+  uintptr_t              _address_bad_mask;
+  ZMarkThreadLocalStacks _stacks;
+
+  ZThreadLocalData() :
+      _address_bad_mask(0),
+      _stacks() {}
+
+  static ZThreadLocalData* data(Thread* thread) {
+    return thread->gc_data<ZThreadLocalData>();
+  }
+
+public:
+  // Placement-constructs the data in the thread's preallocated gc_data slot.
+  static void create(Thread* thread) {
+    new (data(thread)) ZThreadLocalData();
+  }
+
+  // Explicit destructor call; storage is owned by the Thread, not freed here.
+  static void destroy(Thread* thread) {
+    data(thread)->~ZThreadLocalData();
+  }
+
+  static void set_address_bad_mask(Thread* thread, uintptr_t mask) {
+    data(thread)->_address_bad_mask = mask;
+  }
+
+  static ZMarkThreadLocalStacks* stacks(Thread* thread) {
+    return &data(thread)->_stacks;
+  }
+
+  // Offset of the bad mask within Thread, for use by barrier assembly code.
+  static ByteSize address_bad_mask_offset() {
+    return Thread::gc_data_offset() + byte_offset_of(ZThreadLocalData, _address_bad_mask);
+  }
+};
+
+#endif // SHARE_GC_Z_ZTHREADLOCALDATA_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zTracer.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zTracer.hpp"
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcLocker.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "runtime/safepointVerifiers.hpp"
+
+// Singleton; created once during ZGC initialization.
+ZTracer* ZTracer::_tracer = NULL;
+
+ZTracer::ZTracer() :
+    GCTracer(Z) {}
+
+void ZTracer::initialize() {
+  _tracer = new (ResourceObj::C_HEAP, mtGC) ZTracer();
+}
+
+// The send_* functions emit JFR events. Each activates a NoSafepointVerifier
+// unless the VM is already at a safepoint, asserting no safepoint occurs
+// while the event is being composed and committed.
+void ZTracer::send_stat_counter(uint32_t counter_id, uint64_t increment, uint64_t value) {
+  NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
+
+  EventZStatisticsCounter e;
+  if (e.should_commit()) {
+    e.set_id(counter_id);
+    e.set_increment(increment);
+    e.set_value(value);
+    e.commit();
+  }
+}
+
+void ZTracer::send_stat_sampler(uint32_t sampler_id, uint64_t value) {
+  NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
+
+  EventZStatisticsSampler e;
+  if (e.should_commit()) {
+    e.set_id(sampler_id);
+    e.set_value(value);
+    e.commit();
+  }
+}
+
+void ZTracer::send_thread_phase(const char* name, const Ticks& start, const Ticks& end) {
+  NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
+
+  // UNTIMED: the event's start/end times are supplied by the caller below.
+  EventZThreadPhase e(UNTIMED);
+  if (e.should_commit()) {
+    e.set_gcId(GCId::current_or_undefined());
+    e.set_name(name);
+    e.set_starttime(start);
+    e.set_endtime(end);
+    e.commit();
+  }
+}
+
+void ZTracer::send_page_alloc(size_t size, size_t used, size_t free, size_t cache, bool nonblocking, bool noreserve) {
+  NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
+
+  EventZPageAllocation e;
+  if (e.should_commit()) {
+    e.set_pageSize(size);
+    e.set_usedAfter(used);
+    e.set_freeAfter(free);
+    e.set_inCacheAfter(cache);
+    e.set_nonBlocking(nonblocking);
+    e.set_noReserve(noreserve);
+    e.commit();
+  }
+}
+
+// The report_* functions are thin public wrappers that unpack ZStat/flag
+// arguments into the primitive values the senders take.
+void ZTracer::report_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value) {
+  send_stat_counter(counter.id(), increment, value);
+}
+
+void ZTracer::report_stat_sampler(const ZStatSampler& sampler, uint64_t value) {
+  send_stat_sampler(sampler.id(), value);
+}
+
+void ZTracer::report_thread_phase(const ZStatPhase& phase, const Ticks& start, const Ticks& end) {
+  send_thread_phase(phase.name(), start, end);
+}
+
+void ZTracer::report_thread_phase(const char* name, const Ticks& start, const Ticks& end) {
+  send_thread_phase(name, start, end);
+}
+
+void ZTracer::report_page_alloc(size_t size, size_t used, size_t free, size_t cache, ZAllocationFlags flags) {
+  send_page_alloc(size, used, free, cache, flags.non_blocking(), flags.no_reserve());
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zTracer.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZTRACER_HPP
+#define SHARE_GC_Z_ZTRACER_HPP
+
+#include "gc/shared/gcTrace.hpp"
+#include "gc/z/zAllocationFlags.hpp"
+
+class ZStatCounter;
+class ZStatPhase;
+class ZStatSampler;
+
+// ZGC's GCTracer: reports ZGC statistics counters/samplers, thread phases
+// and page allocations as JFR events. Singleton, accessed via tracer().
+class ZTracer : public GCTracer {
+private:
+  static ZTracer* _tracer;
+
+  ZTracer();
+
+  // Low-level JFR event emitters (see zTracer.cpp).
+  void send_stat_counter(uint32_t counter_id, uint64_t increment, uint64_t value);
+  void send_stat_sampler(uint32_t sampler_id, uint64_t value);
+  void send_thread_phase(const char* name, const Ticks& start, const Ticks& end);
+  void send_page_alloc(size_t size, size_t used, size_t free, size_t cache, bool nonblocking, bool noreserve);
+
+public:
+  static ZTracer* tracer();
+  static void initialize();
+
+  void report_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value);
+  void report_stat_sampler(const ZStatSampler& sampler, uint64_t value);
+  void report_thread_phase(const ZStatPhase& phase, const Ticks& start, const Ticks& end);
+  void report_thread_phase(const char* name, const Ticks& start, const Ticks& end);
+  void report_page_alloc(size_t size, size_t used, size_t free, size_t cache, ZAllocationFlags flags);
+};
+
+// RAII scope that reports the enclosed region as a named thread phase:
+// records start time at construction, reports on destruction.
+class ZTraceThreadPhase : public StackObj {
+private:
+  const Ticks       _start;
+  const char* const _name;
+
+public:
+  ZTraceThreadPhase(const char* name);
+  ~ZTraceThreadPhase();
+};
+
+#endif // SHARE_GC_Z_ZTRACER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zTracer.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZTRACER_INLINE_HPP
+#define SHARE_GC_Z_ZTRACER_INLINE_HPP
+
+#include "gc/z/zTracer.hpp"
+
+// Returns the singleton; valid only after ZTracer::initialize().
+inline ZTracer* ZTracer::tracer() {
+  return _tracer;
+}
+
+inline ZTraceThreadPhase::ZTraceThreadPhase(const char* name) :
+    _start(Ticks::now()),
+    _name(name) {}
+
+// Reports the phase [construction, destruction) to the tracer.
+inline ZTraceThreadPhase::~ZTraceThreadPhase() {
+  ZTracer::tracer()->report_thread_phase(_name, _start, Ticks::now());
+}
+
+#endif // SHARE_GC_Z_ZTRACER_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zUtils.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "utilities/debug.hpp"
+
+#include <stdlib.h>
+
+// Allocates 'size' bytes aligned to 'alignment' and zero-fills them.
+// Treats allocation failure as fatal, so the return is always valid.
+uintptr_t ZUtils::alloc_aligned(size_t alignment, size_t size) {
+  void* res = NULL;
+
+  if (posix_memalign(&res, alignment, size) != 0) {
+    fatal("posix_memalign() failed");
+  }
+
+  memset(res, 0, size);
+
+  return (uintptr_t)res;
+}
+
+// Fills [addr, addr + size) with dummy heap objects so heap walkers see a
+// parsable region. Gaps smaller than the minimum fill size are left as-is.
+void ZUtils::insert_filler_object(uintptr_t addr, size_t size) {
+  const size_t fill_size_in_words = bytes_to_words(size);
+  if (fill_size_in_words >= CollectedHeap::min_fill_size()) {
+    CollectedHeap::fill_with_objects((HeapWord*)ZAddress::good(addr), fill_size_in_words);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zUtils.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZUTILS_HPP
+#define SHARE_GC_Z_ZUTILS_HPP
+
+#include "memory/allocation.hpp"
+
+// Assorted static helpers for ZGC: aligned allocation, power-of-two
+// rounding, byte/word size conversion, object sizing/copying and heap
+// filler insertion. Implementations in zUtils.cpp / zUtils.inline.hpp.
+class ZUtils : public AllStatic {
+public:
+  // Allocation (zero-initialized; fatal on failure)
+  static uintptr_t alloc_aligned(size_t alignment, size_t size);
+
+  // Power of two
+  static size_t round_up_power_of_2(size_t value);
+  static size_t round_down_power_of_2(size_t value);
+
+  // Size conversion
+  static size_t bytes_to_words(size_t size_in_bytes);
+  static size_t words_to_bytes(size_t size_in_words);
+
+  // Object
+  static size_t object_size(uintptr_t addr);
+  static void object_copy(uintptr_t from, uintptr_t to, size_t size);
+
+  // Filler
+  static void insert_filler_object(uintptr_t addr, size_t size);
+};
+
+#endif // SHARE_GC_Z_ZUTILS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zUtils.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZUTILS_INLINE_HPP
+#define SHARE_GC_Z_ZUTILS_INLINE_HPP
+
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zUtils.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/align.hpp"
+#include "utilities/copy.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Smallest power of two >= value. value must be non-zero.
+inline size_t ZUtils::round_up_power_of_2(size_t value) {
+  assert(value != 0, "Invalid value");
+
+  if (is_power_of_2(value)) {
+    return value;
+  }
+
+  return (size_t)1 << (log2_intptr(value) + 1);
+}
+
+// Largest power of two <= value. value must be non-zero.
+inline size_t ZUtils::round_down_power_of_2(size_t value) {
+  assert(value != 0, "Invalid value");
+  return (size_t)1 << log2_intptr(value);
+}
+
+inline size_t ZUtils::bytes_to_words(size_t size_in_bytes) {
+  assert(is_aligned(size_in_bytes, BytesPerWord), "Size not word aligned");
+  return size_in_bytes >> LogBytesPerWord;
+}
+
+inline size_t ZUtils::words_to_bytes(size_t size_in_words) {
+  return size_in_words << LogBytesPerWord;
+}
+
+// Size in bytes of the object at the given (offset) address.
+inline size_t ZUtils::object_size(uintptr_t addr) {
+  return words_to_bytes(ZOop::to_oop(addr)->size());
+}
+
+// Word-aligned copy of 'size' bytes; source and destination must not overlap.
+inline void ZUtils::object_copy(uintptr_t from, uintptr_t to, size_t size) {
+  Copy::aligned_disjoint_words((HeapWord*)from, (HeapWord*)to, bytes_to_words(size));
+}
+
+#endif // SHARE_GC_Z_ZUTILS_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zValue.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZVALUE_HPP
+#define SHARE_GC_Z_ZVALUE_HPP
+
+#include "memory/allocation.hpp"
+#include "gc/z/zCPU.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zThread.hpp"
+#include "gc/z/zUtils.hpp"
+#include "utilities/align.hpp"
+
+// Bump-pointer storage for ZValue instances. Memory is carved out of blocks
+// of S::count() strides of 'offset' (4K) bytes each; allocation proceeds in
+// the first stride only, and a value's per-id copies live at fixed stride
+// multiples from the returned address (see ZValue::value_addr()).
+template <typename S>
+class ZValueStorage : public AllStatic {
+private:
+  static uintptr_t _top;
+  static uintptr_t _end;
+
+public:
+  // Stride between consecutive instances of the same value.
+  static const size_t offset = 4 * K;
+
+  static uintptr_t alloc(size_t size) {
+    guarantee(size <= offset, "Allocation too large");
+
+    // Allocate entry in existing memory block
+    const uintptr_t addr = align_up(_top, S::alignment());
+    _top = addr + size;
+
+    if (_top < _end) {
+      // Success
+      return addr;
+    }
+
+    // Allocate new block of memory
+    // NOTE: any unused tail of the previous block is abandoned (not freed).
+    const size_t block_alignment = offset;
+    const size_t block_size = offset * S::count();
+    _top = ZUtils::alloc_aligned(block_alignment, block_size);
+    _end = _top + offset;
+
+    // Retry allocation
+    return alloc(size);
+  }
+};
+
+// Initial state (_top == _end == 0) forces a block allocation on first use.
+template <typename T> uintptr_t ZValueStorage<T>::_end = 0;
+template <typename T> uintptr_t ZValueStorage<T>::_top = 0;
+
+// Storage policies: each supplies the alignment of an instance, the number
+// of instances per value (count) and the instance selected for the current
+// thread (id).
+
+// Single instance, cache-line aligned to avoid false sharing.
+class ZContendedStorage : public ZValueStorage<ZContendedStorage> {
+public:
+  static size_t alignment() {
+    return ZCacheLineSize;
+  }
+
+  static uint32_t count() {
+    return 1;
+  }
+
+  static uint32_t id() {
+    return 0;
+  }
+};
+
+// One instance per CPU, selected by the current CPU id.
+class ZPerCPUStorage : public ZValueStorage<ZPerCPUStorage> {
+public:
+  static size_t alignment() {
+    return sizeof(uintptr_t);
+  }
+
+  static uint32_t count() {
+    return ZCPU::count();
+  }
+
+  static uint32_t id() {
+    return ZCPU::id();
+  }
+};
+
+// One instance per NUMA node, selected by the current node id.
+class ZPerNUMAStorage : public ZValueStorage<ZPerNUMAStorage> {
+public:
+  static size_t alignment() {
+    return sizeof(uintptr_t);
+  }
+
+  static uint32_t count() {
+    return ZNUMA::count();
+  }
+
+  static uint32_t id() {
+    return ZNUMA::id();
+  }
+};
+
+// One instance per GC worker, selected by the worker id set by ZTask.
+class ZPerWorkerStorage : public ZValueStorage<ZPerWorkerStorage> {
+public:
+  static size_t alignment() {
+    return sizeof(uintptr_t);
+  }
+
+  static uint32_t count() {
+    return MAX2(ParallelGCThreads, ConcGCThreads);
+  }
+
+  static uint32_t id() {
+    return ZThread::worker_id();
+  }
+};
+
+template <typename S, typename T>
+class ZValueIterator;
+
+// A value of type T replicated S::count() times (per CPU/NUMA node/worker,
+// or once for contended). The instance for the current thread is chosen by
+// S::id(); instances are laid out S::offset bytes apart from _addr.
+template <typename S, typename T>
+class ZValue {
+private:
+  const uintptr_t _addr;
+
+  uintptr_t value_addr(uint32_t value_id) const {
+    return _addr + (value_id * S::offset);
+  }
+
+public:
+  // Default-constructs every instance in place.
+  ZValue() :
+      _addr(S::alloc(sizeof(T))) {
+    // Initialize all instances
+    ZValueIterator<S, T> iter(this);
+    for (T* addr; iter.next(&addr);) {
+      ::new (addr) T;
+    }
+  }
+
+  // Copy-constructs every instance from the given value.
+  ZValue(const T& value) :
+      _addr(S::alloc(sizeof(T))) {
+    // Initialize all instances
+    ZValueIterator<S, T> iter(this);
+    for (T* addr; iter.next(&addr);) {
+      ::new (addr) T(value);
+    }
+  }
+
+  // Not implemented
+  ZValue(const ZValue<S, T>& value);
+  ZValue<S, T>& operator=(const ZValue<S, T>& value);
+
+  // Accessors default to the instance for the calling context (S::id()).
+  const T* addr(uint32_t value_id = S::id()) const {
+    return reinterpret_cast<const T*>(value_addr(value_id));
+  }
+
+  T* addr(uint32_t value_id = S::id()) {
+    return reinterpret_cast<T*>(value_addr(value_id));
+  }
+
+  const T& get(uint32_t value_id = S::id()) const {
+    return *addr(value_id);
+  }
+
+  T& get(uint32_t value_id = S::id()) {
+    return *addr(value_id);
+  }
+
+  void set(const T& value, uint32_t value_id = S::id()) {
+    get(value_id) = value;
+  }
+
+  // Assigns the value to every instance.
+  void set_all(const T& value) {
+    ZValueIterator<S, T> iter(this);
+    for (T* addr; iter.next(&addr);) {
+      *addr = value;
+    }
+  }
+};
+
+// Convenience aliases binding ZValue to a specific storage policy.
+
+// Single cache-line-aligned instance (avoids false sharing).
+template <typename T>
+class ZContended : public ZValue<ZContendedStorage, T> {
+public:
+  ZContended() :
+      ZValue<ZContendedStorage, T>() {}
+
+  ZContended(const T& value) :
+      ZValue<ZContendedStorage, T>(value) {}
+
+  using ZValue<ZContendedStorage, T>::operator=;
+};
+
+// One instance per CPU.
+template <typename T>
+class ZPerCPU : public ZValue<ZPerCPUStorage, T> {
+public:
+  ZPerCPU() :
+      ZValue<ZPerCPUStorage, T>() {}
+
+  ZPerCPU(const T& value) :
+      ZValue<ZPerCPUStorage, T>(value) {}
+
+  using ZValue<ZPerCPUStorage, T>::operator=;
+};
+
+// One instance per NUMA node.
+template <typename T>
+class ZPerNUMA : public ZValue<ZPerNUMAStorage, T> {
+public:
+  ZPerNUMA() :
+      ZValue<ZPerNUMAStorage, T>() {}
+
+  ZPerNUMA(const T& value) :
+      ZValue<ZPerNUMAStorage, T>(value) {}
+
+  using ZValue<ZPerNUMAStorage, T>::operator=;
+};
+
+// One instance per GC worker thread.
+template <typename T>
+class ZPerWorker : public ZValue<ZPerWorkerStorage, T> {
+public:
+  ZPerWorker() :
+      ZValue<ZPerWorkerStorage, T>() {}
+
+  ZPerWorker(const T& value) :
+      ZValue<ZPerWorkerStorage, T>(value) {}
+
+  using ZValue<ZPerWorkerStorage, T>::operator=;
+};
+
+// Iterates over all S::count() instances of a ZValue (mutable access).
+template <typename S, typename T>
+class ZValueIterator {
+private:
+  ZValue<S, T>* const _value;
+  uint32_t            _value_id;
+
+public:
+  ZValueIterator(ZValue<S, T>* value) :
+      _value(value),
+      _value_id(0) {}
+
+  // Stores the next instance's address in *value; false when exhausted.
+  bool next(T** value) {
+    if (_value_id < S::count()) {
+      *value = _value->addr(_value_id++);
+      return true;
+    }
+    return false;
+  }
+};
+
+// Typed iterator shorthands for each storage policy.
+template <typename T>
+class ZPerCPUIterator : public ZValueIterator<ZPerCPUStorage, T> {
+public:
+  ZPerCPUIterator(ZPerCPU<T>* value) :
+      ZValueIterator<ZPerCPUStorage, T>(value) {}
+};
+
+template <typename T>
+class ZPerNUMAIterator : public ZValueIterator<ZPerNUMAStorage, T> {
+public:
+  ZPerNUMAIterator(ZPerNUMA<T>* value) :
+      ZValueIterator<ZPerNUMAStorage, T>(value) {}
+};
+
+template <typename T>
+class ZPerWorkerIterator : public ZValueIterator<ZPerWorkerStorage, T> {
+public:
+  ZPerWorkerIterator(ZPerWorker<T>* value) :
+      ZValueIterator<ZPerWorkerStorage, T>(value) {}
+};
+
+// Const counterpart of ZValueIterator: read-only traversal of all instances.
+template <typename S, typename T>
+class ZValueConstIterator {
+private:
+  const ZValue<S, T>* const _value;
+  uint32_t                  _value_id;
+
+public:
+  ZValueConstIterator(const ZValue<S, T>* value) :
+      _value(value),
+      _value_id(0) {}
+
+  // Stores the next instance's address in *value; false when exhausted.
+  bool next(const T** value) {
+    if (_value_id < S::count()) {
+      *value = _value->addr(_value_id++);
+      return true;
+    }
+    return false;
+  }
+};
+
+// Typed const-iterator shorthands for each storage policy.
+template <typename T>
+class ZPerCPUConstIterator : public ZValueConstIterator<ZPerCPUStorage, T> {
+public:
+  ZPerCPUConstIterator(const ZPerCPU<T>* value) :
+      ZValueConstIterator<ZPerCPUStorage, T>(value) {}
+};
+
+template <typename T>
+class ZPerNUMAConstIterator : public ZValueConstIterator<ZPerNUMAStorage, T> {
+public:
+  ZPerNUMAConstIterator(const ZPerNUMA<T>* value) :
+      ZValueConstIterator<ZPerNUMAStorage, T>(value) {}
+};
+
+template <typename T>
+class ZPerWorkerConstIterator : public ZValueConstIterator<ZPerWorkerStorage, T> {
+public:
+  ZPerWorkerConstIterator(const ZPerWorker<T>* value) :
+      ZValueConstIterator<ZPerWorkerStorage, T>(value) {}
+};
+
+#endif // SHARE_GC_Z_ZVALUE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zVirtualMemory.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zVirtualMemory.inline.hpp"
+#include "services/memTracker.hpp"
+
+// Reserves the complete ZGC address space up front and hands the whole
+// offset range to the internal manager. If the reservation fails,
+// _initialized stays false and the manager is unusable.
+ZVirtualMemoryManager::ZVirtualMemoryManager() :
+    _manager(),
+    _initialized(false) {
+  // Reserve address space
+  if (!reserve(ZAddressSpaceStart, ZAddressSpaceSize)) {
+    return;
+  }
+
+  // Make the complete address view free
+  _manager.free(0, ZAddressOffsetMax);
+
+  // Register address space with native memory tracker
+  nmt_reserve(ZAddressSpaceStart, ZAddressSpaceSize);
+
+  // Successfully initialized
+  _initialized = true;
+}
+
+// Records the reserved range with Native Memory Tracking (NMT), tagged
+// as Java heap memory.
+void ZVirtualMemoryManager::nmt_reserve(uintptr_t start, size_t size) {
+  MemTracker::record_virtual_memory_reserve((void*)start, size, CALLER_PC);
+  MemTracker::record_virtual_memory_type((void*)start, mtJavaHeap);
+}
+
+// Returns true if the address space reservation in the constructor succeeded.
+bool ZVirtualMemoryManager::is_initialized() const {
+  return _initialized;
+}
+
+// Allocates a virtual address range of the given size. Small pages (and
+// explicitly requested front allocations) come from the low end of the
+// address space, while medium/large pages come from the high end, keeping
+// differently-sized allocations apart.
+// NOTE(review): on failure this presumably yields a null range via a
+// UINTPTR_MAX start from the underlying manager — confirm against
+// ZMemoryManager::alloc_from_front/back.
+ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool alloc_from_front) {
+  uintptr_t start;
+
+  if (alloc_from_front || size <= ZPageSizeSmall) {
+    // Small page, or caller explicitly asked for a front allocation
+    start = _manager.alloc_from_front(size);
+  } else {
+    // Medium/Large page
+    start = _manager.alloc_from_back(size);
+  }
+
+  return ZVirtualMemory(start, size);
+}
+
+// Returns a virtual address range to the manager, making it available
+// for future allocations.
+void ZVirtualMemoryManager::free(ZVirtualMemory vmem) {
+  _manager.free(vmem.start(), vmem.size());
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zVirtualMemory.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZVIRTUALMEMORY_HPP
+#define SHARE_GC_Z_ZVIRTUALMEMORY_HPP
+
+#include "gc/z/zMemory.hpp"
+#include "memory/allocation.hpp"
+
+// A range of virtual address space, represented as the half-open
+// interval [_start, _end). A default-constructed instance is the
+// null range (is_null() returns true).
+class ZVirtualMemory {
+  friend class VMStructs;
+
+private:
+  uintptr_t _start;  // Inclusive start of the range
+  uintptr_t _end;    // Exclusive end of the range
+
+public:
+  ZVirtualMemory();
+  ZVirtualMemory(uintptr_t start, size_t size);
+
+  bool is_null() const;
+  uintptr_t start() const;
+  uintptr_t end() const;
+  size_t size() const;
+  ZVirtualMemory split(size_t size);
+  void clear();
+};
+
+// Manages reservation and allocation of the virtual address space used
+// for the Java heap.
+class ZVirtualMemoryManager {
+private:
+  ZMemoryManager _manager;      // Tracks free address ranges within the reservation
+  bool           _initialized;  // True if the address space reservation succeeded
+
+  bool reserve(uintptr_t start, size_t size);
+  void nmt_reserve(uintptr_t start, size_t size);
+
+public:
+  ZVirtualMemoryManager();
+
+  bool is_initialized() const;
+
+  ZVirtualMemory alloc(size_t size, bool alloc_from_front = false);
+  void free(ZVirtualMemory vmem);
+};
+
+#endif // SHARE_GC_Z_ZVIRTUALMEMORY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZVIRTUALMEMORY_INLINE_HPP
+#define SHARE_GC_Z_ZVIRTUALMEMORY_INLINE_HPP
+
+#include "gc/z/zMemory.inline.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+
+// Constructs the null range; UINTPTR_MAX marks an invalid start/end.
+inline ZVirtualMemory::ZVirtualMemory() :
+    _start(UINTPTR_MAX),
+    _end(UINTPTR_MAX) {}
+
+// Constructs the half-open range [start, start + size).
+inline ZVirtualMemory::ZVirtualMemory(uintptr_t start, size_t size) :
+    _start(start),
+    _end(start + size) {}
+
+inline bool ZVirtualMemory::is_null() const {
+  return _start == UINTPTR_MAX;
+}
+
+inline uintptr_t ZVirtualMemory::start() const {
+  return _start;
+}
+
+inline uintptr_t ZVirtualMemory::end() const {
+  return _end;
+}
+
+inline size_t ZVirtualMemory::size() const {
+  return _end - _start;
+}
+
+// Splits off and returns the first split_size bytes of this range,
+// shrinking this range to the remainder. split_size must not exceed
+// the current size.
+inline ZVirtualMemory ZVirtualMemory::split(size_t split_size) {
+  assert(split_size <= size(), "precondition");
+  ZVirtualMemory mem(_start, split_size);
+  _start += split_size;
+  return mem;
+}
+
+// Resets this range to the null range.
+inline void ZVirtualMemory::clear() {
+  _start = UINTPTR_MAX;
+  _end = UINTPTR_MAX;
+}
+
+#endif // SHARE_GC_Z_ZVIRTUALMEMORY_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zWeakRootsProcessor.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zTask.hpp"
+#include "gc/z/zThread.hpp"
+#include "runtime/jniHandles.hpp"
+
+// Creates a weak roots processor that executes its tasks on the given workers.
+ZWeakRootsProcessor::ZWeakRootsProcessor(ZWorkers* workers) :
+    _workers(workers) {}
+
+// Task that processes non-concurrent weak roots: each root is tested
+// with a phantom is-alive closure and kept alive (healed) if live.
+class ZProcessWeakRootsTask : public ZTask {
+private:
+  ZWeakRootsIterator _weak_roots;
+
+public:
+  ZProcessWeakRootsTask() :
+      ZTask("ZProcessWeakRootsTask"),
+      _weak_roots() {}
+
+  virtual void work() {
+    ZPhantomIsAliveObjectClosure is_alive;
+    ZPhantomKeepAliveOopClosure keep_alive;
+    _weak_roots.weak_oops_do(&is_alive, &keep_alive);
+  }
+};
+
+// Processes weak roots using the parallel worker threads (run_parallel
+// executes at a safepoint).
+void ZWeakRootsProcessor::process_weak_roots() {
+  ZProcessWeakRootsTask task;
+  _workers->run_parallel(&task);
+}
+
+// Task that cleans weak roots which can be processed concurrently with
+// the Java application, using a phantom clean closure.
+class ZProcessConcurrentWeakRootsTask : public ZTask {
+private:
+  ZConcurrentWeakRootsIterator _concurrent_weak_roots;
+
+public:
+  ZProcessConcurrentWeakRootsTask() :
+      // Task name is surfaced in gc+task logging; fixed misspelling
+      // ("Conccurent" -> "Concurrent") so it matches the class name.
+      ZTask("ZProcessConcurrentWeakRootsTask"),
+      _concurrent_weak_roots() {}
+
+  virtual void work() {
+    ZPhantomCleanOopClosure cl;
+    _concurrent_weak_roots.oops_do(&cl);
+  }
+};
+
+// Processes concurrent weak roots using the concurrent worker threads.
+void ZWeakRootsProcessor::process_concurrent_weak_roots() {
+  ZProcessConcurrentWeakRootsTask task;
+  _workers->run_concurrent(&task);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zWeakRootsProcessor.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZWEAKROOTSPROCESSOR_HPP
+#define SHARE_GC_Z_ZWEAKROOTSPROCESSOR_HPP
+
+#include "gc/z/zValue.hpp"
+
+class ZWorkers;
+
+// Drives processing of weak roots, split into a safepoint (parallel)
+// phase and a concurrent phase, both executed on the given ZWorkers.
+class ZWeakRootsProcessor {
+private:
+  ZWorkers* const _workers;  // Worker gang used to run the root-processing tasks
+
+public:
+  ZWeakRootsProcessor(ZWorkers* workers);
+
+  void process_weak_roots();
+  void process_concurrent_weak_roots();
+};
+
+#endif // SHARE_GC_Z_ZWEAKROOTSPROCESSOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zWorkers.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zTask.hpp"
+#include "gc/z/zWorkers.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+
+// Returns share_in_percent percent of the number of processors active
+// at VM startup, rounded up.
+uint ZWorkers::calculate_ncpus(double share_in_percent) {
+  return ceil(os::initial_active_processor_count() * share_in_percent / 100.0);
+}
+
+uint ZWorkers::calculate_nparallel() {
+  // Use 60% of the CPUs, rounded up. We would like to use as many threads as
+  // possible to increase parallelism. However, using a thread count that is
+  // close to the number of processors tends to lead to over-provisioning and
+  // scheduling latency issues. Using 60% of the active processors appears to
+  // be a fairly good balance.
+  return calculate_ncpus(60.0);
+}
+
+uint ZWorkers::calculate_nconcurrent() {
+  // Use 12.5% of the CPUs, rounded up. The number of concurrent threads we
+  // would like to use heavily depends on the type of workload we are running.
+  // Using too many threads will have a negative impact on the application
+  // throughput, while using too few threads will prolong the GC-cycle and
+  // we then risk being out-run by the application. Using 12.5% of the active
+  // processors appears to be a fairly good balance.
+  return calculate_ncpus(12.5);
+}
+
+// Dummy task used at initialization to warm up the worker threads.
+// Every worker blocks in work() until all _nworkers workers have
+// entered, which guarantees each worker thread has actually been
+// scheduled and executed once before the first real GC pause.
+class ZWorkersWarmupTask : public ZTask {
+private:
+  const uint _nworkers;  // Number of workers expected to rendezvous
+  uint       _started;   // Number of workers that have entered work(), guarded by _monitor
+  Monitor    _monitor;   // Rendezvous lock; never safepoint-checks
+
+public:
+  ZWorkersWarmupTask(uint nworkers) :
+      ZTask("ZWorkersWarmupTask"),
+      _nworkers(nworkers),
+      _started(0),
+      _monitor(Monitor::leaf, "ZWorkersWarmup", false, Monitor::_safepoint_check_never) {}
+
+  virtual void work() {
+    // Wait for all threads to start
+    MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
+    if (++_started == _nworkers) {
+      // All threads started
+      ml.notify_all();
+    } else {
+      while (_started != _nworkers) {
+        ml.wait(Monitor::_no_safepoint_check_flag);
+      }
+    }
+  }
+};
+
+// Creates, initializes and warms up the worker thread gang. The gang is
+// sized to nworkers() (the max of the parallel and concurrent thread
+// counts); failure to create all threads is fatal.
+ZWorkers::ZWorkers() :
+    _boost(false),
+    _workers("ZWorker",
+             nworkers(),
+             true /* are_GC_task_threads */,
+             true /* are_ConcurrentGC_threads */) {
+
+  log_info(gc, init)("Workers: %u parallel, %u concurrent", nparallel(), nconcurrent());
+
+  // Initialize worker threads
+  _workers.initialize_workers();
+  _workers.update_active_workers(nworkers());
+  if (_workers.active_workers() != nworkers()) {
+    vm_exit_during_initialization("Failed to create ZWorkers");
+  }
+
+  // Warm up worker threads by having them execute a dummy task.
+  // This helps reduce latency in early GC pauses, which otherwise
+  // would have to take on any warmup costs.
+  ZWorkersWarmupTask task(nworkers());
+  run(&task, nworkers());
+}
+
+// Enables or disables boost mode. When boosted, both parallel and
+// concurrent phases run with all nworkers() threads instead of their
+// normal counts (see nparallel()/nconcurrent()).
+void ZWorkers::set_boost(bool boost) {
+  if (boost) {
+    log_debug(gc)("Boosting workers");
+  }
+
+  _boost = boost;
+}
+
+// Runs the given task on nworkers active worker threads.
+void ZWorkers::run(ZTask* task, uint nworkers) {
+  log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), nworkers);
+  _workers.update_active_workers(nworkers);
+  _workers.run_task(task->gang_task());
+}
+
+// Runs the given task with the parallel worker count; must be called
+// at a safepoint.
+void ZWorkers::run_parallel(ZTask* task) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
+  run(task, nparallel());
+}
+
+// Runs the given task with the concurrent worker count.
+void ZWorkers::run_concurrent(ZTask* task) {
+  run(task, nconcurrent());
+}
+
+// Applies the closure to each worker thread.
+void ZWorkers::threads_do(ThreadClosure* tc) const {
+  _workers.threads_do(tc);
+}
+
+// Prints the worker threads to the given stream.
+void ZWorkers::print_threads_on(outputStream* st) const {
+  _workers.print_worker_threads_on(st);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zWorkers.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZWORKERS_HPP
+#define SHARE_GC_Z_ZWORKERS_HPP
+
+#include "gc/shared/workgroup.hpp"
+#include "memory/allocation.hpp"
+
+class ZTask;
+
+// Owns the GC worker thread gang and decides how many threads are used
+// for parallel (safepoint) and concurrent phases, with an optional
+// "boost" mode that uses all workers for both.
+class ZWorkers {
+private:
+  bool     _boost;    // When true, use all workers for both phase types
+  WorkGang _workers;  // The underlying worker thread gang
+
+  static uint calculate_ncpus(double share_in_percent);
+
+  void run(ZTask* task, uint nworkers);
+
+public:
+  static uint calculate_nparallel();
+  static uint calculate_nconcurrent();
+
+  ZWorkers();
+
+  uint nparallel() const;
+  uint nparallel_no_boost() const;
+  uint nconcurrent() const;
+  uint nconcurrent_no_boost() const;
+  uint nworkers() const;
+
+  void set_boost(bool boost);
+
+  void run_parallel(ZTask* task);
+  void run_concurrent(ZTask* task);
+
+  void threads_do(ThreadClosure* tc) const;
+  void print_threads_on(outputStream* st) const;
+};
+
+#endif // SHARE_GC_Z_ZWORKERS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zWorkers.inline.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZWORKERS_INLINE_HPP
+#define SHARE_GC_Z_ZWORKERS_INLINE_HPP
+
+#include "gc/z/zWorkers.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Number of threads for parallel (safepoint) work; all workers when boosted.
+inline uint ZWorkers::nparallel() const {
+  return _boost ? nworkers() : nparallel_no_boost();
+}
+
+inline uint ZWorkers::nparallel_no_boost() const {
+  return ParallelGCThreads;
+}
+
+// Number of threads for concurrent work; all workers when boosted.
+inline uint ZWorkers::nconcurrent() const {
+  return _boost ? nworkers() : nconcurrent_no_boost();
+}
+
+inline uint ZWorkers::nconcurrent_no_boost() const {
+  return ConcGCThreads;
+}
+
+// Total number of worker threads in the gang.
+inline uint ZWorkers::nworkers() const {
+  return MAX2(ParallelGCThreads, ConcGCThreads);
+}
+
+#endif // SHARE_GC_Z_ZWORKERS_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/z_globals.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_Z_GLOBALS_HPP
+#define SHARE_GC_Z_Z_GLOBALS_HPP
+
+// Declares all ZGC-specific command-line flags. The macro receives the
+// standard set of flag-declaration macros (develop, product, diagnostic,
+// etc.) from the shared flags machinery and expands to one entry per flag.
+#define GC_Z_FLAGS(develop,                                                 \
+                   develop_pd,                                              \
+                   product,                                                 \
+                   product_pd,                                              \
+                   diagnostic,                                              \
+                   diagnostic_pd,                                           \
+                   experimental,                                            \
+                   notproduct,                                              \
+                   manageable,                                              \
+                   product_rw,                                              \
+                   lp64_product,                                            \
+                   range,                                                   \
+                   constraint,                                              \
+                   writeable)                                               \
+                                                                            \
+  product(ccstr, ZPath, NULL,                                               \
+          "Filesystem path for Java heap backing storage "                  \
+          "(must be a tmpfs or a hugetlbfs filesystem)")                    \
+                                                                            \
+  product(double, ZAllocationSpikeTolerance, 2.0,                           \
+          "Allocation spike tolerance factor")                              \
+                                                                            \
+  product(double, ZFragmentationLimit, 25.0,                                \
+          "Maximum allowed heap fragmentation")                             \
+                                                                            \
+  product(bool, ZStallOnOutOfMemory, true,                                  \
+          "Allow Java threads to stall and wait for GC to complete "        \
+          "instead of immediately throwing an OutOfMemoryError")            \
+                                                                            \
+  product(size_t, ZMarkStacksMax, NOT_LP64(512*M) LP64_ONLY(8*G),           \
+          "Maximum number of bytes allocated for marking stacks")           \
+          range(32*M, NOT_LP64(512*M) LP64_ONLY(1024*G))                    \
+                                                                            \
+  product(uint, ZCollectionInterval, 0,                                     \
+          "Force GC at a fixed time interval (in seconds)")                 \
+                                                                            \
+  product(uint, ZStatisticsInterval, 10,                                    \
+          "Time between statistics print outs (in seconds)")                \
+          range(1, (uint)-1)                                                \
+                                                                            \
+  diagnostic(bool, ZStatisticsForceTrace, false,                            \
+          "Force tracing of ZStats")                                        \
+                                                                            \
+  diagnostic(bool, ZProactive, true,                                        \
+          "Enable proactive GC cycles")                                     \
+                                                                            \
+  diagnostic(bool, ZUnmapBadViews, false,                                   \
+          "Unmap bad (inactive) heap views")                                \
+                                                                            \
+  diagnostic(bool, ZVerifyMarking, false,                                   \
+          "Verify marking stacks")                                          \
+                                                                            \
+  diagnostic(bool, ZVerifyForwarding, false,                                \
+          "Verify forwarding tables")                                       \
+                                                                            \
+  diagnostic(bool, ZSymbolTableUnloading, false,                            \
+          "Unload unused VM symbols")                                       \
+                                                                            \
+  diagnostic(bool, ZWeakRoots, true,                                        \
+          "Treat JNI WeakGlobalRefs and StringTable as weak roots")         \
+                                                                            \
+  diagnostic(bool, ZConcurrentStringTable, true,                            \
+          "Clean StringTable concurrently")                                 \
+                                                                            \
+  diagnostic(bool, ZConcurrentVMWeakHandles, true,                          \
+          "Clean VM WeakHandles concurrently")                              \
+                                                                            \
+  diagnostic(bool, ZConcurrentJNIWeakGlobalHandles, true,                   \
+          "Clean JNI WeakGlobalRefs concurrently")                          \
+                                                                            \
+  diagnostic(bool, ZOptimizeLoadBarriers, true,                             \
+          "Apply load barrier optimizations")                               \
+                                                                            \
+  develop(bool, ZVerifyLoadBarriers, false,                                 \
+          "Verify that reference loads are followed by barriers")
+
+#endif // SHARE_GC_Z_Z_GLOBALS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/z_specialized_oop_closures.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_Z_SPECIALIZED_OOP_CLOSURES_HPP
+#define SHARE_GC_Z_Z_SPECIALIZED_OOP_CLOSURES_HPP
+
+// ZGC oop closures for which specialized (non-virtual, _nv) versions of
+// oop_oop_iterate are generated by the shared closure machinery.
+class ZLoadBarrierOopClosure;
+template <bool> class ZMarkBarrierOopClosure;
+
+#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_Z(f) \
+      f(ZLoadBarrierOopClosure,_nv)       \
+      f(ZMarkBarrierOopClosure<true>,_nv) \
+      f(ZMarkBarrierOopClosure<false>,_nv)
+
+#endif // SHARE_GC_Z_Z_SPECIALIZED_OOP_CLOSURES_HPP
--- a/src/hotspot/share/jfr/metadata/metadata.xml	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/jfr/metadata/metadata.xml	Tue Jun 12 17:40:28 2018 +0200
@@ -885,6 +885,39 @@
     <Field type="uint" name="newRatio" label="New Ratio" description="The size of the young generation relative to the tenured generation" />
   </Event>
 
+  <Event name="ZPageAllocation" category="Java Application" label="ZPage Allocation" description="Allocation of a ZPage" thread="true" stackTrace="false">
+     <Field type="ulong" contentType="bytes" name="pageSize" label="Page Size" />
+     <Field type="ulong" contentType="bytes" name="usedAfter" label="Used After" />
+     <Field type="ulong" contentType="bytes" name="freeAfter" label="Free After" />
+     <Field type="ulong" contentType="bytes" name="inCacheAfter" label="In Cache After" />
+     <Field type="boolean" name="nonBlocking" label="Non-blocking" />
+     <Field type="boolean" name="noReserve" label="No Reserve" />
+  </Event>
+
+  <Event name="ZThreadPhase" category="Java Virtual Machine, GC, Detailed" label="ZGC Thread Phase" thread="true">
+    <Field type="uint" name="gcId" label="GC Identifier" relation="GcId"/>
+    <Field type="string" name="name" label="Name" />
+  </Event>
+
+  <Event name="ZStatisticsCounter" category="Java Virtual Machine, GC, Detailed" label="Z Statistics Counter" thread="true">
+    <Field type="ZStatisticsCounterType" name="id" label="Id" />
+    <Field type="ulong" name="increment" label="Increment" />
+    <Field type="ulong" name="value" label="Value" />
+  </Event>
+
+  <Event name="ZStatisticsSampler" category="Java Virtual Machine, GC, Detailed" label="Z Statistics Sampler" thread="true">
+    <Field type="ZStatisticsSamplerType" name="id" label="Id" />
+    <Field type="ulong" name="value" label="Value" />
+  </Event>
+
+  <Type name="ZStatisticsCounterType" label="Z Statistics Counter">
+    <Field type="string" name="counter" label="Counter" />
+  </Type>
+
+  <Type name="ZStatisticsSamplerType" label="Z Statistics Sampler">
+    <Field type="string" name="sampler" label="Sampler" />
+  </Type>
+
   <Type name="Thread" label="Thread">
     <Field type="string" name="osName" label="OS Thread Name" />
     <Field type="long" name="osThreadId" label="OS Thread Id" />
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -59,6 +59,9 @@
 #include "gc/g1/g1HeapRegionTraceType.hpp"
 #include "gc/g1/g1YCTypes.hpp"
 #endif
+#if INCLUDE_ZGC
+#include "gc/z/zStat.hpp"
+#endif
 
 // implementation for the static registration function exposed in the api
 bool JfrSerializer::register_serializer(JfrTypeId id, bool require_safepoint, bool permit_cache, JfrSerializer* cs) {
@@ -346,3 +349,27 @@
   writer.write(thread_group_id);
   JfrThreadGroup::serialize(&writer, thread_group_id);
 }
+
+// Serializes the set of ZGC statistics counters (id -> name pairs) into
+// the JFR checkpoint. Writes a count of zero when ZGC is excluded from
+// the build, so the constant pool stays well-formed.
+void ZStatisticsCounterTypeConstant::serialize(JfrCheckpointWriter& writer) {
+#if INCLUDE_ZGC
+  writer.write_count(ZStatCounter::count());
+  for (ZStatCounter* counter = ZStatCounter::first(); counter != NULL; counter = counter->next()) {
+    writer.write_key(counter->id());
+    writer.write(counter->name());
+  }
+#else
+  writer.write_count(0);
+#endif
+}
+
+// Serializes the set of ZGC statistics samplers (id -> name pairs) into
+// the JFR checkpoint. Writes a count of zero when ZGC is excluded from
+// the build.
+void ZStatisticsSamplerTypeConstant::serialize(JfrCheckpointWriter& writer) {
+#if INCLUDE_ZGC
+  writer.write_count(ZStatSampler::count());
+  for (ZStatSampler* sampler = ZStatSampler::first(); sampler != NULL; sampler = sampler->next()) {
+    writer.write_key(sampler->id());
+    writer.write(sampler->name());
+  }
+#else
+  writer.write_count(0);
+#endif
+}
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -135,4 +135,14 @@
   void serialize(JfrCheckpointWriter& writer);
 };
 
+class ZStatisticsCounterTypeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
+class ZStatisticsSamplerTypeConstant : public JfrSerializer {
+ public:
+  void serialize(JfrCheckpointWriter& writer);
+};
+
 #endif // SHARE_VM_JFR_CHECKPOINT_CONSTANT_JFRCONSTANT_HPP
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -195,7 +195,7 @@
 
 bool JfrTypeManager::initialize() {
   // register non-safepointing type serialization
-  for (size_t i = 0; i < 16; ++i) {
+  for (size_t i = 0; i < 18; ++i) {
     switch (i) {
     case 0: register_serializer(TYPE_FLAGVALUEORIGIN, false, true, new FlagValueOriginConstant()); break;
     case 1: register_serializer(TYPE_INFLATECAUSE, false, true, new MonitorInflateCauseConstant()); break;
@@ -213,6 +213,8 @@
     case 13: register_serializer(TYPE_CODEBLOBTYPE, false, true, new CodeBlobTypeConstant()); break;
     case 14: register_serializer(TYPE_VMOPERATIONTYPE, false, true, new VMOperationTypeConstant()); break;
     case 15: register_serializer(TYPE_THREADSTATE, false, true, new ThreadStateConstant()); break;
+    case 16: register_serializer(TYPE_ZSTATISTICSCOUNTERTYPE, false, true, new ZStatisticsCounterTypeConstant()); break;
+    case 17: register_serializer(TYPE_ZSTATISTICSSAMPLERTYPE, false, true, new ZStatisticsSamplerTypeConstant()); break;
     default:
       guarantee(false, "invariant");
     }
--- a/src/hotspot/share/logging/logPrefix.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/logging/logPrefix.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -62,9 +62,11 @@
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, humongous)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ihop)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, liveness)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, load)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, marking)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, metaspace)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, mmu)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, nmethod)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, ref)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, start)) \
@@ -75,6 +77,7 @@
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, remset, tracking)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref, start)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, reloc)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, start)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, stringtable)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, sweep)) \
--- a/src/hotspot/share/logging/logTag.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/logging/logTag.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -62,6 +62,7 @@
   LOG_TAG(datacreation) \
   LOG_TAG(decoder) \
   LOG_TAG(defaultmethods) \
+  LOG_TAG(director) \
   LOG_TAG(dump) \
   LOG_TAG(ergo) \
   LOG_TAG(exceptions) \
@@ -105,6 +106,7 @@
   LOG_TAG(obsolete) \
   LOG_TAG(oom) \
   LOG_TAG(oopmap) \
+  LOG_TAG(oops) \
   LOG_TAG(oopstorage) \
   LOG_TAG(os) \
   LOG_TAG(pagesize) \
@@ -121,6 +123,7 @@
   LOG_TAG(redefine) \
   LOG_TAG(refine) \
   LOG_TAG(region) \
+  LOG_TAG(reloc) \
   LOG_TAG(remset) \
   LOG_TAG(purge) \
   LOG_TAG(resolve) \
--- a/src/hotspot/share/memory/metaspace.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/memory/metaspace.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -229,6 +229,7 @@
 // Manages the metaspace portion belonging to a class loader
 class ClassLoaderMetaspace : public CHeapObj<mtClass> {
   friend class CollectedHeap; // For expand_and_allocate()
+  friend class ZCollectedHeap; // For expand_and_allocate()
   friend class Metaspace;
   friend class MetaspaceUtils;
   friend class metaspace::PrintCLDMetaspaceInfoClosure;
--- a/src/hotspot/share/opto/classes.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/classes.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -47,11 +47,17 @@
 #include "opto/rootnode.hpp"
 #include "opto/subnode.hpp"
 #include "opto/vectornode.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ZGC
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#endif
 
 // ----------------------------------------------------------------------------
 // Build a table of virtual functions to map from Nodes to dense integer
 // opcode names.
 int Node::Opcode() const { return Op_Node; }
 #define macro(x) int x##Node::Opcode() const { return Op_##x; }
+#define optionalmacro(x)
 #include "classes.hpp"
 #undef macro
+#undef optionalmacro
--- a/src/hotspot/share/opto/classes.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/classes.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -22,6 +22,8 @@
  *
  */
 
+#include "utilities/macros.hpp"
+
 // The giant table of Node classes.
 // One entry per class, sorted by class name.
 
@@ -186,6 +188,14 @@
 macro(LoadN)
 macro(LoadRange)
 macro(LoadS)
+#if INCLUDE_ZGC
+#define zgcmacro(x) macro(x)
+#else
+#define zgcmacro(x) optionalmacro(x)
+#endif
+zgcmacro(LoadBarrier)
+zgcmacro(LoadBarrierSlowReg)
+zgcmacro(LoadBarrierWeakSlowReg)
 macro(Lock)
 macro(Loop)
 macro(LoopLimit)
--- a/src/hotspot/share/opto/compile.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/compile.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -75,9 +75,13 @@
 #include "runtime/timer.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
 #endif // INCLUDE_G1GC
+#if INCLUDE_ZGC
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#endif
 
 
 // -------------------- Compile::mach_constant_base_node -----------------------
@@ -2163,6 +2167,11 @@
 
 #endif
 
+#ifdef ASSERT
+  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+  bs->verify_gc_barriers(true);
+#endif
+
   ResourceMark rm;
   int          loop_opts_cnt;
 
@@ -2335,6 +2344,12 @@
     }
   }
 
+#if INCLUDE_ZGC
+  if (UseZGC) {
+    ZBarrierSetC2::find_dominating_barriers(igvn);
+  }
+#endif
+
   if (failing())  return;
 
   // Ensure that major progress is now clear
@@ -2361,6 +2376,7 @@
   {
     TracePhase tp("macroExpand", &timers[_t_macroExpand]);
     PhaseMacroExpand  mex(igvn);
+    print_method(PHASE_BEFORE_MACRO_EXPANSION, 2);
     if (mex.expand_macro_nodes()) {
       assert(failing(), "must bail out w/ explicit message");
       return;
@@ -2890,6 +2906,10 @@
   case Op_LoadL_unaligned:
   case Op_LoadPLocked:
   case Op_LoadP:
+#if INCLUDE_ZGC
+  case Op_LoadBarrierSlowReg:
+  case Op_LoadBarrierWeakSlowReg:
+#endif
   case Op_LoadN:
   case Op_LoadRange:
   case Op_LoadS: {
--- a/src/hotspot/share/opto/compile.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/compile.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -54,6 +54,7 @@
 class ConnectionGraph;
 class InlineTree;
 class Int_Array;
+class LoadBarrierNode;
 class Matcher;
 class MachConstantNode;
 class MachConstantBaseNode;
@@ -359,9 +360,6 @@
   const char*           _stub_name;             // Name of stub or adapter being compiled, or NULL
   address               _stub_entry_point;      // Compile code entry for generated stub, or NULL
 
-  // For GC
-  void*                 _barrier_set_state;
-
   // Control of this compilation.
   int                   _num_loop_opts;         // Number of iterations for doing loop optimiztions
   int                   _max_inline_size;       // Max inline size for this compilation
@@ -410,6 +408,7 @@
 
   // Compilation environment.
   Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
+  void*                 _barrier_set_state;     // Potential GC barrier state for Compile
   ciEnv*                _env;                   // CI interface
   DirectiveSet*         _directive;             // Compiler directive
   CompileLog*           _log;                   // from CompilerThread
--- a/src/hotspot/share/opto/escape.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/escape.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -38,9 +38,13 @@
 #include "opto/phaseX.hpp"
 #include "opto/movenode.hpp"
 #include "opto/rootnode.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_G1GC
 #include "gc/g1/g1ThreadLocalData.hpp"
 #endif // INCLUDE_G1GC
+#if INCLUDE_ZGC
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#endif
 
 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
@@ -449,6 +453,10 @@
       break;
     }
     case Op_LoadP:
+#if INCLUDE_ZGC
+    case Op_LoadBarrierSlowReg:
+    case Op_LoadBarrierWeakSlowReg:
+#endif
     case Op_LoadN:
     case Op_LoadPLocked: {
       add_objload_to_connection_graph(n, delayed_worklist);
@@ -483,6 +491,13 @@
         add_local_var_and_edge(n, PointsToNode::NoEscape,
                                n->in(0), delayed_worklist);
       }
+#if INCLUDE_ZGC
+      else if (UseZGC) {
+        if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
+          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
+        }
+      }
+#endif
       break;
     }
     case Op_Rethrow: // Exception object escapes
@@ -651,6 +666,10 @@
       break;
     }
     case Op_LoadP:
+#if INCLUDE_ZGC
+    case Op_LoadBarrierSlowReg:
+    case Op_LoadBarrierWeakSlowReg:
+#endif
     case Op_LoadN:
     case Op_LoadPLocked: {
       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
@@ -690,6 +709,14 @@
         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
         break;
       }
+#if INCLUDE_ZGC
+      else if (UseZGC) {
+        if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
+          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
+          break;
+        }
+      }
+#endif
       ELSE_FAIL("Op_Proj");
     }
     case Op_Rethrow: // Exception object escapes
@@ -3163,7 +3190,8 @@
               op == Op_CastP2X || op == Op_StoreCM ||
               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
-              op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
+              op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
+              BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
           n->dump();
           use->dump();
           assert(false, "EA: missing allocation reference path");
--- a/src/hotspot/share/opto/idealKit.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/idealKit.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -488,13 +488,13 @@
 
 //----------------------------- make_call  ----------------------------
 // Trivial runtime call
-void IdealKit::make_leaf_call(const TypeFunc *slow_call_type,
-                              address slow_call,
-                              const char *leaf_name,
-                              Node* parm0,
-                              Node* parm1,
-                              Node* parm2,
-                              Node* parm3) {
+Node* IdealKit::make_leaf_call(const TypeFunc *slow_call_type,
+                               address slow_call,
+                               const char *leaf_name,
+                               Node* parm0,
+                               Node* parm1,
+                               Node* parm2,
+                               Node* parm3) {
 
   // We only handle taking in RawMem and modifying RawMem
   const TypePtr* adr_type = TypeRawPtr::BOTTOM;
@@ -532,6 +532,12 @@
 
   assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type),
          "call node must be constructed correctly");
+  Node* res = NULL;
+  if (slow_call_type->range()->cnt() > TypeFunc::Parms) {
+    assert(slow_call_type->range()->cnt() == TypeFunc::Parms+1, "only one return value");
+    res = transform(new ProjNode(call, TypeFunc::Parms));
+  }
+  return res;
 }
 
 void IdealKit::make_leaf_call_no_fp(const TypeFunc *slow_call_type,
--- a/src/hotspot/share/opto/idealKit.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/idealKit.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -242,13 +242,13 @@
                 int adr_idx);
 
   // Trivial call
-  void make_leaf_call(const TypeFunc *slow_call_type,
-                      address slow_call,
-                      const char *leaf_name,
-                      Node* parm0,
-                      Node* parm1 = NULL,
-                      Node* parm2 = NULL,
-                      Node* parm3 = NULL);
+  Node* make_leaf_call(const TypeFunc *slow_call_type,
+                       address slow_call,
+                       const char *leaf_name,
+                       Node* parm0,
+                       Node* parm1 = NULL,
+                       Node* parm2 = NULL,
+                       Node* parm3 = NULL);
 
   void make_leaf_call_no_fp(const TypeFunc *slow_call_type,
                             address slow_call,
--- a/src/hotspot/share/opto/lcm.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/lcm.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -169,6 +169,8 @@
     case Op_LoadI:
     case Op_LoadL:
     case Op_LoadP:
+    case Op_LoadBarrierSlowReg:
+    case Op_LoadBarrierWeakSlowReg:
     case Op_LoadN:
     case Op_LoadS:
     case Op_LoadKlass:
--- a/src/hotspot/share/opto/loopnode.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/loopnode.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -25,6 +25,8 @@
 #include "precompiled.hpp"
 #include "ci/ciMethodData.hpp"
 #include "compiler/compileLog.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/c2/barrierSetC2.hpp"
 #include "libadt/vectset.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
@@ -213,7 +215,8 @@
         if (nb_ctl_proj > 1) {
           break;
         }
-        assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call(), "unexpected node");
+        assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call() ||
+               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(parent_ctl), "unexpected node");
         assert(idom(ctl) == parent_ctl, "strange");
         next = idom(parent_ctl);
       }
@@ -2635,7 +2638,7 @@
 //----------------------------build_and_optimize-------------------------------
 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
-void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts) {
+void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts, bool last_round) {
   ResourceMark rm;
 
   int old_progress = C->major_progress();
@@ -2877,8 +2880,11 @@
   // that require basic-block info (like cloning through Phi's)
   if( SplitIfBlocks && do_split_ifs ) {
     visited.Clear();
-    split_if_with_blocks( visited, nstack );
+    split_if_with_blocks( visited, nstack, last_round );
     NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); );
+    if (last_round) {
+      C->set_major_progress();
+    }
   }
 
   if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) {
@@ -4131,6 +4137,8 @@
     case Op_LoadL:
     case Op_LoadS:
     case Op_LoadP:
+    case Op_LoadBarrierSlowReg:
+    case Op_LoadBarrierWeakSlowReg:
     case Op_LoadN:
     case Op_LoadRange:
     case Op_LoadD_unaligned:
--- a/src/hotspot/share/opto/loopnode.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/loopnode.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -912,7 +912,7 @@
   }
 
   // build the loop tree and perform any requested optimizations
-  void build_and_optimize(bool do_split_if, bool skip_loop_opts);
+  void build_and_optimize(bool do_split_if, bool skip_loop_opts, bool last_round = false);
 
   // Dominators for the sea of nodes
   void Dominators();
@@ -922,13 +922,13 @@
   Node *dom_lca_internal( Node *n1, Node *n2 ) const;
 
   // Compute the Ideal Node to Loop mapping
-  PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool skip_loop_opts = false) :
+  PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool skip_loop_opts = false, bool last_round = false) :
     PhaseTransform(Ideal_Loop),
     _igvn(igvn),
     _dom_lca_tags(arena()), // Thread::resource_area
     _verify_me(NULL),
     _verify_only(false) {
-    build_and_optimize(do_split_ifs, skip_loop_opts);
+    build_and_optimize(do_split_ifs, skip_loop_opts, last_round);
   }
 
   // Verify that verify_me made the same decisions as a fresh run.
@@ -1227,9 +1227,9 @@
 
   // Check for aggressive application of 'split-if' optimization,
   // using basic block level info.
-  void  split_if_with_blocks     ( VectorSet &visited, Node_Stack &nstack );
+  void  split_if_with_blocks     ( VectorSet &visited, Node_Stack &nstack, bool last_round );
   Node *split_if_with_blocks_pre ( Node *n );
-  void  split_if_with_blocks_post( Node *n );
+  void  split_if_with_blocks_post( Node *n, bool last_round );
   Node *has_local_phi_input( Node *n );
   // Mark an IfNode as being dominated by a prior test,
   // without actually altering the CFG (and hence IDOM info).
--- a/src/hotspot/share/opto/loopopts.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/loopopts.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -40,6 +40,10 @@
 #include "opto/opaquenode.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/subnode.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ZGC
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#endif
 
 //=============================================================================
 //------------------------------split_thru_phi---------------------------------
@@ -1126,11 +1130,11 @@
 // Do the real work in a non-recursive function.  CFG hackery wants to be
 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
 // info.
-void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
+void PhaseIdealLoop::split_if_with_blocks_post(Node *n, bool last_round) {
 
   // Cloning Cmp through Phi's involves the split-if transform.
   // FastLock is not used by an If
-  if (n->is_Cmp() && !n->is_FastLock()) {
+  if (n->is_Cmp() && !n->is_FastLock() && !last_round) {
     Node *n_ctrl = get_ctrl(n);
     // Determine if the Node has inputs from some local Phi.
     // Returns the block to clone thru.
@@ -1377,12 +1381,18 @@
       get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
     _igvn.replace_node( n, n->in(1) );
   }
+
+#if INCLUDE_ZGC
+  if (UseZGC) {
+    ZBarrierSetC2::loop_optimize_gc_barrier(this, n, last_round);
+  }
+#endif
 }
 
 //------------------------------split_if_with_blocks---------------------------
 // Check for aggressive application of 'split-if' optimization,
 // using basic block level info.
-void PhaseIdealLoop::split_if_with_blocks( VectorSet &visited, Node_Stack &nstack ) {
+void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack, bool last_round) {
   Node *n = C->root();
   visited.set(n->_idx); // first, mark node as visited
   // Do pre-visit work for root
@@ -1407,7 +1417,7 @@
       // All of n's children have been processed, complete post-processing.
       if (cnt != 0 && !n->is_Con()) {
         assert(has_node(n), "no dead nodes");
-        split_if_with_blocks_post( n );
+        split_if_with_blocks_post( n, last_round );
       }
       if (nstack.is_empty()) {
         // Finished all nodes on stack.
--- a/src/hotspot/share/opto/macro.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/macro.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -2574,7 +2574,9 @@
         assert(n->Opcode() == Op_LoopLimit ||
                n->Opcode() == Op_Opaque1   ||
                n->Opcode() == Op_Opaque2   ||
-               n->Opcode() == Op_Opaque3, "unknown node type in macro list");
+               n->Opcode() == Op_Opaque3   ||
+               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
+               "unknown node type in macro list");
       }
       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
       progress = progress || success;
@@ -2656,7 +2658,7 @@
   while (macro_idx >= 0) {
     Node * n = C->macro_node(macro_idx);
     assert(n->is_macro(), "only macro nodes expected here");
-    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) {
+    if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
       // node is unreachable, so don't try to expand it
       C->remove_macro_node(n);
     } else if (n->is_ArrayCopy()){
@@ -2674,7 +2676,7 @@
     int macro_count = C->macro_count();
     Node * n = C->macro_node(macro_count-1);
     assert(n->is_macro(), "only macro nodes expected here");
-    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) {
+    if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
       // node is unreachable, so don't try to expand it
       C->remove_macro_node(n);
       continue;
--- a/src/hotspot/share/opto/matcher.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/matcher.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -41,6 +41,9 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/align.hpp"
+#if INCLUDE_ZGC
+#include "gc/z/zBarrierSetRuntime.hpp"
+#endif // INCLUDE_ZGC
 
 OptoReg::Name OptoReg::c_frame_pointer;
 
@@ -2062,6 +2065,7 @@
       mstack.set_state(Post_Visit);
       set_visited(n);   // Flag as visited now
       bool mem_op = false;
+      int mem_addr_idx = MemNode::Address;
 
       switch( nop ) {  // Handle some opcodes special
       case Op_Phi:             // Treat Phis as shared roots
@@ -2150,6 +2154,17 @@
       case Op_SafePoint:
         mem_op = true;
         break;
+#if INCLUDE_ZGC
+      case Op_CallLeaf:
+        if (UseZGC) {
+          if (n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() ||
+              n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr()) {
+            mem_op = true;
+            mem_addr_idx = TypeFunc::Parms+1;
+          }
+          break;
+        }
+#endif
       default:
         if( n->is_Store() ) {
           // Do match stores, despite no ideal reg
@@ -2199,7 +2214,7 @@
 #endif
 
         // Clone addressing expressions as they are "free" in memory access instructions
-        if (mem_op && i == MemNode::Address && mop == Op_AddP &&
+        if (mem_op && i == mem_addr_idx && mop == Op_AddP &&
             // When there are other uses besides address expressions
             // put it on stack and mark as shared.
             !is_visited(m)) {
--- a/src/hotspot/share/opto/memnode.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/memnode.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -44,7 +44,11 @@
 #include "opto/regmask.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
+#if INCLUDE_ZGC
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#endif
 
 // Portions of code courtesy of Clifford Click
 
@@ -891,6 +895,14 @@
 // a load node that reads from the source array so we may be able to
 // optimize out the ArrayCopy node later.
 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
+#if INCLUDE_ZGC
+  if (UseZGC) {
+    if (bottom_type()->make_oopptr() != NULL) {
+      return NULL;
+    }
+  }
+#endif
+
   Node* ld_adr = in(MemNode::Address);
   intptr_t ld_off = 0;
   AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
@@ -1574,7 +1586,7 @@
   // Is there a dominating load that loads the same value?  Leave
   // anything that is not a load of a field/array element (like
   // barriers etc.) alone
-  if (in(0) != NULL && adr_type() != TypeRawPtr::BOTTOM && can_reshape) {
+  if (in(0) != NULL && !adr_type()->isa_rawptr() && can_reshape) {
     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
       Node *use = mem->fast_out(i);
       if (use != this &&
@@ -2968,6 +2980,16 @@
     return NULL;
   }
 
+#if INCLUDE_ZGC
+  if (UseZGC) {
+    if (req() == (Precedent+1) && in(MemBarNode::Precedent)->in(0) != NULL && in(MemBarNode::Precedent)->in(0)->is_LoadBarrier()) {
+      Node* load_node = in(MemBarNode::Precedent)->in(0)->in(LoadBarrierNode::Oop);
+      set_req(MemBarNode::Precedent, load_node);
+      return this;
+    }
+  }
+#endif
+
   bool progress = false;
   // Eliminate volatile MemBars for scalar replaced objects.
   if (can_reshape && req() == (Precedent+1)) {
--- a/src/hotspot/share/opto/node.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/node.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -1130,7 +1130,7 @@
   if (this->is_Store()) {
     // Condition for back-to-back stores folding.
     return n->Opcode() == op && n->in(MemNode::Memory) == this;
-  } else if (this->is_Load() || this->is_DecodeN()) {
+  } else if (this->is_Load() || this->is_DecodeN() || this->is_Phi()) {
     // Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
     return n->Opcode() == Op_MemBarAcquire;
   } else if (op == Op_AddL) {
--- a/src/hotspot/share/opto/node.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/node.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -80,6 +80,9 @@
 class JumpNode;
 class JumpProjNode;
 class LoadNode;
+class LoadBarrierNode;
+class LoadBarrierSlowRegNode;
+class LoadBarrierWeakSlowRegNode;
 class LoadStoreNode;
 class LockNode;
 class LoopNode;
@@ -634,6 +637,7 @@
       DEFINE_CLASS_ID(MemBar,      Multi, 3)
         DEFINE_CLASS_ID(Initialize,       MemBar, 0)
         DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
+      DEFINE_CLASS_ID(LoadBarrier, Multi, 4)
 
     DEFINE_CLASS_ID(Mach,  Node, 1)
       DEFINE_CLASS_ID(MachReturn, Mach, 0)
@@ -680,6 +684,8 @@
     DEFINE_CLASS_ID(Mem,   Node, 4)
       DEFINE_CLASS_ID(Load,  Mem, 0)
         DEFINE_CLASS_ID(LoadVector,  Load, 0)
+          DEFINE_CLASS_ID(LoadBarrierSlowReg, Load, 1)
+          DEFINE_CLASS_ID(LoadBarrierWeakSlowReg, Load, 2)
       DEFINE_CLASS_ID(Store, Mem, 1)
         DEFINE_CLASS_ID(StoreVector, Store, 0)
       DEFINE_CLASS_ID(LoadStore, Mem, 2)
@@ -819,6 +825,9 @@
   DEFINE_CLASS_QUERY(JumpProj)
   DEFINE_CLASS_QUERY(Load)
   DEFINE_CLASS_QUERY(LoadStore)
+  DEFINE_CLASS_QUERY(LoadBarrier)
+  DEFINE_CLASS_QUERY(LoadBarrierSlowReg)
+  DEFINE_CLASS_QUERY(LoadBarrierWeakSlowReg)
   DEFINE_CLASS_QUERY(Lock)
   DEFINE_CLASS_QUERY(Loop)
   DEFINE_CLASS_QUERY(Mach)
--- a/src/hotspot/share/opto/opcodes.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/opcodes.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -28,6 +28,7 @@
 // Build a table of class names as strings.  Used both for debugging printouts
 // and in the ADL machine descriptions.
 #define macro(x) #x,
+#define optionalmacro(x) macro(x)
 const char *NodeClassNames[] = {
   "Node",
   "Set",
@@ -48,3 +49,4 @@
   "_last_class_name",
 };
 #undef macro
+#undef optionalmacro
--- a/src/hotspot/share/opto/opcodes.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/opcodes.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -27,6 +27,7 @@
 
 // Build a big enum of class names to give them dense integer indices
 #define macro(x) Op_##x,
+#define optionalmacro(x) macro(x)
 enum Opcodes {
   Op_Node = 0,
   macro(Set)                    // Instruction selection match rule
@@ -47,6 +48,7 @@
   _last_opcode
 };
 #undef macro
+#undef optionalmacro
 
 // Table of names, indexed by Opcode
 extern const char *NodeClassNames[];
--- a/src/hotspot/share/opto/phasetype.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/phasetype.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -52,6 +52,7 @@
   PHASE_MATCHING,
   PHASE_INCREMENTAL_INLINE,
   PHASE_INCREMENTAL_BOXING_INLINE,
+  PHASE_BEFORE_MACRO_EXPANSION,
   PHASE_END,
   PHASE_FAILURE,
 
@@ -88,6 +89,7 @@
       case PHASE_MATCHING:                   return "After matching";
       case PHASE_INCREMENTAL_INLINE:         return "Incremental Inline";
       case PHASE_INCREMENTAL_BOXING_INLINE:  return "Incremental Boxing Inline";
+      case PHASE_BEFORE_MACRO_EXPANSION:     return "Before macro expansion";
       case PHASE_END:                        return "End";
       case PHASE_FAILURE:                    return "Failure";
       default:
--- a/src/hotspot/share/opto/vectornode.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/opto/vectornode.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -252,6 +252,8 @@
   case Op_LoadI:   case Op_LoadL:
   case Op_LoadF:   case Op_LoadD:
   case Op_LoadP:   case Op_LoadN:
+  case Op_LoadBarrierSlowReg:
+  case Op_LoadBarrierWeakSlowReg:
     *start = 0;
     *end   = 0; // no vector operands
     break;
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -59,6 +59,9 @@
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "utilities/macros.hpp"
+#if INCLUDE_ZGC
+#include "gc/z/zGlobals.hpp"
+#endif
 
 // JvmtiTagHashmapEntry
 //
@@ -178,6 +181,8 @@
 
   // hash a given key (oop) with the specified size
   static unsigned int hash(oop key, int size) {
+    ZGC_ONLY(assert(ZAddressMetadataShift >= sizeof(unsigned int) * BitsPerByte, "cast removes the metadata bits");)
+
     // shift right to get better distribution (as these bits will be zero
     // with aligned addresses)
     unsigned int addr = (unsigned int)(cast_from_oop<intptr_t>(key));
--- a/src/hotspot/share/prims/whitebox.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/prims/whitebox.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -347,7 +347,12 @@
     ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
     return !psh->is_in_young(p);
   }
-#endif // INCLUDE_PARALLELGC
+#endif
+#if INCLUDE_ZGC
+  if (UseZGC) {
+    return Universe::heap()->is_in(p);
+  }
+#endif
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   return !gch->is_in_young(p);
 WB_END
--- a/src/hotspot/share/runtime/jniHandles.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/runtime/jniHandles.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -327,7 +327,11 @@
   VerifyJNIHandles verify_handle;
 
   oops_do(&verify_handle);
-  weak_oops_do(&verify_handle);
+
+  // JNI weaks are handled concurrently in ZGC, so they can't be verified here
+  if (!UseZGC) {
+    weak_oops_do(&verify_handle);
+  }
 }
 
 // This method is implemented here to avoid circular includes between
--- a/src/hotspot/share/runtime/stackValue.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/runtime/stackValue.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -29,6 +29,9 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/stackValue.hpp"
+#if INCLUDE_ZGC
+#include "gc/z/zBarrier.inline.hpp"
+#endif
 
 StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv) {
   if (sv->is_location()) {
@@ -119,6 +122,13 @@
          val = (oop)NULL;
       }
 #endif
+#if INCLUDE_ZGC
+      // Deoptimization must make sure all oop have passed load barrier
+      if (UseZGC) {
+        val = ZBarrier::load_barrier_on_oop_field_preloaded((oop*)value_addr, val);
+      }
+#endif
+
       Handle h(Thread::current(), val); // Wrap a handle around the oop
       return new StackValue(h);
     }
--- a/src/hotspot/share/runtime/vmStructs.cpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/runtime/vmStructs.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -1271,6 +1271,7 @@
   declare_integer_type(intptr_t)                                          \
   declare_unsigned_integer_type(uintx)                                    \
   declare_unsigned_integer_type(uintptr_t)                                \
+  declare_unsigned_integer_type(uint8_t)                                  \
   declare_unsigned_integer_type(uint32_t)                                 \
   declare_unsigned_integer_type(uint64_t)                                 \
                                                                           \
@@ -2602,6 +2603,12 @@
 
 #define VM_LONG_CONSTANTS(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
                                                                           \
+  /****************/                                                      \
+  /* GC constants */                                                      \
+  /****************/                                                      \
+                                                                          \
+  VM_LONG_CONSTANTS_GC(declare_constant)                                  \
+                                                                          \
   /*********************/                                                 \
   /* MarkOop constants */                                                 \
   /*********************/                                                 \
--- a/src/hotspot/share/runtime/vm_operations.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/runtime/vm_operations.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -69,6 +69,7 @@
   template(CMS_Final_Remark)                      \
   template(G1CollectForAllocation)                \
   template(G1CollectFull)                         \
+  template(ZOperation)                            \
   template(HandshakeOneThread)                    \
   template(HandshakeAllThreads)                   \
   template(HandshakeFallback)                     \
--- a/src/hotspot/share/utilities/macros.hpp	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/hotspot/share/utilities/macros.hpp	Tue Jun 12 17:40:28 2018 +0200
@@ -221,7 +221,25 @@
 #define NOT_SERIALGC_RETURN_(code) { return code; }
 #endif // INCLUDE_SERIALGC
 
-#if INCLUDE_CMSGC || INCLUDE_EPSILONGC || INCLUDE_G1GC || INCLUDE_PARALLELGC
+#ifndef INCLUDE_ZGC
+#define INCLUDE_ZGC 1
+#endif // INCLUDE_ZGC
+
+#if INCLUDE_ZGC
+#define ZGC_ONLY(x) x
+#define ZGC_ONLY_ARG(arg) arg,
+#define NOT_ZGC(x)
+#define NOT_ZGC_RETURN        /* next token must be ; */
+#define NOT_ZGC_RETURN_(code) /* next token must be ; */
+#else
+#define ZGC_ONLY(x)
+#define ZGC_ONLY_ARG(arg)
+#define NOT_ZGC(x) x
+#define NOT_ZGC_RETURN        {}
+#define NOT_ZGC_RETURN_(code) { return code; }
+#endif // INCLUDE_ZGC
+
+#if INCLUDE_CMSGC || INCLUDE_EPSILONGC || INCLUDE_G1GC || INCLUDE_PARALLELGC || INCLUDE_ZGC
 #define INCLUDE_NOT_ONLY_SERIALGC 1
 #else
 #define INCLUDE_NOT_ONLY_SERIALGC 0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/java.base/share/legal/c-libutl.md	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,35 @@
+## c-libutl 20160225
+
+### c-libutl License
+```
+
+This software is distributed under the terms of the BSD license.
+
+==  BSD LICENSE  ===============================================================
+
+ (C) 2009 by Remo Dentato (rdentato@gmail.com)
+
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+http://opensource.org/licenses/bsd-license.php
+
+```
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java	Tue Jun 12 17:40:28 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
 import sun.jvm.hotspot.gc.parallel.*;
 import sun.jvm.hotspot.gc.shared.*;
 import sun.jvm.hotspot.gc.g1.*;
+import sun.jvm.hotspot.gc.z.*;
 import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
@@ -1112,6 +1113,10 @@
                         } else if (collHeap instanceof EpsilonHeap) {
                           anno = "Epsilon ";
                           bad = false;
+                        } else if (collHeap instanceof ZCollectedHeap) {
+                          // ZGC heap; annotate and do not flag the oop as bad
+                          anno = "ZHeap ";
+                          bad = false;
                         } else {
                           // Optimistically assume the oop isn't bad
                           anno = "[Unknown generation] ";
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeap.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeap.java	Tue Jun 12 17:40:28 2018 +0200
@@ -75,6 +75,14 @@
 
   public abstract CollectedHeapName kind();
 
+  public String oopAddressDescription(OopHandle handle) {
+      return handle.toString();
+  }
+
+  public OopHandle oop_load_at(OopHandle handle, long offset) {
+      return handle.getOopHandleAt(offset);
+  }
+
   public void print() { printOn(System.out); }
   public void printOn(PrintStream tty) {
     MemRegion mr = reservedRegion();
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/CollectedHeapName.java	Tue Jun 12 17:40:28 2018 +0200
@@ -36,6 +36,7 @@
   public static final CollectedHeapName CMS = new CollectedHeapName("CMS");
   public static final CollectedHeapName G1 = new CollectedHeapName("G1");
   public static final CollectedHeapName EPSILON = new CollectedHeapName("Epsilon");
+  public static final CollectedHeapName Z = new CollectedHeapName("Z");
 
   public String toString() {
     return name;
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java	Tue Jun 12 17:40:28 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,12 @@
 
   _dcmd_gc_run ("Diagnostic Command"),
 
+  _z_timer ("Timer"),
+  _z_warmup ("Warmup"),
+  _z_allocation_rate ("Allocation Rate"),
+  _z_allocation_stall ("Allocation Stall"),
+  _z_proactive ("Proactive"),
+
   _last_gc_cause ("ILLEGAL VALUE - last gc cause - ILLEGAL VALUE");
 
   private final String value;
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java	Tue Jun 12 17:40:28 2018 +0200
@@ -37,6 +37,8 @@
   ConcurrentMarkSweep ("ConcurrentMarkSweep"),
   G1Old ("G1Old"),
   G1Full ("G1Full"),
+  Z ("Z"),
+  NA ("N/A"),
   GCNameEndSentinel ("GCNameEndSentinel");
 
   private final String value;
@@ -48,4 +50,3 @@
     return value;
   }
 }
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddress.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+
+class ZAddress {
+    static long as_long(Address value) {
+        if (value == null) {
+            return 0;
+        }
+        return value.asLongValue();
+    }
+
+    static boolean is_null(Address value) {
+        return value == null;
+    }
+
+    static boolean is_weak_bad(Address value) {
+        return (as_long(value) & ZGlobals.ZAddressWeakBadMask()) != 0;
+    }
+
+    static boolean is_weak_good(Address value) {
+        return !is_weak_bad(value) && !is_null(value);
+    }
+
+    static boolean is_weak_good_or_null(Address value) {
+        return !is_weak_bad(value);
+    }
+
+    static long offset(Address address) {
+        return as_long(address) & ZGlobals.ZAddressOffsetMask;
+    }
+
+    static Address address(long value) {
+        VM vm = VM.getVM();
+        if (vm.getOS().equals("solaris") && vm.getCPU().equals("sparc")) {
+            value |= ZGlobals.ZAddressSpaceStart;
+        }
+
+        return ZOop.to_address(value);
+    }
+
+    static Address good(Address value) {
+        return address(offset(value) | ZGlobals.ZAddressGoodMask());
+    }
+
+    static Address good_or_null(Address value) {
+        return is_null(value) ? value : good(value);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddressRangeMapForPageTable.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class ZAddressRangeMapForPageTable extends VMObject {
+    private static AddressField mapField;
+
+    private static long AddressRangeShift = ZGlobals.ZPageSizeMinShift;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZAddressRangeMapForPageTable");
+
+        mapField = type.getAddressField("_map");
+    }
+
+    public ZAddressRangeMapForPageTable(Address addr) {
+        super(addr);
+    }
+
+    private Address map() {
+        return mapField.getValue(addr);
+    }
+
+    private long index_for_addr(Address addr) {
+        long index = ZAddress.offset(addr) >> AddressRangeShift;
+
+        return index;
+    }
+
+    Address get(Address addr) {
+        long index = index_for_addr(addr);
+
+        return map().getAddressAt(index * VM.getVM().getBytesPerLong());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZBarrier.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+
+class ZBarrier {
+    private static boolean is_weak_good_or_null_fast_path(Address addr) {
+        return ZAddress.is_weak_good_or_null(addr);
+    }
+
+    private static Address weak_load_barrier_on_oop_slow_path(Address addr) {
+        return ZAddress.is_weak_good(addr) ? ZAddress.good(addr) : relocate_or_remap(addr);
+    }
+
+    private static boolean during_relocate() {
+        return ZGlobals.ZGlobalPhase() == ZGlobals.ZPhaseRelocate;
+    }
+
+    private static Address relocate(Address addr) {
+        ZHeap heap = zheap();
+        if (heap.is_relocating(addr)) {
+            // Forward
+            return heap.relocate_object(addr);
+        }
+
+        // Remap
+        return ZAddress.good(addr);
+    }
+
+    private static ZHeap zheap() {
+        ZCollectedHeap zCollectedHeap = (ZCollectedHeap)VM.getVM().getUniverse().heap();
+        return zCollectedHeap.heap();
+    }
+
+    private static Address remap(Address addr) {
+        ZHeap heap = zheap();
+        if (heap.is_relocating(addr)) {
+            // Forward
+            return heap.forward_object(addr);
+        }
+
+        // Remap
+        return ZAddress.good(addr);
+    }
+
+    private static Address relocate_or_remap(Address addr) {
+        return during_relocate() ? relocate(addr) : remap(addr);
+    }
+
+    static Address weak_barrier(Address o) {
+        // Fast path
+        if (is_weak_good_or_null_fast_path(o)) {
+            // Return the good address instead of the weak good address
+            // to ensure that the currently active heap view is used.
+            return ZAddress.good_or_null(o);
+        }
+
+        // Slow path
+        return weak_load_barrier_on_oop_slow_path(o);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZCollectedHeap.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import java.io.PrintStream;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.OopHandle;
+import sun.jvm.hotspot.gc.shared.CollectedHeap;
+import sun.jvm.hotspot.gc.shared.CollectedHeapName;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for ZCollectedHeap.
+
+public class ZCollectedHeap extends CollectedHeap {
+
+    private static long zHeapFieldOffset;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    private static synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZCollectedHeap");
+
+        zHeapFieldOffset = type.getAddressField("_heap").getOffset();
+    }
+
+    public ZHeap heap() {
+        Address heapAddr = addr.addOffsetTo(zHeapFieldOffset);
+        return (ZHeap)VMObjectFactory.newObject(ZHeap.class, heapAddr);
+    }
+
+    @Override
+    public CollectedHeapName kind() {
+        return CollectedHeapName.Z;
+    }
+
+    @Override
+    public void printOn(PrintStream tty) {
+        heap().printOn(tty);
+    }
+
+    public ZCollectedHeap(Address addr) {
+        super(addr);
+    }
+
+    public OopHandle oop_load_at(OopHandle handle, long offset) {
+        assert(!VM.getVM().isCompressedOopsEnabled()) : "ZGC does not support compressed oops";
+
+        Address oopAddress = handle.getAddressAt(offset);
+
+        oopAddress = ZBarrier.weak_barrier(oopAddress);
+        if (oopAddress == null) {
+            return null;
+        }
+
+        return oopAddress.addOffsetToAsOopHandle(0);
+    }
+
+    public String oopAddressDescription(OopHandle handle) {
+        Address origOop = ZOop.to_address(handle);
+        Address loadBarrieredOop = ZBarrier.weak_barrier(origOop);
+        if (!origOop.equals(loadBarrieredOop)) {
+            return origOop + " (" + loadBarrieredOop.toString() + ")";
+        } else {
+            return handle.toString();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZForwardingTable.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class ZForwardingTable extends VMObject {
+    private static AddressField tableField;
+    private static CIntegerField sizeField;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZForwardingTable");
+
+        tableField = type.getAddressField("_table");
+        sizeField = type.getCIntegerField("_size");
+    }
+
+    public ZForwardingTable(Address addr) {
+        super(addr);
+    }
+
+    Address table() {
+        return tableField.getAddress(addr);
+    }
+
+    long size() {
+        return sizeField.getJLong(addr);
+    }
+
+    ZForwardingTableEntry at(ZForwardingTableCursor cursor) {
+        return new ZForwardingTableEntry(table().getAddressAt(cursor._value * VM.getVM().getBytesPerLong()));
+    }
+
+    ZForwardingTableEntry first(long from_index, ZForwardingTableCursor cursor) {
+        long mask = size() - 1;
+        long hash = ZHash.uint32_to_uint32(from_index);
+        cursor._value = hash & mask;
+        return at(cursor);
+    }
+
+    ZForwardingTableEntry next(ZForwardingTableCursor cursor) {
+        long mask = size() - 1;
+        cursor._value = (cursor._value + 1) & mask;
+        return at(cursor);
+    }
+
+    ZForwardingTableEntry find(long from_index, ZForwardingTableCursor cursor) {
+        // Reading entries in the table races with the atomic cas done for
+        // insertion into the table. This is safe because each entry is at
+        // most updated once (from -1 to something else).
+        ZForwardingTableEntry entry = first(from_index, cursor);
+        while (!entry.is_empty()) {
+            if (entry.from_index() == from_index) {
+                // Match found, return matching entry
+                return entry;
+            }
+
+            entry = next(cursor);
+        }
+
+        // Match not found, return empty entry
+        return entry;
+    }
+
+    ZForwardingTableEntry find(long from_index) {
+        ZForwardingTableCursor dummy = new ZForwardingTableCursor();
+        return find(from_index, dummy);
+    }
+
+    void dump() {
+        long s = size();
+        long count = 0;
+        System.out.println("Dumping ZForwardingTable[" + s + "]:");
+        ZForwardingTableCursor cursor = new ZForwardingTableCursor();
+        for (long i = 0; i < s; i++) {
+            cursor._value = i;
+            ZForwardingTableEntry entry = at(cursor);
+            if (!entry.is_empty()) {
+                long hash = ZHash.uint32_to_uint32(entry.from_index());
+                System.out.println(i + " " + count + " " + entry + " hash: " + hash + " masked_hash: " + (hash & (s - 1)));
+                count++;
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZForwardingTableCursor.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+class ZForwardingTableCursor {
+    long _value;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZForwardingTableEntry.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+
+class ZForwardingTableEntry {
+    private Address entry;
+
+    ZForwardingTableEntry(Address addr) {
+        entry = addr;
+    }
+
+    private static long empty() {
+        return ~0L;
+    }
+
+    boolean is_empty() {
+        return entry.asLongValue() == empty();
+    }
+
+    Address to_offset() {
+        return entry.andWithMask((1L << 42) - 1); // low 42 bits hold the to-offset
+    }
+
+    long from_index() {
+        return entry.asLongValue() >>> 42; // remaining high bits hold the from-index
+    }
+
+    public String toString() {
+        return entry + " - from_index: " + from_index() + " to_offset: " + to_offset();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobals.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.Field;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class ZGlobals {
+    private static Field instanceField;
+
+    // Global phase state
+    public static int ZPhaseRelocate;
+
+    public static byte ZPageTypeSmall;
+    public static byte ZPageTypeMedium;
+    public static byte ZPageTypeLarge;
+
+    // Page size shifts
+    public static long ZPageSizeSmallShift;
+    public static long ZPageSizeMediumShift;
+    public static long ZPageSizeMinShift;
+
+    // Object alignment shifts
+    public static int  ZObjectAlignmentMediumShift;
+    public static int  ZObjectAlignmentLargeShift;
+
+    // Pointer part of address
+    public static long ZAddressOffsetShift;
+
+    // Pointer part of address
+    public static long ZAddressOffsetBits;
+    public static long ZAddressOffsetMask;
+
+    // Address space start/end/size
+    public static long ZAddressSpaceStart;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZGlobalsForVMStructs");
+
+        instanceField = type.getField("_instance_p");
+
+        ZPhaseRelocate = db.lookupIntConstant("ZPhaseRelocate").intValue();
+
+        ZPageTypeSmall = db.lookupIntConstant("ZPageTypeSmall").byteValue();
+        ZPageTypeMedium = db.lookupIntConstant("ZPageTypeMedium").byteValue();
+        ZPageTypeLarge = db.lookupIntConstant("ZPageTypeLarge").byteValue();
+
+        ZPageSizeSmallShift = db.lookupLongConstant("ZPageSizeSmallShift").longValue();
+        ZPageSizeMediumShift = db.lookupLongConstant("ZPageSizeMediumShift").longValue();
+        ZPageSizeMinShift = db.lookupLongConstant("ZPageSizeMinShift").longValue();
+
+        ZObjectAlignmentMediumShift = db.lookupIntConstant("ZObjectAlignmentMediumShift").intValue();
+        ZObjectAlignmentLargeShift = db.lookupIntConstant("ZObjectAlignmentLargeShift").intValue();
+
+        ZAddressOffsetShift = db.lookupLongConstant("ZAddressOffsetShift").longValue();
+
+        ZAddressOffsetBits = db.lookupLongConstant("ZAddressOffsetBits").longValue();
+        ZAddressOffsetMask = db.lookupLongConstant("ZAddressOffsetMask").longValue();
+
+        ZAddressSpaceStart = db.lookupLongConstant("ZAddressSpaceStart").longValue();
+    }
+
+    private static ZGlobalsForVMStructs instance() {
+        return new ZGlobalsForVMStructs(instanceField.getAddress());
+    }
+
+    public static int ZGlobalPhase() {
+        return instance().ZGlobalPhase();
+    }
+
+    public static long ZAddressGoodMask() {
+        return instance().ZAddressGoodMask();
+    }
+
+    public static long ZAddressBadMask() {
+        return instance().ZAddressBadMask();
+    }
+
+    public static long ZAddressWeakBadMask() {
+        return instance().ZAddressWeakBadMask();
+    }
+
+    public static int ZObjectAlignmentSmallShift() {
+        return instance().ZObjectAlignmentSmallShift();
+    }
+
+    public static int ZObjectAlignmentSmall() {
+        return instance().ZObjectAlignmentSmall();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobalsForVMStructs.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for the native ZGlobalsForVMStructs struct, which exposes
+// pointers to ZGC's mutable globals (phase, pointer-coloring masks, small
+// object alignment). Each accessor dereferences the corresponding pointer
+// field, so callers always observe the current native value.
+class ZGlobalsForVMStructs extends VMObject {
+    private static AddressField ZGlobalPhaseField;
+    private static AddressField ZAddressGoodMaskField;
+    private static AddressField ZAddressBadMaskField;
+    private static AddressField ZAddressWeakBadMaskField;
+    private static AddressField ZObjectAlignmentSmallShiftField;
+    private static AddressField ZObjectAlignmentSmallField;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    // Resolve the field offsets once the VM type database is available.
+    // Note: canonical modifier order (private static) per JLS 8.3.1.
+    private static synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZGlobalsForVMStructs");
+
+        ZGlobalPhaseField = type.getAddressField("_ZGlobalPhase");
+        ZAddressGoodMaskField = type.getAddressField("_ZAddressGoodMask");
+        ZAddressBadMaskField = type.getAddressField("_ZAddressBadMask");
+        ZAddressWeakBadMaskField = type.getAddressField("_ZAddressWeakBadMask");
+        ZObjectAlignmentSmallShiftField = type.getAddressField("_ZObjectAlignmentSmallShift");
+        ZObjectAlignmentSmallField = type.getAddressField("_ZObjectAlignmentSmall");
+    }
+
+    ZGlobalsForVMStructs(Address addr) {
+        super(addr);
+    }
+
+    // Each field holds a pointer to the actual global; follow it and read
+    // the pointed-to value (jint or jlong as appropriate).
+    int ZGlobalPhase() {
+        return ZGlobalPhaseField.getValue(addr).getJIntAt(0);
+    }
+
+    long ZAddressGoodMask() {
+        return ZAddressGoodMaskField.getValue(addr).getJLongAt(0);
+    }
+
+    long ZAddressBadMask() {
+        return ZAddressBadMaskField.getValue(addr).getJLongAt(0);
+    }
+
+    long ZAddressWeakBadMask() {
+        return ZAddressWeakBadMaskField.getValue(addr).getJLongAt(0);
+    }
+
+    int ZObjectAlignmentSmallShift() {
+        return ZObjectAlignmentSmallShiftField.getValue(addr).getJIntAt(0);
+    }
+
+    int ZObjectAlignmentSmall() {
+        return ZObjectAlignmentSmallField.getValue(addr).getJIntAt(0);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZHash.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+// 32-bit mix hash used by the SA to probe ZGC's forwarding tables.
+// Presumably mirrors the native ZHash implementation (zHash.inline.hpp) --
+// the two must agree bit-for-bit for table lookups to succeed; confirm
+// against the native source if lookups misbehave.
+class ZHash {
+    // Emulate C uint32_t arithmetic: keep only the low 32 bits of the long.
+    private static long uint32(long value) {
+        return value & 0xFFFFFFFFL;
+    }
+
+    // Mixes an unsigned 32-bit key (carried in the low bits of a long) into
+    // an unsigned 32-bit hash. Masking after every step makes the signed
+    // Java operators (~, <<, >>, *) behave like unsigned 32-bit C ops.
+    static long uint32_to_uint32(long key) {
+        key = uint32(~key + (key << 15));
+        key = uint32(key ^ (key >> 12));
+        key = uint32(key + (key << 2));
+        key = uint32(key ^ (key >> 4));
+        key = uint32(key * 2057);
+        key = uint32(key ^ (key >> 16));
+        return key;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZHeap.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import java.io.PrintStream;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for ZHeap
+
+public class ZHeap extends VMObject {
+
+    // Byte offsets of the embedded (by-value) _page_allocator and _pagetable
+    // members inside the native ZHeap; resolved from the type database.
+    private static long pageAllocatorFieldOffset;
+    private static long pageTableFieldOffset;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    private static synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZHeap");
+
+        pageAllocatorFieldOffset = type.getAddressField("_page_allocator").getOffset();
+        pageTableFieldOffset = type.getAddressField("_pagetable").getOffset();
+    }
+
+    public ZHeap(Address addr) {
+        super(addr);
+    }
+
+    // The page allocator is embedded in ZHeap, so its address is this
+    // object's address plus the field offset (not a pointer dereference).
+    private ZPageAllocator pageAllocator() {
+        Address pageAllocatorAddr = addr.addOffsetTo(pageAllocatorFieldOffset);
+        return (ZPageAllocator)VMObjectFactory.newObject(ZPageAllocator.class, pageAllocatorAddr);
+    }
+
+    // Likewise embedded; wraps the in-place page table.
+    ZPageTable pageTable() {
+        return (ZPageTable)VMObjectFactory.newObject(ZPageTable.class, addr.addOffsetTo(pageTableFieldOffset));
+    }
+
+    public long maxCapacity() {
+        return pageAllocator().maxCapacity();
+    }
+
+    public long capacity() {
+        return pageAllocator().capacity();
+    }
+
+    public long used() {
+        return pageAllocator().used();
+    }
+
+    // True if the page containing o is currently being relocated.
+    boolean is_relocating(Address o) {
+        return pageTable().is_relocating(o);
+    }
+
+    // NOTE: the 'addr' parameter below shadows the inherited VMObject.addr
+    // field; inside these two methods 'addr' is the oop being translated.
+    Address forward_object(Address addr) {
+        ZPage page = pageTable().get(addr);
+        return page.forward_object(addr);
+    }
+
+    Address relocate_object(Address addr) {
+        ZPage page = pageTable().get(addr);
+        return page.relocate_object(addr);
+    }
+
+    // One-line heap summary in the style of the other SA heap printers.
+    public void printOn(PrintStream tty) {
+        tty.print(" ZHeap          ");
+        tty.print("used " + (used() / 1024 / 1024) + "M, ");
+        tty.print("capacity " + (capacity() / 1024 / 1024) + "M, ");
+        tty.println("max capacity " + (maxCapacity() / 1024 / 1024) + "M");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZOop.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.OopHandle;
+import sun.jvm.hotspot.runtime.VM;
+
+// Converts raw long values / OopHandles into Address objects without ever
+// materializing a zero-valued Address (which the debugger API collapses
+// to null).
+class ZOop {
+    // A long with only the most significant bit set.
+    private static final long MSB = ~0L ^ (~0L >>> 1);
+
+    // A non-null Address whose only set bit is the MSB, derived from the
+    // heap start address purely to obtain an Address instance to operate on.
+    private static Address msbAddress() {
+        return VM.getVM().getUniverse().heap().start().orWithMask(MSB).andWithMask(MSB);
+    }
+
+    static Address to_address(long value) {
+        // If the value of an Address becomes 0, null is returned instead of an Address.
+        // Start with a one-bit address and as a last step, remove that bit.
+        Address oneAddress = msbAddress();
+        return oneAddress.orWithMask(value).xorWithMask(ZAddress.as_long(oneAddress));
+    }
+
+    static Address to_address(OopHandle oop) {
+        return to_address(ZAddress.as_long(oop));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPage.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for the native ZPage. Provides forwarding-table lookups
+// used to translate from-space oops to their relocated to-space addresses.
+public class ZPage extends VMObject {
+    private static CIntegerField typeField;
+    private static long virtualFieldOffset;
+    private static long forwardingFieldOffset;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    // Canonical modifier order (private static) per JLS 8.3.1.
+    private static synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZPage");
+
+        typeField = type.getCIntegerField("_type");
+        virtualFieldOffset = type.getField("_virtual").getOffset();
+        forwardingFieldOffset = type.getField("_forwarding").getOffset();
+    }
+
+    public ZPage(Address addr) {
+        super(addr);
+    }
+
+    // Page type: one of ZGlobals.ZPageTypeSmall/Medium/Large.
+    private byte type() {
+        return typeField.getJByte(addr);
+    }
+
+    // _virtual and _forwarding are embedded members; wrap them in place.
+    private ZVirtualMemory virtual() {
+        return (ZVirtualMemory)VMObjectFactory.newObject(ZVirtualMemory.class, addr.addOffsetTo(virtualFieldOffset));
+    }
+
+    private ZForwardingTable forwarding() {
+        return (ZForwardingTable)VMObjectFactory.newObject(ZForwardingTable.class, addr.addOffsetTo(forwardingFieldOffset));
+    }
+
+    private long start() {
+        return virtual().start();
+    }
+
+    // Translates a from-space oop to its to-space address. The object must
+    // already have been relocated (asserts the forwarding entry exists).
+    Address forward_object(Address from) {
+        // Lookup address in forwarding table
+        long from_offset = ZAddress.offset(from);
+        long from_index = (from_offset - start()) >> object_alignment_shift();
+        ZForwardingTableEntry entry = forwarding().find(from_index);
+        assert(!entry.is_empty());
+        assert(entry.from_index() == from_index);
+
+        return ZAddress.good(entry.to_offset());
+    }
+
+    // Like forward_object(), but tolerates a not-yet-relocated object.
+    Address relocate_object(Address from) {
+        // Lookup address in forwarding table
+        long from_offset = ZAddress.offset(from);
+        long from_index = (from_offset - start()) >> object_alignment_shift();
+        ZForwardingTableEntry entry = forwarding().find(from_index);
+        if (!entry.is_empty() && entry.from_index() == from_index) {
+          return ZAddress.good(entry.to_offset());
+        }
+
+        // There's no relocate operation in the SA.
+        // Mimic object pinning and return the good view of the from object.
+        return ZAddress.good(from);
+    }
+
+    // Alignment shift for objects on this page. The small-page shift is
+    // dynamic in the VM (read via ZGlobalsForVMStructs, hence the method
+    // call); the medium/large shifts are lookup-time constants (fields).
+    long object_alignment_shift() {
+        if (type() == ZGlobals.ZPageTypeSmall) {
+            return ZGlobals.ZObjectAlignmentSmallShift();
+        } else if (type() == ZGlobals.ZPageTypeMedium) {
+            return ZGlobals.ZObjectAlignmentMediumShift;
+        } else {
+            assert(type() == ZGlobals.ZPageTypeLarge);
+            return ZGlobals.ZObjectAlignmentLargeShift;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPageAllocator.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for ZPageAllocator
+
+public class ZPageAllocator extends VMObject {
+
+    private static AddressField physicalField;
+    private static CIntegerField usedField;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    // Canonical modifier order (private static) per JLS 8.3.1; matches the
+    // sibling mirror classes in this package.
+    private static synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZPageAllocator");
+
+        physicalField = type.getAddressField("_physical");
+        usedField = type.getCIntegerField("_used");
+    }
+
+    public ZPageAllocator(Address addr) {
+        super(addr);
+    }
+
+    // _physical is a pointer field: follow it and wrap the pointee.
+    // (Re-indented to the file's 4-space convention.)
+    private ZPhysicalMemoryManager physical() {
+        Address physicalAddr = physicalField.getValue(addr);
+        return (ZPhysicalMemoryManager)VMObjectFactory.newObject(ZPhysicalMemoryManager.class, physicalAddr);
+    }
+
+    public long maxCapacity() {
+        return physical().maxCapacity();
+    }
+
+    public long capacity() {
+        return physical().capacity();
+    }
+
+    // Bytes currently allocated, read directly from the _used counter.
+    public long used() {
+        return usedField.getValue(addr);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPageTable.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for ZPageTable: maps oop addresses to their ZPage via an
+// embedded ZAddressRangeMapForPageTable.
+public class ZPageTable extends VMObject {
+    private static long mapFieldOffset;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    // Canonical modifier order (private static) per JLS 8.3.1.
+    private static synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZPageTable");
+
+        mapFieldOffset = type.getAddressField("_map").getOffset();
+    }
+
+    public ZPageTable(Address addr) {
+        super(addr);
+    }
+
+    // _map is embedded, so wrap it at this object's address plus offset.
+    private ZAddressRangeMapForPageTable map() {
+        return (ZAddressRangeMapForPageTable)VMObjectFactory.newObject(ZAddressRangeMapForPageTable.class, addr.addOffsetTo(mapFieldOffset));
+    }
+
+    // Raw table entry for o: a tagged ZPage pointer (bit 0 = relocating).
+    private ZPageTableEntry getEntry(Address o) {
+        return new ZPageTableEntry(map().get(o));
+    }
+
+    ZPage get(Address o) {
+        return getEntry(o).page();
+    }
+
+    boolean is_relocating(Address o) {
+        return getEntry(o).relocating();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPageTableEntry.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+
+// A page-table entry: a ZPage pointer tagged in its lowest bit, where
+// bit 0 set means the page is currently being relocated.
+class ZPageTableEntry {
+    // Raw tagged value as read from the table.
+    Address entry;
+
+    ZPageTableEntry(Address address) {
+        entry = address;
+    }
+
+    // Strip the tag bit to recover the ZPage pointer.
+    ZPage page() {
+        return (ZPage)VMObjectFactory.newObject(ZPage.class, entry.andWithMask(~1L));
+    }
+
+    // The relocating flag lives in bit 0.
+    boolean relocating() {
+        return (entry.asLongValue() & 1) == 1;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZPhysicalMemoryManager.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for ZPhysicalMemoryManager
+
+public class ZPhysicalMemoryManager extends VMObject {
+
+    // Current committed capacity, in bytes.
+    private static CIntegerField capacityField;
+
+    // Maximum capacity the heap may grow to, in bytes.
+    private static CIntegerField maxCapacityField;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    private static synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZPhysicalMemoryManager");
+
+        capacityField = type.getCIntegerField("_capacity");
+        maxCapacityField = type.getCIntegerField("_max_capacity");
+    }
+
+    public long capacity() {
+        return capacityField.getValue(addr);
+    }
+
+    public long maxCapacity() {
+        return maxCapacityField.getValue(addr);
+    }
+
+    public ZPhysicalMemoryManager(Address addr) {
+        super(addr);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZVirtualMemory.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.z;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for ZVirtualMemory: a [start, end) virtual address range
+// (stored as integer offsets, read as jlongs).
+public class ZVirtualMemory extends VMObject {
+    private static CIntegerField startField;
+    private static CIntegerField endField;
+
+    static {
+        VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("ZVirtualMemory");
+
+        startField = type.getCIntegerField("_start");
+        endField = type.getCIntegerField("_end");
+    }
+
+    public ZVirtualMemory(Address addr) {
+        super(addr);
+    }
+
+    // Inclusive start of the range.
+    long start() {
+        return startField.getJLong(addr);
+    }
+
+    // Exclusive end of the range.
+    long end() {
+        return endField.getJLong(addr);
+    }
+}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java	Tue Jun 12 17:40:28 2018 +0200
@@ -24,18 +24,27 @@
 
 package sun.jvm.hotspot.memory;
 
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
+import java.io.PrintStream;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.OopHandle;
 import sun.jvm.hotspot.gc.cms.CMSHeap;
+import sun.jvm.hotspot.gc.epsilon.EpsilonHeap;
+import sun.jvm.hotspot.gc.g1.G1CollectedHeap;
+import sun.jvm.hotspot.gc.parallel.ParallelScavengeHeap;
 import sun.jvm.hotspot.gc.serial.SerialHeap;
-import sun.jvm.hotspot.gc.shared.*;
-import sun.jvm.hotspot.gc.g1.G1CollectedHeap;
-import sun.jvm.hotspot.gc.epsilon.EpsilonHeap;
-import sun.jvm.hotspot.gc.parallel.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.gc.shared.CollectedHeap;
+import sun.jvm.hotspot.gc.z.ZCollectedHeap;
+import sun.jvm.hotspot.oops.Oop;
+import sun.jvm.hotspot.runtime.BasicType;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VirtualConstructor;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
 
 
 public class Universe {
@@ -73,17 +82,34 @@
       });
   }
 
+  // Returns true if the VM type database knows the given type name.
+  // lookupType throws a RuntimeException for unknown types, which we treat
+  // as "not present" (e.g. a GC compiled out of this VM build).
+  private static boolean typeExists(TypeDataBase db, String type) {
+      try {
+          db.lookupType(type);
+      } catch (RuntimeException e) {
+          return false;
+      }
+      return true;
+  }
+
+  // Registers a heap mirror class keyed by its simple name, but only when
+  // the corresponding native type exists in this VM. Class<?> avoids the
+  // raw-type warning; addMapping's contract is unchanged.
+  private static void addHeapTypeIfInDB(TypeDataBase db, Class<?> heapClass) {
+      String heapName = heapClass.getSimpleName();
+      if (typeExists(db, heapName)) {
+          heapConstructor.addMapping(heapName, heapClass);
+      }
+  }
+
   private static synchronized void initialize(TypeDataBase db) {
     Type type = db.lookupType("Universe");
 
     collectedHeapField = type.getAddressField("_collectedHeap");
 
     heapConstructor = new VirtualConstructor(db);
-    heapConstructor.addMapping("CMSHeap", CMSHeap.class);
-    heapConstructor.addMapping("SerialHeap", SerialHeap.class);
-    heapConstructor.addMapping("ParallelScavengeHeap", ParallelScavengeHeap.class);
-    heapConstructor.addMapping("G1CollectedHeap", G1CollectedHeap.class);
-    heapConstructor.addMapping("EpsilonHeap", EpsilonHeap.class);
+    addHeapTypeIfInDB(db, CMSHeap.class);
+    addHeapTypeIfInDB(db, SerialHeap.class);
+    addHeapTypeIfInDB(db, ParallelScavengeHeap.class);
+    addHeapTypeIfInDB(db, G1CollectedHeap.class);
+    addHeapTypeIfInDB(db, EpsilonHeap.class);
+    addHeapTypeIfInDB(db, ZCollectedHeap.class);
 
     mainThreadGroupField   = type.getOopField("_main_thread_group");
     systemThreadGroupField = type.getOopField("_system_thread_group");
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java	Tue Jun 12 17:40:28 2018 +0200
@@ -164,7 +164,7 @@
       tty.print("null");
     } else {
       obj.printValueOn(tty);
-      tty.print(" @ " + obj.getHandle());
+      tty.print(" @ " + VM.getVM().getUniverse().heap().oopAddressDescription(obj.getHandle()));
     }
   }
 
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/OopField.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/OopField.java	Tue Jun 12 17:40:28 2018 +0200
@@ -54,7 +54,8 @@
     if (!isVMField() && !obj.isInstance() && !obj.isArray()) {
       throw new InternalError(obj.toString());
     }
-    return obj.getHandle().getOopHandleAt(getOffset());
+
+    return VM.getVM().getUniverse().heap().oop_load_at(obj.getHandle(), getOffset());
   }
 
   public Oop getValue(VMObject obj) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMOps.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VMOps.java	Tue Jun 12 17:40:28 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@
   CMS_Initial_Mark,
   CMS_Final_Remark,
   G1CollectFull,
+  ZOperation,
   G1CollectForAllocation,
   G1IncCollectionPause,
   EnableBiasedLocking,
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Tue Jun 12 17:40:28 2018 +0200
@@ -30,6 +30,7 @@
 import sun.jvm.hotspot.gc.parallel.*;
 import sun.jvm.hotspot.gc.serial.*;
 import sun.jvm.hotspot.gc.shared.*;
+import sun.jvm.hotspot.gc.z.*;
 import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
@@ -128,6 +129,9 @@
       } else if (heap instanceof EpsilonHeap) {
          EpsilonHeap eh = (EpsilonHeap) heap;
          printSpace(eh.space());
+      } else if (heap instanceof ZCollectedHeap) {
+         ZCollectedHeap zheap = (ZCollectedHeap) heap;
+         zheap.printOn(System.out);
       } else {
          throw new RuntimeException("unknown CollectedHeap type : " + heap.getClass());
       }
@@ -171,6 +175,14 @@
            return;
        }
 
+       l = getFlagValue("UseZGC", flagMap);
+       if (l == 1L) {
+           System.out.print("ZGC ");
+           l = getFlagValue("ParallelGCThreads", flagMap);
+           System.out.println("with " + l + " thread(s)");
+           return;
+       }
+
        System.out.println("Mark Sweep Compact GC");
    }
 
--- a/src/jdk.jfr/share/conf/jfr/default.jfc	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.jfr/share/conf/jfr/default.jfc	Tue Jun 12 17:40:28 2018 +0200
@@ -614,6 +614,25 @@
       <setting name="enabled">true</setting>
     </event>
 
+    <event name="jdk.ZPageAllocation">
+      <setting name="enabled">true</setting>
+      <setting name="threshold">10 ms</setting>
+    </event>
+
+    <event name="jdk.ZThreadPhase">
+      <setting name="enabled">true</setting>
+      <setting name="threshold">0 ms</setting>
+    </event>
+
+    <event name="jdk.ZStatisticsCounter">
+      <setting name="enabled">true</setting>
+      <setting name="threshold">10 ms</setting>
+    </event>
+
+    <event name="jdk.ZStatisticsSampler">
+      <setting name="enabled">true</setting>
+      <setting name="threshold">10 ms</setting>
+    </event>
 
 
 
--- a/src/jdk.jfr/share/conf/jfr/profile.jfc	Tue Jun 12 07:52:30 2018 -0700
+++ b/src/jdk.jfr/share/conf/jfr/profile.jfc	Tue Jun 12 17:40:28 2018 +0200
@@ -614,6 +614,25 @@
       <setting name="enabled">true</setting>
     </event>
 
+    <event name="jdk.ZPageAllocation">
+      <setting name="enabled">true</setting>
+      <setting name="threshold">10 ms</setting>
+    </event>
+
+    <event name="jdk.ZThreadPhase">
+      <setting name="enabled">true</setting>
+      <setting name="threshold">0 ms</setting>
+    </event>
+
+    <event name="jdk.ZStatisticsCounter">
+      <setting name="enabled">true</setting>
+      <setting name="threshold">10 ms</setting>
+    </event>
+
+    <event name="jdk.ZStatisticsSampler">
+      <setting name="enabled">true</setting>
+      <setting name="threshold">10 ms</setting>
+    </event>
 
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/z/test_zAddress.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "unittest.hpp"
+
+class ZAddressTest : public ::testing::Test {
+protected:
+  static void is_good_bit(uintptr_t bit_mask) {
+    // Setup
+    uintptr_t mask_before = ZAddressGoodMask;
+
+    ZAddressMasks::set_good_mask(bit_mask);
+
+    // Test that a pointer with only the given bit is considered good.
+    EXPECT_EQ(ZAddress::is_good(ZAddressMetadataMarked0),  (bit_mask == ZAddressMetadataMarked0));
+    EXPECT_EQ(ZAddress::is_good(ZAddressMetadataMarked1),  (bit_mask == ZAddressMetadataMarked1));
+    EXPECT_EQ(ZAddress::is_good(ZAddressMetadataRemapped), (bit_mask == ZAddressMetadataRemapped));
+
+    // Test that a pointer with the given bit and some extra bits is considered good.
+    EXPECT_EQ(ZAddress::is_good(ZAddressMetadataMarked0  | 0x8), (bit_mask == ZAddressMetadataMarked0));
+    EXPECT_EQ(ZAddress::is_good(ZAddressMetadataMarked1  | 0x8), (bit_mask == ZAddressMetadataMarked1));
+    EXPECT_EQ(ZAddress::is_good(ZAddressMetadataRemapped | 0x8), (bit_mask == ZAddressMetadataRemapped));
+
+    // Test that null is not considered good.
+    EXPECT_FALSE(ZAddress::is_good(0));
+
+    // Teardown
+    ZAddressMasks::set_good_mask(mask_before);
+  }
+
+  static void is_good_or_null_bit(uintptr_t bit_mask) {
+    // Setup
+    uintptr_t mask_before = ZAddressGoodMask;
+
+    ZAddressMasks::set_good_mask(bit_mask);
+
+    // Test that a pointer with only the given bit is considered good.
+    EXPECT_EQ(ZAddress::is_good_or_null(ZAddressMetadataMarked0),  (bit_mask == ZAddressMetadataMarked0));
+    EXPECT_EQ(ZAddress::is_good_or_null(ZAddressMetadataMarked1),  (bit_mask == ZAddressMetadataMarked1));
+    EXPECT_EQ(ZAddress::is_good_or_null(ZAddressMetadataRemapped), (bit_mask == ZAddressMetadataRemapped));
+
+    // Test that a pointer with the given bit and some extra bits is considered good.
+    EXPECT_EQ(ZAddress::is_good_or_null(ZAddressMetadataMarked0  | 0x8), (bit_mask == ZAddressMetadataMarked0));
+    EXPECT_EQ(ZAddress::is_good_or_null(ZAddressMetadataMarked1  | 0x8), (bit_mask == ZAddressMetadataMarked1));
+    EXPECT_EQ(ZAddress::is_good_or_null(ZAddressMetadataRemapped | 0x8), (bit_mask == ZAddressMetadataRemapped));
+
+    // Test that null is considered good_or_null.
+    EXPECT_TRUE(ZAddress::is_good_or_null(0));
+
+    // Teardown
+    ZAddressMasks::set_good_mask(mask_before);
+  }
+
+  static void finalizable() {
+    // Setup
+    ZAddressMasks::initialize();
+    ZAddressMasks::flip_to_marked();
+
+    // Test that a normal good pointer is good and weak good, but not finalizable
+    const uintptr_t addr1 = ZAddress::good(1);
+    EXPECT_FALSE(ZAddress::is_finalizable(addr1));
+    EXPECT_TRUE(ZAddress::is_marked(addr1));
+    EXPECT_FALSE(ZAddress::is_remapped(addr1));
+    EXPECT_TRUE(ZAddress::is_weak_good(addr1));
+    EXPECT_TRUE(ZAddress::is_weak_good_or_null(addr1));
+    EXPECT_TRUE(ZAddress::is_good(addr1));
+    EXPECT_TRUE(ZAddress::is_good_or_null(addr1));
+
+    // Test that a finalizable good pointer is finalizable and weak good, but not good
+    const uintptr_t addr2 = ZAddress::finalizable_good(1);
+    EXPECT_TRUE(ZAddress::is_finalizable(addr2));
+    EXPECT_TRUE(ZAddress::is_marked(addr2));
+    EXPECT_FALSE(ZAddress::is_remapped(addr2));
+    EXPECT_TRUE(ZAddress::is_weak_good(addr2));
+    EXPECT_TRUE(ZAddress::is_weak_good_or_null(addr2));
+    EXPECT_FALSE(ZAddress::is_good(addr2));
+    EXPECT_FALSE(ZAddress::is_good_or_null(addr2));
+
+    // Flip to remapped and test that it's no longer weak good
+    ZAddressMasks::flip_to_remapped();
+    EXPECT_TRUE(ZAddress::is_finalizable(addr2));
+    EXPECT_TRUE(ZAddress::is_marked(addr2));
+    EXPECT_FALSE(ZAddress::is_remapped(addr2));
+    EXPECT_FALSE(ZAddress::is_weak_good(addr2));
+    EXPECT_FALSE(ZAddress::is_weak_good_or_null(addr2));
+    EXPECT_FALSE(ZAddress::is_good(addr2));
+    EXPECT_FALSE(ZAddress::is_good_or_null(addr2));
+  }
+};
+
+TEST_F(ZAddressTest, is_good) {
+  is_good_bit(ZAddressMetadataMarked0);
+  is_good_bit(ZAddressMetadataMarked1);
+  is_good_bit(ZAddressMetadataRemapped);
+}
+
+TEST_F(ZAddressTest, is_good_or_null) {
+  is_good_or_null_bit(ZAddressMetadataMarked0);
+  is_good_or_null_bit(ZAddressMetadataMarked1);
+  is_good_or_null_bit(ZAddressMetadataRemapped);
+}
+
+TEST_F(ZAddressTest, is_weak_good_or_null) {
+#define check_is_weak_good_or_null(value)                                        \
+  EXPECT_EQ(ZAddress::is_weak_good_or_null(value),                               \
+            (ZAddress::is_good_or_null(value) || ZAddress::is_remapped(value)))  \
+    << "is_good_or_null: " << ZAddress::is_good_or_null(value)                   \
+    << " is_remapped: " << ZAddress::is_remapped(value)                          \
+    << " is_good_or_null_or_remapped: " << ZAddress::is_weak_good_or_null(value)
+
+  check_is_weak_good_or_null((uintptr_t)NULL);
+  check_is_weak_good_or_null(ZAddressMetadataMarked0);
+  check_is_weak_good_or_null(ZAddressMetadataMarked1);
+  check_is_weak_good_or_null(ZAddressMetadataRemapped);
+  check_is_weak_good_or_null((uintptr_t)0x123);
+}
+
+TEST_F(ZAddressTest, finalizable) {
+  finalizable();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/z/test_zArray.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "unittest.hpp"
+
+TEST(ZArrayTest, test_add) {
+  ZArray<int> a;
+
+  // Add elements
+  for (int i = 0; i < 10; i++) {
+    a.add(i);
+  }
+
+  // Check size
+  ASSERT_EQ(a.size(), 10u);
+
+  // Check elements
+  for (int i = 0; i < 10; i++) {
+    EXPECT_EQ(a.at(i), i);
+  }
+}
+
+TEST(ZArrayTest, test_clear) {
+  ZArray<int> a;
+
+  // Add elements
+  for (int i = 0; i < 10; i++) {
+    a.add(i);
+  }
+
+  // Check size
+  ASSERT_EQ(a.size(), 10u);
+  ASSERT_EQ(a.is_empty(), false);
+
+  // Clear elements
+  a.clear();
+
+  // Check size
+  ASSERT_EQ(a.size(), 0u);
+  ASSERT_EQ(a.is_empty(), true);
+
+  // Add element
+  a.add(11);
+
+  // Check size
+  ASSERT_EQ(a.size(), 1u);
+  ASSERT_EQ(a.is_empty(), false);
+
+  // Clear elements
+  a.clear();
+
+  // Check size
+  ASSERT_EQ(a.size(), 0u);
+  ASSERT_EQ(a.is_empty(), true);
+}
+
+TEST(ZArrayTest, test_iterator) {
+  ZArray<int> a;
+
+  // Add elements
+  for (int i = 0; i < 10; i++) {
+    a.add(i);
+  }
+
+  // Iterate
+  int count = 0;
+  ZArrayIterator<int> iter(&a);
+  for (int value; iter.next(&value);) {
+    ASSERT_EQ(a.at(count), count);
+    count++;
+  }
+
+  // Check count
+  ASSERT_EQ(count, 10);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/z/test_zBitField.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBitField.hpp"
+#include "unittest.hpp"
+
+TEST(ZBitFieldTest, test) {
+  typedef ZBitField<uint64_t, bool,      0,  1>    field_bool;
+  typedef ZBitField<uint64_t, uint8_t,   1,  8>    field_uint8;
+  typedef ZBitField<uint64_t, uint16_t,  2, 16>    field_uint16;
+  typedef ZBitField<uint64_t, uint32_t, 32, 32>    field_uint32;
+  typedef ZBitField<uint64_t, uint64_t,  0, 63>    field_uint64;
+  typedef ZBitField<uint64_t, void*,     1, 61, 3> field_pointer;
+
+  uint64_t entry;
+
+  {
+    const bool value = false;
+    entry = field_bool::encode(value);
+    EXPECT_EQ(field_bool::decode(entry), value) << "Should be equal";
+  }
+
+  {
+    const bool value = true;
+    entry = field_bool::encode(value);
+    EXPECT_EQ(field_bool::decode(entry), value) << "Should be equal";
+  }
+
+  {
+    const uint8_t value = ~(uint8_t)0;
+    entry = field_uint8::encode(value);
+    EXPECT_EQ(field_uint8::decode(entry), value) << "Should be equal";
+  }
+
+  {
+    const uint16_t value = ~(uint16_t)0;
+    entry = field_uint16::encode(value);
+    EXPECT_EQ(field_uint16::decode(entry), value) << "Should be equal";
+  }
+
+  {
+    const uint32_t value = ~(uint32_t)0;
+    entry = field_uint32::encode(value);
+    EXPECT_EQ(field_uint32::decode(entry), value) << "Should be equal";
+  }
+
+  {
+    const uint64_t value = ~(uint64_t)0 >> 1;
+    entry = field_uint64::encode(value);
+    EXPECT_EQ(field_uint64::decode(entry), value) << "Should be equal";
+  }
+
+  {
+    void* const value = (void*)(~(uintptr_t)0 << 3);
+    entry = field_pointer::encode(value);
+    EXPECT_EQ(field_pointer::decode(entry), value) << "Should be equal";
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/z/test_zBitMap.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * ORACLE PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBitMap.inline.hpp"
+#include "unittest.hpp"
+
+class ZBitMapTest : public ::testing::Test {
+protected:
+  static void test_set_pair_unset(size_t size, bool finalizable) {
+    ZBitMap bitmap(size);
+
+    for (BitMap::idx_t i = 0; i < size - 1; i++) {
+      if ((i + 1) % BitsPerWord == 0) {
+        // Can't set pairs of bits in different words.
+        continue;
+      }
+
+      // ZBitMaps are not cleared when constructed.
+      bitmap.clear();
+
+      bool inc_live = false;
+
+      bool ret = bitmap.par_set_bit_pair(i, finalizable, inc_live);
+      EXPECT_TRUE(ret) << "Failed to set bit";
+      EXPECT_TRUE(inc_live) << "Should have set inc_live";
+
+      // First bit should always be set
+      EXPECT_TRUE(bitmap.at(i)) << "Should be set";
+
+      // Second bit should only be set when marking strong
+      EXPECT_NE(bitmap.at(i + 1), finalizable);
+    }
+  }
+
+  static void test_set_pair_set(size_t size, bool finalizable) {
+    ZBitMap bitmap(size);
+
+    for (BitMap::idx_t i = 0; i < size - 1; i++) {
+      if ((i + 1) % BitsPerWord == 0) {
+        // Can't set pairs of bits in different words.
+        continue;
+      }
+
+      // Fill the bitmap with ones.
+      bitmap.set_range(0, size);
+
+      bool inc_live = false;
+
+      bool ret = bitmap.par_set_bit_pair(i, finalizable, inc_live);
+      EXPECT_FALSE(ret) << "Should not succeed setting bit";
+      EXPECT_FALSE(inc_live) << "Should not have set inc_live";
+
+      // Both bits were pre-set.
+      EXPECT_TRUE(bitmap.at(i)) << "Should be set";
+      EXPECT_TRUE(bitmap.at(i + 1)) << "Should be set";
+    }
+  }
+
+  static void test_set_pair_set(bool finalizable) {
+    test_set_pair_set(2,   finalizable);
+    test_set_pair_set(62,  finalizable);
+    test_set_pair_set(64,  finalizable);
+    test_set_pair_set(66,  finalizable);
+    test_set_pair_set(126, finalizable);
+    test_set_pair_set(128, finalizable);
+  }
+
+  static void test_set_pair_unset(bool finalizable) {
+    test_set_pair_unset(2,   finalizable);
+    test_set_pair_unset(62,  finalizable);
+    test_set_pair_unset(64,  finalizable);
+    test_set_pair_unset(66,  finalizable);
+    test_set_pair_unset(126, finalizable);
+    test_set_pair_unset(128, finalizable);
+  }
+
+};
+
+TEST_F(ZBitMapTest, test_set_pair_set) {
+  test_set_pair_set(false);
+  test_set_pair_set(true);
+}
+
+TEST_F(ZBitMapTest, test_set_pair_unset) {
+  test_set_pair_unset(false);
+  test_set_pair_unset(true);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/z/test_zForwardingTable.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zForwardingTable.inline.hpp"
+#include "unittest.hpp"
+
+using namespace testing;
+
+#define CAPTURE_DELIM "\n"
+#define CAPTURE1(expression) #expression << " evaluates to " << expression
+#define CAPTURE2(e0, e1)                 CAPTURE1(e0) << CAPTURE_DELIM << CAPTURE1(e1)
+
+#define CAPTURE(expression) CAPTURE1(expression)
+
+class ZForwardingTableTest : public Test {
+public:
+  // Helper functions
+
+  static bool is_power_of_2(size_t value) {
+    return ::is_power_of_2((intptr_t)value);
+  }
+
+  class SequenceToFromIndex : AllStatic {
+  public:
+    static uintptr_t even(uint32_t sequence_number) {
+      return sequence_number * 2;
+    }
+    static uintptr_t odd(uint32_t sequence_number) {
+      return even(sequence_number) + 1;
+    }
+    static uintptr_t one_to_one(uint32_t sequence_number) {
+      return sequence_number;
+    }
+  };
+
+  // Test functions
+
+  static void setup(ZForwardingTable& table) {
+    EXPECT_PRED1(is_power_of_2, table._size) << CAPTURE(table._size);
+  }
+
+  static void find_empty(ZForwardingTable& table) {
+    size_t size = table._size;
+    size_t entries_to_check = size * 2;
+
+    for (uint32_t i = 0; i < entries_to_check; i++) {
+      uintptr_t from_index = SequenceToFromIndex::one_to_one(i);
+
+      EXPECT_TRUE(table.find(from_index).is_empty()) << CAPTURE2(from_index, size);
+    }
+
+    EXPECT_TRUE(table.find(uintptr_t(-1)).is_empty()) << CAPTURE(size);
+  }
+
+  static void find_full(ZForwardingTable& table) {
+    size_t size = table._size;
+    size_t entries_to_populate = size;
+
+    // Populate
+    for (uint32_t i = 0; i < entries_to_populate; i++) {
+      uintptr_t from_index = SequenceToFromIndex::one_to_one(i);
+
+      ZForwardingTableCursor cursor;
+      ZForwardingTableEntry entry = table.find(from_index, &cursor);
+      ASSERT_TRUE(entry.is_empty()) << CAPTURE2(from_index, size);
+
+      table.insert(from_index, from_index, &cursor);
+    }
+
+    // Verify
+    for (uint32_t i = 0; i < entries_to_populate; i++) {
+      uintptr_t from_index = SequenceToFromIndex::one_to_one(i);
+
+      ZForwardingTableEntry entry = table.find(from_index);
+      ASSERT_FALSE(entry.is_empty()) << CAPTURE2(from_index, size);
+
+      ASSERT_EQ(entry.from_index(), from_index) << CAPTURE(size);
+      ASSERT_EQ(entry.to_offset(), from_index) << CAPTURE(size);
+    }
+  }
+
+  static void find_every_other(ZForwardingTable& table) {
+    size_t size = table._size;
+    size_t entries_to_populate = size / 2;
+
+    // Populate even from indices
+    for (uint32_t i = 0; i < entries_to_populate; i++) {
+      uintptr_t from_index = SequenceToFromIndex::even(i);
+
+      ZForwardingTableCursor cursor;
+      ZForwardingTableEntry entry = table.find(from_index, &cursor);
+      ASSERT_TRUE(entry.is_empty()) << CAPTURE2(from_index, size);
+
+      table.insert(from_index, from_index, &cursor);
+    }
+
+    // Verify populated even indices
+    for (uint32_t i = 0; i < entries_to_populate; i++) {
+      uintptr_t from_index = SequenceToFromIndex::even(i);
+
+      ZForwardingTableCursor cursor;
+      ZForwardingTableEntry entry = table.find(from_index, &cursor);
+      ASSERT_FALSE(entry.is_empty()) << CAPTURE2(from_index, size);
+
+      ASSERT_EQ(entry.from_index(), from_index) << CAPTURE(size);
+      ASSERT_EQ(entry.to_offset(), from_index) << CAPTURE(size);
+    }
+
+    // Verify empty odd indices
+    //
+    // This check could be done on a larger range of sequence numbers,
+    // but currently entries_to_populate is used.
+    for (uint32_t i = 0; i < entries_to_populate; i++) {
+      uintptr_t from_index = SequenceToFromIndex::odd(i);
+
+      ZForwardingTableEntry entry = table.find(from_index);
+
+      ASSERT_TRUE(entry.is_empty()) << CAPTURE2(from_index, size);
+    }
+  }
+
+  static void test(void (*function)(ZForwardingTable&), uint32_t size) {
+    // Setup
+    ZForwardingTable table;
+    table.setup(size);
+    ASSERT_FALSE(table.is_null());
+
+    // Actual test function
+    (*function)(table);
+
+    // Teardown
+    table.reset();
+    ASSERT_TRUE(table.is_null());
+  }
+
+  // Run the given function with a few different input values.
+  static void test(void (*function)(ZForwardingTable&)) {
+    test(function, 1);
+    test(function, 2);
+    test(function, 3);
+    test(function, 4);
+    test(function, 7);
+    test(function, 8);
+    test(function, 1023);
+    test(function, 1024);
+    test(function, 1025);
+  }
+};
+
+TEST_F(ZForwardingTableTest, setup) {
+  test(&ZForwardingTableTest::setup);
+}
+
+TEST_F(ZForwardingTableTest, find_empty) {
+  test(&ZForwardingTableTest::find_empty);
+}
+
+TEST_F(ZForwardingTableTest, find_full) {
+  test(&ZForwardingTableTest::find_full);
+}
+
+TEST_F(ZForwardingTableTest, find_every_other) {
+  test(&ZForwardingTableTest::find_every_other);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/z/test_zList.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zList.inline.hpp"
+#include "unittest.hpp"
+
+#ifndef PRODUCT
+
+class ZTestEntry {
+  friend class ZList<ZTestEntry>;
+
+private:
+  const int             _id;
+  ZListNode<ZTestEntry> _node;
+
+public:
+  ZTestEntry(int id) :
+      _id(id),
+      _node() {}
+
+  int id() const {
+    return _id;
+  }
+};
+
+class ZListTest : public ::testing::Test {
+protected:
+  static void assert_sorted(ZList<ZTestEntry>* list) {
+    // Iterate forward
+    {
+      int count = list->first()->id();
+      ZListIterator<ZTestEntry> iter(list);
+      for (ZTestEntry* entry; iter.next(&entry);) {
+        ASSERT_EQ(entry->id(), count);
+        count++;
+      }
+    }
+
+    // Iterate backward
+    {
+      int count = list->last()->id();
+      ZListReverseIterator<ZTestEntry> iter(list);
+      for (ZTestEntry* entry; iter.next(&entry);) {
+        EXPECT_EQ(entry->id(), count);
+        count--;
+      }
+    }
+  }
+};
+
+TEST_F(ZListTest, test_insert) {
+  ZList<ZTestEntry> list;
+  ZTestEntry e0(0);
+  ZTestEntry e1(1);
+  ZTestEntry e2(2);
+  ZTestEntry e3(3);
+  ZTestEntry e4(4);
+  ZTestEntry e5(5);
+
+  list.insert_first(&e2);
+  list.insert_before(&e2, &e1);
+  list.insert_after(&e2, &e3);
+  list.insert_last(&e4);
+  list.insert_first(&e0);
+  list.insert_last(&e5);
+
+  EXPECT_EQ(list.size(), 6u);
+  assert_sorted(&list);
+}
+
+TEST_F(ZListTest, test_remove) {
+  // Remove first
+  {
+    ZList<ZTestEntry> list;
+    ZTestEntry e0(0);
+    ZTestEntry e1(1);
+    ZTestEntry e2(2);
+    ZTestEntry e3(3);
+    ZTestEntry e4(4);
+    ZTestEntry e5(5);
+
+    list.insert_last(&e0);
+    list.insert_last(&e1);
+    list.insert_last(&e2);
+    list.insert_last(&e3);
+    list.insert_last(&e4);
+    list.insert_last(&e5);
+
+    EXPECT_EQ(list.size(), 6u);
+
+    for (int i = 0; i < 6; i++) {
+      ZTestEntry* e = list.remove_first();
+      EXPECT_EQ(e->id(), i);
+    }
+
+    EXPECT_EQ(list.size(), 0u);
+  }
+
+  // Remove last
+  {
+    ZList<ZTestEntry> list;
+    ZTestEntry e0(0);
+    ZTestEntry e1(1);
+    ZTestEntry e2(2);
+    ZTestEntry e3(3);
+    ZTestEntry e4(4);
+    ZTestEntry e5(5);
+
+    list.insert_last(&e0);
+    list.insert_last(&e1);
+    list.insert_last(&e2);
+    list.insert_last(&e3);
+    list.insert_last(&e4);
+    list.insert_last(&e5);
+
+    EXPECT_EQ(list.size(), 6u);
+
+    for (int i = 5; i >= 0; i--) {
+      ZTestEntry* e = list.remove_last();
+      EXPECT_EQ(e->id(), i);
+    }
+
+    EXPECT_EQ(list.size(), 0u);
+  }
+}
+
+TEST_F(ZListTest, test_transfer) {
+  // Transfer empty to empty
+  {
+    ZList<ZTestEntry> list0;
+    ZList<ZTestEntry> list1;
+
+    EXPECT_TRUE(list0.is_empty());
+    EXPECT_TRUE(list1.is_empty());
+
+    list0.transfer(&list1);
+
+    EXPECT_TRUE(list0.is_empty());
+    EXPECT_TRUE(list1.is_empty());
+  }
+
+  // Transfer non-empty to empty
+  {
+    ZList<ZTestEntry> list0;
+    ZList<ZTestEntry> list1;
+    ZTestEntry e0(0);
+    ZTestEntry e1(1);
+    ZTestEntry e2(2);
+    ZTestEntry e3(3);
+    ZTestEntry e4(4);
+    ZTestEntry e5(5);
+
+    list1.insert_last(&e0);
+    list1.insert_last(&e1);
+    list1.insert_last(&e2);
+    list1.insert_last(&e3);
+    list1.insert_last(&e4);
+    list1.insert_last(&e5);
+
+    EXPECT_EQ(list0.size(), 0u);
+    EXPECT_EQ(list1.size(), 6u);
+
+    list0.transfer(&list1);
+
+    EXPECT_EQ(list0.size(), 6u);
+    EXPECT_EQ(list1.size(), 0u);
+
+    assert_sorted(&list0);
+  }
+
+  // Transfer non-empty to non-empty
+  {
+    ZList<ZTestEntry> list0;
+    ZList<ZTestEntry> list1;
+    ZTestEntry e0(0);
+    ZTestEntry e1(1);
+    ZTestEntry e2(2);
+    ZTestEntry e3(3);
+    ZTestEntry e4(4);
+    ZTestEntry e5(5);
+
+    list0.insert_last(&e0);
+    list0.insert_last(&e1);
+    list0.insert_last(&e2);
+
+    list1.insert_last(&e3);
+    list1.insert_last(&e4);
+    list1.insert_last(&e5);
+
+    EXPECT_EQ(list0.size(), 3u);
+    EXPECT_EQ(list1.size(), 3u);
+
+    list0.transfer(&list1);
+
+    EXPECT_EQ(list0.size(), 6u);
+    EXPECT_EQ(list1.size(), 0u);
+
+    assert_sorted(&list0);
+  }
+}
+
+#endif // PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/z/test_zLiveMap.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLiveMap.inline.hpp"
+#include "unittest.hpp"
+
+class ZLiveMapTest : public ::testing::Test {
+protected:
+  static void strongly_live_for_large_zpage() {
+    // Large ZPages only have room for one object.
+    ZLiveMap livemap(1);
+
+    bool inc_live;
+    uintptr_t object = 0u;
+
+    // Mark the object strong.
+    livemap.set_atomic(object, false /* finalizable */, inc_live);
+
+    // Check that both bits are in the same segment.
+    ASSERT_EQ(livemap.index_to_segment(0), livemap.index_to_segment(1));
+
+    // Check that the object was marked.
+    ASSERT_TRUE(livemap.get(0));
+
+    // Check that the object was strongly marked.
+    ASSERT_TRUE(livemap.get(1));
+
+    ASSERT_TRUE(inc_live);
+  }
+};
+
+TEST_F(ZLiveMapTest, strongly_live_for_large_zpage) {
+  strongly_live_for_large_zpage();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "utilities/debug.hpp"
+#include "unittest.hpp"
+
+#if defined(AMD64)
+
+TEST(ZPhysicalMemorySegmentTest, split) {
+  const size_t SegmentSize = 2 * M;
+
+  ZPhysicalMemorySegment seg(0, 10 * SegmentSize);
+
+  ZPhysicalMemorySegment seg_split0 = seg.split(0 * SegmentSize);
+  EXPECT_EQ(seg_split0.size(),  0 * SegmentSize);
+  EXPECT_EQ(       seg.size(), 10 * SegmentSize);
+
+  ZPhysicalMemorySegment seg_split1 = seg.split(5 * SegmentSize);
+  EXPECT_EQ(seg_split1.size(),  5 * SegmentSize);
+  EXPECT_EQ(       seg.size(),  5 * SegmentSize);
+
+  ZPhysicalMemorySegment seg_split2 = seg.split(5 * SegmentSize);
+  EXPECT_EQ(seg_split2.size(),  5 * SegmentSize);
+  EXPECT_EQ(       seg.size(),  0 * SegmentSize);
+
+  ZPhysicalMemorySegment seg_split3 = seg.split(0 * SegmentSize);
+  EXPECT_EQ(seg_split3.size(),  0 * SegmentSize);
+  EXPECT_EQ(       seg.size(),  0 * SegmentSize);
+}
+
+TEST(ZPhysicalMemoryTest, split) {
+  const size_t SegmentSize = 2 * M;
+
+  ZPhysicalMemoryManager pmem_manager(10 * SegmentSize, SegmentSize);
+
+  ZPhysicalMemory pmem = pmem_manager.alloc(8 * SegmentSize);
+  EXPECT_EQ(pmem.nsegments(), 1u) << "wrong number of segments";
+
+  ZPhysicalMemory split0_pmem = pmem.split(SegmentSize);
+  EXPECT_EQ(split0_pmem.nsegments(), 1u);
+  EXPECT_EQ(       pmem.nsegments(), 1u);
+  EXPECT_EQ(split0_pmem.size(), 1 * SegmentSize);
+  EXPECT_EQ(       pmem.size(), 7 * SegmentSize);
+
+  ZPhysicalMemory split1_pmem = pmem.split(2 * SegmentSize);
+  EXPECT_EQ(split1_pmem.nsegments(), 1u);
+  EXPECT_EQ(       pmem.nsegments(), 1u);
+  EXPECT_EQ(split1_pmem.size(), 2 * SegmentSize);
+  EXPECT_EQ(       pmem.size(), 5 * SegmentSize);
+
+  ZPhysicalMemory split2_pmem = pmem.split(5 * SegmentSize);
+  EXPECT_EQ(split2_pmem.nsegments(), 1u);
+  EXPECT_EQ(       pmem.nsegments(), 1u);
+  EXPECT_EQ(split2_pmem.size(), 5 * SegmentSize);
+  EXPECT_EQ(       pmem.size(), 0 * SegmentSize);
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/z/test_zUtils.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "unittest.hpp"
+
+#include <limits>
+
+template <typename T>
+static T max_alignment() {
+  T max = std::numeric_limits<T>::max();
+  return max ^ (max >> 1);
+}
+
+TEST(ZUtilsTest, round_up_power_of_2) {
+  EXPECT_EQ(ZUtils::round_up_power_of_2(1u), 1u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(2u), 2u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(3u), 4u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(4u), 4u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(5u), 8u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(6u), 8u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(7u), 8u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(8u), 8u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(9u), 16u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(10u), 16u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(1023u), 1024u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(1024u), 1024u);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(1025u), 2048u);
+
+  const size_t max = max_alignment<size_t>();
+  EXPECT_EQ(ZUtils::round_up_power_of_2(max - 1), max);
+  EXPECT_EQ(ZUtils::round_up_power_of_2(max), max);
+}
+
+TEST(ZUtilsTest, round_down_power_of_2) {
+  EXPECT_EQ(ZUtils::round_down_power_of_2(1u), 1u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(2u), 2u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(3u), 2u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(4u), 4u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(5u), 4u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(6u), 4u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(7u), 4u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(8u), 8u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(9u), 8u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(10u), 8u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(1023u), 512u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(1024u), 1024u);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(1025u), 1024u);
+
+  const size_t max = max_alignment<size_t>();
+  EXPECT_EQ(ZUtils::round_down_power_of_2(max), max);
+  EXPECT_EQ(ZUtils::round_down_power_of_2(max - 1), max / 2);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/z/test_zVirtualMemory.cpp	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zVirtualMemory.inline.hpp"
+#include "utilities/debug.hpp"
+#include "unittest.hpp"
+
+TEST(ZVirtualMemory, split) {
+  const size_t PageSize = 2 * M;
+
+  ZVirtualMemory mem(0, 10 * PageSize);
+
+  ZVirtualMemory mem_split0 = mem.split(0 * PageSize);
+  EXPECT_EQ(mem_split0.size(),  0 * PageSize);
+  EXPECT_EQ(       mem.size(), 10 * PageSize);
+
+  ZVirtualMemory mem_split1 = mem.split(5u * PageSize);
+  EXPECT_EQ(mem_split1.size(),  5 * PageSize);
+  EXPECT_EQ(       mem.size(),  5 * PageSize);
+
+  ZVirtualMemory mem_split2 = mem.split(5u * PageSize);
+  EXPECT_EQ(mem_split2.size(),  5 * PageSize);
+  EXPECT_EQ(       mem.size(),  0 * PageSize);
+
+  ZVirtualMemory mem_split3 = mem.split(0 * PageSize);
+  EXPECT_EQ(mem_split3.size(),  0 * PageSize);
+}
--- a/test/hotspot/jtreg/TEST.ROOT	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/TEST.ROOT	Tue Jun 12 17:40:28 2018 +0200
@@ -46,6 +46,7 @@
     vm.gc.Parallel \
     vm.gc.ConcMarkSweep \
     vm.gc.Epsilon \
+    vm.gc.Z \
     vm.jvmci \
     vm.emulatedClient \
     vm.cpu.features \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/gcbarriers/UnsafeIntrinsicsTest.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8059022
+ * @modules java.base/jdk.internal.misc:+open
+ * @summary Validate barriers after Unsafe getObject, CAS and swap (GetAndSet)
+ * @requires vm.gc.Z & !vm.graal.enabled
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:+UnlockDiagnosticVMOptions -XX:+ZUnmapBadViews -XX:ZCollectionInterval=1 -XX:-CreateCoredumpOnCrash -XX:CompileCommand=dontinline,*::mergeImpl* compiler.gcbarriers.UnsafeIntrinsicsTest
+ */
+
+package compiler.gcbarriers;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Random;
+import sun.misc.Unsafe;
+
+public class UnsafeIntrinsicsTest {
+
+    /*
+     * This test triggers the loadbarriers by allocating a lot, keeping the objects alive and then
+     * letting them die in a way that maximizes fragmentation.
+     *
+     * All subtests (OperationType's) could run in parallel.
+     */
+
+    static int node_count = 133700;
+    static int thread_count = 4;
+    static int time = Integer.getInteger("time", 4); // seconds per subtest
+
+    static Runner r = new Runner(null, 1, 1, Runner.OperationType.CAS);
+
+    static Node first_node;
+    int epoch = 0;
+
+    public static void main(String[] args) {
+        UnsafeIntrinsicsTest t = new UnsafeIntrinsicsTest();
+
+        t.testWithLocalData(Runner.OperationType.CAS);
+        t.testWithLocalData(Runner.OperationType.Weak_CAS);
+        t.testWithLocalData(Runner.OperationType.CMPX);
+
+        t.testWithSharedData(Runner.OperationType.Swap);
+        t.testWithSharedData(Runner.OperationType.Load);
+    }
+
+    public UnsafeIntrinsicsTest() {
+
+    }
+
+    public void testWithLocalData(Runner.OperationType optype) {
+        System.out.println("Testing " + optype.name() + " with " + thread_count + " threads and " + node_count + " nodes");
+
+        // start mutator threads
+        ArrayList<Thread> thread_list = new ArrayList<Thread>();
+        Random r = new Random(System.nanoTime());
+        for (int i = 0; i < thread_count; i++) {
+
+            setup(); // each thread has its own circle of nodes
+            Thread t = new Thread(new Runner(first_node, time, r.nextLong(), optype));
+            t.start();
+            thread_list.add(t);
+        }
+
+        waitForCompletion(thread_list);
+        countNodes();
+    }
+
+    public void testWithSharedData(Runner.OperationType optype) {
+        System.out.println("Testing " + optype.name() + " with " + thread_count + " threads and " + node_count + " nodes");
+
+        setup(); // All nodes are shared between threads
+        ArrayList<Thread> thread_list = new ArrayList<Thread>();
+        Random r = new Random(System.nanoTime());
+        for (int i = 0; i < thread_count; i++) {
+            Thread t = new Thread(new Runner(first_node, time, r.nextLong(), optype));
+            t.start();
+            thread_list.add(t);
+        }
+
+        waitForCompletion(thread_list);
+        countNodes();
+    }
+
+    public void waitForCompletion(ArrayList<Thread> thread_list) {
+        // do some waiting
+        try {
+            Thread.sleep(time*1000);
+        } catch (InterruptedException e) {
+            e.printStackTrace();
+        }
+
+        // wait for all thread to terminate
+        for (int i = 0; i < thread_count; i++) {
+            try {
+                thread_list.get(i).join();
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+    void countNodes() {
+        epoch++;
+        int count = 0;
+        Node node = first_node;
+        while (node.number() < epoch) {
+            node.setNumber(epoch);
+            count++;
+            node = node.next();
+        }
+        System.out.println("Program end, found " + count + " nodes");
+    }
+
+    // Create a circular linked list
+    public void setup() {
+        first_node = new Node();
+        Node last_node = first_node;
+        for (int i = 0; i < node_count; i++) {
+            last_node = new Node(last_node);
+        }
+        first_node.setNext(last_node);
+    }
+}
+
+class Runner implements Runnable {
+
+    OperationType type;
+    Node current;
+    Random r;
+    long time;
+    long seed;
+
+    long milage = 0;
+    long created = 0;
+    long skipped = 0;
+    int iterations = 0;
+
+    static final jdk.internal.misc.Unsafe UNSAFE;
+    static final long offset;
+
+    public enum OperationType {
+        Load("Load"),
+        Swap("Swap"),
+        CAS("CAS"),
+        Weak_CAS("Weak-CAS"),
+        CMPX("CMPX");
+
+        private String name;
+        private OperationType(String name) { this.name = name; }
+    }
+
+    static {
+        try {
+            Field f = jdk.internal.misc.Unsafe.class.getDeclaredField("theUnsafe");
+            f.setAccessible(true);
+            UNSAFE = (jdk.internal.misc.Unsafe) f.get(null);
+            offset = UNSAFE.objectFieldOffset(Node.class.getDeclaredField("next"));
+        } catch (Exception e) {
+            throw new RuntimeException("Unable to get Unsafe instance.", e);
+        }
+    }
+
+    public Runner(Node start, int testtime, long seed, OperationType type) {
+        current = start;
+        time = testtime*1000000000L;
+        r = new Random(seed);
+        this.type = type;
+    }
+
+    @Override
+    public void run() {
+        long starttime = System.nanoTime();
+        while((System.nanoTime() - starttime) < time) {
+            iterations++;
+            // Run a bit
+            int run_length = r.nextInt() & 0xfff;
+            for (int i = 0; i < run_length; i++) {
+                current = current.next();
+                milage++;
+            }
+            // find a start node
+            Node startNode = current;
+            Node expectedNext = startNode.next;
+
+            // Run a bit more
+            int skip_length = (r.nextInt() & 0xff) + 1;
+            for (int i = 0; i < skip_length; i++) {
+                current = current.next();
+                skipped++;
+            }
+
+            // create a branch
+            int branch_length = (r.nextInt() & 0xff) + 1;
+            created += branch_length;
+            Node head = makeBranch(current, branch_length);
+
+            // complete circle, but continue to run on old path
+            boolean test_fail = ((iterations & 0x1) == 0);
+            Node current = merge(startNode, expectedNext, head, test_fail);
+        }
+        System.out.println("Milage: " + milage + " Skipped: " + skipped + " Created: " + created + " iterations: " + iterations);
+    }
+
+    /*
+     *  The reason for the duplicated code that is wrapping the unsafe operations is that we want
+     *  to test the operations individually. They must not interfere with each other - checking a field
+     *  will heal that reference and no operation after can trigger the barrier.
+     *
+     *  All mergeImpl*-method are prevented from being inlined.
+     */
+
+    private Node merge(Node startNode, Node expectedNext, Node head, boolean test_fail) {
+        switch (type) {
+            case Load:
+                return mergeImplLoad(startNode, expectedNext, head);
+            case Swap:
+                return mergeImplSwap(startNode, expectedNext, head);
+            case CAS:
+                if (test_fail) {
+                    return mergeImplCASFail(startNode, expectedNext, head);
+                } else {
+                    return mergeImplCAS(startNode, expectedNext, head);
+                }
+            case Weak_CAS:
+                if (test_fail) {
+                    return mergeImplWeakCASFail(startNode, expectedNext, head);
+                } else {
+                    return mergeImplWeakCAS(startNode, expectedNext, head);
+                }
+            case CMPX:
+                if (test_fail) {
+                    return mergeImplCMPXFail(startNode, expectedNext, head);
+                } else {
+                    return mergeImplCMPX(startNode, expectedNext, head);
+                }
+            default:
+                throw new Error("Unimplemented");
+        }
+    }
+
+    private Node mergeImplLoad(Node startNode, Node expectedNext, Node head) {
+        // Atomic load version
+        Node temp = (Node) UNSAFE.getObject(startNode, offset);
+        startNode.setNext(head);
+        return temp;
+    }
+
+    private Node mergeImplSwap(Node startNode, Node expectedNext, Node head) {
+        // Swap version
+        return (Node) UNSAFE.getAndSetObject(startNode, offset, head);
+    }
+
+    private Node mergeImplCAS(Node startNode, Node expectedNext, Node head) {
+        // CAS - should always be true within a single thread - no other thread can have overwritten
+        if (!UNSAFE.compareAndSetObject(startNode, offset, expectedNext, head)) {
+            throw new Error("CAS should always succeed on thread local objects, check your barrier implementation");
+        }
+        return expectedNext; // continue on old circle
+    }
+
+    private Node mergeImplCASFail(Node startNode, Node expectedNext, Node head) {
+        // Force a fail
+        if (UNSAFE.compareAndSetObject(startNode, offset, "fail", head)) {
+            throw new Error("This CAS should always fail, check your barrier implementation");
+        }
+        if (startNode.next() != expectedNext) {
+            throw new Error("Shouldn't have changed");
+        }
+        return current;
+    }
+
+    private Node mergeImplWeakCAS(Node startNode, Node expectedNext, Node head) {
+        // Weak CAS - should always be true within a single thread - no other thread can have overwritten
+        if (!UNSAFE.weakCompareAndSetObject(startNode, offset, expectedNext, head)) {
+            throw new Error("Weak CAS should always succeed on thread local objects, check your barrier implementation");
+        }
+        return expectedNext; // continue on old circle
+    }
+
+    private Node mergeImplWeakCASFail(Node startNode, Node expectedNext, Node head) {
+        // Force a fail
+        if (UNSAFE.weakCompareAndSetObject(startNode, offset, "fail", head)) {
+            throw new Error("This weak CAS should always fail, check your barrier implementation");
+        }
+        if (startNode.next() != expectedNext) {
+            throw new Error("Shouldn't have changed");
+        }
+        return current;
+    }
+
+    private Node mergeImplCMPX(Node startNode, Node expectedNext, Node head) {
+        // CmpX - should always be true within a single thread - no other thread can have overwritten
+        Object res = UNSAFE.compareAndExchangeObject(startNode, offset, expectedNext, head);
+        if (!res.equals(expectedNext)) {
+            throw new Error("Fail CmpX should always succeed on thread local objects, check your barrier implementation");
+        }
+        return expectedNext; // continue on old circle
+    }
+
+    private Node mergeImplCMPXFail(Node startNode, Node expectedNext, Node head) {
+        Object res = UNSAFE.compareAndExchangeObject(startNode, offset, head, head);
+        if (startNode.next() != expectedNext) {
+            throw new Error("Shouldn't have changed");
+        }
+        if (head == expectedNext) {
+            throw new Error("Test malfunction");
+        }
+        if (!res.equals(expectedNext)) {
+            throw new Error("This CmpX should have returned 'expectedNext' when it failed");
+        }
+        if (res.equals(head)) {
+            throw new Error("This CmpX shouldn't have returned head when it failed. count: "+ iterations);
+        }
+
+        return current;
+    }
+
+    // Create a new branch that will replace a part of the circle
+    public Node makeBranch(Node end_node, int count) {
+        Node head = end_node;
+        for (int i = 0; i < count; i++) {
+            head = new Node(head);
+        }
+        return head;
+    }
+}
+
+class Node {
+    Node next;
+    int number = 0;
+
+    public int number() {
+        return number;
+    }
+
+    public void setNumber(int v) {
+        number = v;
+    }
+
+    public Node() {
+    }
+
+    public Node(Node link) {
+        next = link;
+    }
+
+    public void setNext(Node next) {
+        this.next = next;
+    }
+    public Node next() {
+        return next;
+    }
+}
--- a/test/hotspot/jtreg/gc/TestAllocateHeapAt.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/TestAllocateHeapAt.java	Tue Jun 12 17:40:28 2018 +0200
@@ -24,6 +24,7 @@
 /* @test TestAllocateHeapAt.java
  * @key gc
  * @summary Test to check allocation of Java Heap with AllocateHeapAt option
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  */
--- a/test/hotspot/jtreg/gc/TestAllocateHeapAtError.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/TestAllocateHeapAtError.java	Tue Jun 12 17:40:28 2018 +0200
@@ -24,6 +24,7 @@
 /* @test TestAllocateHeapAtError.java
  * @key gc
  * @summary Test to check correct handling of non-existent directory passed to AllocateHeapAt option
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  */
@@ -75,4 +76,3 @@
     output.shouldNotHaveExitValue(0);
   }
 }
-
--- a/test/hotspot/jtreg/gc/TestAllocateHeapAtMultiple.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/TestAllocateHeapAtMultiple.java	Tue Jun 12 17:40:28 2018 +0200
@@ -26,7 +26,7 @@
  * @summary Test to check allocation of Java Heap with AllocateHeapAt option. Has multiple sub-tests to cover different code paths.
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
- * @requires vm.bits == "64"
+ * @requires vm.bits == "64" & vm.gc != "Z"
  */
 
 import jdk.test.lib.JDKToolFinder;
--- a/test/hotspot/jtreg/gc/TestSoftReferencesBehaviorOnOOME.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/TestSoftReferencesBehaviorOnOOME.java	Tue Jun 12 17:40:28 2018 +0200
@@ -25,6 +25,7 @@
  * @test TestSoftReferencesBehaviorOnOOME
  * @key gc
  * @summary Tests that all SoftReferences has been cleared at time of OOM.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  * @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 512 2k
--- a/test/hotspot/jtreg/gc/TestVerifyDuringStartup.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/TestVerifyDuringStartup.java	Tue Jun 12 17:40:28 2018 +0200
@@ -25,6 +25,7 @@
  * @key gc
  * @bug 8010463 8011343 8011898
  * @summary Simple test run with -XX:+VerifyDuringStartup -XX:-UseTLAB to verify 8010463
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  */
--- a/test/hotspot/jtreg/gc/TestVerifySilently.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/TestVerifySilently.java	Tue Jun 12 17:40:28 2018 +0200
@@ -25,6 +25,7 @@
  * @key gc
  * @bug 8032771
  * @summary Test silent verification.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  */
--- a/test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java	Tue Jun 12 17:40:28 2018 +0200
@@ -43,6 +43,8 @@
                                                                   "-XX:-UseParallelGC",
                                                                   "-XX:-UseG1GC",
                                                                   "-XX:-UseConcMarkSweepGC",
+                                                                  "-XX:+UnlockExperimentalVMOptions",
+                                                                  "-XX:-UseZGC",
                                                                   "-version");
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldMatch("Garbage collector not selected");
--- a/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java	Tue Jun 12 17:40:28 2018 +0200
@@ -25,6 +25,7 @@
  * @test TestMaxMinHeapFreeRatioFlags
  * @key gc
  * @summary Verify that heap size changes according to max and min heap free ratios.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java	Tue Jun 12 17:40:28 2018 +0200
@@ -26,6 +26,7 @@
  * @key gc
  * @bug 8025166
  * @summary Verify that heap devided among generations according to NewRatio
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java	Tue Jun 12 17:40:28 2018 +0200
@@ -26,6 +26,7 @@
  * @key gc
  * @bug 8025166
  * @summary Verify that young gen size conforms values specified by NewSize, MaxNewSize and Xmn options
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestShrinkHeapInSteps.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/arguments/TestShrinkHeapInSteps.java	Tue Jun 12 17:40:28 2018 +0200
@@ -25,6 +25,7 @@
  * @test TestShrinkHeapInSteps
  * @key gc
  * @summary Verify that -XX:-ShrinkHeapInSteps works properly.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java	Tue Jun 12 17:40:28 2018 +0200
@@ -25,6 +25,7 @@
  * @test TestSurvivorRatioFlag
  * @key gc
  * @summary Verify that actual survivor ratio is equal to specified SurvivorRatio value
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java	Tue Jun 12 17:40:28 2018 +0200
@@ -27,6 +27,7 @@
  * @summary Verify that option TargetSurvivorRatio affects survivor space occupancy after minor GC.
  * @requires (vm.opt.ExplicitGCInvokesConcurrent == null) | (vm.opt.ExplicitGCInvokesConcurrent == false)
  * @requires (vm.opt.UseJVMCICompiler == null) | (vm.opt.UseJVMCICompiler == false)
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/arguments/TestVerifyBeforeAndAfterGCFlags.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/arguments/TestVerifyBeforeAndAfterGCFlags.java	Tue Jun 12 17:40:28 2018 +0200
@@ -28,6 +28,7 @@
  * @summary Runs an simple application (GarbageProducer) with various
          combinations of -XX:{+|-}Verify{After|Before}GC flags and checks that
          output contain or doesn't contain expected patterns
+ * @requires vm.gc != "Z"
  * @modules java.base/jdk.internal.misc
  * @modules java.management
  * @library /test/lib
--- a/test/hotspot/jtreg/gc/logging/TestUnifiedLoggingSwitchStress.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/logging/TestUnifiedLoggingSwitchStress.java	Tue Jun 12 17:40:28 2018 +0200
@@ -42,6 +42,7 @@
  * @key gc stress
  * @summary Switches gc log level on fly while stressing memory/gc
  * @requires !vm.flightRecorder
+ * @requires vm.gc != "Z"
  * @library /test/lib /
  * @modules java.management java.base/jdk.internal.misc
  *
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithZ.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.io.IOException;
+
+/*
+ * @test TestGCBasherWithZ
+ * @key gc stress
+ * @requires vm.gc.Z
+ * @requires vm.flavor == "server" & !vm.emulatedClient
+ * @summary Stress ZGC
+ * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx384m -server -XX:+UnlockExperimentalVMOptions -XX:+UseZGC TestGCBasherWithZ 120000
+ */
+public class TestGCBasherWithZ {
+    public static void main(String[] args) throws IOException {
+        TestGCBasher.main(args);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithZ.java	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test TestGCOldWithZ
+ * @key gc
+ * @requires vm.gc.Z
+ * @summary Stress the Z
+ * @run main/othervm -Xmx384M -XX:+UnlockExperimentalVMOptions -XX:+UseZGC TestGCOldWithZ 50 1 20 10 10000
+ * @run main/othervm -Xmx256m -XX:+UnlockExperimentalVMOptions -XX:+UseZGC TestGCOldWithZ 50 5 20 1 5000
+ */
+public class TestGCOldWithZ {
+    public static void main(String[] args) {
+        TestGCOld.main(args);
+    }
+}
--- a/test/hotspot/jtreg/gc/survivorAlignment/TestAllocationInEden.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/survivorAlignment/TestAllocationInEden.java	Tue Jun 12 17:40:28 2018 +0200
@@ -26,6 +26,7 @@
  * @bug 8031323
  * @summary Verify that object's alignment in eden space is not affected by
  *          SurvivorAlignmentInBytes option.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromEdenToTenured.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromEdenToTenured.java	Tue Jun 12 17:40:28 2018 +0200
@@ -26,6 +26,7 @@
  * @bug 8031323
  * @summary Verify that objects promoted from eden space to tenured space during
  *          full GC are not aligned to SurvivorAlignmentInBytes value.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java	Tue Jun 12 17:40:28 2018 +0200
@@ -26,6 +26,7 @@
  * @bug 8031323
  * @summary Verify that objects promoted from survivor space to tenured space
  *          during full GC are not aligned to SurvivorAlignmentInBytes value.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java	Tue Jun 12 17:40:28 2018 +0200
@@ -27,6 +27,7 @@
  * @summary Verify that objects promoted from survivor space to tenured space
  *          when their age exceeded tenuring threshold are not aligned to
  *          SurvivorAlignmentInBytes value.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionToSurvivor.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/survivorAlignment/TestPromotionToSurvivor.java	Tue Jun 12 17:40:28 2018 +0200
@@ -26,6 +26,7 @@
  * @bug 8031323
  * @summary Verify that objects promoted from eden space to survivor space after
  *          minor GC are aligned to SurvivorAlignmentInBytes.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/gc/whitebox/TestWBGC.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/gc/whitebox/TestWBGC.java	Tue Jun 12 17:40:28 2018 +0200
@@ -25,6 +25,7 @@
  * @test TestWBGC
  * @bug 8055098
  * @summary Test verify that WB methods isObjectInOldGen and youngGC works correctly.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/runtime/Metaspace/PrintMetaspaceDcmd.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/runtime/Metaspace/PrintMetaspaceDcmd.java	Tue Jun 12 17:40:28 2018 +0200
@@ -30,6 +30,7 @@
  * @test
  * @key metaspace jcmd
  * @summary Test the VM.metaspace command
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/runtime/memory/LargePages/TestLargePagesFlags.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/runtime/memory/LargePages/TestLargePagesFlags.java	Tue Jun 12 17:40:28 2018 +0200
@@ -23,6 +23,7 @@
 
 /* @test TestLargePagesFlags
  * @summary Tests how large pages are choosen depending on the given large pages flag combinations.
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/test/hotspot/jtreg/serviceability/dcmd/gc/RunGCTest.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/dcmd/gc/RunGCTest.java	Tue Jun 12 17:40:28 2018 +0200
@@ -36,6 +36,7 @@
 /*
  * @test
  * @summary Test of diagnostic command GC.run
+ * @requires vm.gc != "Z"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.compiler
--- a/test/hotspot/jtreg/serviceability/sa/TestUniverse.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/sa/TestUniverse.java	Tue Jun 12 17:40:28 2018 +0200
@@ -36,12 +36,25 @@
 /*
  * @test
  * @summary Test the 'universe' command of jhsdb clhsdb.
+ * @requires vm.gc != "Z"
  * @bug 8190307
  * @library /test/lib
  * @build jdk.test.lib.apps.*
  * @build sun.hotspot.WhiteBox
  * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. TestUniverse
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. TestUniverse withoutZ
+ */
+
+/*
+ * @test
+ * @summary Test the 'universe' command of jhsdb clhsdb.
+ * @requires vm.gc == "Z"
+ * @bug 8190307
+ * @library /test/lib
+ * @build jdk.test.lib.apps.*
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. TestUniverse withZ
  */
 
 public class TestUniverse {
@@ -84,6 +97,9 @@
             p.destroyForcibly();
             throw new Error("Problem awaiting the child process: " + ie, ie);
         }
+        if (gc.contains("UseZGC")) {
+            output.shouldContain("ZHeap");
+        }
 
         output.shouldHaveExitValue(0);
         System.out.println(output.getOutput());
@@ -142,8 +158,11 @@
             test("-XX:+UseG1GC");
             test("-XX:+UseParallelGC");
             test("-XX:+UseSerialGC");
-            if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
-              test("-XX:+UseConcMarkSweepGC");
+            if (!Compiler.isGraalEnabled()) { // Graal does not support all GCs
+                test("-XX:+UseConcMarkSweepGC");
+                if (args[0].equals("withZ")) {
+                    test("-XX:+UseZGC");
+                }
             }
             test("-XX:+UseEpsilonGC");
         } catch (Exception e) {
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCapacityTest.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCapacityTest.java	Tue Jun 12 17:40:28 2018 +0200
@@ -27,6 +27,7 @@
  * @test
  * @summary Test checks the consistency of the output
  * displayed with jstat -gccapacity.
+ * @requires vm.gc != "Z"
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest01.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest01.java	Tue Jun 12 17:40:28 2018 +0200
@@ -32,6 +32,7 @@
  * @library /test/lib
  * @library ../share
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
+ * @requires vm.gc != "Z"
  * @run main/othervm -XX:+UsePerfData -Xmx128M GcCauseTest01
  */
 import utils.*;
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest02.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest02.java	Tue Jun 12 17:40:28 2018 +0200
@@ -28,6 +28,7 @@
  *          test forces debuggee application eat ~70% of heap and runs jstat.
  *          jstat should show actual usage of old gen (OC/OU ~= old gen usage).
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
+ * @requires vm.gc != "Z"
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest03.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcCauseTest03.java	Tue Jun 12 17:40:28 2018 +0200
@@ -27,6 +27,7 @@
  *          Test scenario:
  *          test forces debuggee application call System.gc(), runs jstat and checks that
  *          cause of last garbage collection displayed by jstat (LGCC) is 'System.gc()'.
+ * @requires vm.gc != "Z"
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcNewTest.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcNewTest.java	Tue Jun 12 17:40:28 2018 +0200
@@ -29,6 +29,7 @@
  *          test several times provokes garbage collection in the debuggee application and after each garbage
  *          collection runs jstat. jstat should show that after garbage collection number of GC events and garbage
  *          collection time increase.
+ * @requires vm.gc != "Z"
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest01.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest01.java	Tue Jun 12 17:40:28 2018 +0200
@@ -35,6 +35,7 @@
  * @library /test/lib
  * @library ../share
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
+ * @requires vm.gc != "Z"
  * @run main/othervm -XX:+UsePerfData -Xmx128M GcTest01
  */
 import utils.*;
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest02.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/GcTest02.java	Tue Jun 12 17:40:28 2018 +0200
@@ -28,6 +28,7 @@
  *          test forces debuggee application eat ~70% of heap and runs jstat.
  *          jstat should show actual usage of old gen (OC/OU ~= old gen usage).
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
+ * @requires vm.gc != "Z"
  * @modules java.base/jdk.internal.misc
  * @library /test/lib
  * @library ../share
--- a/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/CompressedClassSpaceSize/TestDescription.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/CompressedClassSpaceSize/TestDescription.java	Tue Jun 12 17:40:28 2018 +0200
@@ -29,6 +29,7 @@
  * VM Testbase keywords: [monitoring, quarantine]
  * VM Testbase comments: JDK-8058967
  *
+ * @requires vm.opt.final.ClassUnloading
  * @library /vmTestbase /test/lib
  * @run driver jdk.test.lib.FileInstaller . .
  * @run main/othervm
--- a/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowTest/ShrinkGrowTest.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/shrink_grow/ShrinkGrowTest/ShrinkGrowTest.java	Tue Jun 12 17:40:28 2018 +0200
@@ -27,6 +27,7 @@
  *
  * @summary converted from VM Testbase metaspace/shrink_grow/ShrinkGrowTest.
  *
+ * @requires vm.opt.final.ClassUnloading
  * @library /vmTestbase /test/lib
  * @run driver jdk.test.lib.FileInstaller . .
  * @run main/othervm
--- a/test/hotspot/jtreg/vmTestbase/nsk/jdi/ObjectReference/referringObjects/referringObjects001/referringObjects001.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jdi/ObjectReference/referringObjects/referringObjects001/referringObjects001.java	Tue Jun 12 17:40:28 2018 +0200
@@ -51,6 +51,7 @@
  *         create references of all possible types to single object, ObjectReference.referringObjects should return only
  *         referrers with supported type(Strong, PhantomReference, SoftReference, WeakReference)
  *
+ * @requires vm.gc != "Z"
  * @library /vmTestbase
  *          /test/lib
  * @run driver jdk.test.lib.FileInstaller . .
@@ -151,6 +152,11 @@
         else
             expectedInstanceCount = 0;
 
+        // Note! This test is broken, in the sense that it incorrectly assumes
+        // that no GC can happen before it walks the heap. In practice, it seems
+        // to only affect this test when using ZGC. However, this test will also
+        // fail when using other GCs if an explicit GC is done here.
+
         checkDebugeeAnswer_instanceCounts(className, expectedInstanceCount, objectsToFilter);
         checkDebugeeAnswer_instances_referringObjects(objectsToFilter, className, expectedInstanceCount, includedInReferrersCount, referrerCount);
 
--- a/test/hotspot/jtreg/vmTestbase/nsk/jdi/ReferenceType/instances/instances003/instances003.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jdi/ReferenceType/instances/instances003/instances003.java	Tue Jun 12 17:40:28 2018 +0200
@@ -41,6 +41,7 @@
  *         done
  *         Test is executed for following sublcasses of ObjectReference: StringReference, ThreadReference, ClassLoaderReference
  *
+ * @requires vm.gc != "Z"
  * @library /vmTestbase
  *          /test/lib
  * @run driver jdk.test.lib.FileInstaller . .
@@ -125,6 +126,11 @@
             ":" + referrerCount + ":" + referrerType +
             (referrerType.equals(ObjectInstancesManager.WEAK_REFERENCE) ? "|" + ObjectInstancesManager.STRONG_REFERENCE : ""));
 
+        // Note! This test is broken, in the sense that it incorrectly assumes
+        // that no GC can happen before it walks the heap. In practice, it seems
+        // to only affect this test when using ZGC. However, this test will also
+        // fail when using other GCs if an explicit GC is done here.
+
         // the instance counts should not be affected by creating multiple references
         checkDebugeeAnswer_instanceCounts(className, createInstanceCount, objectsToFilter);
 
--- a/test/hotspot/jtreg/vmTestbase/nsk/jdwp/ReferenceType/Instances/instances001/instances001.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jdwp/ReferenceType/Instances/instances001/instances001.java	Tue Jun 12 17:40:28 2018 +0200
@@ -60,6 +60,7 @@
  *     Finally, debugger sends debuggee signal to quit, waits for it exits
  *     and exits too with the proper exit code.
  *
+ * @requires vm.gc != "Z"
  * @library /vmTestbase /test/hotspot/jtreg/vmTestbase
  *          /test/lib
  * @run driver jdk.test.lib.FileInstaller . .
@@ -156,6 +157,10 @@
 
         long typeID = debuggee.getReferenceTypeID(createTypeSignature(testClassName));
 
+        // Note! This test is broken, in the sense that it incorrectly assumes
+        // that no GC can happen before it walks the heap. In practice, it seems
+        // to only affect this test when using ZGC. However, this test will also
+        // fail when using other GCs if an explicit GC is done here.
 
         // create command with maxInstances=1, only 1 instance should be returned
         testClass(typeID, 1, 1, false, 0);
--- a/test/hotspot/jtreg/vmTestbase/nsk/jdwp/VirtualMachine/InstanceCounts/instanceCounts001/instanceCounts001.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jdwp/VirtualMachine/InstanceCounts/instanceCounts001/instanceCounts001.java	Tue Jun 12 17:40:28 2018 +0200
@@ -61,6 +61,7 @@
  *     Finally, debugger sends debuggee signal to quit, waits for it exits
  *     and exits too with the proper exit code.
  *
+ * @requires vm.gc != "Z"
  * @library /vmTestbase /test/hotspot/jtreg/vmTestbase
  *          /test/lib
  * @run driver jdk.test.lib.FileInstaller . .
@@ -164,6 +165,11 @@
         if (!isDebuggeeReady())
             return;
 
+        // Note! This test is broken, in the sense that it incorrectly assumes
+        // that no GC can happen before it walks the heap. In practice, it seems
+        // to only affect this test when using ZGC. However, this test will also
+        // fail when using other GCs if an explicit GC is done here.
+
         int expectedCount = instanceCounts001a.expectedCount;
 
         String classNames[];
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java	Tue Jun 12 17:40:28 2018 +0200
@@ -69,9 +69,10 @@
         Boolean isExplicitGCInvokesConcurrentOn = wb.getBooleanVMFlag("ExplicitGCInvokesConcurrent");
         Boolean isUseG1GCon = wb.getBooleanVMFlag("UseG1GC");
         Boolean isUseConcMarkSweepGCon = wb.getBooleanVMFlag("UseConcMarkSweepGC");
+        Boolean isUseZGCon = wb.getBooleanVMFlag("UseZGC");
 
         String keyPhrase;
-        if (isExplicitGCInvokesConcurrentOn && (isUseG1GCon || isUseConcMarkSweepGCon)) {
+        if ((isExplicitGCInvokesConcurrentOn && (isUseG1GCon || isUseConcMarkSweepGCon)) || isUseZGCon) {
             keyPhrase = "GC";
         } else {
             keyPhrase = "Pause Full";
--- a/test/jdk/TEST.ROOT	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/jdk/TEST.ROOT	Tue Jun 12 17:40:28 2018 +0200
@@ -37,6 +37,7 @@
 requires.properties= \
     sun.arch.data.model \
     java.runtime.name \
+    vm.gc.Z \
     vm.graal.enabled \
     vm.cds
 
--- a/test/jdk/com/sun/jdi/OomDebugTest.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/jdk/com/sun/jdi/OomDebugTest.java	Tue Jun 12 17:40:28 2018 +0200
@@ -29,6 +29,7 @@
  *
  *  @author Severin Gehwolf <sgehwolf@redhat.com>
  *
+ *  @requires vm.gc != "Z"
  *  @library ..
  *  @run build TestScaffold VMConnection TargetListener TargetAdapter
  *  @run compile -g OomDebugTest.java
--- a/test/jdk/com/sun/management/OperatingSystemMXBean/GetCommittedVirtualMemorySize.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/jdk/com/sun/management/OperatingSystemMXBean/GetCommittedVirtualMemorySize.java	Tue Jun 12 17:40:28 2018 +0200
@@ -25,6 +25,7 @@
  * @test
  * @bug     4858522 6191542
  * @summary Basic unit test of OperatingSystemMXBean.getCommittedVirtualMemorySize()
+ * @requires vm.gc != "Z"
  * @author  Steve Bohne
  */
 
--- a/test/jdk/java/lang/management/ManagementFactory/MXBeanException.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/jdk/java/lang/management/ManagementFactory/MXBeanException.java	Tue Jun 12 17:40:28 2018 +0200
@@ -27,6 +27,7 @@
  * @summary Check if a RuntimeException is wrapped by RuntimeMBeanException
  *          only once.
  *
+ * @requires vm.gc != "Z"
  * @author  Mandy Chung
  *
  * @build MXBeanException
--- a/test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java	Tue Jun 12 17:40:28 2018 +0200
@@ -26,6 +26,7 @@
  * @bug     4530538
  * @summary Basic unit test of MemoryMXBean.getMemoryPools() and
  *          MemoryMXBean.getMemoryManager().
+ * @requires vm.gc != "Z"
  * @author  Mandy Chung
  *
  * @modules jdk.management
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/lang/management/MemoryMXBean/MemoryTestZGC.sh	Tue Jun 12 17:40:28 2018 +0200
@@ -0,0 +1,50 @@
+#
+# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+#
+# @test
+# @requires vm.gc.Z
+# @run compile MemoryTest.java
+# @run shell MemoryTestZGC.sh
+#
+
+# Set appropriate jdk
+
+if [ ! -z "${TESTJAVA}" ] ; then
+     jdk="$TESTJAVA"
+else
+     echo "--Error: TESTJAVA must be defined as the pathname of a jdk to test."
+     exit 1
+fi
+
+runOne()
+{
+   echo "runOne $@"
+   $jdk/bin/java ${TESTVMOPTS} -classpath $TESTCLASSES $@ || exit 2
+}
+
+# Test MemoryTest with ZGC. ZGC is a single generation GC, which means
+# it has one memory manager and one memory pool.
+runOne -XX:+UnlockExperimentalVMOptions -XX:+UseZGC MemoryTest 1 1
+
+exit 0
--- a/test/lib/sun/hotspot/gc/GC.java	Tue Jun 12 07:52:30 2018 -0700
+++ b/test/lib/sun/hotspot/gc/GC.java	Tue Jun 12 17:40:28 2018 +0200
@@ -37,7 +37,8 @@
     Parallel(2),
     ConcMarkSweep(3),
     G1(4),
-    Epsilon(5);
+    Epsilon(5),
+    Z(6);
 
     private static final WhiteBox WB = WhiteBox.getWhiteBox();