8214897: ZGC: Concurrent Class Unloading
author eosterlund
Tue, 11 Dec 2018 11:08:39 +0100
changeset 52939 9a8585f60c32
parent 52938 5ff7480c9e28
child 52940 26e2cfebcfba
8214897: ZGC: Concurrent Class Unloading Reviewed-by: pliden Contributed-by: erik.osterlund@oracle.com, per.liden@oracle.com, stefan.karlsson@oracle.com
src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp
src/hotspot/share/gc/z/zArguments.cpp
src/hotspot/share/gc/z/zBarrier.hpp
src/hotspot/share/gc/z/zBarrier.inline.hpp
src/hotspot/share/gc/z/zBarrierSet.cpp
src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
src/hotspot/share/gc/z/zBarrierSetNMethod.hpp
src/hotspot/share/gc/z/zCollectedHeap.cpp
src/hotspot/share/gc/z/zGlobals.hpp
src/hotspot/share/gc/z/zHeap.cpp
src/hotspot/share/gc/z/zHeap.hpp
src/hotspot/share/gc/z/zMark.cpp
src/hotspot/share/gc/z/zNMethodTable.cpp
src/hotspot/share/gc/z/zNMethodTable.hpp
src/hotspot/share/gc/z/zNMethodTableEntry.hpp
src/hotspot/share/gc/z/zOopClosures.hpp
src/hotspot/share/gc/z/zOopClosures.inline.hpp
src/hotspot/share/gc/z/zRootsIterator.cpp
src/hotspot/share/gc/z/zRootsIterator.hpp
src/hotspot/share/gc/z/zThreadLocalData.hpp
src/hotspot/share/gc/z/zUnload.cpp
src/hotspot/share/gc/z/zUnload.hpp
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -74,15 +74,17 @@
 //  * 63-47 Fixed (17-bits, always zero)
 //
 
-const size_t    ZPlatformPageSizeSmallShift   = 21; // 2M
+const size_t    ZPlatformPageSizeSmallShift    = 21; // 2M
 
-const size_t    ZPlatformAddressOffsetBits    = 42; // 4TB
+const size_t    ZPlatformAddressOffsetBits     = 42; // 4TB
+
+const uintptr_t ZPlatformAddressMetadataShift  = ZPlatformAddressOffsetBits;
 
-const uintptr_t ZPlatformAddressMetadataShift = ZPlatformAddressOffsetBits;
+const uintptr_t ZPlatformAddressSpaceStart     = (uintptr_t)1 << ZPlatformAddressOffsetBits;
+const uintptr_t ZPlatformAddressSpaceSize      = ((uintptr_t)1 << ZPlatformAddressOffsetBits) * 4;
 
-const uintptr_t ZPlatformAddressSpaceStart    = (uintptr_t)1 << ZPlatformAddressOffsetBits;
-const uintptr_t ZPlatformAddressSpaceSize     = ((uintptr_t)1 << ZPlatformAddressOffsetBits) * 4;
+const size_t    ZPlatformNMethodDisarmedOffset = 4;
 
-const size_t    ZPlatformCacheLineSize        = 64;
+const size_t    ZPlatformCacheLineSize         = 64;
 
 #endif // OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
--- a/src/hotspot/share/gc/z/zArguments.cpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zArguments.cpp	Tue Dec 11 11:08:39 2018 +0100
@@ -80,10 +80,6 @@
   FLAG_SET_DEFAULT(UseCompressedOops, false);
   FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
 
-  // ClassUnloading not (yet) supported
-  FLAG_SET_DEFAULT(ClassUnloading, false);
-  FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
-
   // Verification before startup and after exit not (yet) supported
   FLAG_SET_DEFAULT(VerifyDuringStartup, false);
   FLAG_SET_DEFAULT(VerifyBeforeExit, false);
--- a/src/hotspot/share/gc/z/zBarrier.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zBarrier.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -81,6 +81,7 @@
   static void load_barrier_on_oop_fields(oop o);
   static  oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o);
   static  oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o);
+  static void load_barrier_on_root_oop_field(oop* p);
 
   // Weak load barrier
   static oop weak_load_barrier_on_oop_field(volatile oop* p);
@@ -99,6 +100,7 @@
   // Keep alive barrier
   static void keep_alive_barrier_on_weak_oop_field(volatile oop* p);
   static void keep_alive_barrier_on_phantom_oop_field(volatile oop* p);
+  static void keep_alive_barrier_on_phantom_root_oop_field(oop* p);
 
   // Mark barrier
   static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable);
--- a/src/hotspot/share/gc/z/zBarrier.inline.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -111,11 +111,12 @@
   const uintptr_t good_addr = slow_path(addr);
 
   // Non-atomic healing helps speed up root scanning. This is safe to do
-  // since we are always healing roots in a safepoint, which means we are
-  // never racing with mutators modifying roots while we are healing them.
-  // It's also safe in case multiple GC threads try to heal the same root,
-  // since they would always heal the root in the same way and it does not
-  // matter in which order it happens.
+  // since we are always healing roots in a safepoint, or under a lock,
+  // which ensures we are never racing with mutators modifying roots while
+  // we are healing them. It's also safe in case multiple GC threads try
+  // to heal the same root if it is aligned, since they would always heal
+  // the root in the same way and it does not matter in which order it
+  // happens. For misaligned oops, there needs to be mutual exclusion.
   *p = ZOop::to_oop(good_addr);
 }
 
@@ -188,6 +189,11 @@
   return load_barrier_on_oop_field_preloaded(p, o);
 }
 
+inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
+  const oop o = *p;
+  root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
+}
+
 //
 // Weak load barrier
 //
@@ -269,6 +275,13 @@
   barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
 }
 
+inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
+  // This operation is only valid when resurrection is blocked.
+  assert(ZResurrection::is_blocked(), "Invalid phase");
+  const oop o = *p;
+  root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
+}
+
 //
 // Mark barrier
 //
--- a/src/hotspot/share/gc/z/zBarrierSet.cpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zBarrierSet.cpp	Tue Dec 11 11:08:39 2018 +0100
@@ -24,6 +24,7 @@
 #include "precompiled.hpp"
 #include "gc/z/zBarrierSet.hpp"
 #include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zBarrierSetNMethod.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zThreadLocalData.hpp"
@@ -39,11 +40,20 @@
 class ZBarrierSetC1;
 class ZBarrierSetC2;
 
+static BarrierSetNMethod* make_barrier_set_nmethod() {
+  // NMethod barriers are only used when class unloading is enabled
+  if (!ClassUnloading) {
+    return NULL;
+  }
+
+  return new ZBarrierSetNMethod();
+}
+
 ZBarrierSet::ZBarrierSet() :
     BarrierSet(make_barrier_set_assembler<ZBarrierSetAssembler>(),
                make_barrier_set_c1<ZBarrierSetC1>(),
                make_barrier_set_c2<ZBarrierSetC2>(),
-               NULL /* barrier_set_nmethod */,
+               make_barrier_set_nmethod(),
                BarrierSet::FakeRtti(BarrierSet::ZBarrierSet)) {}
 
 ZBarrierSetAssembler* ZBarrierSet::assembler() {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Tue Dec 11 11:08:39 2018 +0100
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "code/nmethod.hpp"
+#include "gc/z/zBarrierSetNMethod.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zOopClosures.hpp"
+#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "logging/log.hpp"
+
+bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
+  ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));
+  log_trace(nmethod, barrier)("Entered critical zone for %p", nm);
+
+  if (!is_armed(nm)) {
+    // Some other thread got here first and healed the oops
+    // and disarmed the nmethod.
+    return true;
+  }
+
+  if (nm->is_unloading()) {
+    // We can end up calling nmethods that are unloading
+    // since we clear compiled ICs lazily. Returning false
+    // will re-resolve the call and update the compiled IC.
+    return false;
+  }
+
+  // Heal oops and disarm
+  ZNMethodOopClosure cl;
+  nm->oops_do(&cl);
+  nm->fix_oop_relocations();
+
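+  // Release ordering: the oop healing above must become visible before
+  // the disarm below, so that a thread observing the nmethod as disarmed
+  // is guaranteed to also observe the healed oops.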
+  OrderAccess::release();
+
+  disarm(nm);
+
+  return true;
+}
+
+int ZBarrierSetNMethod::disarmed_value() const {
+  // We override the default BarrierSetNMethod::disarmed_value() since
+  // this can be called by GC threads, which don't keep an up-to-date
+  // address_bad_mask.
+  const uintptr_t disarmed_addr = ((uintptr_t)&ZAddressBadMask) + ZNMethodDisarmedOffset;
+  return *((int*)disarmed_addr);
+}
+
+ByteSize ZBarrierSetNMethod::thread_disarmed_offset() const {
+  return ZThreadLocalData::nmethod_disarmed_offset();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZBARRIERSETNMETHOD_HPP
+#define SHARE_GC_Z_ZBARRIERSETNMETHOD_HPP
+
+#include "gc/shared/barrierSetNMethod.hpp"
+#include "memory/allocation.hpp"
+
+class nmethod;
+
+class ZBarrierSetNMethod : public BarrierSetNMethod {
+protected:
+  virtual int disarmed_value() const;
+  virtual bool nmethod_entry_barrier(nmethod* nm);
+
+public:
+  virtual ByteSize thread_disarmed_offset() const;
+};
+
+#endif // SHARE_GC_Z_ZBARRIERSETNMETHOD_HPP
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp	Tue Dec 11 11:08:39 2018 +0100
@@ -259,12 +259,10 @@
 }
 
 void ZCollectedHeap::register_nmethod(nmethod* nm) {
-  assert_locked_or_safepoint(CodeCache_lock);
   ZNMethodTable::register_nmethod(nm);
 }
 
 void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
-  assert_locked_or_safepoint(CodeCache_lock);
   ZNMethodTable::unregister_nmethod(nm);
 }
 
--- a/src/hotspot/share/gc/z/zGlobals.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zGlobals.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -91,6 +91,9 @@
 const uintptr_t   ZAddressSpaceSize             = ZPlatformAddressSpaceSize;
 const uintptr_t   ZAddressSpaceEnd              = ZAddressSpaceStart + ZAddressSpaceSize;
 
+// NMethod entry barrier
+const size_t      ZNMethodDisarmedOffset        = ZPlatformNMethodDisarmedOffset;
+
 // Cache line size
 const size_t      ZCacheLineSize                = ZPlatformCacheLineSize;
 
--- a/src/hotspot/share/gc/z/zHeap.cpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zHeap.cpp	Tue Dec 11 11:08:39 2018 +0100
@@ -69,6 +69,7 @@
     _weak_roots_processor(&_workers),
     _relocate(&_workers),
     _relocation_set(),
+    _unload(&_workers),
     _serviceability(heap_min_size(), heap_max_size()) {
   // Install global heap instance
   assert(_heap == NULL, "Already initialized");
@@ -353,9 +354,6 @@
   // Enter mark completed phase
   ZGlobalPhase = ZPhaseMarkCompleted;
 
-  // Resize metaspace
-  MetaspaceGC::compute_new_size();
-
   // Update statistics
   ZStatSample(ZSamplerHeapUsedAfterMark, used());
   ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
@@ -366,6 +364,9 @@
   // Process weak roots
   _weak_roots_processor.process_weak_roots();
 
+  // Prepare to unload unused classes and code
+  _unload.prepare();
+
   return true;
 }
 
@@ -380,6 +381,9 @@
   // Process concurrent weak roots
   _weak_roots_processor.process_concurrent_weak_roots();
 
+  // Unload unused classes and code
+  _unload.unload();
+
   // Unblock resurrection of weak/phantom references
   ZResurrection::unblock();
 
@@ -463,8 +467,8 @@
 void ZHeap::relocate_start() {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 
-  // Update statistics
-  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
+  // Finish unloading of classes and code
+  _unload.finish();
 
   // Flip address view
   ZAddressMasks::flip_to_remapped();
@@ -474,6 +478,7 @@
   ZGlobalPhase = ZPhaseRelocate;
 
   // Update statistics
+  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
   ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());
 
   // Remap/Relocate roots
--- a/src/hotspot/share/gc/z/zHeap.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zHeap.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -41,6 +41,7 @@
 #include "gc/z/zRootsIterator.hpp"
 #include "gc/z/zWeakRootsProcessor.hpp"
 #include "gc/z/zServiceability.hpp"
+#include "gc/z/zUnload.hpp"
 #include "gc/z/zWorkers.hpp"
 #include "memory/allocation.hpp"
 
@@ -59,6 +60,7 @@
   ZWeakRootsProcessor _weak_roots_processor;
   ZRelocate           _relocate;
   ZRelocationSet      _relocation_set;
+  ZUnload             _unload;
   ZServiceability     _serviceability;
 
   size_t heap_min_size() const;
--- a/src/hotspot/share/gc/z/zMark.cpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zMark.cpp	Tue Dec 11 11:08:39 2018 +0100
@@ -287,6 +287,14 @@
 }
 
 void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
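+  // Follow the array's klass, so that the array's class and class loader
+  // are kept alive, since following the array elements below does not
+  // visit any metadata.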
+  if (finalizable) {
+    ZMarkBarrierOopClosure<true /* finalizable */> cl;
+    cl.do_klass(obj->klass());
+  } else {
+    ZMarkBarrierOopClosure<false /* finalizable */> cl;
+    cl.do_klass(obj->klass());
+  }
+
   const uintptr_t addr = (uintptr_t)obj->base();
   const size_t size = (size_t)obj->length() * oopSize;
 
--- a/src/hotspot/share/gc/z/zNMethodTable.cpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zNMethodTable.cpp	Tue Dec 11 11:08:39 2018 +0100
@@ -23,45 +23,62 @@
 
 #include "precompiled.hpp"
 #include "code/relocInfo.hpp"
-#include "code/nativeInst.hpp"
 #include "code/nmethod.hpp"
+#include "code/icBuffer.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetNMethod.hpp"
+#include "gc/z/zArray.inline.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHash.inline.hpp"
+#include "gc/z/zLock.inline.hpp"
 #include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zTask.hpp"
+#include "gc/z/zWorkers.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
-#include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
 #include "utilities/debug.hpp"
 
-class ZNMethodWithImmediateOops {
+class ZNMethodDataImmediateOops {
 private:
-  nmethod* const _nm;
-  const size_t   _nimmediate_oops;
+  const size_t _nimmediate_oops;
 
   static size_t header_size();
 
-  ZNMethodWithImmediateOops(nmethod* nm, const GrowableArray<oop*>& immediate_oops);
+  ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops);
 
 public:
-  static ZNMethodWithImmediateOops* create(nmethod* nm, const GrowableArray<oop*>& immediate_oops);
-  static void destroy(ZNMethodWithImmediateOops* nmi);
+  static ZNMethodDataImmediateOops* create(const GrowableArray<oop*>& immediate_oops);
+  static void destroy(ZNMethodDataImmediateOops* data_immediate_oops);
 
-  nmethod* method() const;
   size_t immediate_oops_count() const;
   oop** immediate_oops_begin() const;
   oop** immediate_oops_end() const;
 };
 
-size_t ZNMethodWithImmediateOops::header_size() {
-  const size_t size = sizeof(ZNMethodWithImmediateOops);
+size_t ZNMethodDataImmediateOops::header_size() {
+  const size_t size = sizeof(ZNMethodDataImmediateOops);
   assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
   return size;
 }
 
-ZNMethodWithImmediateOops::ZNMethodWithImmediateOops(nmethod* nm, const GrowableArray<oop*>& immediate_oops) :
-    _nm(nm),
+ZNMethodDataImmediateOops* ZNMethodDataImmediateOops::create(const GrowableArray<oop*>& immediate_oops) {
+  // Allocate memory for the ZNMethodDataImmediateOops object
+  // plus the immediate oop* array that follows right after.
+  const size_t size = ZNMethodDataImmediateOops::header_size() + (sizeof(oop*) * immediate_oops.length());
+  void* const data_immediate_oops = NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
+  return ::new (data_immediate_oops) ZNMethodDataImmediateOops(immediate_oops);
+}
+
+void ZNMethodDataImmediateOops::destroy(ZNMethodDataImmediateOops* data_immediate_oops) {
+  ZNMethodTable::safe_delete(data_immediate_oops);
+}
+
+ZNMethodDataImmediateOops::ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops) :
     _nimmediate_oops(immediate_oops.length()) {
   // Save all immediate oops
   for (size_t i = 0; i < _nimmediate_oops; i++) {
@@ -69,41 +86,97 @@
   }
 }
 
-ZNMethodWithImmediateOops* ZNMethodWithImmediateOops::create(nmethod* nm, const GrowableArray<oop*>& immediate_oops) {
-  // Allocate memory for the ZNMethodWithImmediateOops object
-  // plus the immediate oop* array that follows right after.
-  const size_t size = header_size() + (sizeof(oop*) * immediate_oops.length());
-  void* const method_with_immediate_oops = NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
-  return ::new (method_with_immediate_oops) ZNMethodWithImmediateOops(nm, immediate_oops);
-}
-
-void ZNMethodWithImmediateOops::destroy(ZNMethodWithImmediateOops* nmi) {
-  FREE_C_HEAP_ARRAY(uint8_t, nmi);
-}
-
-nmethod* ZNMethodWithImmediateOops::method() const {
-  return _nm;
-}
-
-size_t ZNMethodWithImmediateOops::immediate_oops_count() const {
+size_t ZNMethodDataImmediateOops::immediate_oops_count() const {
   return _nimmediate_oops;
 }
 
-oop** ZNMethodWithImmediateOops::immediate_oops_begin() const {
+oop** ZNMethodDataImmediateOops::immediate_oops_begin() const {
   // The immediate oop* array starts immediately after this object
   return (oop**)((uintptr_t)this + header_size());
 }
 
-oop** ZNMethodWithImmediateOops::immediate_oops_end() const {
+oop** ZNMethodDataImmediateOops::immediate_oops_end() const {
   return immediate_oops_begin() + immediate_oops_count();
 }
 
+class ZNMethodData {
+private:
+  ZReentrantLock                      _lock;
+  ZNMethodDataImmediateOops* volatile _immediate_oops;
+
+  ZNMethodData(nmethod* nm);
+
+public:
+  static ZNMethodData* create(nmethod* nm);
+  static void destroy(ZNMethodData* data);
+
+  ZReentrantLock* lock();
+
+  ZNMethodDataImmediateOops* immediate_oops() const;
+  ZNMethodDataImmediateOops* swap_immediate_oops(const GrowableArray<oop*>& immediate_oops);
+};
+
+ZNMethodData* ZNMethodData::create(nmethod* nm) {
+  void* const method = NEW_C_HEAP_ARRAY(uint8_t, sizeof(ZNMethodData), mtGC);
+  return ::new (method) ZNMethodData(nm);
+}
+
+void ZNMethodData::destroy(ZNMethodData* data) {
+  ZNMethodDataImmediateOops::destroy(data->immediate_oops());
+  ZNMethodTable::safe_delete(data);
+}
+
+ZNMethodData::ZNMethodData(nmethod* nm) :
+    _lock(),
+    _immediate_oops(NULL) {}
+
+ZReentrantLock* ZNMethodData::lock() {
+  return &_lock;
+}
+
+ZNMethodDataImmediateOops* ZNMethodData::immediate_oops() const {
+  return OrderAccess::load_acquire(&_immediate_oops);
+}
+
+ZNMethodDataImmediateOops* ZNMethodData::swap_immediate_oops(const GrowableArray<oop*>& immediate_oops) {
+  ZNMethodDataImmediateOops* const data_immediate_oops =
+    immediate_oops.is_empty() ? NULL : ZNMethodDataImmediateOops::create(immediate_oops);
+  return Atomic::xchg(data_immediate_oops, &_immediate_oops);
+}
+
+static ZNMethodData* gc_data(const nmethod* nm) {
+  return nm->gc_data<ZNMethodData>();
+}
+
+static void set_gc_data(nmethod* nm, ZNMethodData* data) {
+  return nm->set_gc_data<ZNMethodData>(data);
+}
+
 ZNMethodTableEntry* ZNMethodTable::_table = NULL;
 size_t ZNMethodTable::_size = 0;
+ZLock ZNMethodTable::_iter_lock;
+ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
+size_t ZNMethodTable::_iter_table_size = 0;
+ZArray<void*> ZNMethodTable::_iter_deferred_deletes;
 size_t ZNMethodTable::_nregistered = 0;
 size_t ZNMethodTable::_nunregistered = 0;
 volatile size_t ZNMethodTable::_claimed = 0;
 
+void ZNMethodTable::safe_delete(void* data) {
+  if (data == NULL) {
+    return;
+  }
+
+  ZLocker<ZLock> locker(&_iter_lock);
+  if (_iter_table != NULL) {
+    // Iteration in progress, defer delete
+    _iter_deferred_deletes.add(data);
+  } else {
+    // Iteration not in progress, delete now
+    FREE_C_HEAP_ARRAY(uint8_t, data);
+  }
+}
+
 ZNMethodTableEntry ZNMethodTable::create_entry(nmethod* nm) {
   GrowableArray<oop*> immediate_oops;
   bool non_immediate_oops = false;
@@ -132,29 +205,27 @@
     }
   }
 
-  // oops_count() returns the number of oops in the oop table plus one
-  if (immediate_oops.is_empty() && nm->oops_count() == 1) {
-    // No oops found, return empty entry
-    return ZNMethodTableEntry();
-  }
-
-  if (immediate_oops.is_empty()) {
-    // No immediate oops found, return entry without immediate oops
-    return ZNMethodTableEntry(nm, non_immediate_oops);
+  // Attach GC data to nmethod
+  ZNMethodData* data = gc_data(nm);
+  if (data == NULL) {
+    data = ZNMethodData::create(nm);
+    set_gc_data(nm, data);
   }
 
-  // Return entry with immediate oops
-  return ZNMethodTableEntry(ZNMethodWithImmediateOops::create(nm, immediate_oops), non_immediate_oops);
+  // Attach immediate oops in GC data
+  ZNMethodDataImmediateOops* const old_data_immediate_oops = data->swap_immediate_oops(immediate_oops);
+  ZNMethodDataImmediateOops::destroy(old_data_immediate_oops);
+
+  // Create entry
+  return ZNMethodTableEntry(nm, non_immediate_oops, !immediate_oops.is_empty());
 }
 
-void ZNMethodTable::destroy_entry(ZNMethodTableEntry entry) {
-  if (entry.immediate_oops()) {
-    ZNMethodWithImmediateOops::destroy(entry.method_with_immediate_oops());
+ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
+  ZNMethodData* const data = gc_data(nm);
+  if (data == NULL) {
+    return NULL;
   }
-}
-
-nmethod* ZNMethodTable::method(ZNMethodTableEntry entry) {
-  return entry.immediate_oops() ? entry.method_with_immediate_oops()->method() : entry.method();
+  return data->lock();
 }
 
 size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
@@ -171,7 +242,7 @@
 }
 
 bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry) {
-  const nmethod* const nm = method(entry);
+  const nmethod* const nm = entry.method();
   size_t index = first_index(nm, size);
 
   for (;;) {
@@ -183,9 +254,8 @@
       return true;
     }
 
-    if (table_entry.registered() && method(table_entry) == nm) {
+    if (table_entry.registered() && table_entry.method() == nm) {
       // Replace existing entry
-      destroy_entry(table_entry);
       table[index] = entry;
       return false;
     }
@@ -194,7 +264,7 @@
   }
 }
 
-bool ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, const nmethod* nm) {
+bool ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
   if (size == 0) {
     // Table is empty
     return false;
@@ -210,10 +280,13 @@
       return false;
     }
 
-    if (table_entry.registered() && method(table_entry) == nm) {
+    if (table_entry.registered() && table_entry.method() == nm) {
       // Remove entry
-      destroy_entry(table_entry);
       table[index] = ZNMethodTableEntry(true /* unregistered */);
+
+      // Destroy GC data
+      ZNMethodData::destroy(gc_data(nm));
+      set_gc_data(nm, NULL);
       return true;
     }
 
@@ -222,6 +295,7 @@
 }
 
 void ZNMethodTable::rebuild(size_t new_size) {
+  ZLocker<ZLock> locker(&_iter_lock);
   assert(is_power_of_2(new_size), "Invalid size");
 
   log_debug(gc, nmethod)("Rebuilding NMethod Table: "
@@ -243,8 +317,10 @@
     }
   }
 
-  // Delete old table
-  delete [] _table;
+  if (_iter_table != _table) {
+    // Delete old table
+    delete [] _table;
+  }
 
   // Install new table
   _table = new_table;
@@ -294,8 +370,8 @@
             p2i(nm),
             nm->compiler_name(),
             nm->oops_count() - 1,
-            entry.immediate_oops() ? entry.method_with_immediate_oops()->immediate_oops_count() : 0,
-            BOOL_TO_STR(entry.non_immediate_oops()));
+            entry.immediate_oops() ? gc_data(nm)->immediate_oops()->immediate_oops_count() : 0,
+            entry.non_immediate_oops() ? "Yes" : "No");
 
   LogTarget(Trace, gc, nmethod, oops) log_oops;
   if (!log_oops.is_enabled()) {
@@ -312,12 +388,14 @@
 
   if (entry.immediate_oops()) {
     // Print nmethod immediate oops
-    const ZNMethodWithImmediateOops* const nmi = entry.method_with_immediate_oops();
-    oop** const begin = nmi->immediate_oops_begin();
-    oop** const end = nmi->immediate_oops_end();
-    for (oop** p = begin; p < end; p++) {
-      log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
-                     (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
+    const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
+    if (nmi != NULL) {
+      oop** const begin = nmi->immediate_oops_begin();
+      oop** const end = nmi->immediate_oops_end();
+      for (oop** p = begin; p < end; p++) {
+        log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
+                       (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
+      }
     }
   }
 }
@@ -343,21 +421,17 @@
 }
 
 void ZNMethodTable::register_nmethod(nmethod* nm) {
+  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
   ResourceMark rm;
 
+  // Grow/Shrink/Prune table if needed
+  rebuild_if_needed();
+
   // Create entry
   const ZNMethodTableEntry entry = create_entry(nm);
 
   log_register(nm, entry);
 
-  if (!entry.registered()) {
-    // Method doesn't have any oops, ignore it
-    return;
-  }
-
-  // Grow/Shrink/Prune table if needed
-  rebuild_if_needed();
-
   // Insert new entry
   if (register_entry(_table, _size, entry)) {
     // New entry registered. When register_entry() instead returns
     // false the nmethod was already in the table so we do not want
     // to increase number of registered entries in that case.
     // to increase number of registered entries in that case.
     _nregistered++;
   }
+
+  // Disarm nmethod entry barrier
+  disarm_nmethod(nm);
+}
+
+void ZNMethodTable::sweeper_wait_for_iteration() {
+  // The sweeper must wait for any ongoing iteration to complete
+  // before it can unregister an nmethod.
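+  // Otherwise the iterating GC threads could end up visiting an nmethod
+  // that is in the process of being freed.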
+  if (!Thread::current()->is_Code_cache_sweeper_thread()) {
+    return;
+  }
+
+  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
+
+  while (_iter_table != NULL) {
+    MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    os::naked_short_sleep(1);
+  }
 }
 
 void ZNMethodTable::unregister_nmethod(nmethod* nm) {
   ResourceMark rm;
 
+  sweeper_wait_for_iteration();
+
   log_unregister(nm);
 
   // Remove entry
@@ -383,20 +477,45 @@
   }
 }
 
-void ZNMethodTable::gc_prologue() {
+void ZNMethodTable::disarm_nmethod(nmethod* nm) {
+  BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
+  if (bs != NULL) {
+    bs->disarm(nm);
+  }
+}
+
+void ZNMethodTable::nmethod_entries_do_begin() {
+  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  ZLocker<ZLock> locker(&_iter_lock);
+
+  // Prepare iteration
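+  // Iteration works on a snapshot of the table. A concurrent rebuild
+  // installs a new _table but leaves this snapshot in place; if it was
+  // replaced, the snapshot is deleted in nmethod_entries_do_end().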
+  _iter_table = _table;
+  _iter_table_size = _size;
   _claimed = 0;
+  assert(_iter_deferred_deletes.is_empty(), "Should be empty");
 }
 
-void ZNMethodTable::gc_epilogue() {
-  assert(_claimed >= _size, "Failed to claim all table entries");
+void ZNMethodTable::nmethod_entries_do_end() {
+  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  ZLocker<ZLock> locker(&_iter_lock);
+
+  // Finish iteration
+  if (_iter_table != _table) {
+    delete [] _iter_table;
+  }
+  _iter_table = NULL;
+  assert(_claimed >= _iter_table_size, "Failed to claim all table entries");
+
+  // Process deferred deletes
+  ZArrayIterator<void*> iter(&_iter_deferred_deletes);
+  for (void* data; iter.next(&data);) {
+    FREE_C_HEAP_ARRAY(uint8_t, data);
+  }
+  _iter_deferred_deletes.clear();
 }
 
 void ZNMethodTable::entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl) {
-  nmethod* const nm = method(entry);
-  if (!nm->is_alive()) {
-    // No need to visit oops
-    return;
-  }
+  nmethod* const nm = entry.method();
 
   // Process oops table
   oop* const begin = nm->oops_begin();
@@ -407,29 +526,52 @@
     }
   }
 
+  // Process immediate oops
   if (entry.immediate_oops()) {
-    // Process immediate oops
-    const ZNMethodWithImmediateOops* const nmi = entry.method_with_immediate_oops();
-    oop** const begin = nmi->immediate_oops_begin();
-    oop** const end = nmi->immediate_oops_end();
-    for (oop** p = begin; p < end; p++) {
-      cl->do_oop(*p);
+    const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
+    if (nmi != NULL) {
+      oop** const begin = nmi->immediate_oops_begin();
+      oop** const end = nmi->immediate_oops_end();
+      for (oop** p = begin; p < end; p++) {
+        if (**p != Universe::non_oop_word()) {
+          cl->do_oop(*p);
+        }
+      }
     }
   }
 
+  // Process non-immediate oops
   if (entry.non_immediate_oops()) {
-    // Process non-immediate oops
+    nmethod* const nm = entry.method();
     nm->fix_oop_relocations();
   }
 }
 
+class ZNMethodTableEntryToOopsDo : public ZNMethodTableEntryClosure {
+private:
+  OopClosure* _cl;
+
+public:
+  ZNMethodTableEntryToOopsDo(OopClosure* cl) :
+      _cl(cl) {}
+
+  void do_nmethod_entry(ZNMethodTableEntry entry) {
+    ZNMethodTable::entry_oops_do(entry, _cl);
+  }
+};
+
 void ZNMethodTable::oops_do(OopClosure* cl) {
+  ZNMethodTableEntryToOopsDo entry_cl(cl);
+  nmethod_entries_do(&entry_cl);
+}
+
+void ZNMethodTable::nmethod_entries_do(ZNMethodTableEntryClosure* cl) {
   for (;;) {
     // Claim table partition. Each partition is currently sized to span
     // two cache lines. This number is just a guess, but seems to work well.
     const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
-    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _size);
-    const size_t partition_end = MIN2(partition_start + partition_size, _size);
+    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _iter_table_size);
+    const size_t partition_end = MIN2(partition_start + partition_size, _iter_table_size);
     if (partition_start == partition_end) {
       // End of table
       break;
@@ -437,10 +579,141 @@
 
     // Process table partition
     for (size_t i = partition_start; i < partition_end; i++) {
-      const ZNMethodTableEntry entry = _table[i];
+      const ZNMethodTableEntry entry = _iter_table[i];
       if (entry.registered()) {
-        entry_oops_do(entry, cl);
+        cl->do_nmethod_entry(entry);
       }
     }
   }
 }
+
+class ZNMethodTableUnlinkClosure : public ZNMethodTableEntryClosure {
+private:
+  bool          _unloading_occurred;
+  volatile bool _failed;
+
+  void set_failed() {
+    Atomic::store(true, &_failed);
+  }
+
+public:
+  ZNMethodTableUnlinkClosure(bool unloading_occurred) :
+      _unloading_occurred(unloading_occurred),
+      _failed(false) {}
+
+  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
+    if (failed()) {
+      return;
+    }
+
+    nmethod* const nm = entry.method();
+    if (!nm->is_alive()) {
+      return;
+    }
+
+    if (nm->is_unloading()) {
+      // Unlinking of the dependencies must happen before the
+      // handshake separating unlink and purge.
+      nm->flush_dependencies(false /* delete_immediately */);
+      return;
+    }
+
+    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));
+
+    // Heal oops and disarm
+    ZNMethodOopClosure cl;
+    ZNMethodTable::entry_oops_do(entry, &cl);
+    ZNMethodTable::disarm_nmethod(nm);
+
+    // Clear compiled ICs and exception caches
+    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
+      set_failed();
+    }
+  }
+
+  bool failed() const {
+    return Atomic::load(&_failed);
+  }
+};
+
+class ZNMethodTableUnlinkTask : public ZTask {
+private:
+  ZNMethodTableUnlinkClosure _cl;
+  ICRefillVerifier*          _verifier;
+
+public:
+  ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
+      ZTask("ZNMethodTableUnlinkTask"),
+      _cl(unloading_occurred),
+      _verifier(verifier) {
+    ZNMethodTable::nmethod_entries_do_begin();
+  }
+
+  ~ZNMethodTableUnlinkTask() {
+    ZNMethodTable::nmethod_entries_do_end();
+  }
+
+  virtual void work() {
+    ICRefillVerifierMark mark(_verifier);
+    ZNMethodTable::nmethod_entries_do(&_cl);
+  }
+
+  bool success() const {
+    return !_cl.failed();
+  }
+};
+
+void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
+  for (;;) {
+    ICRefillVerifier verifier;
+
+    {
+      ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
+      workers->run_concurrent(&task);
+      if (task.success()) {
+        return;
+      }
+    }
+
+    // Cleaning failed because we ran out of transitional IC stubs,
+    // so we have to refill and try again. Refilling requires taking
+    // a safepoint, so we temporarily leave the suspendible thread set.
+    SuspendibleThreadSetLeaver sts;
+    InlineCacheBuffer::refill_ic_stubs();
+  }
+}
+
+class ZNMethodTablePurgeClosure : public ZNMethodTableEntryClosure {
+public:
+  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
+    nmethod* const nm = entry.method();
+    if (nm->is_alive() && nm->is_unloading()) {
+      nm->make_unloaded();
+    }
+  }
+};
+
+class ZNMethodTablePurgeTask : public ZTask {
+private:
+  ZNMethodTablePurgeClosure _cl;
+
+public:
+  ZNMethodTablePurgeTask() :
+      ZTask("ZNMethodTablePurgeTask"),
+      _cl() {
+    ZNMethodTable::nmethod_entries_do_begin();
+  }
+
+  ~ZNMethodTablePurgeTask() {
+    ZNMethodTable::nmethod_entries_do_end();
+  }
+
+  virtual void work() {
+    ZNMethodTable::nmethod_entries_do(&_cl);
+  }
+};
+
+void ZNMethodTable::purge(ZWorkers* workers) {
+  ZNMethodTablePurgeTask task;
+  workers->run_concurrent(&task);
+}
--- a/src/hotspot/share/gc/z/zNMethodTable.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zNMethodTable.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -24,28 +24,40 @@
 #ifndef SHARE_GC_Z_ZNMETHODTABLE_HPP
 #define SHARE_GC_Z_ZNMETHODTABLE_HPP
 
+#include "gc/z/zArray.hpp"
 #include "gc/z/zGlobals.hpp"
+#include "gc/z/zLock.hpp"
 #include "gc/z/zNMethodTableEntry.hpp"
 #include "memory/allocation.hpp"
 
+class ZWorkers;
+
+class ZNMethodTableEntryClosure {
+public:
+  virtual void do_nmethod_entry(ZNMethodTableEntry entry) = 0;
+};
+
 class ZNMethodTable : public AllStatic {
 private:
   static ZNMethodTableEntry* _table;
   static size_t              _size;
+  static ZLock               _iter_lock;
+  static ZNMethodTableEntry* _iter_table;
+  static size_t              _iter_table_size;
+  static ZArray<void*>       _iter_deferred_deletes;
   static size_t              _nregistered;
   static size_t              _nunregistered;
   static volatile size_t     _claimed ATTRIBUTE_ALIGNED(ZCacheLineSize);
 
   static ZNMethodTableEntry create_entry(nmethod* nm);
-  static void destroy_entry(ZNMethodTableEntry entry);
-
-  static nmethod* method(ZNMethodTableEntry entry);
 
   static size_t first_index(const nmethod* nm, size_t size);
   static size_t next_index(size_t prev_index, size_t size);
 
+  static void sweeper_wait_for_iteration();
+
   static bool register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry);
-  static bool unregister_entry(ZNMethodTableEntry* table, size_t size, const nmethod* nm);
+  static bool unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm);
 
   static void rebuild(size_t new_size);
   static void rebuild_if_needed();
@@ -53,19 +65,28 @@
   static void log_register(const nmethod* nm, ZNMethodTableEntry entry);
   static void log_unregister(const nmethod* nm);
 
-  static void entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl);
+public:
+  static void safe_delete(void* data);
 
-public:
   static size_t registered_nmethods();
   static size_t unregistered_nmethods();
 
   static void register_nmethod(nmethod* nm);
   static void unregister_nmethod(nmethod* nm);
+  static void disarm_nmethod(nmethod* nm);
 
-  static void gc_prologue();
-  static void gc_epilogue();
+  static ZReentrantLock* lock_for_nmethod(nmethod* nm);
 
   static void oops_do(OopClosure* cl);
+
+  static void entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl);
+
+  static void nmethod_entries_do_begin();
+  static void nmethod_entries_do_end();
+  static void nmethod_entries_do(ZNMethodTableEntryClosure* cl);
+
+  static void unlink(ZWorkers* workers, bool unloading_occurred);
+  static void purge(ZWorkers* workers);
 };
 
 #endif // SHARE_GC_Z_ZNMETHODTABLE_HPP
--- a/src/hotspot/share/gc/z/zNMethodTableEntry.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zNMethodTableEntry.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,38 +43,30 @@
 //  |                                                                        |
 //  |                                           0-0 Registered Flag (1-bits) *
 //  |
-//  * 63-3 NMethod/ZNMethodWithImmediateOops Address (61-bits)
+//  * 63-3 NMethod Address (61-bits)
 //
 
 class nmethod;
-class ZNMethodWithImmediateOops;
 
 class ZNMethodTableEntry : public CHeapObj<mtGC> {
 private:
-  typedef ZBitField<uint64_t, bool,                       0,  1>    field_registered;
-  typedef ZBitField<uint64_t, bool,                       1,  1>    field_unregistered;
-  typedef ZBitField<uint64_t, bool,                       1,  1>    field_immediate_oops;
-  typedef ZBitField<uint64_t, bool,                       2,  1>    field_non_immediate_oops;
-  typedef ZBitField<uint64_t, nmethod*,                   3, 61, 3> field_method;
-  typedef ZBitField<uint64_t, ZNMethodWithImmediateOops*, 3, 61, 3> field_method_with_immediate_oops;
+  typedef ZBitField<uint64_t, bool,     0,  1>    field_registered;
+  typedef ZBitField<uint64_t, bool,     1,  1>    field_unregistered;
+  typedef ZBitField<uint64_t, bool,     1,  1>    field_immediate_oops;
+  typedef ZBitField<uint64_t, bool,     2,  1>    field_non_immediate_oops;
+  typedef ZBitField<uint64_t, nmethod*, 3, 61, 3> field_method;
 
   uint64_t _entry;
 
 public:
-  ZNMethodTableEntry(bool unregistered = false) :
+  explicit ZNMethodTableEntry(bool unregistered = false) :
       _entry(field_unregistered::encode(unregistered) |
              field_registered::encode(false)) {}
 
-  ZNMethodTableEntry(nmethod* method, bool non_immediate_oops) :
+  ZNMethodTableEntry(nmethod* method, bool non_immediate_oops, bool immediate_oops) :
       _entry(field_method::encode(method) |
              field_non_immediate_oops::encode(non_immediate_oops) |
-             field_immediate_oops::encode(false) |
-             field_registered::encode(true)) {}
-
-  ZNMethodTableEntry(ZNMethodWithImmediateOops* method_with_immediate_oops, bool non_immediate_oops) :
-      _entry(field_method_with_immediate_oops::encode(method_with_immediate_oops) |
-             field_non_immediate_oops::encode(non_immediate_oops) |
-             field_immediate_oops::encode(true) |
+             field_immediate_oops::encode(immediate_oops) |
              field_registered::encode(true)) {}
 
   bool registered() const {
@@ -96,10 +88,6 @@
   nmethod* method() const {
     return field_method::decode(_entry);
   }
-
-  ZNMethodWithImmediateOops* method_with_immediate_oops() const {
-    return field_method_with_immediate_oops::decode(_entry);
-  }
 };
 
 #endif // SHARE_GC_Z_ZNMETHODTABLEENTRY_HPP
--- a/src/hotspot/share/gc/z/zOopClosures.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zOopClosures.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -39,14 +39,23 @@
 #endif
 };
 
+class ZNMethodOopClosure : public OopClosure {
+public:
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+};
+
 template <bool finalizable>
-class ZMarkBarrierOopClosure : public BasicOopIterateClosure {
+class ZMarkBarrierOopClosure : public MetadataVisitingOopIterateClosure {
 public:
   ZMarkBarrierOopClosure();
 
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
 
+  virtual void do_klass(Klass* k);
+  virtual void do_cld(ClassLoaderData* cld);
+
 #ifdef ASSERT
   virtual bool should_verify_oops() {
     return false;
--- a/src/hotspot/share/gc/z/zOopClosures.inline.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zOopClosures.inline.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -24,6 +24,7 @@
 #ifndef SHARE_GC_Z_ZOOPCLOSURES_INLINE_HPP
 #define SHARE_GC_Z_ZOOPCLOSURES_INLINE_HPP
 
+#include "classfile/classLoaderData.hpp"
 #include "gc/z/zBarrier.inline.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zOop.inline.hpp"
@@ -40,9 +41,21 @@
   ShouldNotReachHere();
 }
 
+inline void ZNMethodOopClosure::do_oop(oop* p) {
+  if (ZResurrection::is_blocked()) {
+    ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(p);
+  } else {
+    ZBarrier::load_barrier_on_root_oop_field(p);
+  }
+}
+
+inline void ZNMethodOopClosure::do_oop(narrowOop* p) {
+  ShouldNotReachHere();
+}
+
 template <bool finalizable>
 inline ZMarkBarrierOopClosure<finalizable>::ZMarkBarrierOopClosure() :
-    BasicOopIterateClosure(finalizable ? NULL : ZHeap::heap()->reference_discoverer()) {}
+    MetadataVisitingOopIterateClosure(finalizable ? NULL : ZHeap::heap()->reference_discoverer()) {}
 
 template <bool finalizable>
 inline void ZMarkBarrierOopClosure<finalizable>::do_oop(oop* p) {
@@ -54,6 +67,18 @@
   ShouldNotReachHere();
 }
 
+template <bool finalizable>
+inline void ZMarkBarrierOopClosure<finalizable>::do_klass(Klass* k) {
+  ClassLoaderData* const cld = k->class_loader_data();
+  ZMarkBarrierOopClosure<finalizable>::do_cld(cld);
+}
+
+template <bool finalizable>
+inline void ZMarkBarrierOopClosure<finalizable>::do_cld(ClassLoaderData* cld) {
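+  // Marking through a finalizer uses the weaker finalizable claim, which
+  // can later be upgraded to a strong claim.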
+  const int claim = finalizable ? ClassLoaderData::_claim_finalizable : ClassLoaderData::_claim_strong;
+  cld->oops_do(this, claim);
+}
+
 inline bool ZPhantomIsAliveObjectClosure::do_object_b(oop o) {
   return ZBarrier::is_alive_barrier_on_phantom_oop(o);
 }
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp	Tue Dec 11 11:08:39 2018 +0100
@@ -27,8 +27,11 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "compiler/oopMap.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetNMethod.hpp"
 #include "gc/shared/oopStorageParState.inline.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/z/zBarrierSetNMethod.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zNMethodTable.hpp"
 #include "gc/z/zOopClosures.inline.hpp"
@@ -132,6 +135,30 @@
   }
 }
 
+class ZCodeBlobClosure : public CodeBlobToOopClosure {
+private:
+  BarrierSetNMethod* _bs;
+
+public:
+  ZCodeBlobClosure(OopClosure* cl) :
+    CodeBlobToOopClosure(cl, true /* fix_relocations */),
+    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
+
+  virtual void do_code_blob(CodeBlob* cb) {
+    nmethod* const nm = cb->as_nmethod_or_null();
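+    // Claim the nmethod, so that only the first thread that visits it
+    // processes its oops and disarms its entry barrier.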
+    if (nm == NULL || nm->test_set_oops_do_mark()) {
+      return;
+    }
+    CodeBlobToOopClosure::do_code_blob(cb);
+    _bs->disarm(nm);
+  }
+};
+
+void ZRootsIteratorClosure::do_thread(Thread* thread) {
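+  // When class unloading is enabled, on-stack nmethods are kept alive
+  // and disarmed as part of thread scanning, instead of eagerly scanning
+  // the entire code cache.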
+  ZCodeBlobClosure code_cl(this);
+  thread->oops_do(this, ClassUnloading ? &code_cl : NULL);
+}
+
 ZRootsIterator::ZRootsIterator() :
     _universe(this),
     _object_synchronizer(this),
@@ -145,16 +172,23 @@
   ZStatTimer timer(ZSubPhasePauseRootsSetup);
   Threads::change_thread_claim_parity();
   COMPILER2_PRESENT(DerivedPointerTable::clear());
-  CodeCache::gc_prologue();
-  ZNMethodTable::gc_prologue();
+  if (ClassUnloading) {
+    nmethod::oops_do_marking_prologue();
+  } else {
+    ZNMethodTable::nmethod_entries_do_begin();
+  }
 }
 
 ZRootsIterator::~ZRootsIterator() {
   ZStatTimer timer(ZSubPhasePauseRootsTeardown);
   ResourceMark rm;
-  ZNMethodTable::gc_epilogue();
-  CodeCache::gc_epilogue();
+  if (ClassUnloading) {
+    nmethod::oops_do_marking_epilogue();
+  } else {
+    ZNMethodTable::nmethod_entries_do_end();
+  }
   JvmtiExport::gc_epilogue();
+
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
   Threads::assert_all_threads_claimed();
 }
@@ -209,7 +243,9 @@
   _jvmti_export.oops_do(cl);
   _system_dictionary.oops_do(cl);
   _threads.oops_do(cl);
-  _code_cache.oops_do(cl);
+  if (!ClassUnloading) {
+    _code_cache.oops_do(cl);
+  }
   if (visit_jvmti_weak_export) {
     _jvmti_weak_export.oops_do(cl);
   }
@@ -242,8 +278,13 @@
 
 void ZConcurrentRootsIterator::do_class_loader_data_graph(ZRootsIteratorClosure* cl) {
   ZStatTimer timer(ZSubPhaseConcurrentRootsClassLoaderDataGraph);
-  CLDToOopClosure cld_cl(cl, _marking ? ClassLoaderData::_claim_strong : ClassLoaderData::_claim_none);
-  ClassLoaderDataGraph::cld_do(&cld_cl);
+  if (_marking) {
+    CLDToOopClosure cld_cl(cl, ClassLoaderData::_claim_strong);
+    ClassLoaderDataGraph::always_strong_cld_do(&cld_cl);
+  } else {
+    CLDToOopClosure cld_cl(cl, ClassLoaderData::_claim_none);
+    ClassLoaderDataGraph::cld_do(&cld_cl);
+  }
 }
 
 void ZConcurrentRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
--- a/src/hotspot/share/gc/z/zRootsIterator.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zRootsIterator.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -33,9 +33,7 @@
 
 class ZRootsIteratorClosure : public OopClosure, public ThreadClosure {
 public:
-  virtual void do_thread(Thread* thread) {
-    thread->oops_do(this, NULL);
-  }
+  virtual void do_thread(Thread* thread);
 };
 
 typedef OopStorage::ParState<true /* concurrent */, false /* is_const */> ZOopStorageIterator;
--- a/src/hotspot/share/gc/z/zThreadLocalData.hpp	Tue Dec 11 11:29:28 2018 +0100
+++ b/src/hotspot/share/gc/z/zThreadLocalData.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -25,6 +25,7 @@
 #define SHARE_GC_Z_ZTHREADLOCALDATA_HPP
 
 #include "gc/z/zMarkStack.hpp"
+#include "gc/z/zGlobals.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/sizes.hpp"
@@ -62,6 +63,10 @@
   static ByteSize address_bad_mask_offset() {
     return Thread::gc_data_offset() + byte_offset_of(ZThreadLocalData, _address_bad_mask);
   }
+
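+  // The nmethod disarmed value is a 32-bit slice of the thread-local
+  // address bad mask, located ZNMethodDisarmedOffset bytes into it.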
+  static ByteSize nmethod_disarmed_offset() {
+    return address_bad_mask_offset() + in_ByteSize(ZNMethodDisarmedOffset);
+  }
 };
 
 #endif // SHARE_GC_Z_ZTHREADLOCALDATA_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zUnload.cpp	Tue Dec 11 11:08:39 2018 +0100
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderDataGraph.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeBehaviours.hpp"
+#include "code/codeCache.hpp"
+#include "code/dependencyContext.hpp"
+#include "gc/shared/gcBehaviours.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zOopClosures.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zUnload.hpp"
+#include "oops/access.inline.hpp"
+
+static const ZStatSubPhase ZSubPhaseConcurrentClassesUnload("Concurrent Classes Unload");
+
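+// Closure used to determine whether an nmethod is unloading: it is, if it
+// holds on to an oop that is not phantom-alive and will hence be reclaimed.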
+class ZIsUnloadingOopClosure : public OopClosure {
+private:
+  ZPhantomIsAliveObjectClosure _is_alive;
+  bool                         _is_unloading;
+
+public:
+  ZIsUnloadingOopClosure() :
+      _is_alive(),
+      _is_unloading(false) {}
+
+  virtual void do_oop(oop* p) {
+    const oop o = RawAccess<>::oop_load(p);
+    if (o != NULL && !_is_alive.do_object_b(o)) {
+      _is_unloading = true;
+    }
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    ShouldNotReachHere();
+  }
+
+  bool is_unloading() const {
+    return _is_unloading;
+  }
+};
+
+class ZIsUnloadingBehaviour : public IsUnloadingBehaviour {
+private:
+  bool is_unloading(nmethod* nm) const {
+    ZIsUnloadingOopClosure cl;
+    nm->oops_do(&cl, true /* allow_zombie */);
+    return cl.is_unloading();
+  }
+
+public:
+  virtual bool is_unloading(CompiledMethod* method) const {
+    nmethod* const nm = method->as_nmethod();
+    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
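+    // An nmethod that was never registered has no GC data, and hence no
+    // lock to take.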
+    if (lock == NULL) {
+      return is_unloading(nm);
+    } else {
+      ZLocker<ZReentrantLock> locker(lock);
+      return is_unloading(nm);
+    }
+  }
+};
+
+class ZCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour {
+public:
+  virtual bool lock(CompiledMethod* method) {
+    nmethod* const nm = method->as_nmethod();
+    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    if (lock != NULL) {
+      lock->lock();
+    }
+    return true;
+  }
+
+  virtual void unlock(CompiledMethod* method) {
+    nmethod* const nm = method->as_nmethod();
+    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    if (lock != NULL) {
+      lock->unlock();
+    }
+  }
+
+  virtual bool is_safe(CompiledMethod* method) {
+    if (SafepointSynchronize::is_at_safepoint()) {
+      return true;
+    }
+
+    nmethod* const nm = method->as_nmethod();
+    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    return lock == NULL || lock->is_owned();
+  }
+};
+
+ZUnload::ZUnload(ZWorkers* workers) :
+    _workers(workers) {
+
+  if (!ClassUnloading) {
+    return;
+  }
+
+  static ZIsUnloadingBehaviour is_unloading_behaviour;
+  IsUnloadingBehaviour::set_current(&is_unloading_behaviour);
+
+  static ZCompiledICProtectionBehaviour ic_protection_behaviour;
+  CompiledICProtectionBehaviour::set_current(&ic_protection_behaviour);
+}
+
+void ZUnload::prepare() {
+  if (!ClassUnloading) {
+    return;
+  }
+
+  CodeCache::increment_unloading_cycle();
+  DependencyContext::cleaning_start();
+}
+
+void ZUnload::unlink() {
+  SuspendibleThreadSetJoiner sts;
+  bool unloading_occurred;
+
+  {
+    MutexLockerEx ml(ClassLoaderDataGraph_lock);
+    unloading_occurred = SystemDictionary::do_unloading(ZStatPhase::timer());
+  }
+
+  Klass::clean_weak_klass_links(unloading_occurred);
+
+  ZNMethodTable::unlink(_workers, unloading_occurred);
+
+  DependencyContext::cleaning_end();
+}
+
+void ZUnload::purge() {
+  {
+    SuspendibleThreadSetJoiner sts;
+    ZNMethodTable::purge(_workers);
+  }
+
+  ClassLoaderDataGraph::purge();
+  CodeCache::purge_exception_caches();
+}
+
+class ZUnloadRendezvousClosure : public ThreadClosure {
+public:
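+  // Nothing to do per thread; passing through the handshake is itself
+  // the synchronization point.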
+  void do_thread(Thread* thread) {}
+};
+
+void ZUnload::unload() {
+  if (!ClassUnloading) {
+    return;
+  }
+
+  ZStatTimer timer(ZSubPhaseConcurrentClassesUnload);
+
+  // Unlink stale metadata and nmethods
+  unlink();
+
+  // Make sure stale metadata and nmethods are no longer observable
+  ZUnloadRendezvousClosure cl;
+  Handshake::execute(&cl);
+
+  // Purge stale metadata and nmethods that were unlinked
+  purge();
+}
+
+void ZUnload::finish() {
+  // Resize and verify metaspace
+  MetaspaceGC::compute_new_size();
+  MetaspaceUtils::verify_metrics();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zUnload.hpp	Tue Dec 11 11:08:39 2018 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZUNLOAD_HPP
+#define SHARE_GC_Z_ZUNLOAD_HPP
+
+class ZWorkers;
+
+class ZUnload {
+private:
+  ZWorkers* const _workers;
+
+  void unlink();
+  void purge();
+
+public:
+  ZUnload(ZWorkers* workers);
+
+  void prepare();
+  void unload();
+  void finish();
+};
+
+#endif // SHARE_GC_Z_ZUNLOAD_HPP