8156500: Move Reference pending list into VM to prevent deadlocks
author kbarrett
Tue, 30 Aug 2016 23:48:16 -0400
changeset 40892 330a02d935ad
parent 40891 8010999ff6d0
child 40893 12787d18650e
8156500: Move Reference pending list into VM to prevent deadlocks
Summary: Move reference pending list and locking into VM
Reviewed-by: coleenp, dholmes, dcubed, mchung, plevart
Contributed-by: kim.barrett@oracle.com, per.liden@oracle.com
hotspot/make/symbols/symbols-unix
hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java
hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/sa.js
hotspot/src/share/vm/ci/ciReplay.cpp
hotspot/src/share/vm/classfile/javaClasses.cpp
hotspot/src/share/vm/classfile/javaClasses.hpp
hotspot/src/share/vm/compiler/compileBroker.cpp
hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp
hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp
hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp
hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp
hotspot/src/share/vm/gc/g1/vm_operations_g1.hpp
hotspot/src/share/vm/gc/shared/collectedHeap.hpp
hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp
hotspot/src/share/vm/gc/shared/referencePendingListLocker.cpp
hotspot/src/share/vm/gc/shared/referencePendingListLocker.hpp
hotspot/src/share/vm/gc/shared/referenceProcessor.cpp
hotspot/src/share/vm/gc/shared/referenceProcessor.hpp
hotspot/src/share/vm/gc/shared/vmGCOperations.cpp
hotspot/src/share/vm/gc/shared/vmGCOperations.hpp
hotspot/src/share/vm/memory/universe.cpp
hotspot/src/share/vm/memory/universe.hpp
hotspot/src/share/vm/oops/method.cpp
hotspot/src/share/vm/prims/jvm.cpp
hotspot/src/share/vm/prims/jvm.h
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/vmStructs.cpp
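
The hotspot side of this fix replaces the Java-level Reference$Lock/pending protocol with a VM-internal list (Universe::_reference_pending_list) guarded by Heap_lock, exposed to the class library through three new JVM entry points (JVM_HasReferencePendingList, JVM_WaitForReferencePendingList, JVM_GetAndClearReferencePendingList). The matching java.lang.ref.Reference change lives in a companion jdk-repo changeset that is not part of this patch; the sketch below is only an illustration of how a reference-handler loop might consume these entry points, and the Java method names are assumptions that simply mirror the JVM_* symbols added in the diff.

    // Illustrative sketch only: the jdk-repo side of this fix is not included
    // in this hotspot changeset, and these method names are assumed to mirror
    // the new JVM_* entry points exported below.
    class PendingListSketch {
        // Expected native bindings: JVM_HasReferencePendingList,
        // JVM_WaitForReferencePendingList, JVM_GetAndClearReferencePendingList.
        private static native boolean hasReferencePendingList();
        private static native void waitForReferencePendingList();
        private static native Object getAndClearReferencePendingList();

        // Reference-handler style loop: block inside the VM (waiting on
        // Heap_lock) until the GC has published a pending list, then take the
        // whole list in one call and walk its 'discovered' chain.
        static void handlerLoop() {
            for (;;) {
                waitForReferencePendingList();
                Object head = getAndClearReferencePendingList();
                // ... walk the discovered chain from 'head' and enqueue each
                // Reference on its ReferenceQueue.
            }
        }
    }
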
--- a/hotspot/make/symbols/symbols-unix	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/make/symbols/symbols-unix	Tue Aug 30 23:48:16 2016 -0400
@@ -67,6 +67,7 @@
 JVM_FreeMemory
 JVM_GC
 JVM_GetAllThreads
+JVM_GetAndClearReferencePendingList
 JVM_GetArrayElement
 JVM_GetArrayLength
 JVM_GetCallerClass
@@ -130,6 +131,7 @@
 JVM_GetTemporaryDirectory
 JVM_GetVmArguments
 JVM_Halt
+JVM_HasReferencePendingList
 JVM_HoldsLock
 JVM_IHashCode
 JVM_InitProperties
@@ -179,6 +181,7 @@
 JVM_ToStackTraceElement
 JVM_TotalMemory
 JVM_UnloadLibrary
+JVM_WaitForReferencePendingList
 JVM_Yield
 
 # Module related API's
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java	Tue Aug 30 23:48:16 2016 -0400
@@ -129,8 +129,6 @@
             virtualConstructor.addMapping("CompilerThread", CompilerThread.class);
             virtualConstructor.addMapping("CodeCacheSweeperThread", CodeCacheSweeperThread.class);
         }
-        // for now, use JavaThread itself. fix it later with appropriate class if needed
-        virtualConstructor.addMapping("ReferencePendingListLockerThread", JavaThread.class);
         virtualConstructor.addMapping("JvmtiAgentThread", JvmtiAgentThread.class);
         virtualConstructor.addMapping("ServiceThread", ServiceThread.class);
     }
@@ -172,7 +170,7 @@
             return thread;
         } catch (Exception e) {
             throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr +
-            " (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, ReferencePendingListLockerThread, or CodeCacheSweeperThread)", e);
+            " (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread or CodeCacheSweeperThread)", e);
         }
     }
 
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Tue Aug 30 23:48:16 2016 -0400
@@ -837,7 +837,6 @@
 vmType2Class["JavaThread"] = sapkg.runtime.JavaThread;
 vmType2Class["CompilerThread"] = sapkg.runtime.CompilerThread;
 vmType2Class["CodeCacheSweeperThread"] = sapkg.runtime.CodeCacheSweeperThread;
-vmType2Class["ReferencePendingListLockerThread"] = sapkg.runtime.JavaThread;
 vmType2Class["DebuggerThread"] = sapkg.runtime.DebuggerThread;
 
 // gc
--- a/hotspot/src/share/vm/ci/ciReplay.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/ci/ciReplay.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -29,7 +29,6 @@
 #include "ci/ciKlass.hpp"
 #include "ci/ciUtilities.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
@@ -577,9 +576,7 @@
     Method* method = parse_method(CHECK);
     if (had_error()) return;
     /* just copied from Method, to build interpret data*/
-    if (ReferencePendingListLocker::is_locked_by_self()) {
-      return;
-    }
+
     // To be properly initialized, some profiling in the MDO needs the
     // method to be rewritten (number of arguments at a call for
     // instance)
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -3015,41 +3015,6 @@
   }
 }
 
-
-// Support for java_lang_ref_Reference
-HeapWord *java_lang_ref_Reference::pending_list_lock_addr() {
-  InstanceKlass* ik = SystemDictionary::Reference_klass();
-  address addr = ik->static_field_addr(static_lock_offset);
-  return (HeapWord*) addr;
-}
-
-oop java_lang_ref_Reference::pending_list_lock() {
-  InstanceKlass* ik = SystemDictionary::Reference_klass();
-  address addr = ik->static_field_addr(static_lock_offset);
-  if (UseCompressedOops) {
-    return oopDesc::load_decode_heap_oop((narrowOop *)addr);
-  } else {
-    return oopDesc::load_decode_heap_oop((oop*)addr);
-  }
-}
-
-HeapWord *java_lang_ref_Reference::pending_list_addr() {
-  InstanceKlass* ik = SystemDictionary::Reference_klass();
-  address addr = ik->static_field_addr(static_pending_offset);
-  // XXX This might not be HeapWord aligned, almost rather be char *.
-  return (HeapWord*)addr;
-}
-
-oop java_lang_ref_Reference::pending_list() {
-  char *addr = (char *)pending_list_addr();
-  if (UseCompressedOops) {
-    return oopDesc::load_decode_heap_oop((narrowOop *)addr);
-  } else {
-    return oopDesc::load_decode_heap_oop((oop*)addr);
-  }
-}
-
-
 // Support for java_lang_ref_SoftReference
 
 jlong java_lang_ref_SoftReference::timestamp(oop ref) {
@@ -3616,8 +3581,6 @@
 int java_lang_ref_Reference::queue_offset;
 int java_lang_ref_Reference::next_offset;
 int java_lang_ref_Reference::discovered_offset;
-int java_lang_ref_Reference::static_lock_offset;
-int java_lang_ref_Reference::static_pending_offset;
 int java_lang_ref_Reference::number_of_fake_oop_fields;
 int java_lang_ref_SoftReference::timestamp_offset;
 int java_lang_ref_SoftReference::static_clock_offset;
@@ -3772,8 +3735,6 @@
   java_lang_ref_Reference::queue_offset = java_lang_ref_Reference::hc_queue_offset * x + header;
   java_lang_ref_Reference::next_offset  = java_lang_ref_Reference::hc_next_offset * x + header;
   java_lang_ref_Reference::discovered_offset  = java_lang_ref_Reference::hc_discovered_offset * x + header;
-  java_lang_ref_Reference::static_lock_offset = java_lang_ref_Reference::hc_static_lock_offset *  x;
-  java_lang_ref_Reference::static_pending_offset = java_lang_ref_Reference::hc_static_pending_offset * x;
   // Artificial fields for java_lang_ref_Reference
   // The first field is for the discovered field added in 1.4
   java_lang_ref_Reference::number_of_fake_oop_fields = 1;
@@ -4006,8 +3967,6 @@
   CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, next, "Ljava/lang/ref/Reference;");
   // Fake field
   //CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, discovered, "Ljava/lang/ref/Reference;");
-  CHECK_STATIC_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, lock, "Ljava/lang/ref/Reference$Lock;");
-  CHECK_STATIC_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, pending, "Ljava/lang/ref/Reference;");
 
   // java.lang.ref.SoftReference
 
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Tue Aug 30 23:48:16 2016 -0400
@@ -886,17 +886,11 @@
    hc_next_offset       = 2,
    hc_discovered_offset = 3  // Is not last, see SoftRefs.
   };
-  enum {
-   hc_static_lock_offset    = 0,
-   hc_static_pending_offset = 1
-  };
 
   static int referent_offset;
   static int queue_offset;
   static int next_offset;
   static int discovered_offset;
-  static int static_lock_offset;
-  static int static_pending_offset;
   static int number_of_fake_oop_fields;
 
   // Accessors
@@ -912,13 +906,6 @@
   static inline void set_discovered(oop ref, oop value);
   static inline void set_discovered_raw(oop ref, oop value);
   static inline HeapWord* discovered_addr(oop ref);
-
-  // Accessors for statics
-  static oop  pending_list_lock();
-  static oop  pending_list();
-
-  static HeapWord*  pending_list_lock_addr();
-  static HeapWord*  pending_list_addr();
 };
 
 
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -32,7 +32,6 @@
 #include "compiler/compileLog.hpp"
 #include "compiler/compilerOracle.hpp"
 #include "compiler/directivesParser.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "interpreter/linkResolver.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
@@ -893,15 +892,6 @@
     return;
   }
 
-  // If the requesting thread is holding the pending list lock
-  // then we just return. We can't risk blocking while holding
-  // the pending list lock or a 3-way deadlock may occur
-  // between the reference handler thread, a GC (instigated
-  // by a compiler thread), and compiled method registration.
-  if (ReferencePendingListLocker::is_locked_by_self()) {
-    return;
-  }
-
   if (TieredCompilation) {
     // Tiered policy requires MethodCounters to exist before adding a method to
     // the queue. Create if we don't have them yet.
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -28,7 +28,6 @@
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -77,23 +76,6 @@
     log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
   }
 
-  {
-    MutexLockerEx x(CGC_lock, true);
-    set_CMS_flag(CMS_cms_wants_token);
-    assert(is_init_completed() && Universe::is_fully_initialized(), "ConcurrentGCThread::run() should have waited for this.");
-
-    // Wait until the surrogate locker thread that will do
-    // pending list locking on our behalf has been created.
-    // We cannot start the SLT thread ourselves since we need
-    // to be a JavaThread to do so.
-    CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2);
-    while (!ReferencePendingListLocker::is_initialized() && !should_terminate()) {
-      CGC_lock->wait(true, 200);
-      loopY.tick();
-    }
-    clear_CMS_flag(CMS_cms_wants_token);
-  }
-
   while (!should_terminate()) {
     sleepBeforeNextCycle();
     if (should_terminate()) break;
--- a/hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -37,14 +37,6 @@
 //////////////////////////////////////////////////////////
 // Methods in abstract class VM_CMS_Operation
 //////////////////////////////////////////////////////////
-void VM_CMS_Operation::acquire_pending_list_lock() {
-  _pending_list_locker.lock();
-}
-
-void VM_CMS_Operation::release_and_notify_pending_list_lock() {
-  _pending_list_locker.unlock();
-}
-
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
@@ -85,17 +77,10 @@
   assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "Possible deadlock");
 
-  if (needs_pending_list_lock()) {
-    acquire_pending_list_lock();
-  }
-  // Get the Heap_lock after the pending_list_lock.
   Heap_lock->lock();
   if (lost_race()) {
     assert(_prologue_succeeded == false, "Initialized in c'tor");
     Heap_lock->unlock();
-    if (needs_pending_list_lock()) {
-      release_and_notify_pending_list_lock();
-    }
   } else {
     _prologue_succeeded = true;
   }
@@ -108,11 +93,10 @@
   assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "Possible deadlock");
 
-  // Release the Heap_lock first.
+  if (Universe::has_reference_pending_list()) {
+    Heap_lock->notify_all();
+  }
   Heap_lock->unlock();
-  if (needs_pending_list_lock()) {
-    release_and_notify_pending_list_lock();
-  }
 }
 
 //////////////////////////////////////////////////////////
@@ -230,9 +214,11 @@
   Thread* thr = Thread::current();
   assert(thr->is_Java_thread(), "just checking");
   JavaThread* jt = (JavaThread*)thr;
-  // Release the Heap_lock first.
+
+  if (Universe::has_reference_pending_list()) {
+    Heap_lock->notify_all();
+  }
   Heap_lock->unlock();
-  release_and_notify_pending_list_lock();
 
   // It is fine to test whether completed collections has
   // exceeded our request count without locking because
--- a/hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp	Tue Aug 30 23:48:16 2016 -0400
@@ -28,7 +28,6 @@
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "runtime/vm_operations.hpp"
 
@@ -52,9 +51,6 @@
 class CMSCollector;
 
 class VM_CMS_Operation: public VM_Operation {
- private:
-  ReferencePendingListLocker _pending_list_locker;
-
  protected:
   CMSCollector*  _collector;                 // associated collector
   bool           _prologue_succeeded;     // whether doit_prologue succeeded
@@ -62,10 +58,6 @@
 
   bool lost_race() const;
 
-  // java.lang.ref.Reference support
-  void acquire_pending_list_lock();
-  void release_and_notify_pending_list_lock();
-
  public:
   VM_CMS_Operation(CMSCollector* collector):
     _collector(collector),
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -175,7 +175,7 @@
                                 TimeHelper::counter_to_millis(mark_end - mark_start));
 
           CMCheckpointRootsFinalClosure final_cl(_cm);
-          VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
+          VM_CGC_Operation op(&final_cl, "Pause Remark");
           VMThread::execute(&op);
         }
         if (cm()->restart_for_overflow()) {
@@ -199,7 +199,7 @@
         delay_to_keep_mmu(g1_policy, false /* cleanup */);
 
         CMCleanUp cl_cl(_cm);
-        VM_CGC_Operation op(&cl_cl, "Pause Cleanup", false /* needs_pll */);
+        VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
         VMThread::execute(&op);
       } else {
         // We don't want to update the marking status if a GC pause
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Tue Aug 30 23:48:16 2016 -0400
@@ -1273,12 +1273,6 @@
     return true;
   }
 
-  // The reference pending list lock is acquired from from the
-  // ConcurrentMarkThread.
-  virtual bool needs_reference_pending_list_locker_thread() const {
-    return true;
-  }
-
   inline bool is_in_young(const oop obj);
 
   virtual bool is_scavengable(const void* addr);
--- a/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -204,14 +204,6 @@
   }
 }
 
-void VM_CGC_Operation::acquire_pending_list_lock() {
-  _pending_list_locker.lock();
-}
-
-void VM_CGC_Operation::release_and_notify_pending_list_lock() {
-  _pending_list_locker.unlock();
-}
-
 void VM_CGC_Operation::doit() {
   GCIdMark gc_id_mark(_gc_id);
   GCTraceCPUTime tcpu;
@@ -222,20 +214,13 @@
 }
 
 bool VM_CGC_Operation::doit_prologue() {
-  // Note the relative order of the locks must match that in
-  // VM_GC_Operation::doit_prologue() or deadlocks can occur
-  if (_needs_pending_list_lock) {
-    acquire_pending_list_lock();
-  }
   Heap_lock->lock();
   return true;
 }
 
 void VM_CGC_Operation::doit_epilogue() {
-  // Note the relative order of the unlocks must match that in
-  // VM_GC_Operation::doit_epilogue()
+  if (Universe::has_reference_pending_list()) {
+    Heap_lock->notify_all();
+  }
   Heap_lock->unlock();
-  if (_needs_pending_list_lock) {
-    release_and_notify_pending_list_lock();
-  }
 }
--- a/hotspot/src/share/vm/gc/g1/vm_operations_g1.hpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/g1/vm_operations_g1.hpp	Tue Aug 30 23:48:16 2016 -0400
@@ -27,7 +27,6 @@
 
 #include "gc/g1/g1AllocationContext.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 
 // VM_operations for the G1 collector.
@@ -103,20 +102,13 @@
 // Concurrent GC stop-the-world operations such as remark and cleanup;
 // consider sharing these with CMS's counterparts.
 class VM_CGC_Operation: public VM_Operation {
-  VoidClosure*               _cl;
-  const char*                _printGCMessage;
-  bool                       _needs_pending_list_lock;
-  ReferencePendingListLocker _pending_list_locker;
-  uint                       _gc_id;
-
-protected:
-  // java.lang.ref.Reference support
-  void acquire_pending_list_lock();
-  void release_and_notify_pending_list_lock();
+  VoidClosure* _cl;
+  const char*  _printGCMessage;
+  uint         _gc_id;
 
 public:
-  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pending_list_lock)
-    : _cl(cl), _printGCMessage(printGCMsg), _needs_pending_list_lock(needs_pending_list_lock), _gc_id(GCId::current()) {}
+  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
+    : _cl(cl), _printGCMessage(printGCMsg), _gc_id(GCId::current()) {}
   virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
   virtual void doit();
   virtual bool doit_prologue();
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.hpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.hpp	Tue Aug 30 23:48:16 2016 -0400
@@ -441,12 +441,6 @@
   // remembered set.
   virtual void flush_deferred_store_barrier(JavaThread* thread);
 
-  // Should return true if the reference pending list lock is
-  // acquired from non-Java threads, such as a concurrent GC thread.
-  virtual bool needs_reference_pending_list_locker_thread() const {
-    return false;
-  }
-
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc".  This probably implies as full a collection as the
   // "CollectedHeap" supports.
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp	Tue Aug 30 23:48:16 2016 -0400
@@ -281,10 +281,6 @@
     return UseConcMarkSweepGC;
   }
 
-  virtual bool needs_reference_pending_list_locker_thread() const {
-    return UseConcMarkSweepGC;
-  }
-
   // We don't need barriers for stores to objects in the
   // young gen and, a fortiori, for initializing stores to
   // objects therein. This applies to DefNew+Tenured and ParNew+CMS
--- a/hotspot/src/share/vm/gc/shared/referencePendingListLocker.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,222 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/javaClasses.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
-#include "memory/universe.hpp"
-#include "runtime/javaCalls.hpp"
-#include "utilities/preserveException.hpp"
-
-ReferencePendingListLockerThread::ReferencePendingListLockerThread() :
-  JavaThread(&start),
-  _monitor(Monitor::nonleaf, "ReferencePendingListLocker", false, Monitor::_safepoint_check_sometimes),
-  _message(NONE) {}
-
-ReferencePendingListLockerThread* ReferencePendingListLockerThread::create(TRAPS) {
-  // Create Java thread objects
-  instanceKlassHandle thread_klass = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_NULL);
-  instanceHandle thread_object = thread_klass->allocate_instance_handle(CHECK_NULL);
-  Handle thread_name = java_lang_String::create_from_str("Reference Pending List Locker", CHECK_NULL);
-  Handle thread_group = Universe::system_thread_group();
-  JavaValue result(T_VOID);
-  JavaCalls::call_special(&result,
-                          thread_object,
-                          thread_klass,
-                          vmSymbols::object_initializer_name(),
-                          vmSymbols::threadgroup_string_void_signature(),
-                          thread_group,
-                          thread_name,
-                          CHECK_NULL);
-
-  {
-    MutexLocker ml(Threads_lock);
-
-    // Allocate thread
-    ReferencePendingListLockerThread* thread = new ReferencePendingListLockerThread();
-    if (thread == NULL || thread->osthread() == NULL) {
-      vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    os::native_thread_creation_failed_msg());
-    }
-
-    // Initialize thread
-    java_lang_Thread::set_thread(thread_object(), thread);
-    java_lang_Thread::set_priority(thread_object(), NearMaxPriority);
-    java_lang_Thread::set_daemon(thread_object());
-    thread->set_threadObj(thread_object());
-
-    // Start thread
-    Threads::add(thread);
-    Thread::start(thread);
-
-    return thread;
-  }
-}
-
-void ReferencePendingListLockerThread::start(JavaThread* thread, TRAPS) {
-  ReferencePendingListLockerThread* locker_thread = static_cast<ReferencePendingListLockerThread*>(thread);
-  locker_thread->receive_and_handle_messages();
-}
-
-bool ReferencePendingListLockerThread::is_hidden_from_external_view() const {
-  return true;
-}
-
-void ReferencePendingListLockerThread::send_message(Message message) {
-  assert(message != NONE, "Should not be none");
-  MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
-
-  // Wait for completion of current message
-  while (_message != NONE) {
-    ml.wait(Monitor::_no_safepoint_check_flag);
-  }
-
-  // Send new message
-  _message = message;
-  ml.notify_all();
-
-  // Wait for completion of new message
-  while (_message != NONE) {
-    ml.wait(Monitor::_no_safepoint_check_flag);
-  }
-}
-
-void ReferencePendingListLockerThread::receive_and_handle_messages() {
-  ReferencePendingListLocker pending_list_locker;
-  MonitorLockerEx ml(&_monitor);
-
-  // Main loop, never terminates
-  for (;;) {
-    // Wait for message
-    while (_message == NONE) {
-      ml.wait();
-    }
-
-    // Handle message
-    if (_message == LOCK) {
-      pending_list_locker.lock();
-    } else if (_message == UNLOCK) {
-      pending_list_locker.unlock();
-    } else {
-      ShouldNotReachHere();
-    }
-
-    // Clear message
-    _message = NONE;
-    ml.notify_all();
-  }
-}
-
-void ReferencePendingListLockerThread::lock() {
-  send_message(LOCK);
-}
-
-void ReferencePendingListLockerThread::unlock() {
-  send_message(UNLOCK);
-}
-
-bool ReferencePendingListLocker::_is_initialized = false;
-ReferencePendingListLockerThread* ReferencePendingListLocker::_locker_thread = NULL;
-
-void ReferencePendingListLocker::initialize(bool needs_locker_thread, TRAPS) {
-  if (needs_locker_thread) {
-    _locker_thread = ReferencePendingListLockerThread::create(CHECK);
-  }
-
-  _is_initialized = true;
-}
-
-bool ReferencePendingListLocker::is_initialized() {
-  return _is_initialized;
-}
-
-bool ReferencePendingListLocker::is_locked_by_self() {
-  oop pending_list_lock = java_lang_ref_Reference::pending_list_lock();
-  if (pending_list_lock == NULL) {
-    return false;
-  }
-
-  JavaThread* thread = JavaThread::current();
-  Handle handle(thread, pending_list_lock);
-  return ObjectSynchronizer::current_thread_holds_lock(thread, handle);
-}
-
-void ReferencePendingListLocker::lock() {
-  assert(!Heap_lock->owned_by_self(), "Heap_lock must not be owned by requesting thread");
-
-  if (Thread::current()->is_Java_thread()) {
-    assert(java_lang_ref_Reference::pending_list_lock() != NULL, "Not initialized");
-
-    // We may enter this with a pending exception
-    PRESERVE_EXCEPTION_MARK;
-
-    HandleMark hm;
-    Handle handle(THREAD, java_lang_ref_Reference::pending_list_lock());
-
-    // Lock
-    ObjectSynchronizer::fast_enter(handle, &_basic_lock, false, THREAD);
-
-    assert(is_locked_by_self(), "Locking failed");
-
-    if (HAS_PENDING_EXCEPTION) {
-      CLEAR_PENDING_EXCEPTION;
-    }
-  } else {
-    // Delegate operation to locker thread
-    assert(_locker_thread != NULL, "Locker thread not created");
-    _locker_thread->lock();
-  }
-}
-
-void ReferencePendingListLocker::unlock() {
-  if (Thread::current()->is_Java_thread()) {
-    assert(java_lang_ref_Reference::pending_list_lock() != NULL, "Not initialized");
-
-    // We may enter this with a pending exception
-    PRESERVE_EXCEPTION_MARK;
-
-    HandleMark hm;
-    Handle handle(THREAD, java_lang_ref_Reference::pending_list_lock());
-
-    assert(is_locked_by_self(), "Should be locked by self");
-
-    // Notify waiters if the pending list is non-empty
-    if (java_lang_ref_Reference::pending_list() != NULL) {
-      ObjectSynchronizer::notifyall(handle, THREAD);
-    }
-
-    // Unlock
-    ObjectSynchronizer::fast_exit(handle(), &_basic_lock, THREAD);
-
-    if (HAS_PENDING_EXCEPTION) {
-      CLEAR_PENDING_EXCEPTION;
-    }
-  } else {
-    // Delegate operation to locker thread
-    assert(_locker_thread != NULL, "Locker thread not created");
-    _locker_thread->unlock();
-  }
-}
--- a/hotspot/src/share/vm/gc/shared/referencePendingListLocker.hpp	Tue Aug 30 12:48:03 2016 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_REFERENCEPENDINGLISTLOCKER_HPP
-#define SHARE_VM_GC_SHARED_REFERENCEPENDINGLISTLOCKER_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/basicLock.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/thread.hpp"
-#include "utilities/exceptions.hpp"
-
-//
-// The ReferencePendingListLockerThread locks and unlocks the reference
-// pending list lock on behalf a non-Java thread, typically a concurrent
-// GC thread. This interface should not be directly accessed. All uses
-// should instead go through the ReferencePendingListLocker, which calls
-// this thread if needed.
-//
-class ReferencePendingListLockerThread : public JavaThread {
-private:
-  enum Message {
-    NONE,
-    LOCK,
-    UNLOCK
-  };
-
-  Monitor _monitor;
-  Message _message;
-
-  ReferencePendingListLockerThread();
-
-  static void start(JavaThread* thread, TRAPS);
-
-  void send_message(Message message);
-  void receive_and_handle_messages();
-
-public:
-  static ReferencePendingListLockerThread* create(TRAPS);
-
-  virtual bool is_hidden_from_external_view() const;
-
-  void lock();
-  void unlock();
-};
-
-//
-// The ReferencePendingListLocker is the main interface for locking and
-// unlocking the reference pending list lock, which needs to be held by
-// the GC when adding references to the pending list. Since this is a
-// Java-level monitor it can only be locked/unlocked by a Java thread.
-// For this reason there is an option to spawn a helper thread, the
-// ReferencePendingListLockerThread, during initialization. If a helper
-// thread is spawned all lock operations from non-Java threads will be
-// delegated to the helper thread. The helper thread is typically needed
-// by concurrent GCs.
-//
-class ReferencePendingListLocker VALUE_OBJ_CLASS_SPEC {
-private:
-  static bool                              _is_initialized;
-  static ReferencePendingListLockerThread* _locker_thread;
-  BasicLock                                _basic_lock;
-
-public:
-  static void initialize(bool needs_locker_thread, TRAPS);
-  static bool is_initialized();
-
-  static bool is_locked_by_self();
-
-  void lock();
-  void unlock();
-};
-
-#endif // SHARE_VM_GC_SHARED_REFERENCEPENDINGLISTLOCKER_HPP
--- a/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -289,39 +289,16 @@
   complete_gc->do_void();
 }
 
-
-template <class T>
-bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
-                                   AbstractRefProcTaskExecutor* task_executor) {
-
-  // Remember old value of pending references list
-  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
-  T old_pending_list_value = *pending_list_addr;
-
+void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
   // Enqueue references that are not made active again, and
   // clear the decks for the next collection (cycle).
-  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
-  // Do the post-barrier on pending_list_addr missed in
-  // enqueue_discovered_reflist.
-  oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
+  enqueue_discovered_reflists(task_executor);
 
   // Stop treating discovered references specially.
-  ref->disable_discovery();
-
-  // Return true if new pending references were added
-  return old_pending_list_value != *pending_list_addr;
+  disable_discovery();
 }
 
-bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
-  if (UseCompressedOops) {
-    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
-  } else {
-    return enqueue_discovered_ref_helper<oop>(this, task_executor);
-  }
-}
-
-void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
-                                                    HeapWord* pending_list_addr) {
+void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) {
   // Given a list of refs linked through the "discovered" field
   // (java.lang.ref.Reference.discovered), self-loop their "next" field
   // thus distinguishing them from active References, then
@@ -354,10 +331,9 @@
       oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
     } else {
       // This is the last object.
-      // Swap refs_list into pending_list_addr and
-      // set obj's discovered to what we read from pending_list_addr.
-      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
-      // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
+      // Swap refs_list into pending list and set obj's
+      // discovered to what we read from the pending list.
+      oop old = Universe::swap_reference_pending_list(refs_list.head());
       java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
       oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
     }
@@ -369,10 +345,8 @@
 public:
   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                      DiscoveredList      discovered_refs[],
-                     HeapWord*           pending_list_addr,
                      int                 n_queues)
-    : EnqueueTask(ref_processor, discovered_refs,
-                  pending_list_addr, n_queues)
+    : EnqueueTask(ref_processor, discovered_refs, n_queues)
   { }
 
   virtual void work(unsigned int work_id) {
@@ -387,8 +361,7 @@
     for (int j = 0;
          j < ReferenceProcessor::number_of_subclasses_of_ref();
          j++, index += _n_queues) {
-      _ref_processor.enqueue_discovered_reflist(
-        _refs_lists[index], _pending_list_addr);
+      _ref_processor.enqueue_discovered_reflist(_refs_lists[index]);
       _refs_lists[index].set_head(NULL);
       _refs_lists[index].set_length(0);
     }
@@ -396,17 +369,15 @@
 };
 
 // Enqueue references that are not made active again
-void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
-  AbstractRefProcTaskExecutor* task_executor) {
+void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor) {
   if (_processing_is_mt && task_executor != NULL) {
     // Parallel code
-    RefProcEnqueueTask tsk(*this, _discovered_refs,
-                           pending_list_addr, _max_num_q);
+    RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q);
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
     for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
+      enqueue_discovered_reflist(_discovered_refs[i]);
       _discovered_refs[i].set_head(NULL);
       _discovered_refs[i].set_length(0);
     }
--- a/hotspot/src/share/vm/gc/shared/referenceProcessor.hpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/shared/referenceProcessor.hpp	Tue Aug 30 23:48:16 2016 -0400
@@ -290,7 +290,7 @@
                       VoidClosure*       complete_gc);
 
   // Enqueue references with a certain reachability level
-  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);
+  void enqueue_discovered_reflist(DiscoveredList& refs_list);
 
   // "Preclean" all the discovered reference lists
   // by removing references with strongly reachable referents.
@@ -311,7 +311,7 @@
   // occupying the i / _num_q slot.
   const char* list_name(uint i);
 
-  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
+  void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor);
 
  protected:
   // "Preclean" the given discovered reference list
@@ -424,7 +424,7 @@
                                 GCTimer *gc_timer);
 
   // Enqueue references at end of GC (called by the garbage collector)
-  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
+  void enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
 
   // If a discovery is in process that is being superceded, abandon it: all
   // the discovered lists will be empty, and all the objects on them will
@@ -613,11 +613,9 @@
 protected:
   EnqueueTask(ReferenceProcessor& ref_processor,
               DiscoveredList      refs_lists[],
-              HeapWord*           pending_list_addr,
               int                 n_queues)
     : _ref_processor(ref_processor),
       _refs_lists(refs_lists),
-      _pending_list_addr(pending_list_addr),
       _n_queues(n_queues)
   { }
 
@@ -627,7 +625,6 @@
 protected:
   ReferenceProcessor& _ref_processor;
   DiscoveredList*     _refs_lists;
-  HeapWord*           _pending_list_addr;
   int                 _n_queues;
 };
 
--- a/hotspot/src/share/vm/gc/shared/vmGCOperations.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/shared/vmGCOperations.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -62,14 +62,6 @@
   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
 }
 
-void VM_GC_Operation::acquire_pending_list_lock() {
-  _pending_list_locker.lock();
-}
-
-void VM_GC_Operation::release_and_notify_pending_list_lock() {
-  _pending_list_locker.unlock();
-}
-
 // Allocations may fail in several threads at about the same time,
 // resulting in multiple gc requests.  We only want to do one of them.
 // In case a GC locker is active and the need for a GC is already signaled,
@@ -102,16 +94,13 @@
               proper_unit_for_byte_size(NewSize)));
   }
 
-  acquire_pending_list_lock();
   // If the GC count has changed someone beat us to the collection
-  // Get the Heap_lock after the pending_list_lock.
   Heap_lock->lock();
 
   // Check invocations
   if (skip_operation()) {
     // skip collection
     Heap_lock->unlock();
-    release_and_notify_pending_list_lock();
     _prologue_succeeded = false;
   } else {
     _prologue_succeeded = true;
@@ -122,9 +111,10 @@
 
 void VM_GC_Operation::doit_epilogue() {
   assert(Thread::current()->is_Java_thread(), "just checking");
-  // Release the Heap_lock first.
+  if (Universe::has_reference_pending_list()) {
+    Heap_lock->notify_all();
+  }
   Heap_lock->unlock();
-  release_and_notify_pending_list_lock();
 }
 
 bool VM_GC_HeapInspection::skip_operation() const {
--- a/hotspot/src/share/vm/gc/shared/vmGCOperations.hpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/gc/shared/vmGCOperations.hpp	Tue Aug 30 23:48:16 2016 -0400
@@ -27,7 +27,6 @@
 
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "memory/heapInspection.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/handles.hpp"
@@ -70,9 +69,6 @@
 //
 
 class VM_GC_Operation: public VM_Operation {
- private:
-  ReferencePendingListLocker _pending_list_locker;
-
  protected:
   uint           _gc_count_before;         // gc count before acquiring PLL
   uint           _full_gc_count_before;    // full gc count before acquiring PLL
@@ -83,10 +79,6 @@
 
   virtual bool skip_operation() const;
 
-  // java.lang.ref.Reference support
-  void acquire_pending_list_lock();
-  void release_and_notify_pending_list_lock();
-
  public:
   VM_GC_Operation(uint gc_count_before,
                   GCCause::Cause _cause,
--- a/hotspot/src/share/vm/memory/universe.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/memory/universe.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -135,6 +135,7 @@
 oop Universe::_virtual_machine_error_instance         = NULL;
 oop Universe::_vm_exception                           = NULL;
 oop Universe::_allocation_context_notification_obj    = NULL;
+oop Universe::_reference_pending_list                 = NULL;
 
 Array<int>* Universe::_the_empty_int_array            = NULL;
 Array<u2>* Universe::_the_empty_short_array           = NULL;
@@ -212,6 +213,7 @@
   f->do_oop((oop*)&_system_thread_group);
   f->do_oop((oop*)&_vm_exception);
   f->do_oop((oop*)&_allocation_context_notification_obj);
+  f->do_oop((oop*)&_reference_pending_list);
   debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
 }
 
@@ -488,6 +490,35 @@
   java_lang_Class::set_fixup_mirror_list(NULL);
 }
 
+#define assert_pll_locked(test) \
+  assert(Heap_lock->test(), "Reference pending list access requires lock")
+
+#define assert_pll_ownership() assert_pll_locked(owned_by_self)
+
+oop Universe::reference_pending_list() {
+  assert_pll_ownership();
+  return _reference_pending_list;
+}
+
+void Universe::set_reference_pending_list(oop list) {
+  assert_pll_ownership();
+  _reference_pending_list = list;
+}
+
+bool Universe::has_reference_pending_list() {
+  assert_pll_ownership();
+  return _reference_pending_list != NULL;
+}
+
+oop Universe::swap_reference_pending_list(oop list) {
+  assert_pll_locked(is_locked);
+  return (oop)Atomic::xchg_ptr(list, &_reference_pending_list);
+}
+
+#undef assert_pll_locked
+#undef assert_pll_ownership
+
+
 static bool has_run_finalizers_on_exit = false;
 
 void Universe::run_finalizers_on_exit() {
@@ -565,12 +596,14 @@
 
 oop Universe::gen_out_of_memory_error(oop default_err) {
   // generate an out of memory error:
-  // - if there is a preallocated error with backtrace available then return it wth
-  //   a filled in stack trace.
-  // - if there are no preallocated errors with backtrace available then return
-  //   an error without backtrace.
+  // - if there is a preallocated error and stack traces are available
+  //   (j.l.Throwable is initialized), then return the preallocated
+  //   error with a filled in stack trace, and with the message
+  //   provided by the default error.
+  // - otherwise, return the default error, without a stack trace.
   int next;
-  if (_preallocated_out_of_memory_error_avail_count > 0) {
+  if ((_preallocated_out_of_memory_error_avail_count > 0) &&
+      SystemDictionary::Throwable_klass()->is_initialized()) {
     next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
     assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
   } else {
--- a/hotspot/src/share/vm/memory/universe.hpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/memory/universe.hpp	Tue Aug 30 23:48:16 2016 -0400
@@ -185,6 +185,9 @@
 
   static oop          _allocation_context_notification_obj;
 
+  // References waiting to be transferred to the ReferenceHandler
+  static oop          _reference_pending_list;
+
   // The particular choice of collected heap.
   static CollectedHeap* _collectedHeap;
 
@@ -334,6 +337,17 @@
   static inline oop   allocation_context_notification_obj();
   static inline void  set_allocation_context_notification_obj(oop obj);
 
+  // Reference pending list manipulation.  Access is protected by
+  // Heap_lock.  The getter, setter and predicate require the caller
+  // owns the lock.  Swap is used by parallel non-concurrent reference
+  // processing threads, where some higher level controller owns
+  // Heap_lock, so requires the lock is locked, but not necessarily by
+  // the current thread.
+  static oop          reference_pending_list();
+  static void         set_reference_pending_list(oop list);
+  static bool         has_reference_pending_list();
+  static oop          swap_reference_pending_list(oop list);
+
   static Array<int>*       the_empty_int_array()    { return _the_empty_int_array; }
   static Array<u2>*        the_empty_short_array()  { return _the_empty_short_array; }
   static Array<Method*>* the_empty_method_array() { return _the_empty_method_array; }
--- a/hotspot/src/share/vm/oops/method.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/oops/method.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -30,7 +30,6 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "gc/shared/generation.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodeTracer.hpp"
 #include "interpreter/bytecodes.hpp"
@@ -400,12 +399,6 @@
     return;
   }
 
-  // Do not profile method if current thread holds the pending list lock,
-  // which avoids deadlock for acquiring the MethodData_lock.
-  if (ReferencePendingListLocker::is_locked_by_self()) {
-    return;
-  }
-
   // Grab a lock here to prevent multiple
   // MethodData*s from being created.
   MutexLocker ml(MethodData_lock, THREAD);
--- a/hotspot/src/share/vm/prims/jvm.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -3352,6 +3352,35 @@
 JVM_END
 
 
+// java.lang.ref.Reference ///////////////////////////////////////////////////////////////
+
+
+JVM_ENTRY(jobject, JVM_GetAndClearReferencePendingList(JNIEnv* env))
+  JVMWrapper("JVM_GetAndClearReferencePendingList");
+
+  MonitorLockerEx ml(Heap_lock);
+  oop ref = Universe::reference_pending_list();
+  if (ref != NULL) {
+    Universe::set_reference_pending_list(NULL);
+  }
+  return JNIHandles::make_local(env, ref);
+JVM_END
+
+JVM_ENTRY(jboolean, JVM_HasReferencePendingList(JNIEnv* env))
+  JVMWrapper("JVM_HasReferencePendingList");
+  MonitorLockerEx ml(Heap_lock);
+  return Universe::has_reference_pending_list();
+JVM_END
+
+JVM_ENTRY(void, JVM_WaitForReferencePendingList(JNIEnv* env))
+  JVMWrapper("JVM_WaitForReferencePendingList");
+  MonitorLockerEx ml(Heap_lock);
+  while (!Universe::has_reference_pending_list()) {
+    ml.wait();
+  }
+JVM_END
+
+
 // ObjectInputStream ///////////////////////////////////////////////////////////////
 
 bool force_verify_field_access(Klass* current_class, Klass* field_class, AccessFlags access, bool classloader_only) {
--- a/hotspot/src/share/vm/prims/jvm.h	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/prims/jvm.h	Tue Aug 30 23:48:16 2016 -0400
@@ -297,6 +297,18 @@
 JVM_GetSystemPackages(JNIEnv *env);
 
 /*
+ * java.lang.ref.Reference
+ */
+JNIEXPORT jobject JNICALL
+JVM_GetAndClearReferencePendingList(JNIEnv *env);
+
+JNIEXPORT jboolean JNICALL
+JVM_HasReferencePendingList(JNIEnv *env);
+
+JNIEXPORT void JNICALL
+JVM_WaitForReferencePendingList(JNIEnv *env);
+
+/*
  * java.io.ObjectInputStream
  */
 JNIEXPORT jobject JNICALL
--- a/hotspot/src/share/vm/runtime/thread.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -35,7 +35,6 @@
 #include "compiler/compileTask.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/linkResolver.hpp"
@@ -3718,14 +3717,6 @@
   Management::record_vm_init_completed();
 #endif // INCLUDE_MANAGEMENT
 
-  // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and
-  // set_init_completed has just been called, causing exceptions not to be shortcut
-  // anymore. We call vm_exit_during_initialization directly instead.
-
-  // Initialize reference pending list locker
-  bool needs_locker_thread = Universe::heap()->needs_reference_pending_list_locker_thread();
-  ReferencePendingListLocker::initialize(needs_locker_thread, CHECK_JNI_ERR);
-
   // Signal Dispatcher needs to be started before VMInit event is posted
   os::signal_init();
 
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Tue Aug 30 12:48:03 2016 +0300
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Tue Aug 30 23:48:16 2016 -0400
@@ -54,7 +54,6 @@
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/generationSpec.hpp"
-#include "gc/shared/referencePendingListLocker.hpp"
 #include "gc/shared/space.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/bytecodes.hpp"
@@ -1637,7 +1636,6 @@
            declare_type(JavaThread, Thread)                               \
            declare_type(JvmtiAgentThread, JavaThread)                     \
            declare_type(ServiceThread, JavaThread)                        \
-           declare_type(ReferencePendingListLockerThread, JavaThread)     \
   declare_type(CompilerThread, JavaThread)                                \
   declare_type(CodeCacheSweeperThread, JavaThread)                        \
   declare_toplevel_type(OSThread)                                         \