8182703: Correct G1 barrier queue lock orderings
author eosterlund
Thu, 13 Jul 2017 17:06:08 +0200
changeset 46685 b218dfc2853a
parent 46682 b646732e1473
child 46686 2092d0dade65
8182703: Correct G1 barrier queue lock orderings
Summary: Moving G1 barrier queue locks down to a new 'access' rank, below special, and removing hacks around incorrect lock ordering.
Reviewed-by: kbarrett, tschatzl
hotspot/src/share/vm/gc/g1/ptrQueue.cpp
hotspot/src/share/vm/runtime/mutex.hpp
hotspot/src/share/vm/runtime/mutexLocker.cpp
--- a/hotspot/src/share/vm/gc/g1/ptrQueue.cpp	Tue Jul 18 09:53:54 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/ptrQueue.cpp	Thu Jul 13 17:06:08 2017 +0200
@@ -75,16 +75,7 @@
 
 void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
   assert(_lock->owned_by_self(), "Required.");
-
-  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
-  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
-  // have the same rank and we may get the "possible deadlock" message
-  _lock->unlock();
-
   qset()->enqueue_complete_buffer(node);
-  // We must relock only because the caller will unlock, for the normal
-  // case.
-  _lock->lock_without_safepoint_check();
 }
 
 
@@ -189,34 +180,11 @@
     if (_lock) {
       assert(_lock->owned_by_self(), "Required.");
 
-      // The current PtrQ may be the shared dirty card queue and
-      // may be being manipulated by more than one worker thread
-      // during a pause. Since the enqueueing of the completed
-      // buffer unlocks the Shared_DirtyCardQ_lock more than one
-      // worker thread can 'race' on reading the shared queue attributes
-      // (_buf and _index) and multiple threads can call into this
-      // routine for the same buffer. This will cause the completed
-      // buffer to be added to the CBL multiple times.
-
-      // We "claim" the current buffer by caching value of _buf in
-      // a local and clearing the field while holding _lock. When
-      // _lock is released (while enqueueing the completed buffer)
-      // the thread that acquires _lock will skip this code,
-      // preventing the subsequent the multiple enqueue, and
-      // install a newly allocated buffer below.
-
       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
       _buf = NULL;         // clear shared _buf field
 
       locking_enqueue_completed_buffer(node); // enqueue completed buffer
-
-      // While the current thread was enqueueing the buffer another thread
-      // may have a allocated a new buffer and inserted it into this pointer
-      // queue. If that happens then we just return so that the current
-      // thread doesn't overwrite the buffer allocated by the other thread
-      // and potentially losing some dirtied cards.
-
-      if (_buf != NULL) return;
+      assert(_buf == NULL, "multiple enqueuers appear to be racing");
     } else {
       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
       if (qset()->process_or_enqueue_complete_buffer(node)) {
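The hunk above removes both the unlock/relock around the enqueue and the re-check of _buf that the deleted comments describe. A standalone sketch of the two shapes, using hypothetical names (Queue, enqueue_completed, handle_completed) rather than HotSpot's PtrQueue API, illustrates why holding the per-queue lock across the enqueue turns the racy re-check into a plain assert:

// Illustrative sketch only: the names Queue, enqueue_completed and
// handle_completed are hypothetical, not HotSpot's PtrQueue API.
#include <cassert>
#include <mutex>
#include <vector>

struct Queue {
  std::mutex* _lock;   // per-queue lock, assumed to be held by the caller
  void*       _buf;    // current buffer, guarded by *_lock
};

std::mutex         cbl_mon;             // global completed-buffer-list monitor
std::vector<void*> completed_buffers;   // guarded by cbl_mon

void enqueue_completed(void* buf) {
  std::lock_guard<std::mutex> g(cbl_mon);
  completed_buffers.push_back(buf);
}

// New shape: the per-queue lock stays held across the global enqueue. Because
// the per-queue lock now outranks the monitor, nesting the two is legal, and
// no other thread can touch _buf in the meantime -- hence the plain assert.
void handle_completed(Queue& q) {
  void* node = q._buf;
  q._buf = nullptr;
  enqueue_completed(node);
  assert(q._buf == nullptr && "no other thread can have installed a buffer");
}

// Old shape, for contrast: equal ranks forced an unlock/relock around the
// enqueue, opening a window in which another thread could claim the queue and
// install a new _buf, so the caller had to re-check instead of asserting.
void handle_completed_old(Queue& q) {
  void* node = q._buf;
  q._buf = nullptr;                // "claim" the buffer while holding _lock
  q._lock->unlock();
  enqueue_completed(node);
  q._lock->lock();
  if (q._buf != nullptr) return;   // another thread raced in; keep its buffer
}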
--- a/hotspot/src/share/vm/runtime/mutex.hpp	Tue Jul 18 09:53:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/mutex.hpp	Thu Jul 13 17:06:08 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,9 +90,15 @@
 
  public:
   // A special lock: Is a lock where you are guaranteed not to block while you are
-  // holding it, i.e., no vm operation can happen, taking other locks, etc.
+  // holding it, i.e., no vm operation can happen, taking other (blocking) locks, etc.
+  // The rank 'access' is similar to 'special' and has the same restrictions on usage.
+  // It is reserved for locks that may be required in order to perform memory accesses
+  // that require special barriers, e.g. SATB GC barriers, which in turn use locks.
+  // Since memory accesses should be able to be performed pretty much anywhere
+  // in the code, the locks required for performing such accesses are inherently
+  // a bit more special than even locks of the 'special' rank.
   // NOTE: It is critical that the rank 'special' be the lowest (earliest)
-  // (except for "event"?) for the deadlock detection to work correctly.
+  // (except for "event" and "access") for the deadlock detection to work correctly.
   // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
   // which being external to the VM are not subject to deadlock detection.
   // The rank safepoint is used only for synchronization in reaching a
@@ -104,14 +110,15 @@
   // at all.
   enum lock_types {
        event,
-       special,
-       suspend_resume,
-       leaf        = suspend_resume +   2,
-       safepoint   = leaf           +  10,
-       barrier     = safepoint      +   1,
-       nonleaf     = barrier        +   1,
-       max_nonleaf = nonleaf        + 900,
-       native      = max_nonleaf    +   1
+       access         = event          +   1,
+       special        = access         +   2,
+       suspend_resume = special        +   1,
+       leaf           = suspend_resume +   2,
+       safepoint      = leaf           +  10,
+       barrier        = safepoint      +   1,
+       nonleaf        = barrier        +   1,
+       max_nonleaf    = nonleaf        + 900,
+       native         = max_nonleaf    +   1
   };
 
   // The WaitSet and EntryList linked lists are composed of ParkEvents.
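The comment block above relies on the rank-ordering rule that the removed ptrQueue.cpp comments allude to via the "possible deadlock" message: roughly, a thread should only acquire a lock whose rank is strictly lower than the lowest-ranked lock it already holds. A minimal standalone checker illustrating that rule (not HotSpot's Monitor code; all names here are local to the sketch):

// Minimal sketch of a rank-order check; assumes the rule stated above and is
// not HotSpot's Monitor implementation.
#include <cassert>
#include <vector>

struct RankedLock {
  int rank;
  explicit RankedLock(int r) : rank(r) {}
};

// Ranks held by the current thread, most recently acquired last.
thread_local std::vector<const RankedLock*> g_held;

// Returns true if acquiring 'l' would respect the rank order.
bool acquire_ok(const RankedLock& l) {
  for (const RankedLock* held : g_held) {
    if (l.rank >= held->rank) {
      return false;  // same or higher rank than a held lock: "possible deadlock"
    }
  }
  return true;
}

void acquire(const RankedLock& l) {
  assert(acquire_ok(l) && "rank order violated");
  g_held.push_back(&l);
}

void release(const RankedLock& l) {
  assert(!g_held.empty() && g_held.back() == &l);
  g_held.pop_back();
}

int main() {
  RankedLock outer(2);    // e.g. an 'access + 1' lock in the new scheme
  RankedLock inner(1);    // e.g. an 'access' lock
  acquire(outer);
  acquire(inner);         // strictly decreasing rank: allowed
  release(inner);
  release(outer);

  RankedLock a(5), b(5);  // two locks of equal rank, like the old 'leaf - 1' pair
  acquire(a);
  assert(!acquire_ok(b)); // equal rank would be flagged, as the old code noted
  release(a);
  return 0;
}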
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Tue Jul 18 09:53:54 2017 +0200
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Thu Jul 13 17:06:08 2017 +0200
@@ -181,14 +181,13 @@
     def(FullGCCount_lock           , PaddedMonitor, leaf,        true,  Monitor::_safepoint_check_never);      // in support of ExplicitGCInvokesConcurrent
   }
   if (UseG1GC) {
+    def(SATB_Q_FL_lock             , PaddedMutex  , access,      true,  Monitor::_safepoint_check_never);
+    def(SATB_Q_CBL_mon             , PaddedMonitor, access,      true,  Monitor::_safepoint_check_never);
+    def(Shared_SATB_Q_lock         , PaddedMutex  , access + 1,  true,  Monitor::_safepoint_check_never);
 
-    def(SATB_Q_FL_lock             , PaddedMutex  , special  ,   true,  Monitor::_safepoint_check_never);
-    def(SATB_Q_CBL_mon             , PaddedMonitor, leaf - 1 ,   true,  Monitor::_safepoint_check_never);
-    def(Shared_SATB_Q_lock         , PaddedMutex  , leaf - 1 ,   true,  Monitor::_safepoint_check_never);
-
-    def(DirtyCardQ_FL_lock         , PaddedMutex  , special  ,   true,  Monitor::_safepoint_check_never);
-    def(DirtyCardQ_CBL_mon         , PaddedMonitor, leaf - 1 ,   true,  Monitor::_safepoint_check_never);
-    def(Shared_DirtyCardQ_lock     , PaddedMutex  , leaf - 1 ,   true,  Monitor::_safepoint_check_never);
+    def(DirtyCardQ_FL_lock         , PaddedMutex  , access,      true,  Monitor::_safepoint_check_never);
+    def(DirtyCardQ_CBL_mon         , PaddedMonitor, access,      true,  Monitor::_safepoint_check_never);
+    def(Shared_DirtyCardQ_lock     , PaddedMutex  , access + 1,  true,  Monitor::_safepoint_check_never);
 
     def(FreeList_lock              , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_never);
     def(SecondaryFreeList_lock     , PaddedMonitor, leaf     ,   true,  Monitor::_safepoint_check_never);
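With the assignments above, holding Shared_DirtyCardQ_lock (or Shared_SATB_Q_lock) while taking the corresponding CBL monitor is a strictly decreasing rank sequence, whereas the old 'leaf - 1' assignments tied the two ranks. A standalone arithmetic check of those numbers (the constants mirror mutex.hpp and mutexLocker.cpp before and after the patch; nothing in this sketch is HotSpot code):

// Standalone arithmetic check of the rank assignments; illustrative only.
enum { event = 0 };

// New scheme: 'access' sits between 'event' and 'special'.
enum { access = event + 1, special_new = access + 2 };

// Old scheme: no 'access' rank.
enum { special_old = event + 1,
       suspend_resume_old = special_old + 1,
       leaf_old = suspend_resume_old + 2 };

// Ranks assigned above to the dirty-card queue locks (the SATB pair is analogous).
enum { DirtyCardQ_CBL_mon_rank     = access,          // 1
       Shared_DirtyCardQ_lock_rank = access + 1 };    // 2

// Ranks they replaced.
enum { old_CBL_mon_rank     = leaf_old - 1,           // 3
       old_Shared_lock_rank = leaf_old - 1 };         // 3

// Holding the shared queue lock while taking the CBL monitor is now a strictly
// decreasing rank sequence...
static_assert(Shared_DirtyCardQ_lock_rank > DirtyCardQ_CBL_mon_rank,
              "nested acquisition is rank-ordered after the patch");
// ...whereas the old assignments tied the two ranks, which is what forced the
// unlock/relock workaround removed from ptrQueue.cpp.
static_assert(old_Shared_lock_rank == old_CBL_mon_rank,
              "old ranks were equal, tripping the possible-deadlock check");
// Both new ranks also stay below 'special', as the mutex.hpp comment requires.
static_assert(Shared_DirtyCardQ_lock_rank < special_new,
              "access-ranked locks sit below special");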