src/hotspot/share/gc/shared/oopStorage.cpp
branch datagramsocketimpl-branch
changeset 58678 9cf78a70fa4f
parent 54663 f03d5a093093
child 58679 9c3209ff7550
--- a/src/hotspot/share/gc/shared/oopStorage.cpp	Thu Oct 17 20:27:44 2019 +0100
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Thu Oct 17 20:53:35 2019 +0100
@@ -35,6 +35,7 @@
 #include "runtime/mutex.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.hpp"
@@ -207,7 +208,7 @@
 OopStorage::Block::Block(const OopStorage* owner, void* memory) :
   _data(),
   _allocated_bitmask(0),
-  _owner(owner),
+  _owner_address(reinterpret_cast<intptr_t>(owner)),
   _memory(memory),
   _active_index(0),
   _allocation_list_entry(),
@@ -227,7 +228,7 @@
   // Clear fields used by block_for_ptr and entry validation, which
   // might help catch bugs.  Volatile to prevent dead-store elimination.
   const_cast<uintx volatile&>(_allocated_bitmask) = 0;
-  const_cast<OopStorage* volatile&>(_owner) = NULL;
+  const_cast<intptr_t volatile&>(_owner_address) = 0;
 }
 
 size_t OopStorage::Block::allocation_size() {
@@ -355,9 +356,7 @@
   intptr_t owner_addr = reinterpret_cast<intptr_t>(owner);
   for (unsigned i = 0; i < section_count; ++i, section += section_size) {
     Block* candidate = reinterpret_cast<Block*>(section);
-    intptr_t* candidate_owner_addr
-      = reinterpret_cast<intptr_t*>(&candidate->_owner);
-    if (SafeFetchN(candidate_owner_addr, 0) == owner_addr) {
+    if (SafeFetchN(&candidate->_owner_address, 0) == owner_addr) {
       return candidate;
     }
   }
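
Context for this hunk: block_for_ptr derives candidate Block addresses from an arbitrary entry pointer, so it may probe memory that is not a live block or is not even mapped. SafeFetchN (declared in runtime/stubRoutines.hpp, included above) returns the supplied error value instead of faulting on an unreadable address. A minimal sketch of that probe pattern, with hypothetical names:

    // Hypothetical probe: does the word at *candidate_owner_field match the
    // owner's address?  SafeFetchN(adr, errValue) yields errValue when adr is
    // unreadable, so a garbage candidate simply fails the comparison.  The
    // destructor above zeroes _owner_address, so 0 also rejects dead blocks.
    static bool probe_owner(intptr_t* candidate_owner_field, const void* owner) {
      const intptr_t owner_addr = reinterpret_cast<intptr_t>(owner);
      assert(owner_addr != 0, "sketch assumes a non-NULL owner");
      return SafeFetchN(candidate_owner_field, 0) == owner_addr;
    }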
@@ -414,20 +413,12 @@
 oop* OopStorage::allocate() {
   MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
 
-  // Note: Without this we might never perform cleanup.  As it is,
-  // cleanup is only requested here, when completing a concurrent
-  // iteration, or when someone else entirely wakes up the service
-  // thread, which isn't ideal.  But we can't notify in release().
-  if (reduce_deferred_updates()) {
-    notify_needs_cleanup();
-  }
-
   Block* block = block_for_allocation();
   if (block == NULL) return NULL; // Block allocation failed.
   assert(!block->is_full(), "invariant");
   if (block->is_empty()) {
     // Transitioning from empty to not empty.
-    log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
+    log_trace(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
   }
   oop* result = block->allocate();
   assert(result != NULL, "allocation failed");
@@ -436,7 +427,7 @@
   if (block->is_full()) {
     // Transitioning from not full to full.
     // Remove full blocks from consideration by future allocates.
-    log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
+    log_trace(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
     _allocation_list.unlink(*block);
   }
   log_trace(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
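
For reference, the client-side contract this function serves, as a hedged usage sketch (storage, obj, and the NativeAccess style are assumptions based on the OopStorage API, not part of this change):

    // Hypothetical client, for some OopStorage* storage and oop obj.
    oop* slot = storage->allocate();         // NULL if block allocation failed
    if (slot != NULL) {
      NativeAccess<>::oop_store(slot, obj);  // entry now acts as a strong root
      // ... later, when the root is no longer needed:
      NativeAccess<>::oop_store(slot, (oop)NULL);  // clear the entry
      storage->release(slot);                // may make its block empty again
    }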
@@ -474,26 +465,23 @@
 
 OopStorage::Block* OopStorage::block_for_allocation() {
   assert_lock_strong(_allocation_mutex);
-
   while (true) {
     // Use the first block in _allocation_list for the allocation.
     Block* block = _allocation_list.head();
     if (block != NULL) {
       return block;
     } else if (reduce_deferred_updates()) {
-      MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
-      notify_needs_cleanup();
+      // Might have added a block to the _allocation_list, so retry.
     } else if (try_add_block()) {
-      block = _allocation_list.head();
-      assert(block != NULL, "invariant");
-      return block;
-    } else if (reduce_deferred_updates()) { // Once more before failure.
-      MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
-      notify_needs_cleanup();
-    } else {
+      // Successfully added a new block to the list, so retry.
+      assert(_allocation_list.chead() != NULL, "invariant");
+    } else if (_allocation_list.chead() != NULL) {
+      // Trying to add a block failed, but some other thread added to the
+      // list while we'd dropped the lock over the new block allocation.
+    } else if (!reduce_deferred_updates()) { // Once more before failure.
       // Attempt to add a block failed, no other thread added a block,
       // and no deferred update added a block, so allocation failed.
-      log_debug(oopstorage, blocks)("%s: failed block allocation", name());
+      log_info(oopstorage, blocks)("%s: failed block allocation", name());
       return NULL;
     }
   }
@@ -585,13 +573,15 @@
                                     uintx old_allocated,
                                     const OopStorage* owner,
                                     const void* block) {
-  Log(oopstorage, blocks) log;
-  LogStream ls(log.debug());
-  if (is_full_bitmask(old_allocated)) {
-    ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
-  }
-  if (releasing == old_allocated) {
-    ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
+  LogTarget(Trace, oopstorage, blocks) lt;
+  if (lt.is_enabled()) {
+    LogStream ls(lt);
+    if (is_full_bitmask(old_allocated)) {
+      ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
+    }
+    if (releasing == old_allocated) {
+      ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
+    }
   }
 }
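
These block-state transitions now log at trace rather than debug level; assuming the standard unified logging syntax, they could be enabled with, for example:

    java -Xlog:oopstorage+blocks=trace -version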
 
@@ -618,9 +608,7 @@
   // updates and the associated locking here.
   if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
     // Log transitions.  Both transitions are possible in a single update.
-    if (log_is_enabled(Debug, oopstorage, blocks)) {
-      log_release_transitions(releasing, old_allocated, _owner, this);
-    }
+    log_release_transitions(releasing, old_allocated, owner, this);
     // Attempt to claim responsibility for adding this block to the deferred
     // list, by setting the link to non-NULL by self-looping.  If this fails,
     // then someone else has made such a claim and the deferred update has not
@@ -635,9 +623,16 @@
         if (fetched == head) break; // Successful update.
         head = fetched;             // Retry with updated head.
       }
-      owner->record_needs_cleanup();
-      log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
-                                    _owner->name(), p2i(this));
+      // Only request cleanup for to-empty transitions, not for from-full.
+      // There isn't any rush to process from-full transitions.  Allocation
+      // will reduce deferrals before allocating new blocks, so it may process
+      // some.  And the service thread will drain the entire deferred list
+      // if there are any pending to-empty transitions.
+      if (releasing == old_allocated) {
+        owner->record_needs_cleanup();
+      }
+      log_trace(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
+                                    owner->name(), p2i(this));
     }
   }
   // Release hold on empty block deletion.
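
The claim-then-push protocol above is worth spelling out: a block first claims deferral by CAS-ing its link from NULL to a self-loop (so "end of list" stays distinguishable from "not on a list"), and only the claimant pushes. A minimal sketch with hypothetical names, assuming this era's Atomic::cmpxchg(exchange, dest, compare) signature:

    // Hypothetical node: next_ == NULL means "not on the deferred list";
    // next_ == this (a self-loop) terminates the list.
    struct Node { Node* volatile next_; };

    // Returns true if this call claimed responsibility for deferring node.
    static bool push_deferred(Node* volatile* list, Node* node) {
      if (Atomic::cmpxchg(node, &node->next_, (Node*)NULL) != NULL) {
        return false;  // Someone else claimed; their update covers our change.
      }
      Node* head = *list;
      while (true) {
        node->next_ = (head == NULL) ? node : head;  // keep self-loop at tail
        Node* fetched = Atomic::cmpxchg(node, list, head);
        if (fetched == head) return true;  // Successful push.
        head = fetched;                    // Retry with updated head.
      }
    }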
@@ -684,10 +679,9 @@
   if (is_empty_bitmask(allocated)) {
     _allocation_list.unlink(*block);
     _allocation_list.push_back(*block);
-    notify_needs_cleanup();
   }
 
-  log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
+  log_trace(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
                                 name(), p2i(block));
   return true;              // Processed one pending update.
 }
@@ -734,23 +728,12 @@
   }
 }
 
-const char* dup_name(const char* name) {
-  char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
-  strcpy(dup, name);
-  return dup;
-}
-
-// Possible values for OopStorage::_needs_cleanup.
-const uint needs_cleanup_none = 0;     // No cleanup needed.
-const uint needs_cleanup_marked = 1;   // Requested, but no notification made.
-const uint needs_cleanup_notified = 2; // Requested and Service thread notified.
-
 const size_t initial_active_array_size = 8;
 
 OopStorage::OopStorage(const char* name,
                        Mutex* allocation_mutex,
                        Mutex* active_mutex) :
-  _name(dup_name(name)),
+  _name(os::strdup(name)),
   _active_array(ActiveArray::create(initial_active_array_size)),
   _allocation_list(),
   _deferred_updates(NULL),
@@ -758,7 +741,7 @@
   _active_mutex(active_mutex),
   _allocation_count(0),
   _concurrent_iteration_count(0),
-  _needs_cleanup(needs_cleanup_none)
+  _needs_cleanup(false)
 {
   _active_array->increment_refcount();
   assert(_active_mutex->rank() < _allocation_mutex->rank(),
@@ -793,43 +776,92 @@
     Block::delete_block(*block);
   }
   ActiveArray::destroy(_active_array);
-  FREE_C_HEAP_ARRAY(char, _name);
+  os::free(const_cast<char*>(_name));
 }
 
-// Called by service thread to check for pending work.
-bool OopStorage::needs_delete_empty_blocks() const {
-  return Atomic::load(&_needs_cleanup) != needs_cleanup_none;
+// Managing service thread notifications.
+//
+// We don't want cleanup work to linger indefinitely, but we also don't want
+// to run the service thread too often.  We're also very limited in what we
+// can do in a release operation, where cleanup work is created.
+//
+// When a release operation changes a block's state to empty, it records the
+// need for cleanup in both the associated storage object and in the global
+// request state.  A safepoint cleanup task notifies the service thread when
+// there may be cleanup work for any storage object, based on the global
+// request state.  But that notification is deferred if the service thread
+// has run recently, and we also avoid duplicate notifications.  The service
+// thread updates the timestamp and resets the state flags on every iteration.
+
+// Global cleanup request state.
+static volatile bool needs_cleanup_requested = false;
+
+// Flag for avoiding duplicate notifications.
+static bool needs_cleanup_triggered = false;
+
+// Time after which a notification can be made.
+static jlong cleanup_trigger_permit_time = 0;
+
+// Minimum time since last service thread check before notification is
+// permitted.  The value of 500ms was an arbitrary choice; frequent, but not
+// too frequent.
+const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
+
+void OopStorage::trigger_cleanup_if_needed() {
+  MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
+  if (Atomic::load(&needs_cleanup_requested) &&
+      !needs_cleanup_triggered &&
+      (os::javaTimeNanos() > cleanup_trigger_permit_time)) {
+    needs_cleanup_triggered = true;
+    ml.notify_all();
+  }
+}
+
+bool OopStorage::has_cleanup_work_and_reset() {
+  assert_lock_strong(Service_lock);
+  cleanup_trigger_permit_time =
+    os::javaTimeNanos() + cleanup_trigger_defer_period;
+  needs_cleanup_triggered = false;
+  // Set the request flag false and return its old value.
+  // Needs to be atomic to avoid dropping a concurrent request.
+  // Can't use Atomic::xchg, which may not support bool.
+  return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
 }
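
For the consumer side of this handshake, a rough sketch of how a service thread might use these hooks (the storages array and the re-invocation policy are assumptions; the real ServiceThread differs in detail):

    // Wait under Service_lock until a cleanup request has been triggered.
    {
      MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
      while (!OopStorage::has_cleanup_work_and_reset()) {
        ml.wait();  // Woken by trigger_cleanup_if_needed().
      }
    }
    // Do the work without holding Service_lock; delete_empty_blocks takes
    // the per-storage _allocation_mutex itself.
    for (size_t i = 0; i < storage_count; ++i) {
      storages[i]->delete_empty_blocks();
    }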
 
 // Record that cleanup is needed, without notifying the Service thread.
 // Used by release(), where we can't lock even Service_lock.
 void OopStorage::record_needs_cleanup() {
-  Atomic::cmpxchg(needs_cleanup_marked, &_needs_cleanup, needs_cleanup_none);
-}
-
-// Record that cleanup is needed, and notify the Service thread.
-void OopStorage::notify_needs_cleanup() {
-  // Avoid re-notification if already notified.
-  const uint notified = needs_cleanup_notified;
-  if (Atomic::xchg(notified, &_needs_cleanup) != notified) {
-    MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
-    ml.notify_all();
-  }
+  // Set local flag first, else service thread could wake up and miss
+  // the request.  This order may instead (rarely) unnecessarily notify.
+  OrderAccess::release_store(&_needs_cleanup, true);
+  OrderAccess::release_store_fence(&needs_cleanup_requested, true);
 }
 
 bool OopStorage::delete_empty_blocks() {
+  // Service thread might have oopstorage work, but not for this object.
+  // Check for deferred updates even though that's not a service thread
+  // trigger; since we're here, we might as well process them.
+  if (!OrderAccess::load_acquire(&_needs_cleanup) &&
+      (OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
+    return false;
+  }
+
   MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
 
   // Clear the request before processing.
-  Atomic::store(needs_cleanup_none, &_needs_cleanup);
-  OrderAccess::fence();
+  OrderAccess::release_store_fence(&_needs_cleanup, false);
 
   // Other threads could be adding to the empty block count or the
   // deferred update list while we're working.  Set an upper bound on
   // how many updates we'll process and blocks we'll try to release,
   // so other threads can't cause an unbounded stay in this function.
-  size_t limit = block_count();
-  if (limit == 0) return false; // Empty storage; nothing at all to do.
+  // We add a bit of slop because the reduce_deferred_updates clause
+  // can cause blocks to be double counted.  If there are few blocks
+  // and many of them are deferred and empty, we might hit the limit
+  // and spin the caller without doing very much work.  Otherwise,
+  // we don't normally hit the limit anyway, instead running out of
+  // work to do.
+  size_t limit = block_count() + 10;
 
   for (size_t i = 0; i < limit; ++i) {
     // Process deferred updates, which might make empty blocks available.
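
The bound-plus-slop idea generalizes: when draining work that concurrent producers can replenish, cap the passes with a population snapshot plus slack instead of looping until empty. A generic sketch (hypothetical queue API), separate from the loop above:

    // Generic bounded drain: the cap keeps concurrent producers from
    // pinning this thread; the caller may simply come back later.
    template<typename Queue>
    void bounded_drain(Queue& q) {
      size_t limit = q.approximate_count() + 10;  // slop for double counting
      for (size_t i = 0; i < limit; ++i) {
        if (!q.process_one()) {
          return;  // Ran out of work before reaching the cap.
        }
      }
    }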
@@ -946,8 +978,8 @@
   _storage->relinquish_block_array(_active_array);
   update_concurrent_iteration_count(-1);
   if (_concurrent) {
-    // We may have deferred some work.
-    const_cast<OopStorage*>(_storage)->notify_needs_cleanup();
+    // We may have deferred some cleanup work.
+    const_cast<OopStorage*>(_storage)->record_needs_cleanup();
   }
 }