src/hotspot/share/utilities/concurrentHashTable.inline.hpp
changeset 51405 8b23aa7cef47
parent 51334 cc2c79d22508
child 52332 d2a3503c72f7
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp	Tue Aug 14 14:08:04 2018 -0700
+++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp	Tue Aug 14 18:42:14 2018 -0500
@@ -540,6 +540,8 @@
 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
 {
+  assert(bucket->is_locked(), "Must be locked.");
+
   size_t dels = 0;
   Node* ndel[BULK_DELETE_LIMIT];
   Node* const volatile * rem_n_prev = bucket->first_ptr();
@@ -874,7 +876,7 @@
 template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
-                  CALLBACK_FUNC& callback, bool* grow_hint)
+                  CALLBACK_FUNC& callback, bool* grow_hint, bool* clean_hint)
 {
   bool ret = false;
   bool clean = false;
@@ -925,15 +927,20 @@
   } else if (i == 0 && clean) {
     // We only do cleaning on fast inserts.
     Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
-    assert(bucket->is_locked(), "Must be locked.");
     delete_in_bucket(thread, bucket, lookup_f);
     bucket->unlock();
+
+    clean = false;
   }
 
   if (grow_hint != NULL) {
     *grow_hint = loops > _grow_hint;
   }
 
+  if (clean_hint != NULL) {
+    *clean_hint = clean;
+  }
+
   return ret;
 }
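
Caller-side context for the new out-parameter (not part of the changeset): in the hunks above, clean is cleared once delete_in_bucket() has run on the fast-insert path, so a true *clean_hint only reports that logically deleted nodes were seen but not cleaned during this insert, leaving the caller to decide when to pay for that work. The standalone sketch below illustrates the hint pattern with a toy table; ToyTable, remove(), cleanup() and the grow threshold are illustrative assumptions, not ConcurrentHashTable's API.

// Standalone illustration of the "cleanup hint" pattern (not HotSpot code).
// insert() only reports that stale entries were observed; the caller decides
// when to run the cleanup, keeping the insert fast path short.
#include <cstddef>
#include <iostream>
#include <unordered_map>

class ToyTable {                             // hypothetical stand-in
  std::unordered_map<int, bool> _entries;    // false == logically deleted
  std::size_t _dead = 0;

 public:
  // Shaped after internal_insert: optional grow and clean hints.
  bool insert(int key, bool* grow_hint, bool* clean_hint) {
    bool added = _entries.emplace(key, true).second;
    if (grow_hint != nullptr) {
      *grow_hint = _entries.size() > 8;      // arbitrary toy threshold
    }
    if (clean_hint != nullptr) {
      *clean_hint = _dead > 0;               // stale entries worth cleaning
    }
    return added;
  }

  void remove(int key) {                     // logical delete only
    auto it = _entries.find(key);
    if (it != _entries.end() && it->second) {
      it->second = false;
      ++_dead;
    }
  }

  void cleanup() {                           // deferred physical removal
    for (auto it = _entries.begin(); it != _entries.end();) {
      if (it->second) {
        ++it;
      } else {
        it = _entries.erase(it);
      }
    }
    _dead = 0;
  }
};

int main() {
  ToyTable table;
  bool grow_hint = false;
  bool clean_hint = false;

  table.insert(1, nullptr, nullptr);
  table.remove(1);                           // leaves a stale entry behind

  table.insert(2, &grow_hint, &clean_hint);  // insert reports pending cleanup
  if (clean_hint) {
    table.cleanup();                         // caller schedules the cleanup
  }
  std::cout << "clean_hint was " << std::boolalpha << clean_hint << '\n';
  return 0;
}

This mirrors the intent visible in the diff: inline cleaning happens only on the fast-insert path ("We only do cleaning on fast inserts"), and the hint gives callers on the other paths a way to trigger the cleanup themselves later.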