src/hotspot/share/utilities/concurrentHashTable.inline.hpp
changeset 55478 ae2e53e379cb
parent 54764 865ec913f916
child 58291 a013100f7a35
55477:c396e381cfa4 55478:ae2e53e379cb
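This changeset drops the VALUE template parameter from ConcurrentHashTable: the element type is now supplied by the configuration class as CONFIG::Value, and the BaseConfig default allocators are deleted below, so the allocation hooks live with the config as well. A sketch of what a config might look like under the new two-parameter interface, inferred from the signatures visible in this diff (MyTableConfig and MyValue are hypothetical names):

    class MyValue;
    class MyTableConfig : public AllStatic {
     public:
      typedef MyValue* Value;  // replaces the old VALUE template parameter
      static uintx get_hash(Value const& value, bool* is_dead);
      // Counterparts of the removed BaseConfig helpers:
      static void* allocate_node(size_t size, Value const& value) {
        return AllocateHeap(size, mtInternal);
      }
      static void free_node(void* memory, Value const& value) {
        FreeHeap(memory);
      }
    };
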
    51 static const void* POISON_PTR = (void*)0xffbadbac;
    52 #endif
    53 #endif
    54 
    55 // Node
-   56 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-   57 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
-   58 ConcurrentHashTable<VALUE, CONFIG, F>::
+   56 template <typename CONFIG, MEMFLAGS F>
+   57 inline typename ConcurrentHashTable<CONFIG, F>::Node*
+   58 ConcurrentHashTable<CONFIG, F>::
    59   Node::next() const
    60 {
    61   return OrderAccess::load_acquire(&_next);
    62 }
    63 
    64 // Bucket
-   65 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-   66 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
-   67 ConcurrentHashTable<VALUE, CONFIG, F>::
+   65 template <typename CONFIG, MEMFLAGS F>
+   66 inline typename ConcurrentHashTable<CONFIG, F>::Node*
+   67 ConcurrentHashTable<CONFIG, F>::
    68   Bucket::first_raw() const
    69 {
    70   return OrderAccess::load_acquire(&_first);
    71 }
    72 
-   73 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-   74 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+   73 template <typename CONFIG, MEMFLAGS F>
+   74 inline void ConcurrentHashTable<CONFIG, F>::
    75   Bucket::release_assign_node_ptr(
-   76     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst,
-   77     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const
+   76     typename ConcurrentHashTable<CONFIG, F>::Node* const volatile * dst,
+   77     typename ConcurrentHashTable<CONFIG, F>::Node* node) const
    78 {
    79   // Due to this assert this method is not static.
    80   assert(is_locked(), "Must be locked.");
    81   Node** tmp = (Node**)dst;
    82   OrderAccess::release_store(tmp, clear_set_state(node, *dst));
    83 }
    84 
-   85 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-   86 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
-   87 ConcurrentHashTable<VALUE, CONFIG, F>::
+   85 template <typename CONFIG, MEMFLAGS F>
+   86 inline typename ConcurrentHashTable<CONFIG, F>::Node*
+   87 ConcurrentHashTable<CONFIG, F>::
    88   Bucket::first() const
    89 {
    90   // We strip the state bits before returning the ptr.
    91   return clear_state(OrderAccess::load_acquire(&_first));
    92 }
    93 
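The state tests used here (is_state, clear_state and friends) work because the low two bits of a bucket's _first pointer carry the lock and redirect flags; node addresses are at least 4-byte aligned, so those bits are free. A minimal sketch of the tagging helpers, consistent with their uses in this file (the real definitions live elsewhere in this header; treat the bodies as illustrative):

    // Low two bits of the first-node pointer encode bucket state.
    static const uintptr_t STATE_LOCK_BIT     = 0x1;  // bucket is locked
    static const uintptr_t STATE_REDIRECT_BIT = 0x2;  // bucket moved to the new table
    static const uintptr_t STATE_MASK         = 0x3;
    static bool is_state(Node* node, uintptr_t bits) {
      return ((uintptr_t)node & bits) == bits;
    }
    static Node* set_state(Node* node, uintptr_t bits) {
      return (Node*)((uintptr_t)node | bits);
    }
    static Node* clear_state(Node* node) {
      return (Node*)((uintptr_t)node & ~STATE_MASK);
    }
    // Keep node's address, but take the state bits from 'state'.
    static Node* clear_set_state(Node* node, Node* state) {
      return (Node*)((uintptr_t)clear_state(node) | ((uintptr_t)state & STATE_MASK));
    }
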
-   94 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-   95 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+   94 template <typename CONFIG, MEMFLAGS F>
+   95 inline bool ConcurrentHashTable<CONFIG, F>::
    96   Bucket::have_redirect() const
    97 {
    98   return is_state(first_raw(), STATE_REDIRECT_BIT);
    99 }
   100 
-  101 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  102 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  101 template <typename CONFIG, MEMFLAGS F>
+  102 inline bool ConcurrentHashTable<CONFIG, F>::
   103   Bucket::is_locked() const
   104 {
   105   return is_state(first_raw(), STATE_LOCK_BIT);
   106 }
   107 
-  108 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  109 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  108 template <typename CONFIG, MEMFLAGS F>
+  109 inline void ConcurrentHashTable<CONFIG, F>::
   110   Bucket::lock()
   111 {
   112   int i = 0;
   113   // SpinYield would be unfair here
   114   while (!this->trylock()) {
   121       SpinPause();
   122     }
   123   }
   124 }
   125 
-  126 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  127 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  126 template <typename CONFIG, MEMFLAGS F>
+  127 inline void ConcurrentHashTable<CONFIG, F>::
   128   Bucket::release_assign_last_node_next(
-  129      typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node)
+  129      typename ConcurrentHashTable<CONFIG, F>::Node* node)
   130 {
   131   assert(is_locked(), "Must be locked.");
   132   Node* const volatile * ret = first_ptr();
   133   while (clear_state(*ret) != NULL) {
   134     ret = clear_state(*ret)->next_ptr();
   135   }
   136   release_assign_node_ptr(ret, node);
   137 }
   138 
-  139 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  140 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
-  141   Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node,
-  142                     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect
+  139 template <typename CONFIG, MEMFLAGS F>
+  140 inline bool ConcurrentHashTable<CONFIG, F>::
+  141   Bucket::cas_first(typename ConcurrentHashTable<CONFIG, F>::Node* node,
+  142                     typename ConcurrentHashTable<CONFIG, F>::Node* expect
   143                     )
   144 {
   145   if (is_locked()) {
   146     return false;
   147   }
   149     return true;
   150   }
   151   return false;
   152 }
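
cas_first() is the lock-free publish step for insertion: the new node is fully linked to the current head before the CAS, so concurrent readers see either the old chain or the complete new one. A sketch of the retry loop a caller would build around it (create_node/set_next are assumptions mirroring internal_insert further down):

    // Illustrative insert fast path over cas_first() (hypothetical call site):
    Node* first = bucket->first();                     // head with state bits stripped
    Node* new_node = Node::create_node(value, first);  // next pointer set before publish
    while (!bucket->cas_first(new_node, first)) {
      // CAS fails if the bucket is locked or the head moved; re-link and retry.
      first = bucket->first();
      new_node->set_next(first);
    }
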
   153 
-  154 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  155 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  154 template <typename CONFIG, MEMFLAGS F>
+  155 inline bool ConcurrentHashTable<CONFIG, F>::
   156   Bucket::trylock()
   157 {
   158   if (is_locked()) {
   159     return false;
   160   }
   164     return true;
   165   }
   166   return false;
   167 }
   168 
-  169 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  170 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  169 template <typename CONFIG, MEMFLAGS F>
+  170 inline void ConcurrentHashTable<CONFIG, F>::
   171   Bucket::unlock()
   172 {
   173   assert(is_locked(), "Must be locked.");
   174   assert(!have_redirect(),
   175          "Unlocking a bucket after it has reached terminal state.");
   176   OrderAccess::release_store(&_first, clear_state(first()));
   177 }
   178 
-  179 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  180 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  179 template <typename CONFIG, MEMFLAGS F>
+  180 inline void ConcurrentHashTable<CONFIG, F>::
   181   Bucket::redirect()
   182 {
   183   assert(is_locked(), "Must be locked.");
   184   OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
   185 }
   186 
   187 // InternalTable
-  188 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  189 inline ConcurrentHashTable<VALUE, CONFIG, F>::
+  188 template <typename CONFIG, MEMFLAGS F>
+  189 inline ConcurrentHashTable<CONFIG, F>::
   190   InternalTable::InternalTable(size_t log2_size)
   191     : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
   192       _hash_mask(~(~((size_t)0) << _log2_size))
   193 {
   194   assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
   199   for (size_t i = 0; i < _size; ++i) {
   200     new (_buckets + i) Bucket();
   201   }
   202 }
   203 
-  204 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  205 inline ConcurrentHashTable<VALUE, CONFIG, F>::
+  204 template <typename CONFIG, MEMFLAGS F>
+  205 inline ConcurrentHashTable<CONFIG, F>::
   206   InternalTable::~InternalTable()
   207 {
   208   FREE_C_HEAP_ARRAY(Bucket, _buckets);
   209 }
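
The constructor's mask arithmetic gives the classic power-of-two index mask: _hash_mask == _size - 1. A worked example of the expressions above (illustrative helper, not from this file):

    // For log2_size = 4: size == 16 and hash_mask == 0x0f, so masking a
    // hash yields a bucket index in [0, 15].
    static size_t bucket_index_example(size_t hash, size_t log2_size) {
      size_t size      = ((size_t)1) << log2_size;      // 1 << 4 == 16
      size_t hash_mask = ~(~((size_t)0) << log2_size);  // 0x0f == size - 1
      return hash & hash_mask;                          // e.g. 0x3a & 0x0f == 0x0a
    }
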
   210 
   211 // ScopedCS
-  212 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  213 inline ConcurrentHashTable<VALUE, CONFIG, F>::
-  214   ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
+  212 template <typename CONFIG, MEMFLAGS F>
+  213 inline ConcurrentHashTable<CONFIG, F>::
+  214   ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
   215     : _thread(thread),
   216       _cht(cht),
   217       _cs_context(GlobalCounter::critical_section_begin(_thread))
   218 {
   219   // This version is published now.
   220   if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
   221     OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
   222   }
   223 }
   224 
-  225 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  226 inline ConcurrentHashTable<VALUE, CONFIG, F>::
+  225 template <typename CONFIG, MEMFLAGS F>
+  226 inline ConcurrentHashTable<CONFIG, F>::
   227   ScopedCS::~ScopedCS()
   228 {
   229   GlobalCounter::critical_section_end(_thread, _cs_context);
   230 }
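
ScopedCS is the RAII reader guard: the constructor enters a GlobalCounter critical section (pinning the current table version against a concurrent resize freeing it) and clears _invisible_epoch so writers know this version has been seen; the destructor leaves the section. A sketch of the reader pattern it enables, mirroring get() near the end of this file (hypothetical call site):

    {
      ScopedCS cs(thread, cht);      // critical_section_begin in the constructor
      VALUE* val = cht->internal_get(thread, lookup_f, &grow_hint);
      // 'val' may only be used inside this scope; the table is pinned here.
    }                                // critical_section_end in the destructor
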
   231 
-  232 // BaseConfig
-  233 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  234 inline void* ConcurrentHashTable<VALUE, CONFIG, F>::
-  235   BaseConfig::allocate_node(size_t size, const VALUE& value)
-  236 {
-  237   return AllocateHeap(size, F);
-  238 }
-  239 
-  240 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  241 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
-  242   BaseConfig::free_node(void* memory, const VALUE& value)
-  243 {
-  244   FreeHeap(memory);
-  245 }
-  246 
-  247 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  248 template <typename LOOKUP_FUNC>
-  249 inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
+  232 template <typename CONFIG, MEMFLAGS F>
+  233 template <typename LOOKUP_FUNC>
+  234 inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
   250   MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
   251 {
   252   return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
   253 }
   254 
   255 // HaveDeletables
-  256 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  241 template <typename CONFIG, MEMFLAGS F>
   257 template <typename EVALUATE_FUNC>
-  258 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  243 inline bool ConcurrentHashTable<CONFIG, F>::
   259   HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
   260                                                       EVALUATE_FUNC& eval_f,
   261                                                       Bucket* prefetch_bucket)
   262 {
   263   // Instantiated for pointer type (true), so we can use prefetch.
   279     }
   280   }
   281   return false;
   282 }
   283 
-  284 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  269 template <typename CONFIG, MEMFLAGS F>
   285 template <bool b, typename EVALUATE_FUNC>
-  286 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  271 inline bool ConcurrentHashTable<CONFIG, F>::
   287   HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
   288                                                    EVALUATE_FUNC& eval_f,
   289                                                    Bucket* preb)
   290 {
   291   for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
   295   }
   296   return false;
   297 }
   298 
   299 // ConcurrentHashTable
-  300 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  301 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  285 template <typename CONFIG, MEMFLAGS F>
+  286 inline void ConcurrentHashTable<CONFIG, F>::
   302   write_synchonize_on_visible_epoch(Thread* thread)
   303 {
   304   assert(_resize_lock_owner == thread, "Re-size lock not held");
   305   OrderAccess::fence(); // Prevent below load from floating up.
   306   // If no reader saw this version we can skip write_synchronize.
   312   // A reader will zero this flag if it reads this/next version.
   313   OrderAccess::release_store(&_invisible_epoch, thread);
   314   GlobalCounter::write_synchronize();
   315 }
   316 
-  317 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  318 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  302 template <typename CONFIG, MEMFLAGS F>
+  303 inline bool ConcurrentHashTable<CONFIG, F>::
   319   try_resize_lock(Thread* locker)
   320 {
   321   if (_resize_lock->try_lock()) {
   322     if (_resize_lock_owner != NULL) {
   323       assert(locker != _resize_lock_owner, "Already own lock");
   331   _invisible_epoch = 0;
   332   _resize_lock_owner = locker;
   333   return true;
   334 }
   335 
-  336 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  337 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  321 template <typename CONFIG, MEMFLAGS F>
+  322 inline void ConcurrentHashTable<CONFIG, F>::
   338   lock_resize_lock(Thread* locker)
   339 {
   340   size_t i = 0;
   341   // If the lock is held by some other thread, the chances that it is
   342   // released quickly are low, so we prefer yielding.
   356   } while(true);
   357   _resize_lock_owner = locker;
   358   _invisible_epoch = 0;
   359 }
   360 
-  361 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  362 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  346 template <typename CONFIG, MEMFLAGS F>
+  347 inline void ConcurrentHashTable<CONFIG, F>::
   363   unlock_resize_lock(Thread* locker)
   364 {
   365   _invisible_epoch = 0;
   366   assert(locker == _resize_lock_owner, "Not unlocked by locker.");
   367   _resize_lock_owner = NULL;
   368   _resize_lock->unlock();
   369 }
   370 
-  371 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  372 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  356 template <typename CONFIG, MEMFLAGS F>
+  357 inline void ConcurrentHashTable<CONFIG, F>::
   373   free_nodes()
   374 {
   375   // We assume we are not MT during freeing.
   376   for (size_t node_it = 0; node_it < _table->_size; node_it++) {
   377     Bucket* bucket = _table->get_buckets() + node_it;
   382       Node::destroy_node(free_node);
   383     }
   384   }
   385 }
   386 
-  387 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  388 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
-  389 ConcurrentHashTable<VALUE, CONFIG, F>::
+  372 template <typename CONFIG, MEMFLAGS F>
+  373 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
+  374 ConcurrentHashTable<CONFIG, F>::
   390   get_table() const
   391 {
   392   return OrderAccess::load_acquire(&_table);
   393 }
   394 
-  395 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  396 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
-  397 ConcurrentHashTable<VALUE, CONFIG, F>::
+  380 template <typename CONFIG, MEMFLAGS F>
+  381 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
+  382 ConcurrentHashTable<CONFIG, F>::
   398   get_new_table() const
   399 {
   400   return OrderAccess::load_acquire(&_new_table);
   401 }
   402 
-  403 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  404 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
-  405 ConcurrentHashTable<VALUE, CONFIG, F>::
+  388 template <typename CONFIG, MEMFLAGS F>
+  389 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
+  390 ConcurrentHashTable<CONFIG, F>::
   406   set_table_from_new()
   407 {
   408   InternalTable* old_table = _table;
   409   // Publish the new table.
   410   OrderAccess::release_store(&_table, _new_table);
   414   _new_table = NULL;
   415   DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
   416   return old_table;
   417 }
   418 
-  419 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  420 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  404 template <typename CONFIG, MEMFLAGS F>
+  405 inline void ConcurrentHashTable<CONFIG, F>::
   421   internal_grow_range(Thread* thread, size_t start, size_t stop)
   422 {
   423   assert(stop <= _table->_size, "Outside backing array");
   424   assert(_new_table != NULL, "Grow not proper setup before start");
   425   // The state is also copied here. Hence all buckets in new table will be
   454           _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
   455     )
   456   }
   457 }
   458 
-  459 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  444 template <typename CONFIG, MEMFLAGS F>
   460 template <typename LOOKUP_FUNC, typename DELETE_FUNC>
-  461 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  446 inline bool ConcurrentHashTable<CONFIG, F>::
   462   internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
   463 {
   464   Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
   465   assert(bucket->is_locked(), "Must be locked.");
   466   Node* const volatile * rem_n_prev = bucket->first_ptr();
   487   Node::destroy_node(rem_n);
   488   JFR_ONLY(_stats_rate.remove();)
   489   return true;
   490 }
   491 
-  492 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  477 template <typename CONFIG, MEMFLAGS F>
   493 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
-  494 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  479 inline void ConcurrentHashTable<CONFIG, F>::
   495   do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
   496                             EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
   497 {
   498   // Here we hold the resize lock, so the table is SMR safe and there is no
   499   // new table. This could be done in parallel if we want.
   540     cs_context = GlobalCounter::critical_section_begin(thread);
   541   }
   542   GlobalCounter::critical_section_end(thread, cs_context);
   543 }
   544 
-  545 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  530 template <typename CONFIG, MEMFLAGS F>
   546 template <typename LOOKUP_FUNC>
-  547 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  532 inline void ConcurrentHashTable<CONFIG, F>::
   548   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
   549 {
   550   assert(bucket->is_locked(), "Must be locked.");
   551 
   552   size_t dels = 0;
   577       DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
   578     }
   579   }
   580 }
   581 
-  582 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  583 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
-  584 ConcurrentHashTable<VALUE, CONFIG, F>::
+  567 template <typename CONFIG, MEMFLAGS F>
+  568 inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
+  569 ConcurrentHashTable<CONFIG, F>::
   585   get_bucket(uintx hash) const
   586 {
   587   InternalTable* table = get_table();
   588   Bucket* bucket = get_bucket_in(table, hash);
   589   if (bucket->have_redirect()) {
   591     bucket = get_bucket_in(table, hash);
   592   }
   593   return bucket;
   594 }
   595 
-  596 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  597 inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
-  598 ConcurrentHashTable<VALUE, CONFIG, F>::
+  581 template <typename CONFIG, MEMFLAGS F>
+  582 inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
+  583 ConcurrentHashTable<CONFIG, F>::
   599   get_bucket_locked(Thread* thread, const uintx hash)
   600 {
   601   Bucket* bucket;
   602   int i = 0;
   603   // SpinYield would be unfair here
   622   }
   623   return bucket;
   624 }
   625 
   626 // Always called within critical section
-  627 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  612 template <typename CONFIG, MEMFLAGS F>
   628 template <typename LOOKUP_FUNC>
-  629 typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
-  630 ConcurrentHashTable<VALUE, CONFIG, F>::
+  614 typename ConcurrentHashTable<CONFIG, F>::Node*
+  615 ConcurrentHashTable<CONFIG, F>::
   631   get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
   632            bool* have_dead, size_t* loops) const
   633 {
   634   size_t loop_count = 0;
   635   Node* node = bucket->first();
   648     *loops = loop_count;
   649   }
   650   return node;
   651 }
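
get_node() asks the lookup functor only for a hash and an equality predicate. A hypothetical minimal LOOKUP_FUNC, mirroring the lookup_f.get_hash() use above (all names illustrative, for a table whose CONFIG::Value is MyValue*):

    class MyLookup : public StackObj {
      const uintx _hash;
      const char* _key;
     public:
      MyLookup(uintx hash, const char* key) : _hash(hash), _key(key) {}
      uintx get_hash() const { return _hash; }
      // Called on candidate entries; may flag dead ones via is_dead.
      bool equals(MyValue** value, bool* is_dead) {
        return (*value)->equals_key(_key);  // equals_key is hypothetical
      }
    };
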
   652 
-  653 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  654 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  638 template <typename CONFIG, MEMFLAGS F>
+  639 inline bool ConcurrentHashTable<CONFIG, F>::
   655   unzip_bucket(Thread* thread, InternalTable* old_table,
   656                InternalTable* new_table, size_t even_index, size_t odd_index)
   657 {
   658   Node* aux = old_table->get_bucket(even_index)->first();
   659   if (aux == NULL) {
   706     }
   707   }
   708   return true;
   709 }
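
unzip_bucket() is the heart of growing: the new table is exactly twice the size, so every node in old bucket i lands either in new bucket i (even) or new bucket i + old_size (odd), chosen by the hash bit the larger mask newly exposes. A worked sketch of the index arithmetic (names illustrative):

    // Doubling from log2_size to log2_size + 1 (illustrative):
    size_t old_size   = ((size_t)1) << old_log2_size;
    size_t even_index = hash & (old_size - 1);        // the node's old bucket
    size_t odd_index  = even_index + old_size;        // its sibling in the new table
    bool   goes_odd   = ((hash >> old_log2_size) & 1) != 0;  // newly exposed bit
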
   710 
-  711 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  712 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  696 template <typename CONFIG, MEMFLAGS F>
+  697 inline bool ConcurrentHashTable<CONFIG, F>::
   713   internal_shrink_prolog(Thread* thread, size_t log2_size)
   714 {
   715   if (!try_resize_lock(thread)) {
   716     return false;
   717   }
   723   }
   724   _new_table = new InternalTable(_table->_log2_size - 1);
   725   return true;
   726 }
   727 
-  728 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  729 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  713 template <typename CONFIG, MEMFLAGS F>
+  714 inline void ConcurrentHashTable<CONFIG, F>::
   730   internal_shrink_epilog(Thread* thread)
   731 {
   732   assert(_resize_lock_owner == thread, "Re-size lock not held");
   733 
   734   InternalTable* old_table = set_table_from_new();
   742 #endif
   743   // ABA safe, old_table not visible to any other threads.
   744   delete old_table;
   745 }
   746 
-  747 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  748 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  732 template <typename CONFIG, MEMFLAGS F>
+  733 inline void ConcurrentHashTable<CONFIG, F>::
   749   internal_shrink_range(Thread* thread, size_t start, size_t stop)
   750 {
   751   // The state is also copied here.
   752   // Hence all buckets in new table will be locked.
   753   for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
   779     DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
   780                                                   (Node*)POISON_PTR);)
   781   }
   782 }
   783 
-  784 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  785 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  769 template <typename CONFIG, MEMFLAGS F>
+  770 inline bool ConcurrentHashTable<CONFIG, F>::
   786   internal_shrink(Thread* thread, size_t log2_size)
   787 {
   788   if (!internal_shrink_prolog(thread, log2_size)) {
   789     assert(_resize_lock_owner != thread, "Re-size lock held");
   790     return false;
   794   internal_shrink_epilog(thread);
   795   assert(_resize_lock_owner != thread, "Re-size lock held");
   796   return true;
   797 }
   798 
-  799 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  800 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  784 template <typename CONFIG, MEMFLAGS F>
+  785 inline bool ConcurrentHashTable<CONFIG, F>::
   801   internal_grow_prolog(Thread* thread, size_t log2_size)
   802 {
   803   // We only do this double checking of _size_limit_reached/
   804   // is_max_size_reached() in the grow path, since grow means high load
   805   // on the table while shrink means low load.
   823   }
   824 
   825   return true;
   826 }
   827 
-  828 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  829 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  813 template <typename CONFIG, MEMFLAGS F>
+  814 inline void ConcurrentHashTable<CONFIG, F>::
   830   internal_grow_epilog(Thread* thread)
   831 {
   832   assert(_resize_lock_owner == thread, "Should be locked");
   833 
   834   InternalTable* old_table = set_table_from_new();
   841 #endif
   842   // ABA safe, old_table not visible to any other threads.
   843   delete old_table;
   844 }
   845 
-  846 template <typename VALUE, typename CONFIG, MEMFLAGS F>
-  847 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  831 template <typename CONFIG, MEMFLAGS F>
+  832 inline bool ConcurrentHashTable<CONFIG, F>::
   848   internal_grow(Thread* thread, size_t log2_size)
   849 {
   850   if (!internal_grow_prolog(thread, log2_size)) {
   851     assert(_resize_lock_owner != thread, "Re-size lock held");
   852     return false;
   857   assert(_resize_lock_owner != thread, "Re-size lock held");
   858   return true;
   859 }
   860 
   861 // Always called within critical section
-  862 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  847 template <typename CONFIG, MEMFLAGS F>
   863 template <typename LOOKUP_FUNC>
-  864 inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
+  849 inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
   865   internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
   866 {
   867   bool clean = false;
   868   size_t loops = 0;
   869   VALUE* ret = NULL;
   878   }
   879 
   880   return ret;
   881 }
   882 
-  883 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  868 template <typename CONFIG, MEMFLAGS F>
   884 template <typename LOOKUP_FUNC>
-  885 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  870 inline bool ConcurrentHashTable<CONFIG, F>::
   886   internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
   887                   bool* grow_hint, bool* clean_hint)
   888 {
   889   bool ret = false;
   890   bool clean = false;
   943   }
   944 
   945   return ret;
   946 }
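
Callers normally reach internal_insert() through the public insert(); a hypothetical call site (insert() itself is outside this hunk, names illustrative):

    // Insert a value; grow_hint reports that the probed chain was long.
    MyLookup lookup(hash, key);
    bool grow_hint = false;
    if (cht->insert(thread, lookup, value, &grow_hint) && grow_hint) {
      cht->grow(thread);  // chains are getting long; try to double the table
    }
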
   947 
-  948 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  933 template <typename CONFIG, MEMFLAGS F>
   949 template <typename FUNC>
-  950 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+  935 inline bool ConcurrentHashTable<CONFIG, F>::
   951   visit_nodes(Bucket* bucket, FUNC& visitor_f)
   952 {
   953   Node* current_node = bucket->first();
   954   while (current_node != NULL) {
   955     if (!visitor_f(current_node->value())) {
   958     current_node = current_node->next();
   959   }
   960   return true;
   961 }
   962 
-  963 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  948 template <typename CONFIG, MEMFLAGS F>
   964 template <typename FUNC>
-  965 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+  950 inline void ConcurrentHashTable<CONFIG, F>::
   966   do_scan_locked(Thread* thread, FUNC& scan_f)
   967 {
   968   assert(_resize_lock_owner == thread, "Re-size lock not held");
   969   // We could hold a critical section over the entire loop, but that would
   970   // block updates for a long time. Instead we choose to block resizes.
   975       break; /* ends critical section */
   976     }
   977   } /* ends critical section */
   978 }
   979 
-  980 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+  965 template <typename CONFIG, MEMFLAGS F>
   981 template <typename EVALUATE_FUNC>
-  982 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
+  967 inline size_t ConcurrentHashTable<CONFIG, F>::
   983   delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
   984                      size_t num_del, Node** ndel)
   985 {
   986   size_t dels = 0;
   987   Node* const volatile * rem_n_prev = bucket->first_ptr();
  1002   }
  1003   return dels;
  1004 }
  1005 
  1006 // Constructor
- 1007 template <typename VALUE, typename CONFIG, MEMFLAGS F>
- 1008 inline ConcurrentHashTable<VALUE, CONFIG, F>::
+  992 template <typename CONFIG, MEMFLAGS F>
+  993 inline ConcurrentHashTable<CONFIG, F>::
  1009   ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
  1010     : _new_table(NULL), _log2_size_limit(log2size_limit),
  1011        _log2_start_size(log2size), _grow_hint(grow_hint),
  1012        _size_limit_reached(false), _resize_lock_owner(NULL),
  1013        _invisible_epoch(0)
  1019   _table = new InternalTable(log2size);
  1020   assert(log2size_limit >= log2size, "bad ergo");
  1021   _size_limit_reached = _table->_log2_size == _log2_size_limit;
  1022 }
  1023 
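Putting the constructor parameters together, a hypothetical instantiation under the new two-parameter interface (MyTableConfig as sketched at the top of this change):

    // Illustrative: start with 2^16 buckets, allow growth to 2^24, and treat
    // lookups that walk more than 4 nodes as a grow hint.
    typedef ConcurrentHashTable<MyTableConfig, mtInternal> MyTable;
    MyTable* table = new MyTable(16, 24, 4);
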
- 1024 template <typename VALUE, typename CONFIG, MEMFLAGS F>
- 1025 inline ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1009 template <typename CONFIG, MEMFLAGS F>
+ 1010 inline ConcurrentHashTable<CONFIG, F>::
  1026   ~ConcurrentHashTable()
  1027 {
  1028   delete _resize_lock;
  1029   free_nodes();
  1030   delete _table;
  1031 }
  1032 
- 1033 template <typename VALUE, typename CONFIG, MEMFLAGS F>
- 1034 inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1018 template <typename CONFIG, MEMFLAGS F>
+ 1019 inline size_t ConcurrentHashTable<CONFIG, F>::
  1035   get_size_log2(Thread* thread)
  1036 {
  1037   ScopedCS cs(thread, this);
  1038   return _table->_log2_size;
  1039 }
  1040 
- 1041 template <typename VALUE, typename CONFIG, MEMFLAGS F>
- 1042 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1026 template <typename CONFIG, MEMFLAGS F>
+ 1027 inline bool ConcurrentHashTable<CONFIG, F>::
  1043   shrink(Thread* thread, size_t size_limit_log2)
  1044 {
  1045   size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
  1046   bool ret = internal_shrink(thread, tmp);
  1047   return ret;
  1048 }
  1049 
- 1050 template <typename VALUE, typename CONFIG, MEMFLAGS F>
- 1051 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1035 template <typename CONFIG, MEMFLAGS F>
+ 1036 inline bool ConcurrentHashTable<CONFIG, F>::
  1052   grow(Thread* thread, size_t size_limit_log2)
  1053 {
  1054   size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
  1055   return internal_grow(thread, tmp);
  1056 }
  1057 
- 1058 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+ 1043 template <typename CONFIG, MEMFLAGS F>
  1059 template <typename LOOKUP_FUNC, typename FOUND_FUNC>
- 1060 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1045 inline bool ConcurrentHashTable<CONFIG, F>::
  1061   get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
  1062 {
  1063   bool ret = false;
  1064   ScopedCS cs(thread, this);
  1065   VALUE* val = internal_get(thread, lookup_f, grow_hint);
  1068     ret = true;
  1069   }
  1070   return ret;
  1071 }
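
get() applies the caller's FOUND_FUNC to the value while still inside the ScopedCS, so data must be copied out rather than retained as a pointer. A hypothetical functor for the call (names illustrative, reusing MyLookup from above):

    // Copy what you need while the table version is pinned.
    struct MyFound {
      int _len;
      MyFound() : _len(-1) {}
      void operator()(MyValue** value) { _len = (*value)->length(); }  // hypothetical
    };
    MyLookup lookup(hash, key);
    MyFound found_f;
    bool found = cht->get(thread, lookup, found_f);
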
  1072 
- 1073 template <typename VALUE, typename CONFIG, MEMFLAGS F>
- 1074 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1058 template <typename CONFIG, MEMFLAGS F>
+ 1059 inline bool ConcurrentHashTable<CONFIG, F>::
  1075   unsafe_insert(const VALUE& value) {
  1076   bool dead_hash = false;
  1077   size_t hash = CONFIG::get_hash(value, &dead_hash);
  1078   if (dead_hash) {
  1079     return false;
  1088   }
  1089   JFR_ONLY(_stats_rate.add();)
  1090   return true;
  1091 }
  1092 
- 1093 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+ 1078 template <typename CONFIG, MEMFLAGS F>
  1094 template <typename SCAN_FUNC>
- 1095 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1080 inline bool ConcurrentHashTable<CONFIG, F>::
  1096   try_scan(Thread* thread, SCAN_FUNC& scan_f)
  1097 {
  1098   if (!try_resize_lock(thread)) {
  1099     return false;
  1100   }
  1101   do_scan_locked(thread, scan_f);
  1102   unlock_resize_lock(thread);
  1103   return true;
  1104 }
  1105 
- 1106 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+ 1091 template <typename CONFIG, MEMFLAGS F>
  1107 template <typename SCAN_FUNC>
- 1108 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1093 inline void ConcurrentHashTable<CONFIG, F>::
  1109   do_scan(Thread* thread, SCAN_FUNC& scan_f)
  1110 {
  1111   assert(!SafepointSynchronize::is_at_safepoint(),
  1112          "must be outside a safepoint");
  1113   assert(_resize_lock_owner != thread, "Re-size lock held");
  1115   do_scan_locked(thread, scan_f);
  1116   unlock_resize_lock(thread);
  1117   assert(_resize_lock_owner != thread, "Re-size lock held");
  1118 }
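
A SCAN_FUNC receives each value and returns true to continue or false to stop early (see visit_nodes above). A hypothetical counting scan (names illustrative):

    // Count entries; do_scan blocks resizes, not readers or writers.
    struct CountScan {
      size_t _count;
      CountScan() : _count(0) {}
      bool operator()(MyValue** value) { _count++; return true; }
    };
    CountScan scan_f;
    cht->do_scan(thread, scan_f);
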
  1119 
- 1120 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+ 1105 template <typename CONFIG, MEMFLAGS F>
  1121 template <typename SCAN_FUNC>
- 1122 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1107 inline void ConcurrentHashTable<CONFIG, F>::
  1123   do_safepoint_scan(SCAN_FUNC& scan_f)
  1124 {
  1125   // We only allow this method to be used during a safepoint.
  1126   assert(SafepointSynchronize::is_at_safepoint(),
  1127          "must only be called in a safepoint");
  1158       return;
  1159     }
  1160   }
  1161 }
  1162 
- 1163 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+ 1148 template <typename CONFIG, MEMFLAGS F>
  1164 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
- 1165 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1150 inline bool ConcurrentHashTable<CONFIG, F>::
  1166   try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
  1167 {
  1168   if (!try_resize_lock(thread)) {
  1169     return false;
  1170   }
  1172   unlock_resize_lock(thread);
  1173   assert(_resize_lock_owner != thread, "Re-size lock held");
  1174   return true;
  1175 }
  1176 
- 1177 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+ 1162 template <typename CONFIG, MEMFLAGS F>
  1178 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
- 1179 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1164 inline void ConcurrentHashTable<CONFIG, F>::
  1180   bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
  1181 {
  1182   assert(!SafepointSynchronize::is_at_safepoint(),
  1183          "must be outside a safepoint");
  1184   lock_resize_lock(thread);
  1185   do_bulk_delete_locked(thread, eval_f, del_f);
  1186   unlock_resize_lock(thread);
  1187 }
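
bulk_delete() splits the work between two functors: eval_f selects victims while the bucket is locked, and del_f disposes of them afterwards, once a write_synchronize guarantees no reader can still see them. A hypothetical pair (names illustrative):

    struct IsDeadEval {
      bool operator()(MyValue** value) { return (*value)->is_dead(); }  // hypothetical
    };
    struct Deleter {
      void operator()(MyValue** value) { delete *value; }
    };
    IsDeadEval eval_f;
    Deleter del_f;
    cht->bulk_delete(thread, eval_f, del_f);
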
  1188 
- 1189 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+ 1174 template <typename CONFIG, MEMFLAGS F>
  1190 template <typename VALUE_SIZE_FUNC>
- 1191 inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1176 inline TableStatistics ConcurrentHashTable<CONFIG, F>::
  1192   statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
  1193 {
  1194   NumberSeq summary;
  1195   size_t literal_bytes = 0;
  1196   InternalTable* table = get_table();
  1211   }
  1212 
  1213   return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node));
  1214 }
  1215 
- 1216 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+ 1201 template <typename CONFIG, MEMFLAGS F>
  1217 template <typename VALUE_SIZE_FUNC>
- 1218 inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1203 inline TableStatistics ConcurrentHashTable<CONFIG, F>::
  1219   statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old)
  1220 {
  1221   if (!try_resize_lock(thread)) {
  1222     return old;
  1223   }
  1226   unlock_resize_lock(thread);
  1227 
  1228   return ts;
  1229 }
  1230 
- 1231 template <typename VALUE, typename CONFIG, MEMFLAGS F>
+ 1216 template <typename CONFIG, MEMFLAGS F>
  1232 template <typename VALUE_SIZE_FUNC>
- 1233 inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+ 1218 inline void ConcurrentHashTable<CONFIG, F>::
  1234   statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
  1235                 outputStream* st, const char* table_name)
  1236 {
  1237   if (!try_resize_lock(thread)) {
  1238     st->print_cr("statistics unavailable at this moment");
  1243   unlock_resize_lock(thread);
  1244 
  1245   ts.print(st, table_name);
  1246 }
  1247 
- 1248 template <typename VALUE, typename CONFIG, MEMFLAGS F>
- 1249 inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
- 1250   try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
+ 1233 template <typename CONFIG, MEMFLAGS F>
+ 1234 inline bool ConcurrentHashTable<CONFIG, F>::
+ 1235   try_move_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht)
  1251 {
  1252   if (!try_resize_lock(thread)) {
  1253     return false;
  1254   }
  1255   assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");