src/hotspot/share/utilities/concurrentHashTable.inline.hpp
changeset 59247 56bf71d64d51
parent 58291 a013100f7a35
child 59252 623722a6aeb9
@@ -56,41 +56,41 @@
 template <typename CONFIG, MEMFLAGS F>
 inline typename ConcurrentHashTable<CONFIG, F>::Node*
 ConcurrentHashTable<CONFIG, F>::
   Node::next() const
 {
-  return OrderAccess::load_acquire(&_next);
+  return Atomic::load_acquire(&_next);
 }
 
 // Bucket
 template <typename CONFIG, MEMFLAGS F>
 inline typename ConcurrentHashTable<CONFIG, F>::Node*
 ConcurrentHashTable<CONFIG, F>::
   Bucket::first_raw() const
 {
-  return OrderAccess::load_acquire(&_first);
+  return Atomic::load_acquire(&_first);
 }
 
 template <typename CONFIG, MEMFLAGS F>
 inline void ConcurrentHashTable<CONFIG, F>::
   Bucket::release_assign_node_ptr(
     typename ConcurrentHashTable<CONFIG, F>::Node* const volatile * dst,
     typename ConcurrentHashTable<CONFIG, F>::Node* node) const
 {
   // Due to this assert, this method is not static.
   assert(is_locked(), "Must be locked.");
   Node** tmp = (Node**)dst;
-  OrderAccess::release_store(tmp, clear_set_state(node, *dst));
+  Atomic::release_store(tmp, clear_set_state(node, *dst));
 }
 
 template <typename CONFIG, MEMFLAGS F>
 inline typename ConcurrentHashTable<CONFIG, F>::Node*
 ConcurrentHashTable<CONFIG, F>::
   Bucket::first() const
 {
   // We strip the state bits before returning the ptr.
-  return clear_state(OrderAccess::load_acquire(&_first));
+  return clear_state(Atomic::load_acquire(&_first));
 }
 
 template <typename CONFIG, MEMFLAGS F>
 inline bool ConcurrentHashTable<CONFIG, F>::
   Bucket::have_redirect() const
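
This changeset is a mechanical rename: HotSpot's acquire loads and release stores move from OrderAccess to Atomic, with the same call shapes and the same memory-ordering semantics. As a standalone sketch of the pairing these accessors rely on, here is the same publication pattern written with standard C++ std::atomic (names are illustrative, not HotSpot code):

#include <atomic>

// A linked-list node whose next pointer is published with release and read
// with acquire, mirroring Node::next() above.
struct DemoNode {
  int value;
  std::atomic<DemoNode*> next{nullptr};
};

// Writer: construct the node fully, then publish it. The release store
// guarantees the node's fields become visible no later than the pointer.
void publish_next(DemoNode* head, DemoNode* fresh) {
  head->next.store(fresh, std::memory_order_release);
}

// Reader: the acquire load pairs with the release store above, so a reader
// that sees the pointer also sees the initialized fields behind it.
DemoNode* load_next(const DemoNode* n) {
  return n->next.load(std::memory_order_acquire);
}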
@@ -171,19 +171,19 @@
   Bucket::unlock()
 {
   assert(is_locked(), "Must be locked.");
   assert(!have_redirect(),
          "Unlocking a bucket after it has reached terminal state.");
-  OrderAccess::release_store(&_first, clear_state(first()));
+  Atomic::release_store(&_first, clear_state(first()));
 }
 
 template <typename CONFIG, MEMFLAGS F>
 inline void ConcurrentHashTable<CONFIG, F>::
   Bucket::redirect()
 {
   assert(is_locked(), "Must be locked.");
-  OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
+  Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
 }
 
 // InternalTable
 template <typename CONFIG, MEMFLAGS F>
 inline ConcurrentHashTable<CONFIG, F>::
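
unlock() and redirect() work because the bucket packs its state into the low bits of the first-node pointer. A standalone sketch of that tagging scheme; the bit values here are illustrative, the real constants live in the table's header:

#include <cstdint>

// Nodes are word-aligned, so the two low bits of the first-node pointer are
// free to carry bucket state such as a lock bit and a redirect bit.
const uintptr_t DEMO_STATE_LOCK_BIT     = 0x1;
const uintptr_t DEMO_STATE_REDIRECT_BIT = 0x2;
const uintptr_t DEMO_STATE_MASK         = 0x3;

template <typename Node>
Node* demo_clear_state(Node* ptr) {              // strip all state bits
  return (Node*)((uintptr_t)ptr & ~DEMO_STATE_MASK);
}

template <typename Node>
Node* demo_set_state(Node* ptr, uintptr_t bit) { // tag the pointer with a bit
  return (Node*)((uintptr_t)ptr | bit);
}

template <typename Node>
Node* demo_clear_set_state(Node* newptr, Node* oldptr) {
  // Use the new pointer value but keep the old pointer's state bits, which
  // is what release_assign_node_ptr needs while the bucket stays locked.
  return (Node*)(((uintptr_t)newptr & ~DEMO_STATE_MASK) |
                 ((uintptr_t)oldptr & DEMO_STATE_MASK));
}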
@@ -215,12 +215,12 @@
     : _thread(thread),
       _cht(cht),
       _cs_context(GlobalCounter::critical_section_begin(_thread))
 {
   // This version is published now.
-  if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
-    OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
+  if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) {
+    Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
   }
 }
 
 template <typename CONFIG, MEMFLAGS F>
 inline ConcurrentHashTable<CONFIG, F>::
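
This constructor is the reader half of the epoch handshake: enter a GlobalCounter critical section, and if a bulk operation has marked the current version as unobserved, zero the flag so the writer knows a reader saw it. A standalone sketch under those assumptions (names are illustrative; the real critical-section bookkeeping is in GlobalCounter):

#include <atomic>

struct DemoCHT {
  std::atomic<void*> invisible_epoch{nullptr};
};

struct DemoScopedCS {
  explicit DemoScopedCS(DemoCHT* cht) {
    // GlobalCounter::critical_section_begin(...) would run here.
    // Zeroing the flag publishes "this version now has a reader".
    if (cht->invisible_epoch.load(std::memory_order_acquire) != nullptr) {
      cht->invisible_epoch.store(nullptr, std::memory_order_release);
      // Approximates release_store_fence: store, then a full fence.
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }
  }
  ~DemoScopedCS() {
    // GlobalCounter::critical_section_end(...) would run here.
  }
};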
@@ -287,17 +287,17 @@
   write_synchonize_on_visible_epoch(Thread* thread)
 {
   assert(_resize_lock_owner == thread, "Re-size lock not held");
   OrderAccess::fence(); // Prevent the load below from floating up.
   // If no reader saw this version we can skip write_synchronize.
-  if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
+  if (Atomic::load_acquire(&_invisible_epoch) == thread) {
     return;
   }
   assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
   // We mark this/next version that we are synchronizing on as not published.
   // A reader will zero this flag if it reads this/next version.
-  OrderAccess::release_store(&_invisible_epoch, thread);
+  Atomic::release_store(&_invisible_epoch, thread);
   GlobalCounter::write_synchronize();
 }
 
 template <typename CONFIG, MEMFLAGS F>
 inline bool ConcurrentHashTable<CONFIG, F>::
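
The writer half of the handshake: the bulk operation leaves its own thread pointer in _invisible_epoch, and if that marker is still there on the next call, no reader entered a critical section in between, so the expensive grace-period wait can be skipped. A standalone sketch of the same optimization (the wait function is an illustrative stand-in for GlobalCounter::write_synchronize()):

#include <atomic>

std::atomic<void*> demo_invisible_epoch{nullptr};

void demo_wait_for_readers() { /* grace period: wait out all readers */ }

void demo_write_synchronize_on_visible_epoch(void* self) {
  // Full fence: keep the load below from floating above earlier stores.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  if (demo_invisible_epoch.load(std::memory_order_acquire) == self) {
    // Our own marker is still in place, so no reader ran since the last
    // synchronize; the grace period can be skipped.
    return;
  }
  // Mark the version we are about to synchronize on as not yet observed; a
  // reader that comes along will zero this flag (see the reader sketch above).
  demo_invisible_epoch.store(self, std::memory_order_release);
  demo_wait_for_readers();
}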
@@ -372,29 +372,29 @@
 template <typename CONFIG, MEMFLAGS F>
 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 ConcurrentHashTable<CONFIG, F>::
   get_table() const
 {
-  return OrderAccess::load_acquire(&_table);
+  return Atomic::load_acquire(&_table);
 }
 
 template <typename CONFIG, MEMFLAGS F>
 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 ConcurrentHashTable<CONFIG, F>::
   get_new_table() const
 {
-  return OrderAccess::load_acquire(&_new_table);
+  return Atomic::load_acquire(&_new_table);
 }
 
 template <typename CONFIG, MEMFLAGS F>
 inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
 ConcurrentHashTable<CONFIG, F>::
   set_table_from_new()
 {
   InternalTable* old_table = _table;
   // Publish the new table.
-  OrderAccess::release_store(&_table, _new_table);
+  Atomic::release_store(&_table, _new_table);
   // All must see this.
   GlobalCounter::write_synchronize();
   // _new_table is not read any more.
   _new_table = NULL;
   DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
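
set_table_from_new() is the publish-then-quiesce step of a resize: release-store the new table pointer, then wait out all readers before the old table can be reclaimed. A standalone sketch of that pattern (types, the poison value, and the wait function are illustrative stand-ins):

#include <atomic>
#include <cstdint>

struct DemoTable { /* bucket array, log2 size, ... */ };

std::atomic<DemoTable*> demo_table{nullptr};
DemoTable* demo_new_table = nullptr;

void demo_wait_for_all_readers() { /* ~ GlobalCounter::write_synchronize() */ }

DemoTable* demo_set_table_from_new() {
  DemoTable* old_table = demo_table.load(std::memory_order_relaxed);
  // Publish: from here on, new readers pick up the new table.
  demo_table.store(demo_new_table, std::memory_order_release);
  // Quiesce: after this, no reader can still hold the old table pointer or
  // be reaching the new table through the new-table field.
  demo_wait_for_all_readers();
  demo_new_table = nullptr;
#ifndef NDEBUG
  // Debug-only poison so stale uses fault fast, like the DEBUG_ONLY line.
  demo_new_table = (DemoTable*)(uintptr_t)0xbadbadba;
#endif
  return old_table;  // the caller can now reclaim it safely
}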