src/hotspot/share/gc/shared/oopStorage.cpp
changeset 50300 d11e87c8cd44
parent 50298 188a87cbfac3
child 50332 d0d933d61610
@@ -38,10 +38,11 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/align.hpp"
 #include "utilities/count_trailing_zeros.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/globalCounter.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/spinYield.hpp"
 
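Note: the new globalCounter.inline.hpp include brings in HotSpot's GlobalCounter, the RCU-style mechanism the hunks below use in place of the hand-rolled ProtectActive counters. A minimal sketch of the division of labor, using only the calls that appear later in this diff (not additional code in the changeset):

  // Reader side (see obtain_active_array below): bracket the racy read in a
  // scoped critical section.
  //   {
  //     GlobalCounter::CriticalSection cs(Thread::current());
  //     ... load the shared pointer and take a reference to it ...
  //   }
  //
  // Writer side (see replace_active_array below): publish the replacement,
  // then wait for every reader that might still see the old value.
  //   OrderAccess::release_store(&_active_array, new_array);
  //   GlobalCounter::write_synchronize();
  //   ... the old value can now be relinquished ...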
@@ -499,52 +500,10 @@
   replace_active_array(new_array);
   relinquish_block_array(old_array);
   return true;
 }
 
-OopStorage::ProtectActive::ProtectActive() : _enter(0), _exit() {}
-
-// Begin read-side critical section.
-uint OopStorage::ProtectActive::read_enter() {
-  return Atomic::add(2u, &_enter);
-}
-
-// End read-side critical section.
-void OopStorage::ProtectActive::read_exit(uint enter_value) {
-  Atomic::add(2u, &_exit[enter_value & 1]);
-}
-
-// Wait until all readers that entered the critical section before
-// synchronization have exited that critical section.
-void OopStorage::ProtectActive::write_synchronize() {
-  SpinYield spinner;
-  // Determine old and new exit counters, based on bit0 of the
-  // on-entry _enter counter.
-  uint value = OrderAccess::load_acquire(&_enter);
-  volatile uint* new_ptr = &_exit[(value + 1) & 1];
-  // Atomically change the in-use exit counter to the new counter, by
-  // adding 1 to the _enter counter (flipping bit0 between 0 and 1)
-  // and initializing the new exit counter to that enter value.  Note:
-  // The new exit counter is not being used by read operations until
-  // this change succeeds.
-  uint old;
-  do {
-    old = value;
-    *new_ptr = ++value;
-    value = Atomic::cmpxchg(value, &_enter, old);
-  } while (old != value);
-  // Readers that entered the critical section before we changed the
-  // selected exit counter will use the old exit counter.  Readers
-  // entering after the change will use the new exit counter.  Wait
-  // for all the critical sections started before the change to
-  // complete, e.g. for the value of old_ptr to catch up with old.
-  volatile uint* old_ptr = &_exit[old & 1];
-  while (old != OrderAccess::load_acquire(old_ptr)) {
-    spinner.wait();
-  }
-}
-
 // Make new_array the _active_array.  Increments new_array's refcount
 // to account for the new reference.  The assignment is atomic wrto
 // obtain_active_array; once this function returns, it is safe for the
 // caller to relinquish the old array.
 void OopStorage::replace_active_array(ActiveArray* new_array) {
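For reference, a standalone model of the two-counter scheme deleted above, rewritten with std::atomic in place of HotSpot's Atomic/OrderAccess/SpinYield. This is an illustration only, assuming a single synchronizing writer as in the original; the class and member names are hypothetical.

  #include <atomic>
  #include <thread>

  class ProtectActiveModel {
    std::atomic<unsigned> _enter;
    std::atomic<unsigned> _exit[2];

  public:
    ProtectActiveModel() : _enter(0) { _exit[0] = 0; _exit[1] = 0; }

    // Readers add 2, leaving bit0 (the exit-counter selector) untouched.
    unsigned read_enter() { return _enter.fetch_add(2) + 2; }

    // Each reader bumps the exit counter that was selected when it entered.
    void read_exit(unsigned enter_value) { _exit[enter_value & 1].fetch_add(2); }

    // Flip bit0 of _enter so later readers use the other exit counter, then
    // wait for the old epoch's exit counter to catch up with its enter count.
    void write_synchronize() {
      unsigned expected = _enter.load(std::memory_order_acquire);
      std::atomic<unsigned>* new_counter = &_exit[(expected + 1) & 1];
      unsigned desired;
      do {
        desired = expected + 1;        // flipped bit0 selects the new counter
        new_counter->store(desired);   // seed it with the new epoch's base
      } while (!_enter.compare_exchange_weak(expected, desired));
      // 'expected' now holds the pre-flip value: the old epoch's final count.
      std::atomic<unsigned>* old_counter = &_exit[expected & 1];
      while (old_counter->load(std::memory_order_acquire) != expected) {
        std::this_thread::yield();     // stand-in for SpinYield
      }
    }
  };

The shared GlobalCounter facility gives the same "wait for pre-existing readers" guarantee, which is why this class-private mechanism can be deleted.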
@@ -552,11 +511,11 @@
   // Update new_array refcount to account for the new reference.
   new_array->increment_refcount();
   // Install new_array, ensuring its initialization is complete first.
   OrderAccess::release_store(&_active_array, new_array);
   // Wait for any readers that could read the old array from _active_array.
-  _protect_active.write_synchronize();
-  // All obtain critical sections that could see the old array have
-  // completed, having incremented the refcount of the old array.  The
+  GlobalCounter::write_synchronize();
+  // All obtain_active_array critical sections that could see the old array
+  // have completed, having incremented the refcount of the old array.  The
   // caller can now safely relinquish the old array.
 }
 
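A sketch of the interleaving that write_synchronize() guards against here (not code from the changeset): a reader may have loaded the old array from _active_array but not yet incremented its refcount, so the caller must not relinquish the old array until such readers have left their critical sections.

  //   Reader (obtain_active_array)           Writer (replace_active_array)
  //   enter GlobalCounter critical section
  //   load_acquire(&_active_array) -> old
  //                                           release_store(&_active_array, new)
  //                                           GlobalCounter::write_synchronize()
  //                                             ... blocks on this reader ...
  //   old->increment_refcount()
  //   exit critical section
  //                                             ... unblocks; the caller may now
  //                                             safely relinquish_block_array(old)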
@@ -563,15 +522,14 @@
 // Atomically (wrto replace_active_array) get the active array and
 // increment its refcount.  This provides safe access to the array,
 // even if an allocate operation expands and replaces the value of
 // _active_array.  The caller must relinquish the array when done
 // using it.
 OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
-  uint enter_value = _protect_active.read_enter();
+  GlobalCounter::CriticalSection cs(Thread::current());
   ActiveArray* result = OrderAccess::load_acquire(&_active_array);
   result->increment_refcount();
-  _protect_active.read_exit(enter_value);
   return result;
 }
 
 // Decrement refcount of array and destroy if refcount is zero.
 void OopStorage::relinquish_block_array(ActiveArray* array) const {
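For orientation, the caller-side protocol these two functions imply (hypothetical shape, illustration only; the real call sites are elsewhere in this file):

  //   ActiveArray* array = obtain_active_array();  // refcount held for us
  //   ... read blocks through 'array'; a concurrent expand may install a
  //   ... different _active_array, but 'array' remains valid meanwhile ...
  //   relinquish_block_array(array);               // may destroy 'array'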