src/hotspot/share/gc/shared/oopStorage.cpp
changeset 50954 f85092465b0c
parent 50513 7f166e010af4
child 51511 eb8d5aeabab3
comparison of 50953:0fad17c646c9 with 50954:f85092465b0c
    43 #include "utilities/globalDefinitions.hpp"
    44 #include "utilities/macros.hpp"
    45 #include "utilities/ostream.hpp"
    46 #include "utilities/spinYield.hpp"
    47 
-   48 OopStorage::AllocateEntry::AllocateEntry() : _prev(NULL), _next(NULL) {}
+   48 OopStorage::AllocationListEntry::AllocationListEntry() : _prev(NULL), _next(NULL) {}
    49 
-   50 OopStorage::AllocateEntry::~AllocateEntry() {
+   50 OopStorage::AllocationListEntry::~AllocationListEntry() {
    51   assert(_prev == NULL, "deleting attached block");
    52   assert(_next == NULL, "deleting attached block");
    53 }
    54 
-   55 OopStorage::AllocateList::AllocateList() : _head(NULL), _tail(NULL) {}
+   55 OopStorage::AllocationList::AllocationList() : _head(NULL), _tail(NULL) {}
    56 
-   57 OopStorage::AllocateList::~AllocateList() {
+   57 OopStorage::AllocationList::~AllocationList() {
    58   // ~OopStorage() empties its lists before destroying them.
    59   assert(_head == NULL, "deleting non-empty block list");
    60   assert(_tail == NULL, "deleting non-empty block list");
    61 }
    62 
-   63 void OopStorage::AllocateList::push_front(const Block& block) {
+   63 void OopStorage::AllocationList::push_front(const Block& block) {
    64   const Block* old = _head;
    65   if (old == NULL) {
    66     assert(_tail == NULL, "invariant");
    67     _head = _tail = &block;
    68   } else {
-   69     block.allocate_entry()._next = old;
+   69     block.allocation_list_entry()._next = old;
-   70     old->allocate_entry()._prev = &block;
+   70     old->allocation_list_entry()._prev = &block;
    71     _head = &block;
    72   }
    73 }
    74 
-   75 void OopStorage::AllocateList::push_back(const Block& block) {
+   75 void OopStorage::AllocationList::push_back(const Block& block) {
    76   const Block* old = _tail;
    77   if (old == NULL) {
    78     assert(_head == NULL, "invariant");
    79     _head = _tail = &block;
    80   } else {
-   81     old->allocate_entry()._next = &block;
+   81     old->allocation_list_entry()._next = &block;
-   82     block.allocate_entry()._prev = old;
+   82     block.allocation_list_entry()._prev = old;
    83     _tail = &block;
    84   }
    85 }
    86 
-   87 void OopStorage::AllocateList::unlink(const Block& block) {
+   87 void OopStorage::AllocationList::unlink(const Block& block) {
-   88   const AllocateEntry& block_entry = block.allocate_entry();
+   88   const AllocationListEntry& block_entry = block.allocation_list_entry();
    89   const Block* prev_blk = block_entry._prev;
    90   const Block* next_blk = block_entry._next;
    91   block_entry._prev = NULL;
    92   block_entry._next = NULL;
    93   if ((prev_blk == NULL) && (next_blk == NULL)) {
    94     assert(_head == &block, "invariant");
    95     assert(_tail == &block, "invariant");
    96     _head = _tail = NULL;
    97   } else if (prev_blk == NULL) {
    98     assert(_head == &block, "invariant");
-   99     next_blk->allocate_entry()._prev = NULL;
+   99     next_blk->allocation_list_entry()._prev = NULL;
   100     _head = next_blk;
   101   } else if (next_blk == NULL) {
   102     assert(_tail == &block, "invariant");
-  103     prev_blk->allocate_entry()._next = NULL;
+  103     prev_blk->allocation_list_entry()._next = NULL;
   104     _tail = prev_blk;
   105   } else {
-  106     next_blk->allocate_entry()._prev = prev_blk;
+  106     next_blk->allocation_list_entry()._prev = prev_blk;
-  107     prev_blk->allocate_entry()._next = next_blk;
+  107     prev_blk->allocation_list_entry()._next = next_blk;
   108   }
   109 }
   110 
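Aside: push_front/push_back/unlink above implement an intrusive doubly-linked list; the prev/next links live in each Block's embedded AllocationListEntry, so linking and unlinking never allocate memory. A minimal standalone sketch of the same pattern (hypothetical Node/IntrusiveList names; an illustration, not the changeset's code):

    #include <cassert>

    struct Node {
      Node* _prev = nullptr;   // embedded links, as in AllocationListEntry
      Node* _next = nullptr;
    };

    struct IntrusiveList {
      Node* _head = nullptr;
      Node* _tail = nullptr;

      void push_back(Node& n) {            // mirrors AllocationList::push_back
        Node* old = _tail;
        if (old == nullptr) {
          assert(_head == nullptr);
          _head = _tail = &n;
        } else {
          old->_next = &n;
          n._prev = old;
          _tail = &n;
        }
      }

      void unlink(Node& n) {               // covers all four unlink cases
        Node* prev = n._prev;
        Node* next = n._next;
        n._prev = n._next = nullptr;
        (prev != nullptr ? prev->_next : _head) = next;
        (next != nullptr ? next->_prev : _tail) = prev;
      }
    };

Because the links are embedded, a block can be unlinked in O(1) given only its own pointer, which is what the full-block removal in allocate() below relies on.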
   111 OopStorage::ActiveArray::ActiveArray(size_t size) :
   112   _size(size),
   208   _data(),
   209   _allocated_bitmask(0),
   210   _owner(owner),
   211   _memory(memory),
   212   _active_index(0),
-  213   _allocate_entry(),
+  213   _allocation_list_entry(),
   214   _deferred_updates_next(NULL),
   215   _release_refcount(0)
   216 {
   217   STATIC_ASSERT(_data_pos == 0);
   218   STATIC_ASSERT(section_size * section_count == ARRAY_SIZE(_data));
   365 }
   366 
   367 //////////////////////////////////////////////////////////////////////////////
   368 // Allocation
   369 //
-  370 // Allocation involves the _allocate_list, which contains a subset of the
+  370 // Allocation involves the _allocation_list, which contains a subset of the
   371 // blocks owned by a storage object.  This is a doubly-linked list, linked
   372 // through dedicated fields in the blocks.  Full blocks are removed from this
   373 // list, though they are still present in the _active_array.  Empty blocks are
-  374 // kept at the end of the _allocate_list, to make it easy for empty block
+  374 // kept at the end of the _allocation_list, to make it easy for empty block
   375 // deletion to find them.
   376 //
   377 // allocate() and delete_empty_blocks_concurrent() lock the
-  378 // _allocate_mutex while performing any list and array modifications.
+  378 // _allocation_mutex while performing any list and array modifications.
   379 //
   380 // allocate() and release() update a block's _allocated_bitmask using CAS
   381 // loops.  This prevents loss of updates even though release() performs
   382 // its updates without any locking.
   383 //
-  384 // allocate() obtains the entry from the first block in the _allocate_list,
+  384 // allocate() obtains the entry from the first block in the _allocation_list,
   385 // and updates that block's _allocated_bitmask to indicate the entry is in
   386 // use.  If this makes the block full (all entries in use), the block is
-  387 // removed from the _allocate_list so it won't be considered by future
+  387 // removed from the _allocation_list so it won't be considered by future
   388 // allocations until some entries in it are released.
   389 //
   390 // release() is performed lock-free.  release() first looks up the block for
   391 // the entry, using address alignment to find the enclosing block (thereby
   392 // avoiding iteration over the _active_array).  Once the block has been
   393 // determined, its _allocated_bitmask needs to be updated, and its position in
-  394 // the _allocate_list may need to be updated.  There are two cases:
+  394 // the _allocation_list may need to be updated.  There are two cases:
   395 //
   396 // (a) If the block is neither full nor would become empty with the release of
   397 // the entry, only its _allocated_bitmask needs to be updated.  But if the CAS
   398 // update fails, the applicable case may change for the retry.
   399 //
-  400 // (b) Otherwise, the _allocate_list also needs to be modified.  This requires
+  400 // (b) Otherwise, the _allocation_list also needs to be modified.  This requires
-  401 // locking the _allocate_mutex.  To keep the release() operation lock-free,
+  401 // locking the _allocation_mutex.  To keep the release() operation lock-free,
-  402 // rather than updating the _allocate_list itself, it instead performs a
+  402 // rather than updating the _allocation_list itself, it instead performs a
   403 // lock-free push of the block onto the _deferred_updates list.  Entries on
   404 // that list are processed by allocate() and delete_empty_blocks_XXX(), while
   405 // they already hold the necessary lock.  That processing makes the block's
   406 // list state consistent with its current _allocated_bitmask.  The block is
-  407 // added to the _allocate_list if not already present and the bitmask is not
+  407 // added to the _allocation_list if not already present and the bitmask is not
-  408 // full.  The block is moved to the end of the _allocated_list if the bitmask
+  408 // full.  The block is moved to the end of the _allocation_list if the bitmask
   409 // is empty, for ease of empty block deletion processing.
   410 
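To make case (b) concrete, here is a hedged, self-contained sketch of the push/pop protocol described above, using std::atomic in place of HotSpot's Atomic/OrderAccess and hypothetical names (Blk, deferred_updates, push_deferred, pop_deferred); it illustrates the technique and is not the changeset's code:

    #include <atomic>

    struct Blk {
      Blk* _deferred_next = nullptr;
    };

    std::atomic<Blk*> deferred_updates{nullptr};  // stand-in for _deferred_updates

    // Lock-free push, usable from release() without holding any mutex.
    void push_deferred(Blk* b) {
      Blk* head = deferred_updates.load(std::memory_order_relaxed);
      do {
        b->_deferred_next = head;               // link to current head
      } while (!deferred_updates.compare_exchange_weak(
                   head, b,
                   std::memory_order_release,   // publish the block's state
                   std::memory_order_relaxed));
    }

    // Pop one block; the caller holds the allocation mutex, so there is a
    // single popper at a time and the CAS only races with pushes (no ABA).
    Blk* pop_deferred() {
      Blk* b = deferred_updates.load(std::memory_order_acquire);
      while (b != nullptr &&
             !deferred_updates.compare_exchange_weak(
                 b, b->_deferred_next, std::memory_order_acquire)) {
      }
      if (b != nullptr) b->_deferred_next = nullptr;
      return b;
    }

The single-consumer discipline (pops happen only under the allocation mutex) is what makes the simple CAS pop safe here; the real code additionally guards against pushing a block twice and re-reads the bitmask after the pop, which the sketch omits.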
   411 oop* OopStorage::allocate() {
-  412   MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+  412   MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
   413   // Do some deferred update processing every time we allocate.
-  414   // Continue processing deferred updates if _allocate_list is empty,
+  414   // Continue processing deferred updates if _allocation_list is empty,
   415   // in the hope that we'll get a block from that, rather than
   416   // allocating a new block.
-  417   while (reduce_deferred_updates() && (_allocate_list.head() == NULL)) {}
+  417   while (reduce_deferred_updates() && (_allocation_list.head() == NULL)) {}
   418 
-  419   // Use the first block in _allocate_list for the allocation.
+  419   // Use the first block in _allocation_list for the allocation.
-  420   Block* block = _allocate_list.head();
+  420   Block* block = _allocation_list.head();
   421   if (block == NULL) {
   422     // No available blocks; make a new one, and add to storage.
   423     {
-  424       MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+  424       MutexUnlockerEx mul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
   425       block = Block::new_block(this);
   426     }
   427     if (block == NULL) {
-  428       while (_allocate_list.head() == NULL) {
+  428       while (_allocation_list.head() == NULL) {
   429         if (!reduce_deferred_updates()) {
   430           // Failed to make new block, no other thread made a block
   431           // available while the mutex was released, and didn't get
   432           // one from a deferred update either, so return failure.
   433           log_info(oopstorage, ref)("%s: failed block allocation", name());
   446           log_info(oopstorage, blocks)("%s: failed active array expand", name());
   447           Block::delete_block(*block);
   448           return NULL;
   449         }
   450       }
-  451       // Add to end of _allocate_list.  The mutex release allowed
+  451       // Add to end of _allocation_list.  The mutex release allowed
-  452       // other threads to add blocks to the _allocate_list.  We prefer
+  452       // other threads to add blocks to the _allocation_list.  We prefer
   453       // to allocate from non-empty blocks, to allow empty blocks to
   454       // be deleted.
-  455       _allocate_list.push_back(*block);
+  455       _allocation_list.push_back(*block);
   456     }
-  457     block = _allocate_list.head();
+  457     block = _allocation_list.head();
   458   }
   459   // Allocate from first block.
   460   assert(block != NULL, "invariant");
   461   assert(!block->is_full(), "invariant");
   462   if (block->is_empty()) {
   469   Atomic::inc(&_allocation_count); // release updates outside lock.
   470   if (block->is_full()) {
   471     // Transitioning from not full to full.
   472     // Remove full blocks from consideration by future allocates.
   473     log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
-  474     _allocate_list.unlink(*block);
+  474     _allocation_list.unlink(*block);
   475   }
   476   log_info(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
   477   return result;
   478 }
   479 
   480 // Create a new, larger, active array with the same content as the
   481 // current array, and then replace, relinquishing the old array.
   482 // Return true if the array was successfully expanded, false to
   483 // indicate allocation failure.
   484 bool OopStorage::expand_active_array() {
-  485   assert_lock_strong(_allocate_mutex);
+  485   assert_lock_strong(_allocation_mutex);
   486   ActiveArray* old_array = _active_array;
   487   size_t new_size = 2 * old_array->size();
   488   log_info(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT,
   489                                name(), new_size);
   490   ActiveArray* new_array = ActiveArray::create(new_size, AllocFailStrategy::RETURN_NULL);
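
Only the head of expand_active_array() appears in this hunk; the elided remainder copies the old array's contents into the new one, publishes the new array, and relinquishes the old one. A simplified sketch of that copy-and-replace shape, under the assumption of a single writer holding _allocation_mutex (hypothetical Arr type and expand() helper; the real version also hands the old array's reference count off to concurrent iterators before freeing it):

    #include <cstddef>
    #include <new>

    struct Arr {
      size_t _size  = 0;        // capacity
      size_t _count = 0;        // slots in use
      void** _slots = nullptr;
    };

    // Returns false on allocation failure, as expand_active_array() does.
    bool expand(Arr*& active) {
      Arr* old_arr = active;
      size_t new_size = 2 * old_arr->_size;          // double the capacity
      Arr* new_arr = new (std::nothrow) Arr;
      if (new_arr == nullptr) return false;
      new_arr->_slots = new (std::nothrow) void*[new_size];
      if (new_arr->_slots == nullptr) { delete new_arr; return false; }
      new_arr->_size = new_size;
      for (size_t i = 0; i < old_arr->_count; ++i) {
        new_arr->_slots[i] = old_arr->_slots[i];     // same content
      }
      new_arr->_count = old_arr->_count;
      active = new_arr;                              // replace
      delete[] old_arr->_slots;                      // relinquish old array
      delete old_arr;                                // (real code defers this
      return true;                                   //  until readers drain)
    }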
   630 
   631   // Now that the bitmask has been updated, if we have a state transition
   632   // (updated bitmask is empty or old bitmask was full), atomically push
   633   // this block onto the deferred updates list.  Some future call to
   634   // reduce_deferred_updates will make any needed changes related to this
-  635   // block and _allocate_list.  This deferral avoids list updates and the
+  635   // block and _allocation_list.  This deferral avoids list updates and the
   636   // associated locking here.
   637   if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
   638     // Log transitions.  Both transitions are possible in a single update.
   639     if (log_is_enabled(Debug, oopstorage, blocks)) {
   640       log_release_transitions(releasing, old_allocated, _owner, this);
   661   Atomic::dec(&_release_refcount);
   662 }
   663 
   664 // Process one available deferred update.  Returns true if one was processed.
   665 bool OopStorage::reduce_deferred_updates() {
-  666   assert_locked_or_safepoint(_allocate_mutex);
+  666   assert_locked_or_safepoint(_allocation_mutex);
   667   // Atomically pop a block off the list, if any available.
   668   // No ABA issue because this is only called by one thread at a time.
   669   // The atomicity is w.r.t. pushes by release().
   670   Block* block = OrderAccess::load_acquire(&_deferred_updates);
   671   while (true) {
   685   OrderAccess::storeload();
   686   // Process popped block.
   687   uintx allocated = block->allocated_bitmask();
   688 
   689   // Make membership in list consistent with bitmask state.
-  690   if ((_allocate_list.ctail() != NULL) &&
+  690   if ((_allocation_list.ctail() != NULL) &&
-  691       ((_allocate_list.ctail() == block) ||
+  691       ((_allocation_list.ctail() == block) ||
-  692        (_allocate_list.next(*block) != NULL))) {
+  692        (_allocation_list.next(*block) != NULL))) {
-  693     // Block is in the allocate list.
+  693     // Block is in the _allocation_list.
   694     assert(!is_full_bitmask(allocated), "invariant");
   695   } else if (!is_full_bitmask(allocated)) {
-  696     // Block is not in the allocate list, but now should be.
+  696     // Block is not in the _allocation_list, but now should be.
-  697     _allocate_list.push_front(*block);
+  697     _allocation_list.push_front(*block);
   698   } // Else block is full and not in list, which is correct.
   699 
   700   // Move empty block to end of list, for possible deletion.
   701   if (is_empty_bitmask(allocated)) {
-  702     _allocate_list.unlink(*block);
+  702     _allocation_list.unlink(*block);
-  703     _allocate_list.push_back(*block);
+  703     _allocation_list.push_back(*block);
   704   }
   705 
   706   log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
   707                                 name(), p2i(block));
   708   return true;              // Processed one pending update.
   757 }
   758 
   759 const size_t initial_active_array_size = 8;
   760 
   761 OopStorage::OopStorage(const char* name,
-  762                        Mutex* allocate_mutex,
+  762                        Mutex* allocation_mutex,
   763                        Mutex* active_mutex) :
   764   _name(dup_name(name)),
   765   _active_array(ActiveArray::create(initial_active_array_size)),
-  766   _allocate_list(),
+  766   _allocation_list(),
   767   _deferred_updates(NULL),
-  768   _allocate_mutex(allocate_mutex),
+  768   _allocation_mutex(allocation_mutex),
   769   _active_mutex(active_mutex),
   770   _allocation_count(0),
   771   _concurrent_iteration_active(false)
   772 {
   773   _active_array->increment_refcount();
-  774   assert(_active_mutex->rank() < _allocate_mutex->rank(),
+  774   assert(_active_mutex->rank() < _allocation_mutex->rank(),
-  775          "%s: active_mutex must have lower rank than allocate_mutex", _name);
+  775          "%s: active_mutex must have lower rank than allocation_mutex", _name);
   776   assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
   777          "%s: active mutex requires safepoint check", _name);
-  778   assert(_allocate_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
+  778   assert(_allocation_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
-  779          "%s: allocate mutex requires safepoint check", _name);
+  779          "%s: allocation mutex requires safepoint check", _name);
   780 }
   781 
   782 void OopStorage::delete_empty_block(const Block& block) {
   783   assert(block.is_empty(), "discarding non-empty block");
   784   log_info(oopstorage, blocks)("%s: delete empty block " PTR_FORMAT, name(), p2i(&block));
   789   Block* block;
   790   while ((block = _deferred_updates) != NULL) {
   791     _deferred_updates = block->deferred_updates_next();
   792     block->set_deferred_updates_next(NULL);
   793   }
-  794   while ((block = _allocate_list.head()) != NULL) {
+  794   while ((block = _allocation_list.head()) != NULL) {
-  795     _allocate_list.unlink(*block);
+  795     _allocation_list.unlink(*block);
   796   }
   797   bool unreferenced = _active_array->decrement_refcount();
   798   assert(unreferenced, "deleting storage while _active_array is referenced");
   799   for (size_t i = _active_array->block_count(); 0 < i; ) {
   800     block = _active_array->at(--i);
   809   // Process any pending release updates, which may make more empty
   810   // blocks available for deletion.
   811   while (reduce_deferred_updates()) {}
   812   // Don't interfere with a concurrent iteration.
   813   if (_concurrent_iteration_active) return;
-  814   // Delete empty (and otherwise deletable) blocks from end of _allocate_list.
+  814   // Delete empty (and otherwise deletable) blocks from end of _allocation_list.
-  815   for (Block* block = _allocate_list.tail();
+  815   for (Block* block = _allocation_list.tail();
   816        (block != NULL) && block->is_deletable();
-  817        block = _allocate_list.tail()) {
+  817        block = _allocation_list.tail()) {
   818     _active_array->remove(block);
-  819     _allocate_list.unlink(*block);
+  819     _allocation_list.unlink(*block);
   820     delete_empty_block(*block);
   821   }
   822 }
   823 
   824 void OopStorage::delete_empty_blocks_concurrent() {
-  825   MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+  825   MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
   826   // Other threads could be adding to the empty block count while we
   827   // release the mutex across the block deletions.  Set an upper bound
   828   // on how many blocks we'll try to release, so other threads can't
   829   // cause an unbounded stay in this function.
   830   size_t limit = block_count();
   832   for (size_t i = 0; i < limit; ++i) {
   833     // Additional updates might become available while we dropped the
   834     // lock.  But limit number processed to limit lock duration.
   835     reduce_deferred_updates();
   836 
-  837     Block* block = _allocate_list.tail();
+  837     Block* block = _allocation_list.tail();
   838     if ((block == NULL) || !block->is_deletable()) {
   839       // No block to delete, so done.  There could be more pending
   840       // deferred updates that could give us more work to do; deal with
   841       // that in some later call, to limit lock duration here.
   842       return;
   846       MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
   847       // Don't interfere with a concurrent iteration.
   848       if (_concurrent_iteration_active) return;
   849       _active_array->remove(block);
   850     }
-  851     // Remove block from _allocate_list and delete it.
+  851     // Remove block from _allocation_list and delete it.
-  852     _allocate_list.unlink(*block);
+  852     _allocation_list.unlink(*block);
   853     // Release mutex while deleting block.
-  854     MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+  854     MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
   855     delete_empty_block(*block);
   856   }
   857 }
   858 
   859 OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
   860   const Block* block = find_block_or_null(ptr);
   861   if (block != NULL) {
   862     // Prevent block deletion and _active_array modification.
-  863     MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+  863     MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
   864     // Block could be a false positive, so get index carefully.
   865     size_t index = Block::active_index_safe(block);
   866     if ((index < _active_array->block_count()) &&
   867         (block == _active_array->at(index)) &&
   868         block->contains(ptr)) {