src/hotspot/share/gc/shared/oopStorage.cpp
changeset 48886 e1d09bd56d2d
parent 48816 3495d6050efe
child 49333 489f1dd40582
48885:00e159258897 48886:e1d09bd56d2d
    24 
    24 
    25 #include "precompiled.hpp"
    25 #include "precompiled.hpp"
    26 #include "gc/shared/oopStorage.inline.hpp"
    26 #include "gc/shared/oopStorage.inline.hpp"
    27 #include "gc/shared/oopStorageParState.inline.hpp"
    27 #include "gc/shared/oopStorageParState.inline.hpp"
    28 #include "logging/log.hpp"
    28 #include "logging/log.hpp"
       
    29 #include "logging/logStream.hpp"
    29 #include "memory/allocation.inline.hpp"
    30 #include "memory/allocation.inline.hpp"
       
    31 #include "memory/resourceArea.hpp"
    30 #include "runtime/atomic.hpp"
    32 #include "runtime/atomic.hpp"
    31 #include "runtime/handles.inline.hpp"
    33 #include "runtime/handles.inline.hpp"
    32 #include "runtime/mutex.hpp"
    34 #include "runtime/mutex.hpp"
    33 #include "runtime/mutexLocker.hpp"
    35 #include "runtime/mutexLocker.hpp"
    34 #include "runtime/orderAccess.inline.hpp"
    36 #include "runtime/orderAccess.inline.hpp"
   105     _get_entry(*prev_blk)._next = next_blk;
   107     _get_entry(*prev_blk)._next = next_blk;
   106   }
   108   }
   107 }
   109 }
   108 
   110 
   109 // Blocks start with an array of BitsPerWord oop entries.  That array
   111 // Blocks start with an array of BitsPerWord oop entries.  That array
   110 // is divided into conceptual BytesPerWord sections of BitsPerWord
   112 // is divided into conceptual BytesPerWord sections of BitsPerByte
   111 // entries.  Blocks are allocated aligned on section boundaries, for
   113 // entries.  Blocks are allocated aligned on section boundaries, for
   112 // the convenience of mapping from an entry to the containing block;
   114 // the convenience of mapping from an entry to the containing block;
   113 // see block_for_ptr().  Aligning on section boundary rather than on
   115 // see block_for_ptr().  Aligning on section boundary rather than on
   114 // the full _data wastes a lot less space, but makes for a bit more
   116 // the full _data wastes a lot less space, but makes for a bit more
   115 // work in block_for_ptr().
   117 // work in block_for_ptr().
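As an illustration of that mapping, here is a rough standalone sketch of the section-alignment trick (hypothetical names and layout, not the actual Block or block_for_ptr()):

#include <cstddef>
#include <cstdint>

struct FakeBlock {                        // stand-in for OopStorage::Block
  void* entries[64];                      // BitsPerWord entries on a 64-bit word
  const void* owner;                      // field checked to recognize a block start
};

static const size_t section_entries = 8;  // BitsPerByte entries per section
static const size_t section_count   = 8;  // BytesPerWord sections per block
static const size_t section_bytes   = section_entries * sizeof(void*);

// Round the entry pointer down to a section boundary, then probe each of the
// section_count possible block starts at or below it, identifying the real
// one by its owner field.  Aligning on section_bytes (64 here) rather than on
// the full entry array (512 bytes here) is what keeps allocation waste small,
// at the cost of this probe loop.
FakeBlock* block_for_entry(const void* owner, void** entry) {
  uintptr_t p = reinterpret_cast<uintptr_t>(entry) & ~(section_bytes - 1);
  p -= section_bytes * (section_count - 1);       // lowest candidate block start
  for (size_t i = 0; i < section_count; ++i, p += section_bytes) {
    FakeBlock* candidate = reinterpret_cast<FakeBlock*>(p);
    if (candidate->owner == owner) return candidate;
  }
  return NULL;                                    // not an entry of this storage
}

The real lookup additionally has to tolerate reading a candidate owner slot that lies below the true block start; the sketch above simply assumes that read is safe.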
   128   _data(),
   130   _data(),
   129   _allocated_bitmask(0),
   131   _allocated_bitmask(0),
   130   _owner(owner),
   132   _owner(owner),
   131   _memory(memory),
   133   _memory(memory),
   132   _active_entry(),
   134   _active_entry(),
   133   _allocate_entry()
   135   _allocate_entry(),
       
   136   _deferred_updates_next(NULL),
       
   137   _release_refcount(0)
   134 {
   138 {
   135   STATIC_ASSERT(_data_pos == 0);
   139   STATIC_ASSERT(_data_pos == 0);
   136   STATIC_ASSERT(section_size * section_count == ARRAY_SIZE(_data));
   140   STATIC_ASSERT(section_size * section_count == ARRAY_SIZE(_data));
   137   assert(offset_of(Block, _data) == _data_pos, "invariant");
   141   assert(offset_of(Block, _data) == _data_pos, "invariant");
   138   assert(owner != NULL, "NULL owner");
   142   assert(owner != NULL, "NULL owner");
   141 #ifdef _WINDOWS
   145 #ifdef _WINDOWS
   142 #pragma warning(pop)
   146 #pragma warning(pop)
   143 #endif
   147 #endif
   144 
   148 
   145 OopStorage::Block::~Block() {
   149 OopStorage::Block::~Block() {
       
   150   assert(_release_refcount == 0, "deleting block while releasing");
       
   151   assert(_deferred_updates_next == NULL, "deleting block with deferred update");
   146   // Clear fields used by block_for_ptr and entry validation, which
   152   // Clear fields used by block_for_ptr and entry validation, which
   147   // might help catch bugs.  Volatile to prevent dead-store elimination.
   153   // might help catch bugs.  Volatile to prevent dead-store elimination.
   148   const_cast<uintx volatile&>(_allocated_bitmask) = 0;
   154   const_cast<uintx volatile&>(_allocated_bitmask) = 0;
   149   const_cast<OopStorage* volatile&>(_owner) = NULL;
   155   const_cast<OopStorage* volatile&>(_owner) = NULL;
   150 }
   156 }
   180 
   186 
   181 uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
   187 uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
   182   return bitmask_for_index(get_index(ptr));
   188   return bitmask_for_index(get_index(ptr));
   183 }
   189 }
   184 
   190 
   185 uintx OopStorage::Block::cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value) {
   191 // A block is deletable if
   186   return Atomic::cmpxchg(new_value, &_allocated_bitmask, compare_value);
   192 // (1) It is empty.
       
   193 // (2) There is not a release() operation currently operating on it.
       
   194 // (3) It is not in the deferred updates list.
       
   195 // The order of tests is important for proper interaction between release()
       
   196 // and concurrent deletion.
       
   197 bool OopStorage::Block::is_deletable() const {
       
   198   return (OrderAccess::load_acquire(&_allocated_bitmask) == 0) &&
       
   199          (OrderAccess::load_acquire(&_release_refcount) == 0) &&
       
   200          (OrderAccess::load_acquire(&_deferred_updates_next) == NULL);
       
   201 }
       
   202 
       
   203 OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
       
   204   return _deferred_updates_next;
       
   205 }
       
   206 
       
   207 void OopStorage::Block::set_deferred_updates_next(Block* block) {
       
   208   _deferred_updates_next = block;
   187 }
   209 }
   188 
   210 
   189 bool OopStorage::Block::contains(const oop* ptr) const {
   211 bool OopStorage::Block::contains(const oop* ptr) const {
   190   const oop* base = get_pointer(0);
   212   const oop* base = get_pointer(0);
   191   return (base <= ptr) && (ptr < (base + ARRAY_SIZE(_data)));
   213   return (base <= ptr) && (ptr < (base + ARRAY_SIZE(_data)));
   201   uintx allocated = allocated_bitmask();
   223   uintx allocated = allocated_bitmask();
   202   while (true) {
   224   while (true) {
   203     assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
   225     assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
   204     unsigned index = count_trailing_zeros(~allocated);
   226     unsigned index = count_trailing_zeros(~allocated);
   205     uintx new_value = allocated | bitmask_for_index(index);
   227     uintx new_value = allocated | bitmask_for_index(index);
   206     uintx fetched = cmpxchg_allocated_bitmask(new_value, allocated);
   228     uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
   207     if (fetched == allocated) {
   229     if (fetched == allocated) {
   208       return get_pointer(index); // CAS succeeded; return entry for index.
   230       return get_pointer(index); // CAS succeeded; return entry for index.
   209     }
   231     }
   210     allocated = fetched;       // CAS failed; retry with latest value.
   232     allocated = fetched;       // CAS failed; retry with latest value.
   211   }
   233   }
   259     }
   281     }
   260   }
   282   }
   261   return NULL;
   283   return NULL;
   262 }
   284 }
   263 
   285 
   264 bool OopStorage::is_valid_block_locked_or_safepoint(const Block* check_block) const {
       
   265   assert_locked_or_safepoint(_allocate_mutex);
       
   266   // For now, simple linear search.  Do something more clever if this
       
   267   // is a performance bottleneck, particularly for allocation_status.
       
   268   for (const Block* block = _active_list.chead();
       
   269        block != NULL;
       
   270        block = _active_list.next(*block)) {
       
   271     if (check_block == block) {
       
   272       return true;
       
   273     }
       
   274   }
       
   275   return false;
       
   276 }
       
   277 
       
   278 #ifdef ASSERT
   286 #ifdef ASSERT
   279 void OopStorage::assert_at_safepoint() {
   287 void OopStorage::assert_at_safepoint() {
   280   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   288   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   281 }
   289 }
   282 #endif // ASSERT
   290 #endif // ASSERT
   289 // through dedicated fields in the blocks.  Full blocks are removed from this
   297 // through dedicated fields in the blocks.  Full blocks are removed from this
   290 // list, though they are still present in the _active_list.  Empty blocks are
   298 // list, though they are still present in the _active_list.  Empty blocks are
   291 // kept at the end of the _allocate_list, to make it easy for empty block
   299 // kept at the end of the _allocate_list, to make it easy for empty block
   292 // deletion to find them.
   300 // deletion to find them.
   293 //
   301 //
   294 // allocate(), release(), and delete_empty_blocks_concurrent() all lock the
    302 // allocate() and delete_empty_blocks_concurrent() lock the
   295 // _allocate_mutex while performing any list modifications.
   303 // _allocate_mutex while performing any list modifications.
   296 //
   304 //
   297 // allocate() and release() update a block's _allocated_bitmask using CAS
   305 // allocate() and release() update a block's _allocated_bitmask using CAS
   298 // loops.  This prevents loss of updates even though release() may perform
   306 // loops.  This prevents loss of updates even though release() performs
   299 // some updates without any locking.
   307 // its updates without any locking.
   300 //
   308 //
   301 // allocate() obtains the entry from the first block in the _allocate_list,
   309 // allocate() obtains the entry from the first block in the _allocate_list,
   302 // and updates that block's _allocated_bitmask to indicate the entry is in
   310 // and updates that block's _allocated_bitmask to indicate the entry is in
   303 // use.  If this makes the block full (all entries in use), the block is
   311 // use.  If this makes the block full (all entries in use), the block is
   304 // removed from the _allocate_list so it won't be considered by future
   312 // removed from the _allocate_list so it won't be considered by future
   305 // allocations until some entries in it are relased.
   313 // allocations until some entries in it are released.
   306 //
   314 //
   307 // release() looks up the block for the entry without locking.  Once the block
   315 // release() is performed lock-free. release() first looks up the block for
   308 // has been determined, its _allocated_bitmask needs to be updated, and its
   316 // the entry, using address alignment to find the enclosing block (thereby
   309 // position in the _allocate_list may need to be updated.  There are two
   317 // avoiding iteration over the _active_list).  Once the block has been
   310 // cases:
   318 // determined, its _allocated_bitmask needs to be updated, and its position in
       
   319 // the _allocate_list may need to be updated.  There are two cases:
   311 //
   320 //
   312 // (a) If the block is neither full nor would become empty with the release of
   321 // (a) If the block is neither full nor would become empty with the release of
   313 // the entry, only its _allocated_bitmask needs to be updated.  But if the CAS
   322 // the entry, only its _allocated_bitmask needs to be updated.  But if the CAS
   314 // update fails, the applicable case may change for the retry.
   323 // update fails, the applicable case may change for the retry.
   315 //
   324 //
   316 // (b) Otherwise, the _allocate_list will also need to be modified.  This
   325 // (b) Otherwise, the _allocate_list also needs to be modified.  This requires
   317 // requires locking the _allocate_mutex, and then attempting to CAS the
   326 // locking the _allocate_mutex.  To keep the release() operation lock-free,
   318 // _allocated_bitmask.  If the CAS fails, the applicable case may change for
   327 // rather than updating the _allocate_list itself, it instead performs a
   319 // the retry.  If the CAS succeeds, then update the _allocate_list according
   328 // lock-free push of the block onto the _deferred_updates list.  Entries on
   320 // to the the state changes.  If the block changed from full to not full, then
   329 // that list are processed by allocate() and delete_empty_blocks_XXX(), while
   321 // it needs to be added to the _allocate_list, for use in future allocations.
   330 // they already hold the necessary lock.  That processing makes the block's
   322 // If the block changed from not empty to empty, then it is moved to the end
   331 // list state consistent with its current _allocated_bitmask.  The block is
   323 // of the _allocate_list, for ease of empty block deletion processing.
   332 // added to the _allocate_list if not already present and the bitmask is not
       
    333 // full.  The block is moved to the end of the _allocate_list if the bitmask
       
   334 // is empty, for ease of empty block deletion processing.
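As a rough, self-contained sketch of that lock-free deferred-update push (using std::atomic in place of HotSpot's Atomic/OrderAccess, and illustrative names rather than the actual OopStorage types):

#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};   // plays the role of _deferred_updates_next
};

// Queue n on the deferred list unless it is already queued.  A non-null next
// link means "already on the list"; a node that points at itself marks the
// end of the list, so the link doubles as the claim flag.
void push_deferred(std::atomic<Node*>& list_head, Node* n) {
  Node* expected = nullptr;
  if (!n->next.compare_exchange_strong(expected, n)) {
    return;  // someone else queued it; their pending update covers ours too
  }
  Node* head = list_head.load();
  do {
    n->next.store((head == nullptr) ? n : head);
  } while (!list_head.compare_exchange_weak(head, n));
}

The consumer side (reduce_deferred_updates() below) pops one block at a time while holding the _allocate_mutex and clears the link, so the block can be queued again by a later release().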
   324 
   335 
   325 oop* OopStorage::allocate() {
   336 oop* OopStorage::allocate() {
   326   MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
   337   MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
       
   338   // Do some deferred update processing every time we allocate.
       
   339   // Continue processing deferred updates if _allocate_list is empty,
       
   340   // in the hope that we'll get a block from that, rather than
       
   341   // allocating a new block.
       
   342   while (reduce_deferred_updates() && (_allocate_list.head() == NULL)) {}
       
   343 
       
   344   // Use the first block in _allocate_list for the allocation.
   327   Block* block = _allocate_list.head();
   345   Block* block = _allocate_list.head();
   328   if (block == NULL) {
   346   if (block == NULL) {
   329     // No available blocks; make a new one, and add to storage.
   347     // No available blocks; make a new one, and add to storage.
   330     {
   348     {
   331       MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
   349       MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
   332       block = Block::new_block(this);
   350       block = Block::new_block(this);
   333     }
   351     }
   334     if (block != NULL) {
   352     if (block == NULL) {
       
   353       while (_allocate_list.head() == NULL) {
       
   354         if (!reduce_deferred_updates()) {
       
   355           // Failed to make new block, no other thread made a block
       
   356           // available while the mutex was released, and didn't get
       
   357           // one from a deferred update either, so return failure.
       
   358           log_info(oopstorage, ref)("%s: failed allocation", name());
       
   359           return NULL;
       
   360         }
       
   361       }
       
   362     } else {
   335       // Add new block to storage.
   363       // Add new block to storage.
   336       log_info(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block));
   364       log_info(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block));
   337 
   365 
   338       // Add to end of _allocate_list.  The mutex release allowed
   366       // Add to end of _allocate_list.  The mutex release allowed
   339       // other threads to add blocks to the _allocate_list.  We prefer
   367       // other threads to add blocks to the _allocate_list.  We prefer
   340       // to allocate from non-empty blocks, to allow empty blocks to
   368       // to allocate from non-empty blocks, to allow empty blocks to
   341       // be deleted.
   369       // be deleted.
   342       _allocate_list.push_back(*block);
   370       _allocate_list.push_back(*block);
   343       ++_empty_block_count;
       
   344       // Add to front of _active_list, and then record as the head
   371       // Add to front of _active_list, and then record as the head
   345       // block, for concurrent iteration protocol.
   372       // block, for concurrent iteration protocol.
   346       _active_list.push_front(*block);
   373       _active_list.push_front(*block);
   347       ++_block_count;
   374       ++_block_count;
   348       // Ensure all setup of block is complete before making it visible.
   375       // Ensure all setup of block is complete before making it visible.
   349       OrderAccess::release_store(&_active_head, block);
   376       OrderAccess::release_store(&_active_head, block);
   350     } else {
       
   351       log_info(oopstorage, blocks)("%s: failed new block allocation", name());
       
   352     }
   377     }
   353     block = _allocate_list.head();
   378     block = _allocate_list.head();
   354     if (block == NULL) {
       
   355       // Failed to make new block, and no other thread made a block
       
   356       // available while the mutex was released, so return failure.
       
   357       return NULL;
       
   358     }
       
   359   }
   379   }
   360   // Allocate from first block.
   380   // Allocate from first block.
   361   assert(block != NULL, "invariant");
   381   assert(block != NULL, "invariant");
   362   assert(!block->is_full(), "invariant");
   382   assert(!block->is_full(), "invariant");
   363   if (block->is_empty()) {
   383   if (block->is_empty()) {
   364     // Transitioning from empty to not empty.
   384     // Transitioning from empty to not empty.
   365     log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
   385     log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
   366     --_empty_block_count;
       
   367   }
   386   }
   368   oop* result = block->allocate();
   387   oop* result = block->allocate();
   369   assert(result != NULL, "allocation failed");
   388   assert(result != NULL, "allocation failed");
   370   assert(!block->is_empty(), "postcondition");
   389   assert(!block->is_empty(), "postcondition");
   371   Atomic::inc(&_allocation_count); // release updates outside lock.
   390   Atomic::inc(&_allocation_count); // release updates outside lock.
   382 OopStorage::Block* OopStorage::find_block_or_null(const oop* ptr) const {
   401 OopStorage::Block* OopStorage::find_block_or_null(const oop* ptr) const {
   383   assert(ptr != NULL, "precondition");
   402   assert(ptr != NULL, "precondition");
   384   return Block::block_for_ptr(this, ptr);
   403   return Block::block_for_ptr(this, ptr);
   385 }
   404 }
   386 
   405 
   387 void OopStorage::release_from_block(Block& block, uintx releasing) {
   406 static void log_release_transitions(uintx releasing,
   388   assert(releasing != 0, "invariant");
   407                                     uintx old_allocated,
   389   uintx allocated = block.allocated_bitmask();
   408                                     const OopStorage* owner,
       
   409                                     const void* block) {
       
   410   ResourceMark rm;
       
   411   Log(oopstorage, blocks) log;
       
   412   LogStream ls(log.debug());
       
   413   if (is_full_bitmask(old_allocated)) {
       
   414     ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
       
   415   }
       
   416   if (releasing == old_allocated) {
       
   417     ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
       
   418   }
       
   419 }
       
   420 
       
   421 void OopStorage::Block::release_entries(uintx releasing, Block* volatile* deferred_list) {
       
    422   assert(releasing != 0, "precondition");
       
   423   // Prevent empty block deletion when transitioning to empty.
       
   424   Atomic::inc(&_release_refcount);
       
   425 
       
   426   // Atomically update allocated bitmask.
       
   427   uintx old_allocated = _allocated_bitmask;
   390   while (true) {
   428   while (true) {
   391     assert(releasing == (allocated & releasing), "invariant");
   429     assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
   392     uintx new_value = allocated ^ releasing;
   430     uintx new_value = old_allocated ^ releasing;
   393     // CAS new_value into block's allocated bitmask, retrying with
   431     uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
   394     // updated allocated bitmask until the CAS succeeds.
   432     if (fetched == old_allocated) break; // Successful update.
   395     uintx fetched;
   433     old_allocated = fetched;             // Retry with updated bitmask.
   396     if (!is_full_bitmask(allocated) && !is_empty_bitmask(new_value)) {
   434   }
   397       fetched = block.cmpxchg_allocated_bitmask(new_value, allocated);
   435 
   398       if (fetched == allocated) return;
   436   // Now that the bitmask has been updated, if we have a state transition
   399     } else {
   437   // (updated bitmask is empty or old bitmask was full), atomically push
   400       // Need special handling if transitioning from full to not full,
   438   // this block onto the deferred updates list.  Some future call to
   401       // or from not empty to empty.  For those cases, must hold the
   439   // reduce_deferred_updates will make any needed changes related to this
   402       // _allocation_mutex when updating the allocated bitmask, to
   440   // block and _allocate_list.  This deferral avoids list updates and the
   403       // ensure the associated list manipulations will be consistent
   441   // associated locking here.
   404       // with the allocation bitmask that is visible to other threads
   442   if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
   405       // in allocate() or deleting empty blocks.
   443     // Log transitions.  Both transitions are possible in a single update.
   406       MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
   444     if (log_is_enabled(Debug, oopstorage, blocks)) {
   407       fetched = block.cmpxchg_allocated_bitmask(new_value, allocated);
   445       log_release_transitions(releasing, old_allocated, _owner, this);
   408       if (fetched == allocated) {
   446     }
   409         // CAS succeeded; handle special cases, which might no longer apply.
   447     // Attempt to claim responsibility for adding this block to the deferred
   410         if (is_full_bitmask(allocated)) {
   448     // list, by setting the link to non-NULL by self-looping.  If this fails,
   411           // Transitioning from full to not-full; add to _allocate_list.
   449     // then someone else has made such a claim and the deferred update has not
   412           log_debug(oopstorage, blocks)("%s: block not full " PTR_FORMAT, name(), p2i(&block));
   450     // yet been processed and will include our change, so we don't need to do
   413           _allocate_list.push_front(block);
   451     // anything further.
   414           assert(!block.is_full(), "invariant"); // Still not full.
   452     if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
   415         }
   453       // Successfully claimed.  Push, with self-loop for end-of-list.
   416         if (is_empty_bitmask(new_value)) {
   454       Block* head = *deferred_list;
   417           // Transitioning from not-empty to empty; move to end of
   455       while (true) {
   418           // _allocate_list, to make it a deletion candidate.
   456         _deferred_updates_next = (head == NULL) ? this : head;
   419           log_debug(oopstorage, blocks)("%s: block empty " PTR_FORMAT, name(), p2i(&block));
   457         Block* fetched = Atomic::cmpxchg(this, deferred_list, head);
   420           _allocate_list.unlink(block);
   458         if (fetched == head) break; // Successful update.
   421           _allocate_list.push_back(block);
   459         head = fetched;             // Retry with updated head.
   422           ++_empty_block_count;
       
   423           assert(block.is_empty(), "invariant"); // Still empty.
       
   424         }
       
   425         return;                 // Successful CAS and transitions handled.
       
   426       }
   460       }
   427     }
   461       log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
   428     // CAS failed; retry with latest value.
   462                                     _owner->name(), p2i(this));
   429     allocated = fetched;
   463     }
   430   }
   464   }
   431 }
   465   // Release hold on empty block deletion.
   432 
   466   Atomic::dec(&_release_refcount);
   433 #ifdef ASSERT
   467 }
   434 void OopStorage::check_release(const Block* block, const oop* ptr) const {
   468 
   435   switch (allocation_status_validating_block(block, ptr)) {
   469 // Process one available deferred update.  Returns true if one was processed.
   436   case INVALID_ENTRY:
   470 bool OopStorage::reduce_deferred_updates() {
   437     fatal("Releasing invalid entry: " PTR_FORMAT, p2i(ptr));
   471   assert_locked_or_safepoint(_allocate_mutex);
   438     break;
   472   // Atomically pop a block off the list, if any available.
   439 
   473   // No ABA issue because this is only called by one thread at a time.
   440   case UNALLOCATED_ENTRY:
    474   // The atomicity is with respect to pushes by release().
   441     fatal("Releasing unallocated entry: " PTR_FORMAT, p2i(ptr));
   475   Block* block = OrderAccess::load_acquire(&_deferred_updates);
   442     break;
   476   while (true) {
   443 
   477     if (block == NULL) return false;
   444   case ALLOCATED_ENTRY:
   478     // Try atomic pop of block from list.
   445     assert(block->contains(ptr), "invariant");
   479     Block* tail = block->deferred_updates_next();
   446     break;
   480     if (block == tail) tail = NULL; // Handle self-loop end marker.
   447 
   481     Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
   448   default:
   482     if (fetched == block) break; // Update successful.
   449     ShouldNotReachHere();
   483     block = fetched;             // Retry with updated block.
   450   }
   484   }
   451 }
   485   block->set_deferred_updates_next(NULL); // Clear tail after updating head.
   452 #endif // ASSERT
   486   // Ensure bitmask read after pop is complete, including clearing tail, for
       
   487   // ordering with release().  Without this, we may be processing a stale
       
   488   // bitmask state here while blocking a release() operation from recording
       
   489   // the deferred update needed for its bitmask change.
       
   490   OrderAccess::storeload();
       
   491   // Process popped block.
       
   492   uintx allocated = block->allocated_bitmask();
       
   493 
       
   494   // Make membership in list consistent with bitmask state.
       
   495   if ((_allocate_list.ctail() != NULL) &&
       
   496       ((_allocate_list.ctail() == block) ||
       
   497        (_allocate_list.next(*block) != NULL))) {
       
   498     // Block is in the allocate list.
       
   499     assert(!is_full_bitmask(allocated), "invariant");
       
   500   } else if (!is_full_bitmask(allocated)) {
       
   501     // Block is not in the allocate list, but now should be.
       
   502     _allocate_list.push_front(*block);
       
   503   } // Else block is full and not in list, which is correct.
       
   504 
       
   505   // Move empty block to end of list, for possible deletion.
       
   506   if (is_empty_bitmask(allocated)) {
       
   507     _allocate_list.unlink(*block);
       
   508     _allocate_list.push_back(*block);
       
   509   }
       
   510 
       
   511   log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
       
   512                                 name(), p2i(block));
       
   513   return true;              // Processed one pending update.
       
   514 }
   453 
   515 
   454 inline void check_release_entry(const oop* entry) {
   516 inline void check_release_entry(const oop* entry) {
   455   assert(entry != NULL, "Releasing NULL");
   517   assert(entry != NULL, "Releasing NULL");
   456   assert(*entry == NULL, "Releasing uncleared entry: " PTR_FORMAT, p2i(entry));
   518   assert(*entry == NULL, "Releasing uncleared entry: " PTR_FORMAT, p2i(entry));
   457 }
   519 }
   458 
   520 
   459 void OopStorage::release(const oop* ptr) {
   521 void OopStorage::release(const oop* ptr) {
   460   check_release_entry(ptr);
   522   check_release_entry(ptr);
   461   Block* block = find_block_or_null(ptr);
   523   Block* block = find_block_or_null(ptr);
   462   check_release(block, ptr);
   524   assert(block != NULL, "%s: invalid release " PTR_FORMAT, name(), p2i(ptr));
   463   log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptr));
   525   log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptr));
   464   release_from_block(*block, block->bitmask_for_entry(ptr));
   526   block->release_entries(block->bitmask_for_entry(ptr), &_deferred_updates);
   465   Atomic::dec(&_allocation_count);
   527   Atomic::dec(&_allocation_count);
   466 }
   528 }
   467 
   529 
   468 void OopStorage::release(const oop* const* ptrs, size_t size) {
   530 void OopStorage::release(const oop* const* ptrs, size_t size) {
   469   size_t i = 0;
   531   size_t i = 0;
   470   while (i < size) {
   532   while (i < size) {
   471     check_release_entry(ptrs[i]);
   533     check_release_entry(ptrs[i]);
   472     Block* block = find_block_or_null(ptrs[i]);
   534     Block* block = find_block_or_null(ptrs[i]);
   473     check_release(block, ptrs[i]);
   535     assert(block != NULL, "%s: invalid release " PTR_FORMAT, name(), p2i(ptrs[i]));
   474     log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i]));
   536     log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i]));
   475     size_t count = 0;
   537     size_t count = 0;
   476     uintx releasing = 0;
   538     uintx releasing = 0;
   477     for ( ; i < size; ++i) {
   539     for ( ; i < size; ++i) {
   478       const oop* entry = ptrs[i];
   540       const oop* entry = ptrs[i];
       
   541       check_release_entry(entry);
   479       // If entry not in block, finish block and resume outer loop with entry.
   542       // If entry not in block, finish block and resume outer loop with entry.
   480       if (!block->contains(entry)) break;
   543       if (!block->contains(entry)) break;
   481       check_release_entry(entry);
       
   482       // Add entry to releasing bitmap.
   544       // Add entry to releasing bitmap.
   483       log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
   545       log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
   484       uintx entry_bitmask = block->bitmask_for_entry(entry);
   546       uintx entry_bitmask = block->bitmask_for_entry(entry);
   485       assert((releasing & entry_bitmask) == 0,
   547       assert((releasing & entry_bitmask) == 0,
   486              "Duplicate entry: " PTR_FORMAT, p2i(entry));
   548              "Duplicate entry: " PTR_FORMAT, p2i(entry));
   487       releasing |= entry_bitmask;
   549       releasing |= entry_bitmask;
   488       ++count;
   550       ++count;
   489     }
   551     }
   490     // Release the contiguous entries that are in block.
   552     // Release the contiguous entries that are in block.
   491     release_from_block(*block, releasing);
   553     block->release_entries(releasing, &_deferred_updates);
   492     Atomic::sub(count, &_allocation_count);
   554     Atomic::sub(count, &_allocation_count);
   493   }
   555   }
   494 }
   556 }
   495 
   557 
   496 const char* dup_name(const char* name) {
   558 const char* dup_name(const char* name) {
   504                        Mutex* active_mutex) :
   566                        Mutex* active_mutex) :
   505   _name(dup_name(name)),
   567   _name(dup_name(name)),
   506   _active_list(&Block::get_active_entry),
   568   _active_list(&Block::get_active_entry),
   507   _allocate_list(&Block::get_allocate_entry),
   569   _allocate_list(&Block::get_allocate_entry),
   508   _active_head(NULL),
   570   _active_head(NULL),
       
   571   _deferred_updates(NULL),
   509   _allocate_mutex(allocate_mutex),
   572   _allocate_mutex(allocate_mutex),
   510   _active_mutex(active_mutex),
   573   _active_mutex(active_mutex),
   511   _allocation_count(0),
   574   _allocation_count(0),
   512   _block_count(0),
   575   _block_count(0),
   513   _empty_block_count(0),
       
   514   _concurrent_iteration_active(false)
   576   _concurrent_iteration_active(false)
   515 {
   577 {
   516   assert(_active_mutex->rank() < _allocate_mutex->rank(),
   578   assert(_active_mutex->rank() < _allocate_mutex->rank(),
   517          "%s: active_mutex must have lower rank than allocate_mutex", _name);
   579          "%s: active_mutex must have lower rank than allocate_mutex", _name);
   518   assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
   580   assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
   527   Block::delete_block(block);
   589   Block::delete_block(block);
   528 }
   590 }
   529 
   591 
   530 OopStorage::~OopStorage() {
   592 OopStorage::~OopStorage() {
   531   Block* block;
   593   Block* block;
       
   594   while ((block = _deferred_updates) != NULL) {
       
   595     _deferred_updates = block->deferred_updates_next();
       
   596     block->set_deferred_updates_next(NULL);
       
   597   }
   532   while ((block = _allocate_list.head()) != NULL) {
   598   while ((block = _allocate_list.head()) != NULL) {
   533     _allocate_list.unlink(*block);
   599     _allocate_list.unlink(*block);
   534   }
   600   }
   535   while ((block = _active_list.head()) != NULL) {
   601   while ((block = _active_list.head()) != NULL) {
   536     _active_list.unlink(*block);
   602     _active_list.unlink(*block);
   537     Block::delete_block(*block);
   603     Block::delete_block(*block);
   538   }
   604   }
   539   FREE_C_HEAP_ARRAY(char, _name);
   605   FREE_C_HEAP_ARRAY(char, _name);
   540 }
   606 }
   541 
   607 
   542 void OopStorage::delete_empty_blocks_safepoint(size_t retain) {
   608 void OopStorage::delete_empty_blocks_safepoint() {
   543   assert_at_safepoint();
   609   assert_at_safepoint();
       
   610   // Process any pending release updates, which may make more empty
       
   611   // blocks available for deletion.
       
   612   while (reduce_deferred_updates()) {}
   544   // Don't interfere with a concurrent iteration.
   613   // Don't interfere with a concurrent iteration.
   545   if (_concurrent_iteration_active) return;
   614   if (_concurrent_iteration_active) return;
   546   // Compute the number of blocks to remove, to minimize volatile accesses.
   615   // Delete empty (and otherwise deletable) blocks from end of _allocate_list.
   547   size_t empty_blocks = _empty_block_count;
   616   for (const Block* block = _allocate_list.ctail();
   548   if (retain < empty_blocks) {
   617        (block != NULL) && block->is_deletable();
   549     size_t remove_count = empty_blocks - retain;
   618        block = _allocate_list.ctail()) {
   550     // Update volatile counters once.
   619     _active_list.unlink(*block);
   551     _block_count -= remove_count;
   620     _allocate_list.unlink(*block);
   552     _empty_block_count -= remove_count;
   621     delete_empty_block(*block);
   553     do {
   622     --_block_count;
   554       const Block* block = _allocate_list.ctail();
   623   }
   555       assert(block != NULL, "invariant");
   624   // Update _active_head, in case current value was in deleted set.
   556       assert(block->is_empty(), "invariant");
   625   _active_head = _active_list.head();
   557       // Remove block from lists, and delete it.
   626 }
   558       _active_list.unlink(*block);
   627 
   559       _allocate_list.unlink(*block);
   628 void OopStorage::delete_empty_blocks_concurrent() {
   560       delete_empty_block(*block);
       
   561     } while (--remove_count > 0);
       
   562     // Update _active_head, in case current value was in deleted set.
       
   563     _active_head = _active_list.head();
       
   564   }
       
   565 }
       
   566 
       
   567 void OopStorage::delete_empty_blocks_concurrent(size_t retain) {
       
   568   MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
   629   MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
   569   // Other threads could be adding to the empty block count while we
   630   // Other threads could be adding to the empty block count while we
   570   // release the mutex across the block deletions.  Set an upper bound
   631   // release the mutex across the block deletions.  Set an upper bound
   571   // on how many blocks we'll try to release, so other threads can't
   632   // on how many blocks we'll try to release, so other threads can't
   572   // cause an unbounded stay in this function.
   633   // cause an unbounded stay in this function.
   573   if (_empty_block_count <= retain) return;
   634   size_t limit = _block_count;
   574   size_t limit = _empty_block_count - retain;
   635 
   575   for (size_t i = 0; (i < limit) && (retain < _empty_block_count); ++i) {
   636   for (size_t i = 0; i < limit; ++i) {
       
   637     // Additional updates might become available while we dropped the
       
   638     // lock.  But limit number processed to limit lock duration.
       
   639     reduce_deferred_updates();
       
   640 
   576     const Block* block = _allocate_list.ctail();
   641     const Block* block = _allocate_list.ctail();
   577     assert(block != NULL, "invariant");
   642     if ((block == NULL) || !block->is_deletable()) {
   578     assert(block->is_empty(), "invariant");
   643       // No block to delete, so done.  There could be more pending
       
   644       // deferred updates that could give us more work to do; deal with
       
   645       // that in some later call, to limit lock duration here.
       
   646       return;
       
   647     }
       
   648 
   579     {
   649     {
   580       MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
   650       MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
   581       // Don't interfere with a concurrent iteration.
   651       // Don't interfere with a concurrent iteration.
   582       if (_concurrent_iteration_active) return;
   652       if (_concurrent_iteration_active) return;
   583       // Remove block from _active_list, updating head if needed.
   653       // Remove block from _active_list, updating head if needed.
   587         _active_head = _active_list.head();
   657         _active_head = _active_list.head();
   588       }
   658       }
   589     }
   659     }
   590     // Remove block from _allocate_list and delete it.
   660     // Remove block from _allocate_list and delete it.
   591     _allocate_list.unlink(*block);
   661     _allocate_list.unlink(*block);
   592     --_empty_block_count;
       
   593     // Release mutex while deleting block.
   662     // Release mutex while deleting block.
   594     MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
   663     MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
   595     delete_empty_block(*block);
   664     delete_empty_block(*block);
   596   }
   665   }
   597 }
   666 }
   598 
   667 
   599 OopStorage::EntryStatus
       
   600 OopStorage::allocation_status_validating_block(const Block* block,
       
   601                                                const oop* ptr) const {
       
   602   MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
       
   603   if ((block == NULL) || !is_valid_block_locked_or_safepoint(block)) {
       
   604     return INVALID_ENTRY;
       
   605   } else if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
       
   606     return ALLOCATED_ENTRY;
       
   607   } else {
       
   608     return UNALLOCATED_ENTRY;
       
   609   }
       
   610 }
       
   611 
       
   612 OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
   668 OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
   613   return allocation_status_validating_block(find_block_or_null(ptr), ptr);
   669   const Block* block = find_block_or_null(ptr);
       
   670   if (block != NULL) {
       
   671     // Verify block is a real block.  For now, simple linear search.
       
   672     // Do something more clever if this is a performance bottleneck.
       
   673     MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
       
   674     for (const Block* check_block = _active_list.chead();
       
   675          check_block != NULL;
       
   676          check_block = _active_list.next(*check_block)) {
       
   677       if (check_block == block) {
       
   678         if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
       
   679           return ALLOCATED_ENTRY;
       
   680         } else {
       
   681           return UNALLOCATED_ENTRY;
       
   682         }
       
   683       }
       
   684     }
       
   685   }
       
   686   return INVALID_ENTRY;
   614 }
   687 }
   615 
   688 
   616 size_t OopStorage::allocation_count() const {
   689 size_t OopStorage::allocation_count() const {
   617   return _allocation_count;
   690   return _allocation_count;
   618 }
   691 }
   619 
   692 
   620 size_t OopStorage::block_count() const {
   693 size_t OopStorage::block_count() const {
   621   return _block_count;
   694   return _block_count;
   622 }
       
   623 
       
   624 size_t OopStorage::empty_block_count() const {
       
   625   return _empty_block_count;
       
   626 }
   695 }
   627 
   696 
   628 size_t OopStorage::total_memory_usage() const {
   697 size_t OopStorage::total_memory_usage() const {
   629   size_t total_size = sizeof(OopStorage);
   698   size_t total_size = sizeof(OopStorage);
   630   total_size += strlen(name()) + 1;
   699   total_size += strlen(name()) + 1;
   688 #ifndef PRODUCT
   757 #ifndef PRODUCT
   689 
   758 
   690 void OopStorage::print_on(outputStream* st) const {
   759 void OopStorage::print_on(outputStream* st) const {
   691   size_t allocations = _allocation_count;
   760   size_t allocations = _allocation_count;
   692   size_t blocks = _block_count;
   761   size_t blocks = _block_count;
   693   size_t empties = _empty_block_count;
       
   694   // Comparison is being careful about racy accesses.
       
   695   size_t used = (blocks < empties) ? 0 : (blocks - empties);
       
   696 
   762 
   697   double data_size = section_size * section_count;
   763   double data_size = section_size * section_count;
   698   double alloc_percentage = percent_of((double)allocations, used * data_size);
   764   double alloc_percentage = percent_of((double)allocations, blocks * data_size);
   699 
   765 
   700   st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), "
   766   st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), " SIZE_FORMAT " bytes",
   701             SIZE_FORMAT " empties, " SIZE_FORMAT " bytes",
   767             name(), allocations, blocks, alloc_percentage, total_memory_usage());
   702             name(), allocations, used, alloc_percentage,
       
   703             empties, total_memory_usage());
       
   704   if (_concurrent_iteration_active) {
   768   if (_concurrent_iteration_active) {
   705     st->print(", concurrent iteration active");
   769     st->print(", concurrent iteration active");
   706   }
   770   }
   707 }
   771 }
   708 
   772