
uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
  return bitmask_for_index(get_index(ptr));
}

// An empty block is not yet deletable if either:
// (1) There is a release() operation currently operating on it.
// (2) It is in the deferred updates list.
// For interaction with release(), these must follow the empty check,
// and the order of these checks is important.
bool OopStorage::Block::is_safe_to_delete() const {
  assert(is_empty(), "precondition");
  OrderAccess::loadload();
  return (OrderAccess::load_acquire(&_release_refcount) == 0) &&
         (OrderAccess::load_acquire(&_deferred_updates_next) == NULL);
}
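
// Illustrative sketch (not part of OopStorage): a minimal model of the
// ordering requirement above, using std::atomic instead of OrderAccess.
// All names here are hypothetical, and the emptiness precondition (which
// the real code asserts) is folded into a checked load. The point is that
// the refcount and deferred-list checks must be ordered after the
// emptiness observation; the acquire load plays the role of loadload().
#include <atomic>

namespace deletability_sketch {

std::atomic<bool>  is_empty{false};         // models the block's empty state
std::atomic<int>   release_refcount{0};     // models _release_refcount
std::atomic<void*> deferred_next{nullptr};  // models _deferred_updates_next

bool safe_to_delete() {
  if (!is_empty.load(std::memory_order_acquire)) return false;
  // The acquire load above keeps the following loads from being
  // reordered before the emptiness check, as loadload() does.
  return release_refcount.load(std::memory_order_acquire) == 0 &&
         deferred_next.load(std::memory_order_acquire) == nullptr;
}

} // namespace deletability_sketch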

OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
  return _deferred_updates_next;
// ...

// and updates that block's _allocated_bitmask to indicate the entry is in
// use. If this makes the block full (all entries in use), the block is
// removed from the _allocation_list so it won't be considered by future
// allocations until some entries in it are released.
//
// release() is performed lock-free. (Note: This means it can't notify the
// service thread of pending cleanup work. It must be lock-free because
// it is called in all kinds of contexts where even quite low ranked locks
// may be held.) release() first looks up the block for
// the entry, using address alignment to find the enclosing block (thereby
// avoiding iteration over the _active_array). Once the block has been
// determined, its _allocated_bitmask needs to be updated, and its position in
// the _allocation_list may need to be updated. There are two cases:
//
// ...
//
// (b) Otherwise, the _allocation_list also needs to be modified. This requires
// locking the _allocation_mutex. To keep the release() operation lock-free,
// rather than updating the _allocation_list itself, it instead performs a
// lock-free push of the block onto the _deferred_updates list. Entries on
// that list are processed by allocate() and delete_empty_blocks(), while
// they already hold the necessary lock. That processing makes the block's
// list state consistent with its current _allocated_bitmask. The block is
// added to the _allocation_list if not already present and the bitmask is not
// full. The block is moved to the end of the _allocation_list if the bitmask
// is empty, for ease of empty block deletion processing.
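
// Illustrative sketch (not part of OopStorage): the address-alignment
// lookup mentioned above, in minimal form. Assumes, as a simplification,
// that each block starts on a block-size-aligned boundary and that entries
// are embedded within the block; BlockSketch and block_alignment are
// hypothetical stand-ins for the real Block layout.
#include <cstdint>

namespace lookup_sketch {

const uintptr_t block_alignment = 4096; // assumed power of two

struct BlockSketch {
  void* entries[64]; // entry slots embedded in the block
};

BlockSketch* enclosing_block(const void* entry) {
  // Clearing the low bits rounds the entry's address down to the start of
  // the aligned block containing it; no search of _active_array is needed.
  uintptr_t bits = reinterpret_cast<uintptr_t>(entry);
  return reinterpret_cast<BlockSketch*>(bits & ~(block_alignment - 1));
}

} // namespace lookup_sketch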

oop* OopStorage::allocate() {
  MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);

  // Note: Without this we might never perform cleanup. As it is,
  // cleanup is only requested here, when completing a concurrent
  // iteration, or when someone else entirely wakes up the service
  // thread, which isn't ideal. But we can't notify in release().
  if (reduce_deferred_updates()) {
    notify_needs_cleanup();
  }

  Block* block = block_for_allocation();
  if (block == NULL) return NULL; // Block allocation failed.
  assert(!block->is_full(), "invariant");
  if (block->is_empty()) {
    // Transitioning from empty to not empty.
    log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
  }
  // ...
    log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
    _allocation_list.unlink(*block);
  }
  log_trace(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
  return result;
}
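
// Illustrative sketch (not part of OopStorage): typical client pairing of
// allocate() and release(). The direct stores below are a simplification
// (real HotSpot code goes through the Access API), and `storage` is a
// hypothetical caller-provided instance.
static oop* example_create_handle(OopStorage* storage, oop obj) {
  oop* slot = storage->allocate(); // NULL if block allocation failed
  if (slot != NULL) {
    *slot = obj;                   // simplified; real code uses Access
  }
  return slot;
}

static void example_destroy_handle(OopStorage* storage, oop* slot) {
  if (slot == NULL) return;
  *slot = NULL;           // clear the slot before giving it back
  storage->release(slot); // lock-free; list fixup is deferred
}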

bool OopStorage::try_add_block() {
  assert_lock_strong(_allocation_mutex);
  Block* block;
  {
    MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
    block = Block::new_block(this);
  }
  if (block == NULL) return false;

  // Add new block to the _active_array, growing if needed.
  if (!_active_array->push(block)) {
    if (expand_active_array()) {
      guarantee(_active_array->push(block), "push failed after expansion");
    } else {
      log_debug(oopstorage, blocks)("%s: failed active array expand", name());
      Block::delete_block(*block);
      return false;
    }
  }
  // Add to end of _allocation_list. The mutex release allowed other
  // threads to add blocks to the _allocation_list. We prefer to
  // allocate from non-empty blocks, to allow empty blocks to be
  // deleted. But we don't bother notifying about the empty block
  // because we're (probably) about to allocate an entry from it.
  _allocation_list.push_back(*block);
  log_debug(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block));
  return true;
}
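
// Illustrative sketch (not part of OopStorage): the "unlock, do slow work,
// relock" pattern that try_add_block() uses above, in generic form with
// standard library types. All names are hypothetical. The essential rule
// is that anything observed before the unlock may be stale afterwards, so
// shared state must be re-examined once the lock is reacquired (which is
// why block_for_allocation() re-reads the list head below).
#include <mutex>
#include <new>
#include <vector>

namespace add_resource_sketch {

std::mutex list_mutex;           // protects resource_list
std::vector<int*> resource_list; // hypothetical shared resource list

// Called with list_mutex held via `lock`; returns with it held again.
bool try_add_resource(std::unique_lock<std::mutex>& lock) {
  lock.unlock();                      // don't allocate under the lock
  int* r = new (std::nothrow) int(0); // slow work outside the lock
  lock.lock();                        // reacquire before publishing
  if (r == nullptr) return false;
  resource_list.push_back(r);         // publish under the lock
  return true;
}

} // namespace add_resource_sketch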

OopStorage::Block* OopStorage::block_for_allocation() {
  assert_lock_strong(_allocation_mutex);

  while (true) {
    // Use the first block in _allocation_list for the allocation.
    Block* block = _allocation_list.head();
    if (block != NULL) {
      return block;
    } else if (reduce_deferred_updates()) {
      MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
      notify_needs_cleanup();
    } else if (try_add_block()) {
      block = _allocation_list.head();
      assert(block != NULL, "invariant");
      return block;
    } else if (reduce_deferred_updates()) { // Once more before failure.
      MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
      notify_needs_cleanup();
    } else {
      // Attempting to add a block failed, no other thread added a block,
      // and no deferred update added a block, so allocation failed.
      log_debug(oopstorage, blocks)("%s: failed block allocation", name());
      return NULL;
    }
  }
}

// Create a new, larger, active array with the same content as the
// current array, and then replace, relinquishing the old array.
// Return true if the array was successfully expanded, false to
// ...

  // Now that the bitmask has been updated, if we have a state transition
  // (updated bitmask is empty or old bitmask was full), atomically push
  // this block onto the deferred updates list. Some future call to
  // reduce_deferred_updates will make any needed changes related to this
  // block and _allocation_list. This deferral avoids _allocation_list
  // updates and the associated locking here.
  if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
    // Log transitions. Both transitions are possible in a single update.
    if (log_is_enabled(Debug, oopstorage, blocks)) {
      log_release_transitions(releasing, old_allocated, _owner, this);
    }
    // Attempt to claim responsibility for adding this block to the deferred
    // update list, by setting the link to non-NULL by self-looping. If this fails,
    // then someone else has made such a claim and the deferred update has not
    // yet been processed and will include our change, so we don't need to do
    // anything further.
    if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
      // Successfully claimed. Push, with self-loop for end-of-list.
      Block* head = owner->_deferred_updates;
      while (true) {
        _deferred_updates_next = (head == NULL) ? this : head;
        Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
        if (fetched == head) break; // Successful update.
        head = fetched;             // Retry with updated head.
      }
      owner->record_needs_cleanup();
      log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
                                    _owner->name(), p2i(this));
    }
  }
  // Release hold on empty block deletion.
  Atomic::dec(&_release_refcount);
}
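
// Illustrative sketch (not part of OopStorage): the claim-then-push idiom
// used above, restated with std::atomic. A node claims list membership by
// CAS-ing its own link from null to a self-loop, so a non-null link means
// "already queued" even for the tail node. Node, head, and push_once are
// hypothetical names.
#include <atomic>

namespace deferred_push_sketch {

struct Node {
  std::atomic<Node*> next{nullptr};
};

std::atomic<Node*> head{nullptr};

void push_once(Node* n) {
  Node* expected = nullptr;
  // Claim: only one thread can transition next from null to non-null.
  if (!n->next.compare_exchange_strong(expected, n)) {
    return; // already on (or being pushed onto) the list; nothing to do
  }
  Node* h = head.load();
  while (true) {
    // Self-loop marks end-of-list, so next is never null while queued.
    n->next.store((h == nullptr) ? n : h);
    if (head.compare_exchange_weak(h, n)) break; // push succeeded
    // On failure, h was refreshed with the current head; retry.
  }
}

} // namespace deferred_push_sketch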

// Process one available deferred update. Returns true if one was processed.
bool OopStorage::reduce_deferred_updates() {
  assert_lock_strong(_allocation_mutex);
  // Atomically pop a block off the list, if any available.
  // No ABA issue because this is only called by one thread at a time.
  // The atomicity is with respect to pushes by release().
  Block* block = OrderAccess::load_acquire(&_deferred_updates);
  while (true) {
    // ...
  }
  block->set_deferred_updates_next(NULL); // Clear tail after updating head.
  // Ensure bitmask read after pop is complete, including clearing tail, for
  // ordering with release(). Without this, we may be processing a stale
  // bitmask state here while blocking a release() operation from recording
  // the deferred update needed for its bitmask change.
  OrderAccess::fence();
  // Process popped block.
  uintx allocated = block->allocated_bitmask();

  // Make membership in list consistent with bitmask state.
  if ((_allocation_list.ctail() != NULL) &&
// ...
             "Duplicate entry: " PTR_FORMAT, p2i(entry));
      releasing |= entry_bitmask;
      ++count;
    }
    // Release the contiguous entries that are in block.
    block->release_entries(releasing, this);
    Atomic::sub(count, &_allocation_count);
  }
}
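
// Illustrative sketch (not part of OopStorage): the batching idea in the
// loop above. Entries that fall in the same block are OR-ed into a single
// bitmask so the block is updated with one release_entries() call instead
// of one per entry. Assumes at most 64 entries per block; names here are
// hypothetical.
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace batch_release_sketch {

uint64_t accumulate(const size_t* indices, size_t count) {
  uint64_t releasing = 0;
  for (size_t i = 0; i < count; ++i) {
    uint64_t bit = uint64_t(1) << indices[i]; // bitmask_for_index analogue
    // A duplicate index would set an already-set bit; guard against it,
    // as the real code's assert does.
    assert((releasing & bit) == 0 && "duplicate entry");
    releasing |= bit;
  }
  return releasing; // applied to the block in a single atomic update
}

} // namespace batch_release_sketch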

const char* dup_name(const char* name) {
  char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
  strcpy(dup, name);
  return dup;
}

// Possible values for OopStorage::_needs_cleanup.
const uint needs_cleanup_none = 0;     // No cleanup needed.
const uint needs_cleanup_marked = 1;   // Requested, but no notification made.
const uint needs_cleanup_notified = 2; // Requested and Service thread notified.

const size_t initial_active_array_size = 8;

OopStorage::OopStorage(const char* name,
                       Mutex* allocation_mutex,
                       Mutex* active_mutex) :
  _name(dup_name(name)),
  _active_array(ActiveArray::create(initial_active_array_size)),
  _allocation_list(),
  _deferred_updates(NULL),
  _allocation_mutex(allocation_mutex),
  _active_mutex(active_mutex),
  _allocation_count(0),
  _concurrent_iteration_count(0),
  _needs_cleanup(needs_cleanup_none)
{
  _active_array->increment_refcount();
  assert(_active_mutex->rank() < _allocation_mutex->rank(),
         "%s: active_mutex must have lower rank than allocation_mutex", _name);
  assert(Service_lock->rank() < _active_mutex->rank(),
         "%s: active_mutex must have higher rank than Service_lock", _name);
  assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: active mutex requires safepoint check", _name);
  assert(_allocation_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: allocation mutex requires safepoint check", _name);
}

// ...
  }
  ActiveArray::destroy(_active_array);
  FREE_C_HEAP_ARRAY(char, _name);
}

// Called by service thread to check for pending work.
bool OopStorage::needs_delete_empty_blocks() const {
  return Atomic::load(&_needs_cleanup) != needs_cleanup_none;
}

// Record that cleanup is needed, without notifying the Service thread.
// Used by release(), where we can't lock even Service_lock.
void OopStorage::record_needs_cleanup() {
  Atomic::cmpxchg(needs_cleanup_marked, &_needs_cleanup, needs_cleanup_none);
}

// Record that cleanup is needed, and notify the Service thread.
void OopStorage::notify_needs_cleanup() {
  // Avoid re-notification if already notified.
  const uint notified = needs_cleanup_notified;
  if (Atomic::xchg(notified, &_needs_cleanup) != notified) {
    MonitorLockerEx ml(Service_lock, Monitor::_no_safepoint_check_flag);
    ml.notify_all();
  }
}
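
// Illustrative sketch (not part of OopStorage): a model of the three-state
// protocol above, using std::atomic. record() only moves none -> marked,
// so it never erases the fact that a notification was already sent, while
// the notify path uses an exchange so only the first notifier since the
// last cleanup pays for waking the service thread. Names are hypothetical.
#include <atomic>

namespace cleanup_state_sketch {

enum State : unsigned { none = 0, marked = 1, notified = 2 };
std::atomic<unsigned> state{none};

void record() {                      // usable from lock-free release()
  unsigned expected = none;
  state.compare_exchange_strong(expected, marked); // no-op if already set
}

bool should_wake_service_thread() {  // usable where locking is allowed
  // The exchange returns the previous state; only a transition into
  // "notified" warrants an actual notification.
  return state.exchange(notified) != notified;
}

} // namespace cleanup_state_sketch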

bool OopStorage::delete_empty_blocks() {
  MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);

  // Clear the request before processing.
  Atomic::store(needs_cleanup_none, &_needs_cleanup);
  OrderAccess::fence();

  // Other threads could be adding to the empty block count or the
  // deferred update list while we're working. Set an upper bound on
  // how many updates we'll process and blocks we'll try to release,
  // so other threads can't cause an unbounded stay in this function.
  size_t limit = block_count();
  if (limit == 0) return false; // Empty storage; nothing at all to do.

  for (size_t i = 0; i < limit; ++i) {
    // Process deferred updates, which might make empty blocks available.
    // Continue checking once deletion starts, since additional updates
    // might become available while we're working.
    if (reduce_deferred_updates()) {
      // Be safepoint-polite while looping.
      MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
      ThreadBlockInVM tbiv(JavaThread::current());
    } else {
      Block* block = _allocation_list.tail();
      if ((block == NULL) || !block->is_empty()) {
        return false;
      } else if (!block->is_safe_to_delete()) {
        // Look for other work while waiting for block to be deletable.
        break;
      }

      // Try to delete the block. First, try to remove from _active_array.
      {
        MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
        // Don't interfere with an active concurrent iteration.
        // Instead, give up immediately. There is more work to do,
        // but don't re-notify, to avoid useless spinning of the
        // service thread. Instead, iteration completion notifies.
        if (_concurrent_iteration_count > 0) return true;
        _active_array->remove(block);
      }
      // Remove block from _allocation_list and delete it.
      _allocation_list.unlink(*block);
      // Be safepoint-polite while deleting and looping.
      MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
      delete_empty_block(*block);
      ThreadBlockInVM tbiv(JavaThread::current());
    }
  }
  // Exceeded work limit or can't delete last block. This will
  // cause the service thread to loop, giving other subtasks an
  // opportunity to run too. There's no need for a notification,
  // because we are part of the service thread (unless gtesting).
  record_needs_cleanup();
  return true;
}
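
// Illustrative sketch (not part of OopStorage): how a service thread pass
// might drive the two functions above. The real ServiceThread logic
// differs; this only shows the intended pairing: a cheap poll via
// needs_delete_empty_blocks(), then bounded work via delete_empty_blocks(),
// whose true result ("more work remains") leads to another pass later.
// `storages` and `count` are hypothetical.
static void example_service_pass(OopStorage** storages, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    OopStorage* storage = storages[i];
    if (storage->needs_delete_empty_blocks()) {
      storage->delete_empty_blocks(); // bounded; re-records if more remains
    }
  }
}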

OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
  const Block* block = find_block_or_null(ptr);
  if (block != NULL) {