@@ -412,33 +411,25 @@
 // is empty, for ease of empty block deletion processing.
 
 oop* OopStorage::allocate() {
   MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
 
-  // Note: Without this we might never perform cleanup. As it is,
-  // cleanup is only requested here, when completing a concurrent
-  // iteration, or when someone entirely else wakes up the service
-  // thread, which isn't ideal. But we can't notify in release().
-  if (reduce_deferred_updates()) {
-    notify_needs_cleanup();
-  }
-
   Block* block = block_for_allocation();
   if (block == NULL) return NULL; // Block allocation failed.
   assert(!block->is_full(), "invariant");
   if (block->is_empty()) {
     // Transitioning from empty to not empty.
-    log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
+    log_trace(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
   }
   oop* result = block->allocate();
   assert(result != NULL, "allocation failed");
   assert(!block->is_empty(), "postcondition");
   Atomic::inc(&_allocation_count); // release updates outside lock.
   if (block->is_full()) {
     // Transitioning from not full to full.
     // Remove full blocks from consideration by future allocates.
-    log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
+    log_trace(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
     _allocation_list.unlink(*block);
   }
   log_trace(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
   return result;
 }
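Side note on the retained `Atomic::inc(&_allocation_count)` line: allocate() already holds `_allocation_mutex`, but the counter still needs an atomic update because release operations adjust it without taking the lock (the "release updates outside lock" comment). A minimal standalone sketch of that pairing, using `std::atomic` instead of HotSpot's `Atomic` class; `Storage`, `allocate_entry`, and `release_entry` are illustrative names, not the real API:

```cpp
#include <atomic>
#include <cstddef>
#include <mutex>

class Storage {
  std::mutex _allocation_mutex;
  std::atomic<size_t> _allocation_count{0};
public:
  void allocate_entry() {
    std::lock_guard<std::mutex> ml(_allocation_mutex);
    // ... find a block and mark a slot allocated ...
    _allocation_count.fetch_add(1, std::memory_order_relaxed);
  }
  void release_entry() {
    // No lock: release must work in contexts where locking is forbidden,
    // so this update races with allocate_entry() and must be atomic.
    // ... mark the slot free in its block ...
    _allocation_count.fetch_sub(1, std::memory_order_relaxed);
  }
};
```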
@@ -472,30 +463,27 @@
   return true;
 }
 
 OopStorage::Block* OopStorage::block_for_allocation() {
   assert_lock_strong(_allocation_mutex);
-
   while (true) {
     // Use the first block in _allocation_list for the allocation.
     Block* block = _allocation_list.head();
     if (block != NULL) {
       return block;
     } else if (reduce_deferred_updates()) {
-      MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
-      notify_needs_cleanup();
+      // Might have added a block to the _allocation_list, so retry.
     } else if (try_add_block()) {
-      block = _allocation_list.head();
-      assert(block != NULL, "invariant");
-      return block;
-    } else if (reduce_deferred_updates()) { // Once more before failure.
-      MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
-      notify_needs_cleanup();
-    } else {
+      // Successfully added a new block to the list, so retry.
+      assert(_allocation_list.chead() != NULL, "invariant");
+    } else if (_allocation_list.chead() != NULL) {
+      // Trying to add a block failed, but some other thread added to the
+      // list while we'd dropped the lock over the new block allocation.
+    } else if (!reduce_deferred_updates()) { // Once more before failure.
       // Attempt to add a block failed, no other thread added a block,
       // and no deferred update added a block, so allocation failed.
-      log_debug(oopstorage, blocks)("%s: failed block allocation", name());
+      log_info(oopstorage, blocks)("%s: failed block allocation", name());
       return NULL;
     }
   }
 }
 
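The new `chead() != NULL` arm guards against a subtlety: `try_add_block()` drops `_allocation_mutex` around the actual block allocation, so a false return does not mean the list is still empty. A standalone sketch of that drop-lock/recheck hazard, with `std::mutex`/`std::unique_lock` standing in for HotSpot's `Mutex`/`MutexUnlocker` and `try_add_block_sketch` a hypothetical helper:

```cpp
#include <mutex>
#include <new>
#include <vector>

struct Block { /* payload elided */ };
static std::vector<Block*> allocation_list;  // stand-in for _allocation_list

// Caller holds the lock via ml. Returns false only if the block
// allocation itself failed; since the lock is dropped around the
// allocation, other threads may have pushed blocks meanwhile, which is
// why the caller must re-check the list before concluding failure.
bool try_add_block_sketch(std::unique_lock<std::mutex>& ml) {
  ml.unlock();                        // drop the lock for the slow path
  Block* block = new (std::nothrow) Block;
  ml.lock();                          // reacquire before touching the list
  if (block == nullptr) return false;
  allocation_list.push_back(block);
  return true;
}
```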
@@ -583,17 +571,19 @@
 
 static void log_release_transitions(uintx releasing,
                                     uintx old_allocated,
                                     const OopStorage* owner,
                                     const void* block) {
-  Log(oopstorage, blocks) log;
-  LogStream ls(log.debug());
-  if (is_full_bitmask(old_allocated)) {
-    ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
-  }
-  if (releasing == old_allocated) {
-    ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
+  LogTarget(Trace, oopstorage, blocks) lt;
+  if (lt.is_enabled()) {
+    LogStream ls(lt);
+    if (is_full_bitmask(old_allocated)) {
+      ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
+    }
+    if (releasing == old_allocated) {
+      ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
+    }
   }
 }
 
 void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
   assert(releasing != 0, "precondition");
@@ -616,13 +606,11 @@
   // reduce_deferred_updates will make any needed changes related to this
   // block and _allocation_list. This deferral avoids _allocation_list
   // updates and the associated locking here.
   if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
     // Log transitions. Both transitions are possible in a single update.
-    if (log_is_enabled(Debug, oopstorage, blocks)) {
-      log_release_transitions(releasing, old_allocated, _owner, this);
-    }
+    log_release_transitions(releasing, old_allocated, owner, this);
     // Attempt to claim responsibility for adding this block to the deferred
     // list, by setting the link to non-NULL by self-looping. If this fails,
     // then someone else has made such a claim and the deferred update has not
     // yet been processed and will include our change, so we don't need to do
     // anything further.
@@ -633,13 +621,20 @@
         _deferred_updates_next = (head == NULL) ? this : head;
         Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
         if (fetched == head) break; // Successful update.
         head = fetched; // Retry with updated head.
       }
-      owner->record_needs_cleanup();
-      log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
-                                    _owner->name(), p2i(this));
+      // Only request cleanup for to-empty transitions, not for from-full.
+      // There isn't any rush to process from-full transitions. Allocation
+      // will reduce deferrals before allocating new blocks, so may process
+      // some. And the service thread will drain the entire deferred list
+      // if there are any pending to-empty transitions.
+      if (releasing == old_allocated) {
+        owner->record_needs_cleanup();
+      }
+      log_trace(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
+                                    owner->name(), p2i(this));
     }
   }
   // Release hold on empty block deletion.
   Atomic::dec(&_release_refcount);
 }
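The retained cmpxchg loop is a push onto an intrusive lock-free stack, with one twist: a block first claims itself by setting its link to a self-loop, so NULL can keep meaning "not on the deferred list" and concurrent releasers can't enqueue the same block twice. A standalone sketch of the same pattern with `std::atomic` (`Node`, `deferred_head`, and `push_deferred` are illustrative names):

```cpp
#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};  // NULL means "not queued"
};

std::atomic<Node*> deferred_head{nullptr};

void push_deferred(Node* n) {
  // Claim: only the thread that swings next from NULL to non-NULL
  // (self-loop) gets to push; whoever already claimed it will cover
  // our change when the deferred update is processed.
  Node* expected = nullptr;
  if (!n->next.compare_exchange_strong(expected, n)) {
    return;  // Already claimed by someone else; nothing more to do.
  }
  Node* head = deferred_head.load();
  while (true) {
    // Self-loop for end-of-list, as in the hunk above.
    n->next.store((head == nullptr) ? n : head);
    if (deferred_head.compare_exchange_weak(head, n)) break;  // success
    // head now holds the freshly observed value; retry.
  }
}
```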
@@ -732,35 +726,24 @@
     block->release_entries(releasing, this);
     Atomic::sub(count, &_allocation_count);
   }
 }
 
-const char* dup_name(const char* name) {
-  char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
-  strcpy(dup, name);
-  return dup;
-}
-
-// Possible values for OopStorage::_needs_cleanup.
-const uint needs_cleanup_none = 0;     // No cleanup needed.
-const uint needs_cleanup_marked = 1;   // Requested, but no notification made.
-const uint needs_cleanup_notified = 2; // Requested and Service thread notified.
-
 const size_t initial_active_array_size = 8;
 
 OopStorage::OopStorage(const char* name,
                        Mutex* allocation_mutex,
                        Mutex* active_mutex) :
-  _name(dup_name(name)),
+  _name(os::strdup(name)),
   _active_array(ActiveArray::create(initial_active_array_size)),
   _allocation_list(),
   _deferred_updates(NULL),
   _allocation_mutex(allocation_mutex),
   _active_mutex(active_mutex),
   _allocation_count(0),
   _concurrent_iteration_count(0),
-  _needs_cleanup(needs_cleanup_none)
+  _needs_cleanup(false)
 {
   _active_array->increment_refcount();
   assert(_active_mutex->rank() < _allocation_mutex->rank(),
          "%s: active_mutex must have lower rank than allocation_mutex", _name);
   assert(Service_lock->rank() < _active_mutex->rank(),
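Two smaller simplifications land in this hunk: the hand-rolled `dup_name()`/`FREE_C_HEAP_ARRAY` pair gives way to `os::strdup()`/`os::free()` (the destructor side follows in the next hunk), and `_needs_cleanup` shrinks from a three-valued uint to a bool, since notification dedup now lives in the global trigger state introduced further down. For reference, the removed helper is the classic strdup idiom; a portable sketch (HotSpot's versions additionally go through its own C-heap allocation and NMT tagging):

```cpp
#include <cstdlib>
#include <cstring>

// What dup_name() amounted to, minus the HotSpot C-heap macros.
// Note: std::malloc can fail where NEW_C_HEAP_ARRAY would abort the VM,
// hence the null check here.
char* dup_name_sketch(const char* name) {
  char* dup = static_cast<char*>(std::malloc(std::strlen(name) + 1));
  if (dup != nullptr) std::strcpy(dup, name);
  return dup;
}
// Matching deallocation, replacing FREE_C_HEAP_ARRAY(char, _name):
//   std::free(dup);
```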
@@ -791,12 +774,56 @@
   for (size_t i = _active_array->block_count(); 0 < i; ) {
     block = _active_array->at(--i);
     Block::delete_block(*block);
   }
   ActiveArray::destroy(_active_array);
-  FREE_C_HEAP_ARRAY(char, _name);
+  os::free(const_cast<char*>(_name));
 }
 
-// Called by service thread to check for pending work.
-bool OopStorage::needs_delete_empty_blocks() const {
-  return Atomic::load(&_needs_cleanup) != needs_cleanup_none;
+// Managing service thread notifications.
+//
+// We don't want cleanup work to linger indefinitely, but we also don't want
+// to run the service thread too often. We're also very limited in what we
+// can do in a release operation, where cleanup work is created.
+//
+// When a release operation changes a block's state to empty, it records the
+// need for cleanup in both the associated storage object and in the global
+// request state. A safepoint cleanup task notifies the service thread when
+// there may be cleanup work for any storage object, based on the global
+// request state. But that notification is deferred if the service thread
+// has run recently, and we also avoid duplicate notifications. The service
+// thread updates the timestamp and resets the state flags on every iteration.
+
+// Global cleanup request state.
+static volatile bool needs_cleanup_requested = false;
+
+// Flag for avoiding duplicate notifications.
+static bool needs_cleanup_triggered = false;
+
+// Time after which a notification can be made.
+static jlong cleanup_trigger_permit_time = 0;
+
+// Minimum time since last service thread check before notification is
+// permitted. The value of 500ms was an arbitrary choice; frequent, but not
+// too frequent.
+const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
+
+void OopStorage::trigger_cleanup_if_needed() {
+  MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
+  if (Atomic::load(&needs_cleanup_requested) &&
+      !needs_cleanup_triggered &&
+      (os::javaTimeNanos() > cleanup_trigger_permit_time)) {
+    needs_cleanup_triggered = true;
+    ml.notify_all();
+  }
+}
+
+bool OopStorage::has_cleanup_work_and_reset() {
+  assert_lock_strong(Service_lock);
+  cleanup_trigger_permit_time =
+    os::javaTimeNanos() + cleanup_trigger_defer_period;
+  needs_cleanup_triggered = false;
+  // Set the request flag false and return its old value.
+  // Needs to be atomic to avoid dropping a concurrent request.
+  // Can't use Atomic::xchg, which may not support bool.
+  return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
 }
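The new comment block and the two functions below it implement a rate-limited, deduplicated wakeup: releasers merely set a request flag, a safepoint-time trigger converts at most one pending request into a notification, and the service thread re-arms the defer timer on every pass. A standalone sketch of that shape (names and the 500 ms period mirror the hunk; unlike HotSpot's `Atomic::xchg` of that era, `std::atomic<bool>::exchange` is fine, so no cmpxchg workaround is needed):

```cpp
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>

using Clock = std::chrono::steady_clock;

std::mutex service_lock;
std::condition_variable service_cv;
std::atomic<bool> cleanup_requested{false};   // set by releasers
bool cleanup_triggered = false;               // guarded by service_lock
Clock::time_point trigger_permit_time{};      // guarded by service_lock
const auto defer_period = std::chrono::milliseconds(500);

// Called periodically (e.g. from a safepoint cleanup task).
void trigger_cleanup_if_needed() {
  std::lock_guard<std::mutex> ml(service_lock);
  if (cleanup_requested.load() && !cleanup_triggered &&
      Clock::now() > trigger_permit_time) {
    cleanup_triggered = true;   // suppress duplicate notifications
    service_cv.notify_all();
  }
}

// Called by the service thread with service_lock held.
bool has_cleanup_work_and_reset() {
  trigger_permit_time = Clock::now() + defer_period;  // re-arm the timer
  cleanup_triggered = false;
  return cleanup_requested.exchange(false);  // consume any pending request
}
```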
@@ -803,16 +830,9 @@
 
 // Record that cleanup is needed, without notifying the Service thread.
 // Used by release(), where we can't lock even Service_lock.
 void OopStorage::record_needs_cleanup() {
-  Atomic::cmpxchg(needs_cleanup_marked, &_needs_cleanup, needs_cleanup_none);
-}
-
-// Record that cleanup is needed, and notify the Service thread.
-void OopStorage::notify_needs_cleanup() {
-  // Avoid re-notification if already notified.
-  const uint notified = needs_cleanup_notified;
-  if (Atomic::xchg(notified, &_needs_cleanup) != notified) {
-    MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
-    ml.notify_all();
-  }
+  // Set local flag first, else service thread could wake up and miss
+  // the request. This order may instead (rarely) unnecessarily notify.
+  OrderAccess::release_store(&_needs_cleanup, true);
+  OrderAccess::release_store_fence(&needs_cleanup_requested, true);
 }
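The ordering comment in this hunk is load-bearing: the per-storage flag must be visible no later than the global request flag, because the service thread reads them in the opposite order. Reversed, a request could be lost; as written, the worst case is a spurious wakeup. A standalone sketch with `std::atomic` in place of `OrderAccess` (flag names are illustrative):

```cpp
#include <atomic>

std::atomic<bool> local_needs_cleanup{false};      // per-storage flag
std::atomic<bool> global_cleanup_requested{false}; // process-wide flag

// Releaser side: local flag first, then global, with release ordering.
void record_needs_cleanup_sketch() {
  local_needs_cleanup.store(true, std::memory_order_release);
  global_cleanup_requested.store(true, std::memory_order_seq_cst);
}

// Service thread side: consume the global flag first, then check local.
// Seeing global == true synchronizes with the releaser's store, so the
// earlier local store is guaranteed to be visible here.
bool service_check_sketch() {
  if (global_cleanup_requested.exchange(false, std::memory_order_acquire)) {
    return local_needs_cleanup.load(std::memory_order_acquire);
  }
  return false;
}
```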
@@ -819,19 +839,31 @@
 
 bool OopStorage::delete_empty_blocks() {
+  // Service thread might have oopstorage work, but not for this object.
+  // Check for deferred updates even though that's not a service thread
+  // trigger; since we're here, we might as well process them.
+  if (!OrderAccess::load_acquire(&_needs_cleanup) &&
+      (OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
+    return false;
+  }
+
   MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
 
   // Clear the request before processing.
-  Atomic::store(needs_cleanup_none, &_needs_cleanup);
-  OrderAccess::fence();
+  OrderAccess::release_store_fence(&_needs_cleanup, false);
 
   // Other threads could be adding to the empty block count or the
   // deferred update list while we're working. Set an upper bound on
   // how many updates we'll process and blocks we'll try to release,
   // so other threads can't cause an unbounded stay in this function.
-  size_t limit = block_count();
-  if (limit == 0) return false; // Empty storage; nothing at all to do.
+  // We add a bit of slop because the reduce_deferred_updates clause
+  // can cause blocks to be double counted. If there are few blocks
+  // and many of them are deferred and empty, we might hit the limit
+  // and spin the caller without doing very much work. Otherwise,
+  // we don't normally hit the limit anyway, instead running out of
+  // work to do.
+  size_t limit = block_count() + 10;
 
   for (size_t i = 0; i < limit; ++i) {
     // Process deferred updates, which might make empty blocks available.
     // Continue checking once deletion starts, since additional updates
     // might become available while we're working.
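Finally, the `block_count() + 10` limit is a bounded-work guard: releasers can keep feeding the deferred list while this loop runs, so it caps iterations at a snapshot of the size plus slop and tells the caller whether it quit early with work possibly remaining. A standalone sketch of the pattern (`pending_work` and the helpers are stand-ins for the real deferred-update structures):

```cpp
#include <atomic>
#include <cstddef>

std::atomic<size_t> pending_work{0};  // grows concurrently with releases

// Perform one unit of cleanup; returns false when no work was available.
bool do_one_cleanup_step() {
  size_t n = pending_work.load();
  while (n != 0 && !pending_work.compare_exchange_weak(n, n - 1)) {
    // n is reloaded on CAS failure; retry until empty or decremented.
  }
  return n != 0;
}

// Returns true if it stopped at the limit, i.e. work may remain and the
// caller should reschedule rather than loop here indefinitely.
bool delete_empty_blocks_sketch(size_t block_count_snapshot) {
  const size_t limit = block_count_snapshot + 10;  // slop, as in the hunk
  for (size_t i = 0; i < limit; ++i) {
    if (!do_one_cleanup_step()) return false;      // ran out of work
  }
  return true;
}
```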