305   uintx allocated = allocated_bitmask();
306   while (true) {
307     assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
308     unsigned index = count_trailing_zeros(~allocated);
309     uintx new_value = allocated | bitmask_for_index(index);
310 -   uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
310 +   uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, allocated, new_value);
311     if (fetched == allocated) {
312       return get_pointer(index); // CAS succeeded; return entry for index.
313     }
314     allocated = fetched; // CAS failed; retry with latest value.
315   }
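For reference, here is a minimal standalone sketch of the same claim loop. It uses std::atomic as a stand-in for HotSpot's Atomic::cmpxchg (with the new argument order: destination, compare value, exchange value; returning the value previously at the destination), and GCC/Clang's __builtin_ctzll in place of count_trailing_zeros. The names BitmapBlock and claim_lowest_free are illustrative, not HotSpot's.

#include <atomic>
#include <cassert>
#include <cstdint>

typedef std::uintptr_t uintx;

// Stand-in for Atomic::cmpxchg(dest, compare, exchange): returns the value
// observed at *dest (equal to 'compare' when the exchange succeeded).
static uintx cmpxchg(std::atomic<uintx>* dest, uintx compare, uintx exchange) {
  dest->compare_exchange_strong(compare, exchange);
  return compare;  // Updated to the observed value on failure.
}

struct BitmapBlock {
  std::atomic<uintx> _allocated_bitmask{0};

  // Claim the lowest clear bit and return its index; the caller must have
  // checked that the block is not already full.
  unsigned claim_lowest_free() {
    uintx allocated = _allocated_bitmask.load();
    while (true) {
      assert(~allocated != 0 && "attempt to allocate from full block");
      unsigned index = __builtin_ctzll(~allocated);        // count_trailing_zeros
      uintx new_value = allocated | (uintx(1) << index);   // bitmask_for_index
      uintx fetched = cmpxchg(&_allocated_bitmask, allocated, new_value);
      if (fetched == allocated) return index;  // CAS succeeded.
      allocated = fetched;                     // CAS failed; retry with latest value.
    }
  }
};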
593   // Atomically update allocated bitmask.
594   uintx old_allocated = _allocated_bitmask;
595   while (true) {
596     assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
597     uintx new_value = old_allocated ^ releasing;
598 -   uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
598 +   uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
599     if (fetched == old_allocated) break; // Successful update.
600     old_allocated = fetched; // Retry with updated bitmask.
601   }
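The release side has the same compare-exchange retry shape. One detail worth noting: because the assert guarantees that every bit in releasing is currently set in the bitmask, the XOR at line 597 clears exactly those bits, i.e. it is equivalent to old_allocated & ~releasing. A sketch continuing the BitmapBlock example above (illustrative names only):

// Continues the BitmapBlock sketch; 'cmpxchg' is the stand-in defined there.
void release_entries(BitmapBlock* block, uintx releasing) {
  uintx old_allocated = block->_allocated_bitmask.load();
  while (true) {
    assert((releasing & ~old_allocated) == 0 && "releasing unallocated entries");
    uintx new_value = old_allocated ^ releasing;  // Clears the released bits.
    uintx fetched = cmpxchg(&block->_allocated_bitmask, old_allocated, new_value);
    if (fetched == old_allocated) break;  // Successful update.
    old_allocated = fetched;              // Retry with the latest bitmask.
  }
}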
602
603   // Now that the bitmask has been updated, if we have a state transition
612   // Attempt to claim responsibility for adding this block to the deferred
613   // list, by setting the link to non-NULL by self-looping. If this fails,
614   // then someone else has made such a claim and the deferred update has not
615   // yet been processed and will include our change, so we don't need to do
616   // anything further.
617 - if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
617 + if (Atomic::replace_if_null(&_deferred_updates_next, this)) {
618     // Successfully claimed. Push, with self-loop for end-of-list.
619     Block* head = owner->_deferred_updates;
620     while (true) {
621       _deferred_updates_next = (head == NULL) ? this : head;
622 -     Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
622 +     Block* fetched = Atomic::cmpxchg(&owner->_deferred_updates, head, this);
623       if (fetched == head) break; // Successful update.
624       head = fetched; // Retry with updated head.
625     }
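As a standalone illustration of this claim-then-push idiom, here is a sketch using std::atomic<T*> in place of Atomic::replace_if_null and Atomic::cmpxchg. A block whose link is NULL is not on the deferred list and is unclaimed; the last element of the list points to itself, so a block that is on the list never has a NULL link. Type and member names here are illustrative, not HotSpot's.

#include <atomic>
#include <cstddef>

struct DeferredBlock {
  std::atomic<DeferredBlock*> deferred_next{nullptr};
};

struct DeferredList {
  std::atomic<DeferredBlock*> head{nullptr};

  // Returns true if this call claimed the block and pushed it; false if
  // another thread already holds the claim, in which case its pending push
  // also covers our update.
  bool push_if_unclaimed(DeferredBlock* block) {
    DeferredBlock* expected = nullptr;
    // replace_if_null analogue: claim by installing a self-loop over NULL.
    if (!block->deferred_next.compare_exchange_strong(expected, block)) {
      return false;  // Already claimed by someone else.
    }
    DeferredBlock* h = head.load();
    while (true) {
      // Self-loop marks end-of-list when the list was previously empty.
      block->deferred_next.store((h == nullptr) ? block : h);
      if (head.compare_exchange_weak(h, block)) return true;  // Push succeeded.
      // On failure compare_exchange_weak reloads 'h'; retry with the new head.
    }
  }
};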
626     // Only request cleanup for to-empty transitions, not for from-full.
627     // There isn't any rush to process from-full transitions. Allocation
649   while (true) {
650     if (block == NULL) return false;
651     // Try atomic pop of block from list.
652     Block* tail = block->deferred_updates_next();
653     if (block == tail) tail = NULL; // Handle self-loop end marker.
654 -   Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
654 +   Block* fetched = Atomic::cmpxchg(&_deferred_updates, block, tail);
655     if (fetched == block) break; // Update successful.
656     block = fetched; // Retry with updated block.
657   }
658   block->set_deferred_updates_next(NULL); // Clear tail after updating head.
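The matching pop, sketched against the DeferredList example above (illustrative names): a self-loop in the head's link means the head was the last element, so the new head becomes NULL rather than following the loop.

// Continues the DeferredList sketch above.
DeferredBlock* pop_deferred(DeferredList* list) {
  DeferredBlock* block = list->head.load();
  while (true) {
    if (block == nullptr) return nullptr;  // Nothing deferred.
    DeferredBlock* tail = block->deferred_next.load();
    if (tail == block) tail = nullptr;     // Handle self-loop end marker.
    if (list->head.compare_exchange_weak(block, tail)) break;  // Pop succeeded.
    // On failure compare_exchange_weak reloads 'block'; retry with the new head.
  }
  block->deferred_next.store(nullptr);  // Clear the claim after updating head.
  return block;
}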
659   // Ensure bitmask read after pop is complete, including clearing tail, for
823     os::javaTimeNanos() + cleanup_trigger_defer_period;
824   needs_cleanup_triggered = false;
825   // Set the request flag false and return its old value.
826   // Needs to be atomic to avoid dropping a concurrent request.
827   // Can't use Atomic::xchg, which may not support bool.
828 - return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
828 + return Atomic::cmpxchg(&needs_cleanup_requested, true, false);
829 }
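The flag clear at line 828 is an atomic test-and-clear: since Atomic::xchg may not support bool, the code uses cmpxchg(&needs_cleanup_requested, true, false), which returns the old value whether or not the exchange happened. A sketch of the same idiom with std::atomic<bool> (which does provide exchange(); the compare-exchange form below simply mirrors the patched code, and the names are illustrative):

#include <atomic>

static std::atomic<bool> needs_cleanup_requested(false);

// Atomically clear the request flag and return its previous value.
bool test_and_clear_cleanup_request() {
  bool expected = true;
  // Succeeds only if the flag was true; on failure 'expected' is set to the
  // observed value (false). Either way 'expected' ends up holding the old value.
  needs_cleanup_requested.compare_exchange_strong(expected, false);
  return expected;
}

With std::atomic the whole function body could also be written as return needs_cleanup_requested.exchange(false);.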
830
831 // Record that cleanup is needed, without notifying the Service thread.
832 // Used by release(), where we can't lock even Service_lock.
833 void OopStorage::record_needs_cleanup() {