size_t OopStorage::ActiveArray::block_count() const {
  return _block_count;
}

size_t OopStorage::ActiveArray::block_count_acquire() const {
  return Atomic::load_acquire(&_block_count);
}

void OopStorage::ActiveArray::increment_refcount() const {
  int new_value = Atomic::add(1, &_refcount);
  assert(new_value >= 1, "negative refcount %d", new_value - 1);
}
// ...
bool OopStorage::ActiveArray::push(Block* block) {
  size_t index = _block_count;
  if (index < _size) {
    block->set_active_index(index);
    *block_ptr(index) = block;
    // Use a release_store to ensure all the setup is complete before
    // making the block visible.
    Atomic::release_store(&_block_count, index + 1);
    return true;
  } else {
    return false;
  }
}
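// ---------------------------------------------------------------------------
// Editor's sketch, not part of oopStorage.cpp: a stand-alone model of the
// publication protocol above, using std::atomic in place of HotSpot's Atomic
// wrapper.  All names here are hypothetical.  The single writer initializes
// the slot and only then release-stores the new count; a reader that
// acquire-loads the count therefore sees fully initialized entries at every
// index below it.
#include <atomic>
#include <cstddef>

struct PublishingArray {
  static const std::size_t SIZE = 16;
  int _entries[SIZE];
  std::atomic<std::size_t> _count{0};

  bool push(int value) {                  // single writer
    std::size_t index = _count.load(std::memory_order_relaxed);
    if (index >= SIZE) return false;
    _entries[index] = value;              // complete the setup first...
    _count.store(index + 1, std::memory_order_release);  // ...then publish
    return true;
  }

  std::size_t count_acquire() const {     // readers use only entries < count
    return _count.load(std::memory_order_acquire);
  }
};
// ---------------------------------------------------------------------------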
// ...
// For interaction with release(), these must follow the empty check,
// and the order of these checks is important.
bool OopStorage::Block::is_safe_to_delete() const {
  assert(is_empty(), "precondition");
  OrderAccess::loadload();
  return (Atomic::load_acquire(&_release_refcount) == 0) &&
         (Atomic::load_acquire(&_deferred_updates_next) == NULL);
}

OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
  return _deferred_updates_next;
}
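// ---------------------------------------------------------------------------
// Editor's sketch, not part of oopStorage.cpp: a stand-alone model, with
// hypothetical names, of why is_safe_to_delete() orders its checks.  A block
// observed empty can still have a release() in flight that is about to touch
// it (for example, to queue a deferred update), so the deleter must also see
// the refcount drained; the acquire fence, the analogue of
// OrderAccess::loadload(), keeps the emptiness check from being reordered
// past the refcount check.
#include <atomic>

struct BlockModel {
  std::atomic<unsigned> _allocated_bitmask{0};   // non-zero => not empty
  std::atomic<int>      _release_refcount{0};    // in-flight release() calls

  bool is_empty() const {
    return _allocated_bitmask.load(std::memory_order_relaxed) == 0;
  }

  bool is_safe_to_delete() const {
    if (!is_empty()) return false;                        // must come first
    std::atomic_thread_fence(std::memory_order_acquire);  // ~ loadload()
    return _release_refcount.load(std::memory_order_acquire) == 0;
  }
};
// ---------------------------------------------------------------------------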
// ...
void OopStorage::replace_active_array(ActiveArray* new_array) {
  // Caller has the old array that is the current value of _active_array.
  // Update new_array refcount to account for the new reference.
  new_array->increment_refcount();
  // Install new_array, ensuring its initialization is complete first.
  Atomic::release_store(&_active_array, new_array);
  // Wait for any readers that could read the old array from _active_array.
  // Can't use GlobalCounter here, because this is called from allocate(),
  // which may be called in the scope of a GlobalCounter critical section
  // when inserting a StringTable entry.
  _protect_active.synchronize();
}
// ...
// Obtain the current active array, with its refcount incremented to keep
// it usable even if an allocate operation expands and replaces the value
// of _active_array. The caller must relinquish the array when done
// using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
  SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
  ActiveArray* result = Atomic::load_acquire(&_active_array);
  result->increment_refcount();
  return result;
}

// Decrement refcount of array and destroy if refcount is zero.
// ...
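// ---------------------------------------------------------------------------
// Editor's sketch, not part of oopStorage.cpp: a stand-alone model of the
// obtain/replace/relinquish protocol, with hypothetical names, using
// std::shared_mutex as a stand-in for SingleWriterSynchronizer.  Readers hold
// a critical section only long enough to bump the refcount; the writer
// publishes the new array with a release store, then waits out any reader
// that might still be dereferencing the old pointer before relinquishing it.
#include <atomic>
#include <mutex>
#include <shared_mutex>

struct RefArray {
  std::atomic<int> _refcount{0};
  void increment_refcount() {
    _refcount.fetch_add(1, std::memory_order_relaxed);
  }
  bool decrement_refcount() {  // true when the last reference is dropped
    return _refcount.fetch_sub(1, std::memory_order_acq_rel) == 1;
  }
};

std::atomic<RefArray*> g_active{nullptr};
std::shared_mutex g_protect;

RefArray* obtain_active() {
  std::shared_lock<std::shared_mutex> cs(g_protect);  // reader critical section
  RefArray* a = g_active.load(std::memory_order_acquire);
  a->increment_refcount();  // safe: the writer's wait below has not finished
  return a;
}

void replace_active(RefArray* new_array) {
  new_array->increment_refcount();  // the reference held by g_active itself
  RefArray* old = g_active.exchange(new_array, std::memory_order_release);
  { std::unique_lock<std::shared_mutex> wait(g_protect); }  // ~ synchronize()
  // Every reader that could have loaded 'old' has incremented its refcount
  // by now, so dropping g_active's reference to it is safe.
  if (old != nullptr && old->decrement_refcount()) delete old;
}
// ---------------------------------------------------------------------------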
bool OopStorage::reduce_deferred_updates() {
  assert_lock_strong(_allocation_mutex);
  // Atomically pop a block off the list, if any available.
  // No ABA issue because this is only called by one thread at a time.
  // The atomicity is with respect to pushes by release().
  Block* block = Atomic::load_acquire(&_deferred_updates);
  while (true) {
    if (block == NULL) return false;
    // Try atomic pop of block from list.
    Block* tail = block->deferred_updates_next();
    if (block == tail) tail = NULL; // Handle self-loop end marker.
    // ...
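// ---------------------------------------------------------------------------
// Editor's sketch, not part of oopStorage.cpp: a stand-alone model, with
// hypothetical names, of the pop that reduce_deferred_updates() performs.
// The tail of the list is marked by a node pointing to itself rather than by
// NULL, so "not on any list" (next == null) stays distinguishable from "last
// on the list".  With a single popper, the compare-exchange only races with
// pushes, so there is no ABA hazard on the pop side.
#include <atomic>

struct Node {
  std::atomic<Node*> _next{nullptr};
};

std::atomic<Node*> g_deferred{nullptr};

void push_deferred(Node* n) {  // many producers (the release() side)
  Node* head = g_deferred.load(std::memory_order_relaxed);
  do {
    n->_next.store(head == nullptr ? n : head,  // self-loop marks the end
                   std::memory_order_relaxed);
  } while (!g_deferred.compare_exchange_weak(head, n,
                                             std::memory_order_release,
                                             std::memory_order_relaxed));
}

Node* pop_deferred() {  // single consumer
  Node* head = g_deferred.load(std::memory_order_acquire);
  while (head != nullptr) {
    Node* tail = head->_next.load(std::memory_order_relaxed);
    if (tail == head) tail = nullptr;  // translate the end marker
    if (g_deferred.compare_exchange_strong(head, tail,
                                           std::memory_order_acquire)) {
      return head;  // popped; on failure 'head' was refreshed, so retry
    }
  }
  return nullptr;
}
// ---------------------------------------------------------------------------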
// Record that cleanup is needed, without notifying the Service thread.
// Used by release(), where we can't lock even Service_lock.
void OopStorage::record_needs_cleanup() {
  // Set local flag first, else service thread could wake up and miss
  // the request. This order may instead (rarely) unnecessarily notify.
  Atomic::release_store(&_needs_cleanup, true);
  Atomic::release_store_fence(&needs_cleanup_requested, true);
}
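// ---------------------------------------------------------------------------
// Editor's sketch, not part of oopStorage.cpp: a model of the two-flag order
// in record_needs_cleanup(), with hypothetical names.  The per-storage flag
// must be set before the global flag the service thread checks: if the
// thread wakes between the two stores it still finds the local request,
// whereas the reverse order could let it wake, find nothing local, and go
// back to sleep with the request lost.  The trailing fence approximates
// Atomic::release_store_fence().
#include <atomic>

std::atomic<bool> local_needs_cleanup{false};      // per-storage flag
std::atomic<bool> global_cleanup_requested{false}; // wakes the service thread

void record_request() {
  local_needs_cleanup.store(true, std::memory_order_release);
  global_cleanup_requested.store(true, std::memory_order_release);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}
// ---------------------------------------------------------------------------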
bool OopStorage::delete_empty_blocks() {
  // Service thread might have oopstorage work, but not for this object.
  // Check for deferred updates even though that's not a service thread
  // trigger; since we're here, we might as well process them.
  if (!Atomic::load_acquire(&_needs_cleanup) &&
      (Atomic::load_acquire(&_deferred_updates) == NULL)) {
    return false;
  }

  MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);

  // Clear the request before processing.
  Atomic::release_store_fence(&_needs_cleanup, false);

  // Other threads could be adding to the empty block count or the
  // deferred update list while we're working. Set an upper bound on
  // how many updates we'll process and blocks we'll try to release,
  // so other threads can't cause an unbounded stay in this function.
  // ...
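// ---------------------------------------------------------------------------
// Editor's sketch, not part of oopStorage.cpp: the bounded-work idiom the
// comment above describes, with hypothetical names.  The iteration limit is
// fixed on entry, so producers appending work concurrently cannot pin this
// thread in the loop indefinitely; leftover work is reported to the caller.
#include <cstddef>
#include <deque>

// Caller holds the lock protecting 'work', as delete_empty_blocks() holds
// _allocation_mutex.  Returns true if work remains after the bounded pass.
bool drain_some(std::deque<int>& work) {
  const std::size_t max_ops = work.size();  // upper bound fixed at entry
  for (std::size_t i = 0; i < max_ops && !work.empty(); ++i) {
    int item = work.front();
    work.pop_front();
    (void)item;  // ... process item, possibly releasing/retaking the lock
  }
  return !work.empty();
}
// ---------------------------------------------------------------------------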
// ...

bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
  data->_processed += data->_segment_end - data->_segment_start;
  size_t start = Atomic::load_acquire(&_next_block);
  if (start >= _block_count) {
    return finish_iteration(data); // No more blocks available.
  }
  // Try to claim several at a time, but not *too* many. We want to
  // avoid deciding there are many available and selecting a large
  // ...