8204834: Fix confusing "allocate" naming in OopStorage
Summary: allocate_list => allocation_list and so on.
Reviewed-by: dholmes, tschatzl, coleenp
--- a/src/hotspot/share/gc/shared/oopStorage.cpp Mon Jul 09 14:12:50 2018 +0200
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp Mon Jul 09 13:35:55 2018 -0400
@@ -45,47 +45,47 @@
#include "utilities/ostream.hpp"
#include "utilities/spinYield.hpp"
-OopStorage::AllocateEntry::AllocateEntry() : _prev(NULL), _next(NULL) {}
+OopStorage::AllocationListEntry::AllocationListEntry() : _prev(NULL), _next(NULL) {}
-OopStorage::AllocateEntry::~AllocateEntry() {
+OopStorage::AllocationListEntry::~AllocationListEntry() {
assert(_prev == NULL, "deleting attached block");
assert(_next == NULL, "deleting attached block");
}
-OopStorage::AllocateList::AllocateList() : _head(NULL), _tail(NULL) {}
+OopStorage::AllocationList::AllocationList() : _head(NULL), _tail(NULL) {}
-OopStorage::AllocateList::~AllocateList() {
+OopStorage::AllocationList::~AllocationList() {
// ~OopStorage() empties its lists before destroying them.
assert(_head == NULL, "deleting non-empty block list");
assert(_tail == NULL, "deleting non-empty block list");
}
-void OopStorage::AllocateList::push_front(const Block& block) {
+void OopStorage::AllocationList::push_front(const Block& block) {
const Block* old = _head;
if (old == NULL) {
assert(_tail == NULL, "invariant");
_head = _tail = &block;
} else {
- block.allocate_entry()._next = old;
- old->allocate_entry()._prev = &block;
+ block.allocation_list_entry()._next = old;
+ old->allocation_list_entry()._prev = &block;
_head = &block;
}
}
-void OopStorage::AllocateList::push_back(const Block& block) {
+void OopStorage::AllocationList::push_back(const Block& block) {
const Block* old = _tail;
if (old == NULL) {
assert(_head == NULL, "invariant");
_head = _tail = &block;
} else {
- old->allocate_entry()._next = &block;
- block.allocate_entry()._prev = old;
+ old->allocation_list_entry()._next = &block;
+ block.allocation_list_entry()._prev = old;
_tail = &block;
}
}
-void OopStorage::AllocateList::unlink(const Block& block) {
- const AllocateEntry& block_entry = block.allocate_entry();
+void OopStorage::AllocationList::unlink(const Block& block) {
+ const AllocationListEntry& block_entry = block.allocation_list_entry();
const Block* prev_blk = block_entry._prev;
const Block* next_blk = block_entry._next;
block_entry._prev = NULL;
@@ -96,15 +96,15 @@
_head = _tail = NULL;
} else if (prev_blk == NULL) {
assert(_head == &block, "invariant");
- next_blk->allocate_entry()._prev = NULL;
+ next_blk->allocation_list_entry()._prev = NULL;
_head = next_blk;
} else if (next_blk == NULL) {
assert(_tail == &block, "invariant");
- prev_blk->allocate_entry()._next = NULL;
+ prev_blk->allocation_list_entry()._next = NULL;
_tail = prev_blk;
} else {
- next_blk->allocate_entry()._prev = prev_blk;
- prev_blk->allocate_entry()._next = next_blk;
+ next_blk->allocation_list_entry()._prev = prev_blk;
+ prev_blk->allocation_list_entry()._next = next_blk;
}
}
@@ -210,7 +210,7 @@
_owner(owner),
_memory(memory),
_active_index(0),
- _allocate_entry(),
+ _allocation_list_entry(),
_deferred_updates_next(NULL),
_release_refcount(0)
{
@@ -367,65 +367,65 @@
//////////////////////////////////////////////////////////////////////////////
// Allocation
//
-// Allocation involves the _allocate_list, which contains a subset of the
+// Allocation involves the _allocation_list, which contains a subset of the
// blocks owned by a storage object. This is a doubly-linked list, linked
// through dedicated fields in the blocks. Full blocks are removed from this
// list, though they are still present in the _active_array. Empty blocks are
-// kept at the end of the _allocate_list, to make it easy for empty block
+// kept at the end of the _allocation_list, to make it easy for empty block
// deletion to find them.
//
// allocate() and delete_empty_blocks_concurrent() lock the
-// _allocate_mutex while performing any list and array modifications.
+// _allocation_mutex while performing any list and array modifications.
//
// allocate() and release() update a block's _allocated_bitmask using CAS
// loops. This prevents loss of updates even though release() performs
// its updates without any locking.
//
-// allocate() obtains the entry from the first block in the _allocate_list,
+// allocate() obtains the entry from the first block in the _allocation_list,
// and updates that block's _allocated_bitmask to indicate the entry is in
// use. If this makes the block full (all entries in use), the block is
-// removed from the _allocate_list so it won't be considered by future
+// removed from the _allocation_list so it won't be considered by future
// allocations until some entries in it are released.
//
// release() is performed lock-free. release() first looks up the block for
// the entry, using address alignment to find the enclosing block (thereby
// avoiding iteration over the _active_array). Once the block has been
// determined, its _allocated_bitmask needs to be updated, and its position in
-// the _allocate_list may need to be updated. There are two cases:
+// the _allocation_list may need to be updated. There are two cases:
//
// (a) If the block is neither full nor would become empty with the release of
// the entry, only its _allocated_bitmask needs to be updated. But if the CAS
// update fails, the applicable case may change for the retry.
//
-// (b) Otherwise, the _allocate_list also needs to be modified. This requires
-// locking the _allocate_mutex. To keep the release() operation lock-free,
-// rather than updating the _allocate_list itself, it instead performs a
+// (b) Otherwise, the _allocation_list also needs to be modified. This requires
+// locking the _allocation_mutex. To keep the release() operation lock-free,
+// rather than updating the _allocation_list itself, it instead performs a
// lock-free push of the block onto the _deferred_updates list. Entries on
// that list are processed by allocate() and delete_empty_blocks_XXX(), while
// they already hold the necessary lock. That processing makes the block's
// list state consistent with its current _allocated_bitmask. The block is
-// added to the _allocate_list if not already present and the bitmask is not
-// full. The block is moved to the end of the _allocated_list if the bitmask
+// added to the _allocation_list if not already present and the bitmask is not
+// full. The block is moved to the end of the _allocation_list if the bitmask
// is empty, for ease of empty block deletion processing.
oop* OopStorage::allocate() {
- MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+ MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Do some deferred update processing every time we allocate.
- // Continue processing deferred updates if _allocate_list is empty,
+ // Continue processing deferred updates if _allocation_list is empty,
// in the hope that we'll get a block from that, rather than
// allocating a new block.
- while (reduce_deferred_updates() && (_allocate_list.head() == NULL)) {}
+ while (reduce_deferred_updates() && (_allocation_list.head() == NULL)) {}
- // Use the first block in _allocate_list for the allocation.
- Block* block = _allocate_list.head();
+ // Use the first block in _allocation_list for the allocation.
+ Block* block = _allocation_list.head();
if (block == NULL) {
// No available blocks; make a new one, and add to storage.
{
- MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+ MutexUnlockerEx mul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
block = Block::new_block(this);
}
if (block == NULL) {
- while (_allocate_list.head() == NULL) {
+ while (_allocation_list.head() == NULL) {
if (!reduce_deferred_updates()) {
// Failed to make new block, no other thread made a block
// available while the mutex was released, and didn't get
@@ -448,13 +448,13 @@
return NULL;
}
}
- // Add to end of _allocate_list. The mutex release allowed
- // other threads to add blocks to the _allocate_list. We prefer
+ // Add to end of _allocation_list. The mutex release allowed
+ // other threads to add blocks to the _allocation_list. We prefer
// to allocate from non-empty blocks, to allow empty blocks to
// be deleted.
- _allocate_list.push_back(*block);
+ _allocation_list.push_back(*block);
}
- block = _allocate_list.head();
+ block = _allocation_list.head();
}
// Allocate from first block.
assert(block != NULL, "invariant");
@@ -471,7 +471,7 @@
// Transitioning from not full to full.
// Remove full blocks from consideration by future allocates.
log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
- _allocate_list.unlink(*block);
+ _allocation_list.unlink(*block);
}
log_info(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
return result;
@@ -482,7 +482,7 @@
// Return true if the array was successfully expanded, false to
// indicate allocation failure.
bool OopStorage::expand_active_array() {
- assert_lock_strong(_allocate_mutex);
+ assert_lock_strong(_allocation_mutex);
ActiveArray* old_array = _active_array;
size_t new_size = 2 * old_array->size();
log_info(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT,
@@ -632,7 +632,7 @@
// (updated bitmask is empty or old bitmask was full), atomically push
// this block onto the deferred updates list. Some future call to
// reduce_deferred_updates will make any needed changes related to this
- // block and _allocate_list. This deferral avoids list updates and the
+ // block and _allocation_list. This deferral avoids list updates and the
// associated locking here.
if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
// Log transitions. Both transitions are possible in a single update.
@@ -663,7 +663,7 @@
// Process one available deferred update. Returns true if one was processed.
bool OopStorage::reduce_deferred_updates() {
- assert_locked_or_safepoint(_allocate_mutex);
+ assert_locked_or_safepoint(_allocation_mutex);
// Atomically pop a block off the list, if any available.
// No ABA issue because this is only called by one thread at a time.
// The atomicity is w.r.t. pushes by release().
@@ -687,20 +687,20 @@
uintx allocated = block->allocated_bitmask();
// Make membership in list consistent with bitmask state.
- if ((_allocate_list.ctail() != NULL) &&
- ((_allocate_list.ctail() == block) ||
- (_allocate_list.next(*block) != NULL))) {
- // Block is in the allocate list.
+ if ((_allocation_list.ctail() != NULL) &&
+ ((_allocation_list.ctail() == block) ||
+ (_allocation_list.next(*block) != NULL))) {
+ // Block is in the _allocation_list.
assert(!is_full_bitmask(allocated), "invariant");
} else if (!is_full_bitmask(allocated)) {
- // Block is not in the allocate list, but now should be.
- _allocate_list.push_front(*block);
+ // Block is not in the _allocation_list, but now should be.
+ _allocation_list.push_front(*block);
} // Else block is full and not in list, which is correct.
// Move empty block to end of list, for possible deletion.
if (is_empty_bitmask(allocated)) {
- _allocate_list.unlink(*block);
- _allocate_list.push_back(*block);
+ _allocation_list.unlink(*block);
+ _allocation_list.push_back(*block);
}
log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
@@ -759,24 +759,24 @@
const size_t initial_active_array_size = 8;
OopStorage::OopStorage(const char* name,
- Mutex* allocate_mutex,
+ Mutex* allocation_mutex,
Mutex* active_mutex) :
_name(dup_name(name)),
_active_array(ActiveArray::create(initial_active_array_size)),
- _allocate_list(),
+ _allocation_list(),
_deferred_updates(NULL),
- _allocate_mutex(allocate_mutex),
+ _allocation_mutex(allocation_mutex),
_active_mutex(active_mutex),
_allocation_count(0),
_concurrent_iteration_active(false)
{
_active_array->increment_refcount();
- assert(_active_mutex->rank() < _allocate_mutex->rank(),
- "%s: active_mutex must have lower rank than allocate_mutex", _name);
+ assert(_active_mutex->rank() < _allocation_mutex->rank(),
+ "%s: active_mutex must have lower rank than allocation_mutex", _name);
assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
"%s: active mutex requires safepoint check", _name);
- assert(_allocate_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
- "%s: allocate mutex requires safepoint check", _name);
+ assert(_allocation_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
+ "%s: allocation mutex requires safepoint check", _name);
}
void OopStorage::delete_empty_block(const Block& block) {
@@ -791,8 +791,8 @@
_deferred_updates = block->deferred_updates_next();
block->set_deferred_updates_next(NULL);
}
- while ((block = _allocate_list.head()) != NULL) {
- _allocate_list.unlink(*block);
+ while ((block = _allocation_list.head()) != NULL) {
+ _allocation_list.unlink(*block);
}
bool unreferenced = _active_array->decrement_refcount();
assert(unreferenced, "deleting storage while _active_array is referenced");
@@ -811,18 +811,18 @@
while (reduce_deferred_updates()) {}
// Don't interfere with a concurrent iteration.
if (_concurrent_iteration_active) return;
- // Delete empty (and otherwise deletable) blocks from end of _allocate_list.
- for (Block* block = _allocate_list.tail();
+ // Delete empty (and otherwise deletable) blocks from end of _allocation_list.
+ for (Block* block = _allocation_list.tail();
(block != NULL) && block->is_deletable();
- block = _allocate_list.tail()) {
+ block = _allocation_list.tail()) {
_active_array->remove(block);
- _allocate_list.unlink(*block);
+ _allocation_list.unlink(*block);
delete_empty_block(*block);
}
}
void OopStorage::delete_empty_blocks_concurrent() {
- MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+ MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Other threads could be adding to the empty block count while we
// release the mutex across the block deletions. Set an upper bound
// on how many blocks we'll try to release, so other threads can't
@@ -834,7 +834,7 @@
// lock. But limit number processed to limit lock duration.
reduce_deferred_updates();
- Block* block = _allocate_list.tail();
+ Block* block = _allocation_list.tail();
if ((block == NULL) || !block->is_deletable()) {
// No block to delete, so done. There could be more pending
// deferred updates that could give us more work to do; deal with
@@ -848,10 +848,10 @@
if (_concurrent_iteration_active) return;
_active_array->remove(block);
}
- // Remove block from _allocate_list and delete it.
- _allocate_list.unlink(*block);
+ // Remove block from _allocation_list and delete it.
+ _allocation_list.unlink(*block);
// Release mutex while deleting block.
- MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+ MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
delete_empty_block(*block);
}
}
@@ -860,7 +860,7 @@
const Block* block = find_block_or_null(ptr);
if (block != NULL) {
// Prevent block deletion and _active_array modification.
- MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
+ MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Block could be a false positive, so get index carefully.
size_t index = Block::active_index_safe(block);
if ((index < _active_array->block_count()) &&
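
To make the deferred-update scheme described in the allocation comments above concrete: a minimal standalone sketch, assuming std::atomic in place of HotSpot's Atomic primitives. Releasing threads push lock-free onto a shared list; the single consumer, which holds the allocation mutex, pops with no ABA hazard. All names here (Node, push_deferred, pop_deferred) are hypothetical, not HotSpot's.

    #include <atomic>

    // Hypothetical stand-in for OopStorage::Block, reduced to the one
    // field a deferred-updates list needs.
    struct Node {
      Node* deferred_next = nullptr;
    };

    std::atomic<Node*> deferred_head{nullptr};

    // release()-side: lock-free push, as in case (b) above. Multiple
    // releasing threads may race here; the CAS loop resolves the race.
    void push_deferred(Node* n) {
      Node* head = deferred_head.load(std::memory_order_relaxed);
      do {
        n->deferred_next = head;
      } while (!deferred_head.compare_exchange_weak(
                   head, n, std::memory_order_release,
                   std::memory_order_relaxed));
    }

    // allocate()-side: pop one entry. Callers hold the allocation
    // mutex, so there is a single consumer and no ABA hazard on the
    // head pointer; that is the same argument reduce_deferred_updates()
    // makes above about being called by one thread at a time.
    Node* pop_deferred() {
      Node* head = deferred_head.load(std::memory_order_acquire);
      while (head != nullptr &&
             !deferred_head.compare_exchange_strong(
                 head, head->deferred_next, std::memory_order_acquire)) {}
      return head;
    }
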
--- a/src/hotspot/share/gc/shared/oopStorage.hpp Mon Jul 09 14:12:50 2018 +0200
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp Mon Jul 09 13:35:55 2018 -0400
@@ -73,7 +73,7 @@
class OopStorage : public CHeapObj<mtGC> {
public:
- OopStorage(const char* name, Mutex* allocate_mutex, Mutex* active_mutex);
+ OopStorage(const char* name, Mutex* allocation_mutex, Mutex* active_mutex);
~OopStorage();
// These count and usage accessors are racy unless at a safepoint.
@@ -94,12 +94,12 @@
ALLOCATED_ENTRY
};
- // Locks _allocate_mutex.
+ // Locks _allocation_mutex.
// precondition: ptr != NULL.
EntryStatus allocation_status(const oop* ptr) const;
// Allocates and returns a new entry. Returns NULL if memory allocation
- // failed. Locks _allocate_mutex.
+ // failed. Locks _allocation_mutex.
// postcondition: *result == NULL.
oop* allocate();
@@ -152,7 +152,7 @@
// Block cleanup functions are for the exclusive use of the GC.
// Both stop deleting if there is an in-progress concurrent iteration.
- // Concurrent deletion locks both the allocate_mutex and the active_mutex.
+ // Concurrent deletion locks both the _allocation_mutex and the _active_mutex.
void delete_empty_blocks_safepoint();
void delete_empty_blocks_concurrent();
@@ -172,20 +172,20 @@
NOT_AIX( private: )
class Block; // Fixed-size array of oops, plus bookkeeping.
class ActiveArray; // Array of Blocks, plus bookkeeping.
- class AllocateEntry; // Provides AllocateList links in a Block.
+ class AllocationListEntry; // Provides AllocationList links in a Block.
// Doubly-linked list of Blocks.
- class AllocateList {
+ class AllocationList {
const Block* _head;
const Block* _tail;
// Noncopyable.
- AllocateList(const AllocateList&);
- AllocateList& operator=(const AllocateList&);
+ AllocationList(const AllocationList&);
+ AllocationList& operator=(const AllocationList&);
public:
- AllocateList();
- ~AllocateList();
+ AllocationList();
+ ~AllocationList();
Block* head();
Block* tail();
@@ -219,10 +219,10 @@
private:
const char* _name;
ActiveArray* _active_array;
- AllocateList _allocate_list;
+ AllocationList _allocation_list;
Block* volatile _deferred_updates;
- Mutex* _allocate_mutex;
+ Mutex* _allocation_mutex;
Mutex* _active_mutex;
// Volatile for racy unlocked accesses.
--- a/src/hotspot/share/gc/shared/oopStorage.inline.hpp Mon Jul 09 14:12:50 2018 +0200
+++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp Mon Jul 09 13:35:55 2018 -0400
@@ -107,10 +107,10 @@
return *block_ptr(index);
}
-// A Block has an embedded AllocateEntry to provide the links between
-// Blocks in a AllocateList.
-class OopStorage::AllocateEntry {
- friend class OopStorage::AllocateList;
+// A Block has an embedded AllocationListEntry to provide the links between
+// Blocks in an AllocationList.
+class OopStorage::AllocationListEntry {
+ friend class OopStorage::AllocationList;
// Members are mutable, and we deal exclusively with pointers to
// const, to make const blocks easier to use; a block being const
@@ -119,18 +119,18 @@
mutable const Block* _next;
// Noncopyable.
- AllocateEntry(const AllocateEntry&);
- AllocateEntry& operator=(const AllocateEntry&);
+ AllocationListEntry(const AllocationListEntry&);
+ AllocationListEntry& operator=(const AllocationListEntry&);
public:
- AllocateEntry();
- ~AllocateEntry();
+ AllocationListEntry();
+ ~AllocationListEntry();
};
// Fixed-sized array of oops, plus bookkeeping data.
// All blocks are in the storage's _active_array, at the block's _active_index.
-// Non-full blocks are in the storage's _allocate_list, linked through the
-// block's _allocate_entry. Empty blocks are at the end of that list.
+// Non-full blocks are in the storage's _allocation_list, linked through the
+// block's _allocation_list_entry. Empty blocks are at the end of that list.
class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
// _data must be the first non-static data member, for alignment.
oop _data[BitsPerWord];
@@ -140,7 +140,7 @@
const OopStorage* _owner;
void* _memory; // Unaligned storage containing block.
size_t _active_index;
- AllocateEntry _allocate_entry;
+ AllocationListEntry _allocation_list_entry;
Block* volatile _deferred_updates_next;
volatile uintx _release_refcount;
@@ -158,7 +158,7 @@
Block& operator=(const Block&);
public:
- const AllocateEntry& allocate_entry() const;
+ const AllocationListEntry& allocation_list_entry() const;
static size_t allocation_size();
static size_t allocation_alignment_shift();
@@ -197,36 +197,36 @@
template<typename F> bool iterate(F f) const;
}; // class Block
-inline OopStorage::Block* OopStorage::AllocateList::head() {
+inline OopStorage::Block* OopStorage::AllocationList::head() {
return const_cast<Block*>(_head);
}
-inline OopStorage::Block* OopStorage::AllocateList::tail() {
+inline OopStorage::Block* OopStorage::AllocationList::tail() {
return const_cast<Block*>(_tail);
}
-inline const OopStorage::Block* OopStorage::AllocateList::chead() const {
+inline const OopStorage::Block* OopStorage::AllocationList::chead() const {
return _head;
}
-inline const OopStorage::Block* OopStorage::AllocateList::ctail() const {
+inline const OopStorage::Block* OopStorage::AllocationList::ctail() const {
return _tail;
}
-inline OopStorage::Block* OopStorage::AllocateList::prev(Block& block) {
- return const_cast<Block*>(block.allocate_entry()._prev);
+inline OopStorage::Block* OopStorage::AllocationList::prev(Block& block) {
+ return const_cast<Block*>(block.allocation_list_entry()._prev);
}
-inline OopStorage::Block* OopStorage::AllocateList::next(Block& block) {
- return const_cast<Block*>(block.allocate_entry()._next);
+inline OopStorage::Block* OopStorage::AllocationList::next(Block& block) {
+ return const_cast<Block*>(block.allocation_list_entry()._next);
}
-inline const OopStorage::Block* OopStorage::AllocateList::prev(const Block& block) const {
- return block.allocate_entry()._prev;
+inline const OopStorage::Block* OopStorage::AllocationList::prev(const Block& block) const {
+ return block.allocation_list_entry()._prev;
}
-inline const OopStorage::Block* OopStorage::AllocateList::next(const Block& block) const {
- return block.allocate_entry()._next;
+inline const OopStorage::Block* OopStorage::AllocationList::next(const Block& block) const {
+ return block.allocation_list_entry()._next;
}
template<typename Closure>
@@ -298,8 +298,8 @@
// Inline Block accesses for use in iteration loops.
-inline const OopStorage::AllocateEntry& OopStorage::Block::allocate_entry() const {
- return _allocate_entry;
+inline const OopStorage::AllocationListEntry& OopStorage::Block::allocation_list_entry() const {
+ return _allocation_list_entry;
}
inline void OopStorage::Block::check_index(unsigned index) const {
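
The embedded AllocationListEntry above is the classic intrusive doubly-linked list: each element carries its own links, so list membership needs no separate node allocation, and unlink is O(1) given only the element. A minimal sketch with hypothetical names (Elem, Links, ElemList) paralleling Block, AllocationListEntry, and AllocationList:

    struct Elem;

    struct Links {               // parallels AllocationListEntry
      Elem* prev = nullptr;
      Elem* next = nullptr;
    };

    struct Elem {                // parallels Block
      Links links;               // parallels _allocation_list_entry
    };

    struct ElemList {            // parallels AllocationList
      Elem* head = nullptr;
      Elem* tail = nullptr;

      void push_back(Elem& e) {
        e.links.prev = tail;
        e.links.next = nullptr;
        if (tail == nullptr) {
          head = &e;             // list was empty
        } else {
          tail->links.next = &e;
        }
        tail = &e;
      }

      void unlink(Elem& e) {
        Elem* p = e.links.prev;
        Elem* n = e.links.next;
        if (p == nullptr) { head = n; } else { p->links.next = n; }
        if (n == nullptr) { tail = p; } else { n->links.prev = p; }
        e.links.prev = nullptr;  // fully detach, as the asserts in
        e.links.next = nullptr;  // ~AllocationListEntry expect
      }
    };

The real code additionally declares the entry's links mutable and traffics in const Block*, so that a const block can still be relinked; that subtlety is omitted here.
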
--- a/src/hotspot/share/gc/shared/oopStorageParState.hpp Mon Jul 09 14:12:50 2018 +0200
+++ b/src/hotspot/share/gc/shared/oopStorageParState.hpp Mon Jul 09 13:35:55 2018 -0400
@@ -52,7 +52,7 @@
// interfering with each other.
//
// Both allocate() and delete_empty_blocks_concurrent() lock the
-// _allocate_mutex while performing their respective list and array
+// _allocation_mutex while performing their respective list and array
// manipulations, preventing them from interfering with each other.
//
// When allocate() creates a new block, it is added to the end of the
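
A rough sketch of that locking pattern, with std::mutex and std::lock_guard standing in for HotSpot's Mutex and MutexLockerEx (the safepoint-check flags have no analog here, and both function names are hypothetical):

    #include <mutex>

    std::mutex allocation_mutex;  // plays the role of _allocation_mutex

    void allocate_like() {
      std::lock_guard<std::mutex> ml(allocation_mutex);
      // ... take a block from the list, or grow the active array ...
    }

    void delete_empty_blocks_concurrent_like() {
      std::unique_lock<std::mutex> ml(allocation_mutex);
      // ... process deferred updates, unlink a deletable tail block ...
      ml.unlock();  // counterpart of MutexUnlockerEx:
      // ... delete the block outside the lock, bounding hold time ...
      ml.lock();
    }
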
--- a/test/hotspot/gtest/gc/shared/test_oopStorage.cpp Mon Jul 09 14:12:50 2018 +0200
+++ b/test/hotspot/gtest/gc/shared/test_oopStorage.cpp Mon Jul 09 13:35:55 2018 -0400
@@ -52,23 +52,23 @@
class OopStorage::TestAccess : public AllStatic {
public:
typedef OopStorage::Block Block;
- typedef OopStorage::AllocateList AllocateList;
+ typedef OopStorage::AllocationList AllocationList;
typedef OopStorage::ActiveArray ActiveArray;
static ActiveArray& active_array(const OopStorage& storage) {
return *storage._active_array;
}
- static AllocateList& allocate_list(OopStorage& storage) {
- return storage._allocate_list;
+ static AllocationList& allocation_list(OopStorage& storage) {
+ return storage._allocation_list;
}
- static const AllocateList& allocate_list(const OopStorage& storage) {
- return storage._allocate_list;
+ static const AllocationList& allocation_list(const OopStorage& storage) {
+ return storage._allocation_list;
}
- static Mutex* allocate_mutex(const OopStorage& storage) {
- return storage._allocate_mutex;
+ static Mutex* allocation_mutex(const OopStorage& storage) {
+ return storage._allocation_mutex;
}
static bool reduce_deferred_updates(OopStorage& storage) {
@@ -109,13 +109,13 @@
// building with precompiled headers, or for consistency with that
// workaround. There really should be an opto namespace.
typedef TestAccess::Block OopBlock;
-typedef TestAccess::AllocateList AllocateList;
+typedef TestAccess::AllocationList AllocationList;
typedef TestAccess::ActiveArray ActiveArray;
// EXPECT_EQ can't be used with NULL directly; otherwise the AIX build breaks.
const OopBlock* const NULL_BLOCK = NULL;
-static size_t list_length(const AllocateList& list) {
+static size_t list_length(const AllocationList& list) {
size_t result = 0;
for (const OopBlock* block = list.chead();
block != NULL;
@@ -125,7 +125,7 @@
return result;
}
-static void clear_list(AllocateList& list) {
+static void clear_list(AllocationList& list) {
OopBlock* next;
for (OopBlock* block = list.head(); block != NULL; block = next) {
next = list.next(*block);
@@ -133,12 +133,12 @@
}
}
-static bool is_list_empty(const AllocateList& list) {
+static bool is_list_empty(const AllocationList& list) {
return list.chead() == NULL;
}
static bool process_deferred_updates(OopStorage& storage) {
- MutexLockerEx ml(TestAccess::allocate_mutex(storage), Mutex::_no_safepoint_check_flag);
+ MutexLockerEx ml(TestAccess::allocation_mutex(storage), Mutex::_no_safepoint_check_flag);
bool result = false;
while (TestAccess::reduce_deferred_updates(storage)) {
result = true;
@@ -155,7 +155,7 @@
}
static size_t empty_block_count(const OopStorage& storage) {
- const AllocateList& list = TestAccess::allocate_list(storage);
+ const AllocationList& list = TestAccess::allocation_list(storage);
size_t count = 0;
for (const OopBlock* block = list.ctail();
(block != NULL) && block->is_empty();
@@ -183,7 +183,7 @@
OopStorageTest();
~OopStorageTest();
- Mutex _allocate_mutex;
+ Mutex _allocation_mutex;
Mutex _active_mutex;
OopStorage _storage;
@@ -195,19 +195,19 @@
};
OopStorageTest::OopStorageTest() :
- _allocate_mutex(_allocate_rank,
- "test_OopStorage_allocate",
- false,
- Mutex::_safepoint_check_never),
+ _allocation_mutex(_allocate_rank,
+ "test_OopStorage_allocation",
+ false,
+ Mutex::_safepoint_check_never),
_active_mutex(_active_rank,
"test_OopStorage_active",
false,
Mutex::_safepoint_check_never),
- _storage("Test Storage", &_allocate_mutex, &_active_mutex)
+ _storage("Test Storage", &_allocation_mutex, &_active_mutex)
{ }
OopStorageTest::~OopStorageTest() {
- clear_list(TestAccess::allocate_list(_storage));
+ clear_list(TestAccess::allocation_list(_storage));
}
class OopStorageTestWithAllocation : public OopStorageTest {
@@ -243,10 +243,10 @@
OopStorage* _storage;
};
-static bool is_allocate_list_sorted(const OopStorage& storage) {
- // The allocate_list isn't strictly sorted. Rather, all empty
+static bool is_allocation_list_sorted(const OopStorage& storage) {
+ // The allocation_list isn't strictly sorted. Rather, all empty
// blocks are segregated to the end of the list.
- const AllocateList& list = TestAccess::allocate_list(storage);
+ const AllocationList& list = TestAccess::allocation_list(storage);
const OopBlock* block = list.ctail();
for ( ; (block != NULL) && block->is_empty(); block = list.prev(*block)) {}
for ( ; block != NULL; block = list.prev(*block)) {
@@ -269,7 +269,7 @@
TEST_VM_F(OopStorageTest, allocate_one) {
EXPECT_EQ(0u, active_count(_storage));
- EXPECT_TRUE(is_list_empty(TestAccess::allocate_list(_storage)));
+ EXPECT_TRUE(is_list_empty(TestAccess::allocation_list(_storage)));
oop* ptr = _storage.allocate();
EXPECT_TRUE(ptr != NULL);
@@ -277,11 +277,11 @@
EXPECT_EQ(1u, active_count(_storage));
EXPECT_EQ(1u, _storage.block_count());
- EXPECT_EQ(1u, list_length(TestAccess::allocate_list(_storage)));
+ EXPECT_EQ(1u, list_length(TestAccess::allocation_list(_storage)));
EXPECT_EQ(0u, empty_block_count(_storage));
- const OopBlock* block = TestAccess::allocate_list(_storage).chead();
+ const OopBlock* block = TestAccess::allocation_list(_storage).chead();
EXPECT_NE(block, (OopBlock*)NULL);
EXPECT_EQ(block, active_head(_storage));
EXPECT_FALSE(TestAccess::block_is_empty(*block));
@@ -293,11 +293,11 @@
EXPECT_EQ(1u, active_count(_storage));
EXPECT_EQ(1u, _storage.block_count());
- EXPECT_EQ(1u, list_length(TestAccess::allocate_list(_storage)));
+ EXPECT_EQ(1u, list_length(TestAccess::allocation_list(_storage)));
EXPECT_EQ(1u, empty_block_count(_storage));
- const OopBlock* new_block = TestAccess::allocate_list(_storage).chead();
+ const OopBlock* new_block = TestAccess::allocation_list(_storage).chead();
EXPECT_EQ(block, new_block);
EXPECT_EQ(block, active_head(_storage));
EXPECT_TRUE(TestAccess::block_is_empty(*block));
@@ -309,11 +309,11 @@
static const size_t max_entries = 1000;
oop* entries[max_entries];
- AllocateList& allocate_list = TestAccess::allocate_list(_storage);
+ AllocationList& allocation_list = TestAccess::allocation_list(_storage);
EXPECT_EQ(0u, active_count(_storage));
EXPECT_EQ(0u, _storage.block_count());
- EXPECT_TRUE(is_list_empty(allocate_list));
+ EXPECT_TRUE(is_list_empty(allocation_list));
size_t allocated = 0;
for ( ; allocated < max_entries; ++allocated) {
@@ -326,8 +326,8 @@
if (TestAccess::block_is_full(block)) {
break;
} else {
- EXPECT_FALSE(is_list_empty(allocate_list));
- EXPECT_EQ(&block, allocate_list.chead());
+ EXPECT_FALSE(is_list_empty(allocation_list));
+ EXPECT_EQ(&block, allocation_list.chead());
}
}
entries[allocated] = _storage.allocate();
@@ -336,7 +336,7 @@
EXPECT_EQ(allocated, _storage.allocation_count());
EXPECT_EQ(1u, active_count(_storage));
EXPECT_EQ(1u, _storage.block_count());
- EXPECT_TRUE(is_list_empty(allocate_list));
+ EXPECT_TRUE(is_list_empty(allocation_list));
const OopBlock& block = *TestAccess::active_array(_storage).at(0);
EXPECT_TRUE(TestAccess::block_is_full(block));
EXPECT_EQ(allocated, TestAccess::block_allocation_count(block));
@@ -346,7 +346,7 @@
size_t remaining = allocated - (i + 1);
EXPECT_EQ(remaining, TestAccess::block_allocation_count(block));
EXPECT_EQ(remaining, _storage.allocation_count());
- EXPECT_FALSE(is_list_empty(allocate_list));
+ EXPECT_FALSE(is_list_empty(allocation_list));
}
}
@@ -354,7 +354,7 @@
static const size_t max_entries = 1000;
oop* entries[max_entries];
- AllocateList& allocate_list = TestAccess::allocate_list(_storage);
+ AllocationList& allocation_list = TestAccess::allocation_list(_storage);
EXPECT_EQ(0u, empty_block_count(_storage));
@@ -362,12 +362,12 @@
ASSERT_TRUE(entries[0] != NULL);
EXPECT_EQ(1u, active_count(_storage));
EXPECT_EQ(1u, _storage.block_count());
- EXPECT_EQ(1u, list_length(allocate_list));
+ EXPECT_EQ(1u, list_length(allocation_list));
EXPECT_EQ(0u, empty_block_count(_storage));
const OopBlock* block = TestAccess::active_array(_storage).at(0);
EXPECT_EQ(1u, TestAccess::block_allocation_count(*block));
- EXPECT_EQ(block, allocate_list.chead());
+ EXPECT_EQ(block, allocation_list.chead());
for (size_t i = 1; i < max_entries; ++i) {
entries[i] = _storage.allocate();
@@ -376,40 +376,40 @@
EXPECT_EQ(0u, empty_block_count(_storage));
if (block == NULL) {
- ASSERT_FALSE(is_list_empty(allocate_list));
- EXPECT_EQ(1u, list_length(allocate_list));
- block = allocate_list.chead();
+ ASSERT_FALSE(is_list_empty(allocation_list));
+ EXPECT_EQ(1u, list_length(allocation_list));
+ block = allocation_list.chead();
EXPECT_EQ(1u, TestAccess::block_allocation_count(*block));
EXPECT_EQ(block, active_head(_storage));
} else if (TestAccess::block_is_full(*block)) {
- EXPECT_TRUE(is_list_empty(allocate_list));
+ EXPECT_TRUE(is_list_empty(allocation_list));
block = NULL;
} else {
- EXPECT_FALSE(is_list_empty(allocate_list));
- EXPECT_EQ(block, allocate_list.chead());
+ EXPECT_FALSE(is_list_empty(allocation_list));
+ EXPECT_EQ(block, allocation_list.chead());
EXPECT_EQ(block, active_head(_storage));
}
}
if (block != NULL) {
EXPECT_NE(0u, TestAccess::block_allocation_count(*block));
- EXPECT_FALSE(is_list_empty(allocate_list));
- EXPECT_EQ(block, allocate_list.chead());
+ EXPECT_FALSE(is_list_empty(allocation_list));
+ EXPECT_EQ(block, allocation_list.chead());
EXPECT_EQ(block, active_head(_storage));
}
for (size_t i = 0; i < max_entries; ++i) {
release_entry(_storage, entries[i]);
- EXPECT_TRUE(is_allocate_list_sorted(_storage));
+ EXPECT_TRUE(is_allocation_list_sorted(_storage));
EXPECT_EQ(max_entries - (i + 1), total_allocation_count(_storage));
}
- EXPECT_EQ(active_count(_storage), list_length(allocate_list));
+ EXPECT_EQ(active_count(_storage), list_length(allocation_list));
EXPECT_EQ(active_count(_storage), _storage.block_count());
EXPECT_EQ(active_count(_storage), empty_block_count(_storage));
- for (const OopBlock* block = allocate_list.chead();
+ for (const OopBlock* block = allocation_list.chead();
block != NULL;
- block = allocate_list.next(*block)) {
+ block = allocation_list.next(*block)) {
EXPECT_TRUE(TestAccess::block_is_empty(*block));
}
}
@@ -420,10 +420,10 @@
EXPECT_EQ(0u, empty_block_count(_storage));
- AllocateList& allocate_list = TestAccess::allocate_list(_storage);
+ AllocationList& allocation_list = TestAccess::allocation_list(_storage);
EXPECT_EQ(_max_entries, total_allocation_count(_storage));
- EXPECT_GE(1u, list_length(allocate_list));
+ EXPECT_GE(1u, list_length(allocation_list));
// Release all entries in "random" order.
size_t released = 0;
@@ -433,14 +433,14 @@
_entries[i] = NULL;
++released;
EXPECT_EQ(_max_entries - released, total_allocation_count(_storage));
- EXPECT_TRUE(is_allocate_list_sorted(_storage));
+ EXPECT_TRUE(is_allocation_list_sorted(_storage));
}
}
- EXPECT_EQ(active_count(_storage), list_length(allocate_list));
+ EXPECT_EQ(active_count(_storage), list_length(allocation_list));
EXPECT_EQ(active_count(_storage), _storage.block_count());
EXPECT_EQ(0u, total_allocation_count(_storage));
- EXPECT_EQ(list_length(allocate_list), empty_block_count(_storage));
+ EXPECT_EQ(list_length(allocation_list), empty_block_count(_storage));
}
TEST_VM_F(OopStorageTestWithAllocation, random_allocate_release) {
@@ -450,10 +450,10 @@
EXPECT_EQ(0u, empty_block_count(_storage));
- AllocateList& allocate_list = TestAccess::allocate_list(_storage);
+ AllocationList& allocation_list = TestAccess::allocation_list(_storage);
EXPECT_EQ(_max_entries, total_allocation_count(_storage));
- EXPECT_GE(1u, list_length(allocate_list));
+ EXPECT_GE(1u, list_length(allocation_list));
// Release all entries in "random" order, "randomly" interspersed
// with additional allocations.
@@ -466,20 +466,20 @@
++released;
++total_released;
EXPECT_EQ(_max_entries - released, total_allocation_count(_storage));
- EXPECT_TRUE(is_allocate_list_sorted(_storage));
+ EXPECT_TRUE(is_allocation_list_sorted(_storage));
if (total_released % allocate_step == 0) {
_entries[i] = _storage.allocate();
--released;
EXPECT_EQ(_max_entries - released, total_allocation_count(_storage));
- EXPECT_TRUE(is_allocate_list_sorted(_storage));
+ EXPECT_TRUE(is_allocation_list_sorted(_storage));
}
}
}
- EXPECT_EQ(active_count(_storage), list_length(allocate_list));
+ EXPECT_EQ(active_count(_storage), list_length(allocation_list));
EXPECT_EQ(active_count(_storage), _storage.block_count());
EXPECT_EQ(0u, total_allocation_count(_storage));
- EXPECT_EQ(list_length(allocate_list), empty_block_count(_storage));
+ EXPECT_EQ(list_length(allocation_list), empty_block_count(_storage));
}
template<bool sorted>
@@ -1200,10 +1200,10 @@
const size_t OopStorageBlockCollectionTest::nvalues;
const void* const OopStorageBlockCollectionTest::_pseudo_owner[] = {};
-class OopStorageAllocateListTest : public OopStorageBlockCollectionTest {};
+class OopStorageAllocationListTest : public OopStorageBlockCollectionTest {};
-TEST_F(OopStorageAllocateListTest, empty_list) {
- AllocateList list;
+TEST_F(OopStorageAllocationListTest, empty_list) {
+ AllocationList list;
EXPECT_TRUE(is_list_empty(list));
EXPECT_EQ(NULL_BLOCK, list.head());
@@ -1211,8 +1211,8 @@
EXPECT_EQ(NULL_BLOCK, list.ctail());
}
-TEST_F(OopStorageAllocateListTest, push_back) {
- AllocateList list;
+TEST_F(OopStorageAllocationListTest, push_back) {
+ AllocationList list;
for (size_t i = 0; i < nvalues; ++i) {
list.push_back(*values[i]);
@@ -1241,8 +1241,8 @@
clear_list(list);
}
-TEST_F(OopStorageAllocateListTest, push_front) {
- AllocateList list;
+TEST_F(OopStorageAllocationListTest, push_front) {
+ AllocationList list;
for (size_t i = 0; i < nvalues; ++i) {
list.push_front(*values[i]);
@@ -1271,22 +1271,22 @@
clear_list(list);
}
-class OopStorageAllocateListTestWithList : public OopStorageAllocateListTest {
+class OopStorageAllocationListTestWithList : public OopStorageAllocationListTest {
public:
- OopStorageAllocateListTestWithList() : list() {
+ OopStorageAllocationListTestWithList() : list() {
for (size_t i = 0; i < nvalues; ++i) {
list.push_back(*values[i]);
}
}
- ~OopStorageAllocateListTestWithList() {
+ ~OopStorageAllocationListTestWithList() {
clear_list(list);
}
- AllocateList list;
+ AllocationList list;
};
-TEST_F(OopStorageAllocateListTestWithList, unlink_front) {
+TEST_F(OopStorageAllocationListTestWithList, unlink_front) {
EXPECT_EQ(list.chead(), values[0]);
EXPECT_EQ(list.ctail(), values[nvalues - 1]);
@@ -1304,7 +1304,7 @@
EXPECT_EQ(NULL_BLOCK, block);
}
-TEST_F(OopStorageAllocateListTestWithList, unlink_back) {
+TEST_F(OopStorageAllocationListTestWithList, unlink_back) {
EXPECT_EQ(list.chead(), values[0]);
list.unlink(*values[nvalues - 1]);
@@ -1321,7 +1321,7 @@
EXPECT_EQ(NULL_BLOCK, block);
}
-TEST_F(OopStorageAllocateListTestWithList, unlink_middle) {
+TEST_F(OopStorageAllocationListTestWithList, unlink_middle) {
EXPECT_EQ(list.chead(), values[0]);
size_t index = nvalues / 2;
@@ -1344,8 +1344,8 @@
EXPECT_EQ(NULL_BLOCK, block);
}
-TEST_F(OopStorageAllocateListTest, single) {
- AllocateList list;
+TEST_F(OopStorageAllocationListTest, single) {
+ AllocationList list;
list.push_back(*values[0]);
EXPECT_EQ(NULL_BLOCK, list.next(*values[0]));