8182169: ArrayAllocator should take MEMFLAGS as regular parameter
Summary: Change ArrayAllocator's MEMFLAGS from a template parameter to an ordinary function parameter
Reviewed-by: kbarrett, tschatzl
Contributed-by: milan.mimica@gmail.com
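
The shape of the change, illustrated with the G1HotCardCache call site from the hunks below (a minimal before/after sketch for reference, not part of the patch itself):

    // Before: the MEMFLAGS value was baked into the allocator type as a
    // template argument, so every flag value instantiated a distinct class.
    _hot_cache = ArrayAllocator<jbyte*, mtGC>::allocate(_hot_cache_size);
    ArrayAllocator<jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);

    // After: the allocator is templated on the element type only, and the
    // MEMFLAGS value is passed as a regular runtime argument to allocate().
    _hot_cache = ArrayAllocator<jbyte*>::allocate(_hot_cache_size, mtGC);
    ArrayAllocator<jbyte*>::free(_hot_cache, _hot_cache_size);

Note that free() never needed the flags, so its argument list is unchanged; only the allocating entry points gain the MEMFLAGS parameter.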
--- a/hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp Fri Jul 21 16:37:01 2017 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp Fri Jul 21 21:01:59 2017 -0400
@@ -55,13 +55,13 @@
G1CardLiveData::bm_word_t* G1CardLiveData::allocate_large_bitmap(size_t size_in_bits) {
size_t size_in_words = BitMap::calc_size_in_words(size_in_bits);
- bm_word_t* map = MmapArrayAllocator<bm_word_t, mtGC>::allocate(size_in_words);
+ bm_word_t* map = MmapArrayAllocator<bm_word_t>::allocate(size_in_words, mtGC);
return map;
}
void G1CardLiveData::free_large_bitmap(bm_word_t* bitmap, size_t size_in_bits) {
- MmapArrayAllocator<bm_word_t, mtGC>::free(bitmap, BitMap::calc_size_in_words(size_in_bits));
+ MmapArrayAllocator<bm_word_t>::free(bitmap, BitMap::calc_size_in_words(size_in_bits));
}
void G1CardLiveData::initialize(size_t max_capacity, uint num_max_regions) {
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Fri Jul 21 16:37:01 2017 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Fri Jul 21 21:01:59 2017 -0400
@@ -144,7 +144,7 @@
assert(new_capacity <= _max_chunk_capacity,
"Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
- TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::allocate_or_null(new_capacity);
+ TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
if (new_base == NULL) {
log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
@@ -152,7 +152,7 @@
}
// Release old mapping.
if (_base != NULL) {
- MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
+ MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
}
_base = new_base;
@@ -205,7 +205,7 @@
G1CMMarkStack::~G1CMMarkStack() {
if (_base != NULL) {
- MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
+ MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
}
}
--- a/hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp Fri Jul 21 16:37:01 2017 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp Fri Jul 21 21:01:59 2017 -0400
@@ -36,7 +36,7 @@
_use_cache = true;
_hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
- _hot_cache = ArrayAllocator<jbyte*, mtGC>::allocate(_hot_cache_size);
+ _hot_cache = ArrayAllocator<jbyte*>::allocate(_hot_cache_size, mtGC);
reset_hot_cache_internal();
@@ -51,7 +51,7 @@
G1HotCardCache::~G1HotCardCache() {
if (default_use_cache()) {
assert(_hot_cache != NULL, "Logic");
- ArrayAllocator<jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
+ ArrayAllocator<jbyte*>::free(_hot_cache, _hot_cache_size);
_hot_cache = NULL;
}
}
--- a/hotspot/src/share/vm/gc/shared/taskqueue.inline.hpp Fri Jul 21 16:37:01 2017 -0400
+++ b/hotspot/src/share/vm/gc/shared/taskqueue.inline.hpp Fri Jul 21 21:01:59 2017 -0400
@@ -44,13 +44,13 @@
template<class E, MEMFLAGS F, unsigned int N>
inline void GenericTaskQueue<E, F, N>::initialize() {
- _elems = ArrayAllocator<E, F>::allocate(N);
+ _elems = ArrayAllocator<E>::allocate(N, F);
}
template<class E, MEMFLAGS F, unsigned int N>
inline GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
assert(false, "This code is currently never called");
- ArrayAllocator<E, F>::free(const_cast<E*>(_elems), N);
+ ArrayAllocator<E>::free(const_cast<E*>(_elems), N);
}
template<class E, MEMFLAGS F, unsigned int N>
--- a/hotspot/src/share/vm/memory/allocation.hpp Fri Jul 21 16:37:01 2017 -0400
+++ b/hotspot/src/share/vm/memory/allocation.hpp Fri Jul 21 21:01:59 2017 -0400
@@ -718,43 +718,43 @@
// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
-template <class E, MEMFLAGS F>
+template <class E>
class ArrayAllocator : public AllStatic {
private:
static bool should_use_malloc(size_t length);
- static E* allocate_malloc(size_t length);
- static E* allocate_mmap(size_t length);
+ static E* allocate_malloc(size_t length, MEMFLAGS flags);
+ static E* allocate_mmap(size_t length, MEMFLAGS flags);
static void free_malloc(E* addr, size_t length);
static void free_mmap(E* addr, size_t length);
public:
- static E* allocate(size_t length);
- static E* reallocate(E* old_addr, size_t old_length, size_t new_length);
+ static E* allocate(size_t length, MEMFLAGS flags);
+ static E* reallocate(E* old_addr, size_t old_length, size_t new_length, MEMFLAGS flags);
static void free(E* addr, size_t length);
};
// Uses mmaped memory for all allocations. All allocations are initially
// zero-filled. No pre-touching.
-template <class E, MEMFLAGS F>
+template <class E>
class MmapArrayAllocator : public AllStatic {
private:
static size_t size_for(size_t length);
public:
- static E* allocate_or_null(size_t length);
- static E* allocate(size_t length);
+ static E* allocate_or_null(size_t length, MEMFLAGS flags);
+ static E* allocate(size_t length, MEMFLAGS flags);
static void free(E* addr, size_t length);
};
// Uses malloc:ed memory for all allocations.
-template <class E, MEMFLAGS F>
+template <class E>
class MallocArrayAllocator : public AllStatic {
public:
static size_t size_for(size_t length);
- static E* allocate(size_t length);
+ static E* allocate(size_t length, MEMFLAGS flags);
static void free(E* addr, size_t length);
};
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp Fri Jul 21 16:37:01 2017 -0400
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp Fri Jul 21 21:01:59 2017 -0400
@@ -146,19 +146,19 @@
FreeHeap(p);
}
-template <class E, MEMFLAGS F>
-size_t MmapArrayAllocator<E, F>::size_for(size_t length) {
+template <class E>
+size_t MmapArrayAllocator<E>::size_for(size_t length) {
size_t size = length * sizeof(E);
int alignment = os::vm_allocation_granularity();
return align_up(size, alignment);
}
-template <class E, MEMFLAGS F>
-E* MmapArrayAllocator<E, F>::allocate_or_null(size_t length) {
+template <class E>
+E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
size_t size = size_for(length);
int alignment = os::vm_allocation_granularity();
- char* addr = os::reserve_memory(size, NULL, alignment, F);
+ char* addr = os::reserve_memory(size, NULL, alignment, flags);
if (addr == NULL) {
return NULL;
}
@@ -171,12 +171,12 @@
}
}
-template <class E, MEMFLAGS F>
-E* MmapArrayAllocator<E, F>::allocate(size_t length) {
+template <class E>
+E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
size_t size = size_for(length);
int alignment = os::vm_allocation_granularity();
- char* addr = os::reserve_memory(size, NULL, alignment, F);
+ char* addr = os::reserve_memory(size, NULL, alignment, flags);
if (addr == NULL) {
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
}
@@ -186,55 +186,55 @@
return (E*)addr;
}
-template <class E, MEMFLAGS F>
-void MmapArrayAllocator<E, F>::free(E* addr, size_t length) {
+template <class E>
+void MmapArrayAllocator<E>::free(E* addr, size_t length) {
bool result = os::release_memory((char*)addr, size_for(length));
assert(result, "Failed to release memory");
}
-template <class E, MEMFLAGS F>
-size_t MallocArrayAllocator<E, F>::size_for(size_t length) {
+template <class E>
+size_t MallocArrayAllocator<E>::size_for(size_t length) {
return length * sizeof(E);
}
-template <class E, MEMFLAGS F>
-E* MallocArrayAllocator<E, F>::allocate(size_t length) {
- return (E*)AllocateHeap(size_for(length), F);
+template <class E>
+E* MallocArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
+ return (E*)AllocateHeap(size_for(length), flags);
}
-template<class E, MEMFLAGS F>
-void MallocArrayAllocator<E, F>::free(E* addr, size_t /*length*/) {
+template<class E>
+void MallocArrayAllocator<E>::free(E* addr, size_t /*length*/) {
FreeHeap(addr);
}
-template <class E, MEMFLAGS F>
-bool ArrayAllocator<E, F>::should_use_malloc(size_t length) {
- return MallocArrayAllocator<E, F>::size_for(length) < ArrayAllocatorMallocLimit;
+template <class E>
+bool ArrayAllocator<E>::should_use_malloc(size_t length) {
+ return MallocArrayAllocator<E>::size_for(length) < ArrayAllocatorMallocLimit;
}
-template <class E, MEMFLAGS F>
-E* ArrayAllocator<E, F>::allocate_malloc(size_t length) {
- return MallocArrayAllocator<E, F>::allocate(length);
+template <class E>
+E* ArrayAllocator<E>::allocate_malloc(size_t length, MEMFLAGS flags) {
+ return MallocArrayAllocator<E>::allocate(length, flags);
}
-template <class E, MEMFLAGS F>
-E* ArrayAllocator<E, F>::allocate_mmap(size_t length) {
- return MmapArrayAllocator<E, F>::allocate(length);
+template <class E>
+E* ArrayAllocator<E>::allocate_mmap(size_t length, MEMFLAGS flags) {
+ return MmapArrayAllocator<E>::allocate(length, flags);
}
-template <class E, MEMFLAGS F>
-E* ArrayAllocator<E, F>::allocate(size_t length) {
+template <class E>
+E* ArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
if (should_use_malloc(length)) {
- return allocate_malloc(length);
+ return allocate_malloc(length, flags);
}
- return allocate_mmap(length);
+ return allocate_mmap(length, flags);
}
-template <class E, MEMFLAGS F>
-E* ArrayAllocator<E, F>::reallocate(E* old_addr, size_t old_length, size_t new_length) {
+template <class E>
+E* ArrayAllocator<E>::reallocate(E* old_addr, size_t old_length, size_t new_length, MEMFLAGS flags) {
E* new_addr = (new_length > 0)
- ? allocate(new_length)
+ ? allocate(new_length, flags)
: NULL;
if (new_addr != NULL && old_addr != NULL) {
@@ -248,18 +248,18 @@
return new_addr;
}
-template<class E, MEMFLAGS F>
-void ArrayAllocator<E, F>::free_malloc(E* addr, size_t length) {
- MallocArrayAllocator<E, F>::free(addr, length);
+template<class E>
+void ArrayAllocator<E>::free_malloc(E* addr, size_t length) {
+ MallocArrayAllocator<E>::free(addr, length);
}
-template<class E, MEMFLAGS F>
-void ArrayAllocator<E, F>::free_mmap(E* addr, size_t length) {
- MmapArrayAllocator<E, F>::free(addr, length);
+template<class E>
+void ArrayAllocator<E>::free_mmap(E* addr, size_t length) {
+ MmapArrayAllocator<E>::free(addr, length);
}
-template<class E, MEMFLAGS F>
-void ArrayAllocator<E, F>::free(E* addr, size_t length) {
+template<class E>
+void ArrayAllocator<E>::free(E* addr, size_t length) {
if (addr != NULL) {
if (should_use_malloc(length)) {
free_malloc(addr, length);
--- a/hotspot/src/share/vm/utilities/bitMap.cpp Fri Jul 21 16:37:01 2017 -0400
+++ b/hotspot/src/share/vm/utilities/bitMap.cpp Fri Jul 21 21:01:59 2017 -0400
@@ -48,10 +48,10 @@
class CHeapBitMapAllocator : StackObj {
public:
bm_word_t* allocate(size_t size_in_words) const {
- return ArrayAllocator<bm_word_t, mtInternal>::allocate(size_in_words);
+ return ArrayAllocator<bm_word_t>::allocate(size_in_words, mtInternal);
}
void free(bm_word_t* map, idx_t size_in_words) const {
- ArrayAllocator<bm_word_t, mtInternal>::free(map, size_in_words);
+ ArrayAllocator<bm_word_t>::free(map, size_in_words);
}
};