8178501: Replace usages of align macros with calls to the align inline functions
Reviewed-by: mgerdin, tschatzl
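
For context on what the patch does mechanically: the retired helpers (align_up_, align_down_, is_aligned_) and their inline replacements compute the same power-of-two roundings; the practical gain visible in the hunks below is ordinary function-call type checking (note the casts that become unnecessary, e.g. in the is_aligned and the second align_up call in arguments.cpp). A minimal, self-contained sketch of helpers with this shape follows; the names mirror the patch, but the bodies, the power-of-two restriction and the sketch_ prefix are illustrative assumptions, not HotSpot's actual utilities/align.hpp.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-ins for the patch's align_down/align_up/is_aligned.
    // Power-of-two alignments are assumed here; the real HotSpot helpers may
    // differ in signatures and checking.
    template <typename T>
    inline T sketch_align_down(T value, uintptr_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment assumed");
      return (T)((uintptr_t)value & ~(uintptr_t)(alignment - 1));
    }

    template <typename T>
    inline T sketch_align_up(T value, uintptr_t alignment) {
      return sketch_align_down((T)((uintptr_t)value + alignment - 1), alignment);
    }

    template <typename T>
    inline bool sketch_is_aligned(T value, uintptr_t alignment) {
      return sketch_align_down(value, alignment) == value;
    }

    int main() {
      const uintptr_t page = 4096;
      printf("%zu\n", sketch_align_up((size_t)5000, page));        // prints 8192
      printf("%zu\n", sketch_align_down((size_t)5000, page));      // prints 4096
      printf("%d\n",  (int)sketch_is_aligned((size_t)8192, page)); // prints 1
      return 0;
    }
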
--- a/hotspot/src/os/posix/vm/os_posix.cpp Tue Jul 04 17:44:30 2017 +0200
+++ b/hotspot/src/os/posix/vm/os_posix.cpp Mon Apr 24 09:14:09 2017 +0200
@@ -322,7 +322,7 @@
julong lower_limit = min_allocation_size;
while ((upper_limit - lower_limit) > min_allocation_size) {
julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
- temp_limit = align_down_(temp_limit, min_allocation_size);
+ temp_limit = align_down(temp_limit, min_allocation_size);
if (is_allocatable(temp_limit)) {
lower_limit = temp_limit;
} else {
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Tue Jul 04 17:44:30 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Mon Apr 24 09:14:09 2017 +0200
@@ -372,7 +372,7 @@
size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
- return align_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
+ return align_up(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
}
// If could fit into free regions w/o expansion, try.
--- a/hotspot/src/share/vm/gc/parallel/adjoiningGenerations.cpp Tue Jul 04 17:44:30 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/adjoiningGenerations.cpp Mon Apr 24 09:14:09 2017 +0200
@@ -161,7 +161,7 @@
const size_t alignment = virtual_spaces()->alignment();
size_t change_in_bytes = MIN3(young_gen_available,
old_gen_available,
- align_up_(expand_in_bytes, alignment));
+ align_up(expand_in_bytes, alignment));
if (change_in_bytes == 0) {
return;
@@ -203,7 +203,7 @@
const size_t alignment = virtual_spaces()->alignment();
size_t change_in_bytes = MIN3(young_gen_available,
old_gen_available,
- align_up_(expand_in_bytes, alignment));
+ align_up(expand_in_bytes, alignment));
if (change_in_bytes == 0) {
return false;
--- a/hotspot/src/share/vm/memory/padded.inline.hpp Tue Jul 04 17:44:30 2017 +0200
+++ b/hotspot/src/share/vm/memory/padded.inline.hpp Mon Apr 24 09:14:09 2017 +0200
@@ -54,9 +54,9 @@
template <class T, MEMFLAGS flags, size_t alignment>
T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
// Calculate and align the size of the first dimension's table.
- size_t table_size = align_up_(rows * sizeof(T*), alignment);
+ size_t table_size = align_up(rows * sizeof(T*), alignment);
// The size of the separate rows.
- size_t row_size = align_up_(columns * sizeof(T), alignment);
+ size_t row_size = align_up(columns * sizeof(T), alignment);
// Total size consists of the indirection table plus the rows.
size_t total_size = table_size + rows * row_size + alignment;
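
As an aside on the padded.inline.hpp hunk above: both the indirection table and each row are rounded up to the padding alignment, and one extra alignment's worth of slack is added (presumably so the start of the raw allocation can itself be aligned afterwards). A worked sketch of that size computation with assumed numbers (T = double, 64-byte cache lines, 8-byte pointers) follows; the toy_align_up helper and the constants are illustrative only, not the HotSpot sources.

    #include <cstddef>
    #include <cstdio>

    // Toy round-up helper, power-of-two alignment assumed (illustrative only).
    static size_t toy_align_up(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t alignment = 64;   // assumed cache-line padding
      const size_t rows = 5, columns = 10;

      size_t table_size = toy_align_up(rows * sizeof(double*), alignment);   // 40 -> 64
      size_t row_size   = toy_align_up(columns * sizeof(double), alignment); // 80 -> 128
      // Extra 'alignment' bytes of slack so the block's start can be aligned
      // after a plain allocation, mirroring total_size in the hunk above.
      size_t total_size = table_size + rows * row_size + alignment;

      printf("table=%zu row=%zu total=%zu\n", table_size, row_size, total_size);
      return 0;
    }
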
--- a/hotspot/src/share/vm/runtime/arguments.cpp Tue Jul 04 17:44:30 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Mon Apr 24 09:14:09 2017 +0200
@@ -1681,8 +1681,8 @@
// keeping alignment constraints of the heap. To guarantee the latter, as the
// NULL page is located before the heap, we pad the NULL page to the conservative
// maximum alignment that the GC may ever impose upon the heap.
- size_t displacement_due_to_null_page = align_up_(os::vm_page_size(),
- _conservative_max_heap_alignment);
+ size_t displacement_due_to_null_page = align_up((size_t)os::vm_page_size(),
+ _conservative_max_heap_alignment);
LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
NOT_LP64(ShouldNotReachHere(); return 0);
@@ -2763,7 +2763,7 @@
const julong min_size = min_ThreadStackSize * K;
const julong max_size = max_ThreadStackSize * K;
- assert(is_aligned_(max_size, (size_t)os::vm_page_size()), "Implementation assumption");
+ assert(is_aligned(max_size, os::vm_page_size()), "Implementation assumption");
julong size = 0;
ArgsRange errcode = parse_memory_size(tail, &size, min_size, max_size);
@@ -2778,7 +2778,7 @@
}
// Internally track ThreadStackSize in units of 1024 bytes.
- const julong size_aligned = align_up_(size, K);
+ const julong size_aligned = align_up(size, K);
assert(size <= size_aligned,
"Overflow: " JULONG_FORMAT " " JULONG_FORMAT,
size, size_aligned);
@@ -2789,7 +2789,7 @@
size_in_K);
// Check that code expanding ThreadStackSize to a page aligned number of bytes won't overflow.
- const julong max_expanded = align_up_(size_in_K * K, (size_t)os::vm_page_size());
+ const julong max_expanded = align_up(size_in_K * K, os::vm_page_size());
assert(max_expanded < max_uintx && max_expanded >= size_in_K,
"Expansion overflowed: " JULONG_FORMAT " " JULONG_FORMAT,
max_expanded, size_in_K);
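
To make the ThreadStackSize arithmetic in the last hunks concrete: the parsed size is rounded up to whole 1 K units and tracked in KB, and the final assert checks that re-expanding that KB count to a page-aligned byte count stays sane. Below is a small worked sketch with assumed inputs (a 4 KiB page and a request just over 2 MiB); the toy_align_up helper, the constants and the variable names are assumptions for illustration, not HotSpot code.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    typedef uint64_t julong; // stand-in for HotSpot's julong

    // Toy round-up helper, power-of-two alignment assumed (illustrative only).
    static julong toy_align_up(julong value, julong alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const julong K = 1024;
      const julong page_size = 4096;                      // assumed os::vm_page_size()
      const julong requested_bytes = 2 * 1024 * K + 100;  // e.g. a bit over -Xss2m

      // Internally track the stack size in units of 1024 bytes, as the hunk does.
      const julong size_aligned = toy_align_up(requested_bytes, K);
      const julong size_in_K = size_aligned / K;

      // Check that expanding the KB count back to page-aligned bytes is sane.
      const julong max_expanded = toy_align_up(size_in_K * K, page_size);
      assert(max_expanded >= size_in_K * K);

      printf("requested=%llu bytes -> %llu KB -> page-aligned %llu bytes\n",
             (unsigned long long)requested_bytes,
             (unsigned long long)size_in_K,
             (unsigned long long)max_expanded);
      return 0;
    }
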