# HG changeset patch
# User stefank
# Date 1492012398 -7200
# Node ID d503911aa94801c64d0a90521b1d93ff58f6d066
# Parent  0330c5fc49ce84b784621d01eee61ce44554e394
8178489: Make align functions more type safe and consistent
Reviewed-by: mgerdin, rehn

diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -697,7 +697,7 @@
   // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
   VM_Version::_is_determine_features_test_running = true;
   // We must align the first argument to 16 bytes because of the lqarx check.
-  (*test)((address)align_size_up((intptr_t)mid_of_test_area, 16), (uint64_t)0);
+  (*test)(align_ptr_up(mid_of_test_area, 16), (uint64_t)0);
   VM_Version::_is_determine_features_test_running = false;
 
   // determine which instructions are legal.
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/os/aix/vm/misc_aix.cpp
--- a/hotspot/src/os/aix/vm/misc_aix.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/os/aix/vm/misc_aix.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -53,7 +53,7 @@
   if (!CanUseSafeFetch32()) {
     return true;
   }
-  int* const aligned = (int*) align_size_down((intptr_t)p, 4);
+  int* const aligned = (int*) align_ptr_down(p, 4);
   int cafebabe = 0xcafebabe;
   int deadbeef = 0xdeadbeef;
   return (SafeFetch32(aligned, cafebabe) != cafebabe) ||
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/os/aix/vm/os_aix.cpp
--- a/hotspot/src/os/aix/vm/os_aix.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/os/aix/vm/os_aix.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -2110,7 +2110,7 @@
   }
 
   // Handle alignment.
-  char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
+  char* const addr_aligned = align_ptr_up(addr, alignment_hint);
   const size_t waste_pre = addr_aligned - addr;
   char* const addr_aligned_end = addr_aligned + size;
   const size_t waste_post = extra_size - waste_pre - size;
@@ -2361,7 +2361,7 @@
 
   // Always round to os::vm_page_size(), which may be larger than 4K.
   size = align_size_up(size, os::vm_page_size());
-  addr = (char *)align_ptr_up(addr, os::vm_page_size());
+  addr = align_ptr_up(addr, os::vm_page_size());
 
   bool rc = false;
   bool remove_bookkeeping = false;
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/os/linux/vm/os_linux.cpp
--- a/hotspot/src/os/linux/vm/os_linux.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -3170,7 +3170,7 @@
       start = NULL;
     }
   } else {
-    char* const start_aligned = (char*) align_ptr_up(start, alignment);
+    char* const start_aligned = align_ptr_up(start, alignment);
     char* const end_aligned = start_aligned + bytes;
     char* const end = start + extra_size;
     if (start_aligned > start) {
@@ -3674,8 +3674,8 @@
   char* end = start + bytes;
 
   // Find the regions of the allocated chunk that can be promoted to large pages.
-  char* lp_start = (char*)align_ptr_up(start, large_page_size);
-  char* lp_end = (char*)align_ptr_down(end, large_page_size);
+  char* lp_start = align_ptr_up(start, large_page_size);
+  char* lp_end = align_ptr_down(end, large_page_size);
 
   size_t lp_bytes = lp_end - lp_start;
@@ -5986,7 +5986,7 @@
   for (int i = 0; i < num_sizes; i++) {
     const size_t size = sizes[i];
     for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
-      char* const req_addr = (char*) align_ptr_up(mapping1, alignment);
+      char* const req_addr = align_ptr_up(mapping1, alignment);
       char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
       test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
           size, alignment, p2i(req_addr), p2i(p),
@@ -6006,7 +6006,7 @@
   for (int i = 0; i < num_sizes; i++) {
     const size_t size = sizes[i];
     for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
-      char* const req_addr = (char*) align_ptr_up(mapping2, alignment);
+      char* const req_addr = align_ptr_up(mapping2, alignment);
       char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
       test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
           size, alignment, p2i(req_addr), p2i(p),
           ((p != NULL ? "" : "(failed)")));
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/os/posix/vm/os_posix.cpp
--- a/hotspot/src/os/posix/vm/os_posix.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/os/posix/vm/os_posix.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -158,7 +158,7 @@
   }
 
   // Do manual alignment
-  char* aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
+  char* aligned_base = align_ptr_up(extra_base, alignment);
 
   // [  |                                       |  ]
   // ^ extra_base
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/os/solaris/vm/os_solaris.cpp
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -235,7 +235,7 @@
   }
   // base may not be page aligned
  address base = current_stack_base();
-  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());;
+  address bottom = align_ptr_up(base - size, os::vm_page_size());;
   return (size_t)(base - bottom);
 }
 
@@ -1122,7 +1122,7 @@
     if (current_size == 0) current_size = 2 * K * K;
     stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
   }
-  address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
+  address bottom = align_ptr_up(base - stack_size, os::vm_page_size());;
   stack_size = (size_t)(base - bottom);
 
   assert(stack_size > 0, "Stack size calculation problem");
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/os/windows/vm/os_windows.cpp
--- a/hotspot/src/os/windows/vm/os_windows.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -2398,8 +2398,7 @@
         (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
 
       // Set memory to RWX and retry
-      address page_start =
-        (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
+      address page_start = align_ptr_down(addr, page_size);
       bool res = os::protect_memory((char*) page_start, page_size,
                                     os::MEM_PROT_RWX);
 
@@ -2833,7 +2832,7 @@
   // we still need to round up to a page boundary (in case we are using large pages)
   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
   // instead we handle this in the bytes_to_rq computation below
-  p_buf = (char *) align_size_up((size_t)p_buf, page_size);
+  p_buf = align_ptr_up(p_buf, page_size);
 
   // now go through and allocate one chunk at a time until all bytes are
   // allocated
@@ -2997,7 +2996,7 @@
       return NULL;
     }
     // Do manual alignment
-    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
+    aligned_base = align_ptr_up(extra_base, alignment);
 
     os::release_memory(extra_base, extra_size);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -730,8 +730,7 @@
         (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
 
       // Set memory to RWX and retry
-      address page_start =
-        (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
+      address page_start = align_ptr_down(addr, page_size);
       bool res = os::protect_memory((char*) page_start, page_size,
                                     os::MEM_PROT_RWX);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -546,8 +546,7 @@
         (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
 
      // Set memory to RWX and retry
-      address page_start =
-        (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
+      address page_start = align_ptr_down(addr, page_size);
       bool res = os::protect_memory((char*) page_start, page_size,
                                     os::MEM_PROT_RWX);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -647,8 +647,7 @@
         (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
 
       // Make memory rwx and retry
-      address page_start =
-        (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
+      address page_start = align_ptr_down(addr, page_size);
       bool res = os::protect_memory((char*) page_start, page_size,
                                     os::MEM_PROT_RWX);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/compiler/oopMap.cpp
--- a/hotspot/src/share/vm/compiler/oopMap.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/compiler/oopMap.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -612,7 +612,7 @@
 }
 
 int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
-  return align_size_up(sizeof(ImmutableOopMap) + map->data_size(), 8);
+  return align_size_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
 }
 
 int ImmutableOopMapBuilder::heap_size() {
@@ -668,7 +668,7 @@
 
   address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap
   new (addr) ImmutableOopMap(map);
-  return align_size_up(sizeof(ImmutableOopMap) + map->data_size(), 8);
+  return size_for(map);
 }
 
 void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -513,7 +513,7 @@
   // Adjust the chunk for the minimum size. This version is called in
   // most cases in CompactibleFreeListSpace methods.
   inline static size_t adjustObjectSize(size_t size) {
-    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
+    return align_object_size(MAX2(size, (size_t)MinChunkSize));
   }
   // This is a virtual version of adjustObjectSize() that is called
   // only occasionally when the compaction space changes and the type
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -3219,9 +3219,7 @@
     if (sp->used_region().contains(_restart_addr)) {
       // Align down to a card boundary for the start of 0th task
      // for this space.
-      aligned_start =
-        (HeapWord*)align_size_down((uintptr_t)_restart_addr,
-                                   CardTableModRefBS::card_size);
+      aligned_start = align_ptr_down(_restart_addr, CardTableModRefBS::card_size);
     }
 
     size_t chunk_size = sp->marking_task_size();
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp
--- a/hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -287,7 +287,7 @@
   // Determine how far we are from the next card boundary. If it is smaller than
   // the minimum object size we can allocate into, expand into the next card.
   HeapWord* top = cur->top();
-  HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, BOTConstants::N_bytes);
+  HeapWord* aligned_top = align_ptr_up(top, BOTConstants::N_bytes);
 
   size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/g1/g1Allocator.cpp
--- a/hotspot/src/share/vm/gc/g1/g1Allocator.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1Allocator.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -436,7 +436,7 @@
   // If an end alignment was requested, insert filler objects.
   if (end_alignment_in_bytes != 0) {
     HeapWord* currtop = _allocation_region->top();
-    HeapWord* newtop = (HeapWord*)align_ptr_up(currtop, end_alignment_in_bytes);
+    HeapWord* newtop = align_ptr_up(currtop, end_alignment_in_bytes);
     size_t fill_size = pointer_delta(newtop, currtop);
     if (fill_size != 0) {
       if (fill_size < CollectedHeap::min_fill_size()) {
@@ -445,8 +445,8 @@
         // region boundary because the max supported alignment is smaller than the min
         // region size, and because the allocation code never leaves space smaller than
         // the min_fill_size at the top of the current allocation region.
-        newtop = (HeapWord*)align_ptr_up(currtop + CollectedHeap::min_fill_size(),
-                                         end_alignment_in_bytes);
+        newtop = align_ptr_up(currtop + CollectedHeap::min_fill_size(),
+                              end_alignment_in_bytes);
         fill_size = pointer_delta(newtop, currtop);
       }
       HeapWord* fill = archive_mem_allocate(fill_size);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp
--- a/hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -131,7 +131,7 @@
 
   void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
     BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for((HeapWord*)align_ptr_up(end, CardTableModRefBS::card_size));
+    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_ptr_up(end, CardTableModRefBS::card_size));
 
     _card_bm.clear_range(start_idx, end_idx);
   }
@@ -139,7 +139,7 @@
   // Mark the card liveness bitmap for the object spanning from start to end.
   void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
     BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for((HeapWord*)align_ptr_up(end, CardTableModRefBS::card_size));
+    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_ptr_up(end, CardTableModRefBS::card_size));
 
     assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -71,8 +71,7 @@
 HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                  const HeapWord* limit) const {
   // First we must round addr *up* to a possible object boundary.
-  addr = (HeapWord*)align_size_up((intptr_t)addr,
-                                  HeapWordSize << _shifter);
+  addr = align_ptr_up(addr, HeapWordSize << _shifter);
   size_t addrOffset = heapWordToOffset(addr);
   assert(limit != NULL, "limit must not be NULL");
   size_t limitOffset = heapWordToOffset(limit);
@@ -171,8 +170,8 @@
 
   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 
-  _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
-  size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
+  _max_chunk_capacity = align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
+  size_t initial_chunk_capacity = align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 
   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.cpp
--- a/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -141,7 +141,7 @@
 void G1PageBasedVirtualSpace::commit_tail() {
   vmassert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");
 
-  char* const aligned_end_address = (char*)align_ptr_down(_high_boundary, _page_size);
+  char* const aligned_end_address = align_ptr_down(_high_boundary, _page_size);
   os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable,
                             err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
                             p2i(aligned_end_address), p2i(_high_boundary), _tail_size));
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/parallel/cardTableExtension.cpp
--- a/hotspot/src/share/vm/gc/parallel/cardTableExtension.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/cardTableExtension.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -504,9 +504,7 @@
   }
 #ifdef ASSERT
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  assert(cur_committed.start() ==
-         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
-                                   os::vm_page_size()),
+  assert(cur_committed.start() == align_ptr_up(cur_committed.start(), os::vm_page_size()),
     "Starts should have proper alignment");
 #endif
 
@@ -586,8 +584,7 @@
     jbyte* new_start = byte_for(new_region.start());
     // Set the new start of the committed region
     HeapWord* new_start_aligned =
-      (HeapWord*)align_size_down((uintptr_t)new_start,
-                                 os::vm_page_size());
+      (HeapWord*)align_ptr_down(new_start, os::vm_page_size());
     MemRegion new_committed = MemRegion(new_start_aligned,
                                         _committed[changed_region].end());
     _committed[changed_region] = new_committed;
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp
--- a/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -996,8 +996,8 @@
                                              size_t survivor_limit) {
   assert(survivor_limit >= _space_alignment,
          "survivor_limit too small");
-  assert((size_t)align_size_down(survivor_limit, _space_alignment)
-         == survivor_limit, "survivor_limit not aligned");
+  assert(is_size_aligned(survivor_limit, _space_alignment),
+         "survivor_limit not aligned");
 
   // This method is called even if the tenuring threshold and survivor
   // spaces are not adjusted so that the averages are sampled above.
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/serial/defNewGeneration.cpp
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -273,9 +273,9 @@
   char *to_end = to_start + survivor_size;
 
   assert(to_end == _virtual_space.high(), "just checking");
-  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
-  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
-  assert(Space::is_aligned((HeapWord*)to_start), "checking alignment");
+  assert(Space::is_aligned(eden_start), "checking alignment");
+  assert(Space::is_aligned(from_start), "checking alignment");
+  assert(Space::is_aligned(to_start), "checking alignment");
   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp
--- a/hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -73,8 +73,8 @@
   // interface, so it is "exactly precise" (if i may be allowed the adverbial
   // redundancy for emphasis) and does not include narrow oop slots not
   // included in the original write interval.
-  HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
-  HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
+  HeapWord* aligned_start = align_ptr_down(start, HeapWordSize);
+  HeapWord* aligned_end   = align_ptr_up  (end,   HeapWordSize);
   // If compressed oops were not being used, these should already be aligned
   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
          "Expected heap word alignment of start and end");
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp
--- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -212,10 +212,8 @@
   }
   // Align the end up to a page size (starts are already aligned).
   jbyte* const new_end = byte_after(new_region.last());
-  HeapWord* new_end_aligned =
-    (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
-  assert(new_end_aligned >= (HeapWord*) new_end,
-         "align up, but less");
+  HeapWord* new_end_aligned = (HeapWord*) align_ptr_up(new_end, _page_size);
+  assert((void*)new_end_aligned >= (void*) new_end, "align up, but less");
   // Check the other regions (excludes "ind") to ensure that
   // the new_end_aligned does not intrude onto the committed
   // space of another region.
@@ -370,8 +368,8 @@
 
 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
-  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
-  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
+  assert(align_ptr_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert(align_ptr_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   jbyte* cur  = byte_for(mr.start());
   jbyte* last = byte_after(mr.last());
   while (cur < last) {
@@ -381,8 +379,8 @@
 }
 
 void CardTableModRefBS::invalidate(MemRegion mr) {
-  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
-  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
+  assert(align_ptr_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert(align_ptr_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   for (int i = 0; i < _cur_covered_regions; i++) {
     MemRegion mri = mr.intersection(_covered[i]);
     if (!mri.is_empty()) dirty_MemRegion(mri);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp
--- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -290,7 +290,9 @@
   // Mapping from card marking array entry to address of first word
   HeapWord* addr_for(const jbyte* p) const {
     assert(p >= _byte_map && p < _byte_map + _byte_map_size,
-           "out of bounds access to card marking array");
+           "out of bounds access to card marking array. p: " PTR_FORMAT
+           " _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT,
+           p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size));
     size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
     HeapWord* result = (HeapWord*) (delta << card_shift);
     assert(_whole_heap.contains(result),
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/shared/collectedHeap.cpp
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -445,7 +445,7 @@
 }
 
 size_t CollectedHeap::filler_array_hdr_size() {
-  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
+  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
 }
 
 size_t CollectedHeap::filler_array_min_size() {
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/shared/collectedHeap.inline.hpp
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.inline.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.inline.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -273,7 +273,7 @@
   assert(is_size_aligned(alignment_in_bytes, HeapWordSize),
          "Alignment size %u is incorrect.", alignment_in_bytes);
 
-  HeapWord* new_addr = (HeapWord*) align_ptr_up(addr, alignment_in_bytes);
+  HeapWord* new_addr = align_ptr_up(addr, alignment_in_bytes);
   size_t padding = pointer_delta(new_addr, addr);
 
   if (padding == 0) {
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/shared/collectorPolicy.cpp
--- a/hotspot/src/share/vm/gc/shared/collectorPolicy.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/collectorPolicy.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -311,7 +311,7 @@
   // Now take the actual NewSize into account. We will silently increase NewSize
   // if the user specified a smaller or unaligned value.
   size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
-  bounded_new_size = MAX2(smallest_new_size, (size_t)align_size_down(bounded_new_size, _gen_alignment));
+  bounded_new_size = MAX2(smallest_new_size, align_size_down(bounded_new_size, _gen_alignment));
   if (bounded_new_size != NewSize) {
     FLAG_SET_ERGO(size_t, NewSize, bounded_new_size);
   }
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/shared/plab.cpp
--- a/hotspot/src/share/vm/gc/shared/plab.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/plab.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -136,7 +136,7 @@
 
 // Calculates plab size for current number of gc worker threads.
 size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
-  return (size_t)align_object_size(MIN2(MAX2(min_size(), _desired_net_plab_sz / no_of_gc_workers), max_size()));
+  return align_object_size(MIN2(MAX2(min_size(), _desired_net_plab_sz / no_of_gc_workers), max_size()));
 }
 
 // Compute desired plab size for one gc worker thread and latch result for later
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/shared/space.cpp
--- a/hotspot/src/share/vm/gc/shared/space.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/space.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -689,7 +689,7 @@
   size = align_object_size(size);
 
   const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
-  if (size >= (size_t)align_object_size(array_header_size)) {
+  if (size >= align_object_size(array_header_size)) {
     size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
     // allocate uninitialized int array
     typeArrayOop t = (typeArrayOop) allocate(size);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/gc/shared/space.hpp
--- a/hotspot/src/share/vm/gc/shared/space.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/space.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -154,7 +154,7 @@
 
   // Test whether p is double-aligned
   static bool is_aligned(void* p) {
-    return ((intptr_t)p & (sizeof(double)-1)) == 0;
+    return is_ptr_aligned(p, sizeof(double));
   }
 
   // Size computations. Sizes are in bytes.
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/memory/metaspace.cpp
--- a/hotspot/src/share/vm/memory/metaspace.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -497,7 +497,7 @@
     // memory addresses don't conflict.
     if (DumpSharedSpaces) {
       bool large_pages = false; // No large pages when dumping the CDS archive.
-      char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
+      char* shared_base = align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 
       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
       if (_rs.is_reserved()) {
@@ -3130,7 +3130,7 @@
       // Aix: Search for a place where we can find memory. If we need to load
      // the base, 4G alignment is helpful, too.
       size_t increment = AARCH64_ONLY(4*)G;
-      for (char *a = (char*)align_ptr_up(requested_addr, increment);
+      for (char *a = align_ptr_up(requested_addr, increment);
            a < (char*)(1024*G);
            a += increment) {
         if (a == (char *)(32*G)) {
@@ -3355,7 +3355,7 @@
 #ifdef _LP64
   if (using_class_space()) {
     char* cds_end = (char*)(cds_address + cds_total);
-    cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
+    cds_end = align_ptr_up(cds_end, _reserve_alignment);
     // If UseCompressedClassPointers is set then allocate the metaspace area
     // above the heap and above the CDS area (if it exists).
     allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/memory/metaspaceShared.cpp
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -84,7 +84,7 @@
   assert(DumpSharedSpaces, "dump time only");
   size_t alignment = sizeof(char*);
   num_bytes = align_size_up(num_bytes, alignment);
-  _alloc_top = (char*)align_ptr_up(_alloc_top, alignment);
+  _alloc_top = align_ptr_up(_alloc_top, alignment);
   if (_alloc_top + num_bytes > _vs.high()) {
     report_out_of_shared_space(_space_type);
   }
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/memory/universe.cpp
--- a/hotspot/src/share/vm/memory/universe.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/memory/universe.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -552,7 +552,7 @@
 
 
 bool Universe::on_page_boundary(void* addr) {
-  return ((uintptr_t) addr) % os::vm_page_size() == 0;
+  return is_ptr_aligned(addr, os::vm_page_size());
 }
 
 
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/memory/virtualspace.cpp
--- a/hotspot/src/share/vm/memory/virtualspace.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/memory/virtualspace.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -492,8 +492,8 @@
 
   if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
     // Calc address range within we try to attach (range of possible start addresses).
-    char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
-    char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
+    char* const highest_start = align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
+    char* const lowest_start  = align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
     try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                       aligned_heap_base_min_address, (char *)UnscaledOopHeapMax,
                       size, alignment, large);
   }
@@ -517,7 +517,7 @@
       (_base + size > zerobased_max))) {  // Unscaled delivered an arbitrary address.
 
     // Calc address range within we try to attach (range of possible start addresses).
-    char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
+    char *const highest_start = align_ptr_down(zerobased_max - size, attach_point_alignment);
     // Need to be careful about size being guaranteed to be less
     // than UnscaledOopHeapMax due to type constraints.
     char *lowest_start = aligned_heap_base_min_address;
@@ -525,7 +525,7 @@
     if (unscaled_end < UnscaledOopHeapMax) {  // unscaled_end wrapped if size is large
       lowest_start = MAX2(lowest_start, (char*)unscaled_end);
     }
-    lowest_start = (char *)align_ptr_up(lowest_start, attach_point_alignment);
+    lowest_start = align_ptr_up(lowest_start, attach_point_alignment);
     try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                       aligned_heap_base_min_address, zerobased_max,
                       size, alignment, large);
   }
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/oops/array.hpp
--- a/hotspot/src/share/vm/oops/array.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/oops/array.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -78,7 +78,7 @@
 
     int length = (int)elements;
 
-    assert((size_t)size(length) * BytesPerWord == bytes,
+    assert((size_t)size(length) * BytesPerWord == (size_t)bytes,
            "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT,
           bytes, (size_t)size(length) * BytesPerWord);
 
@@ -122,7 +122,12 @@
   void release_at_put(int which, T contents) { OrderAccess::release_store(adr_at(which), contents); }
 
   static int size(int length) {
-    return align_size_up(byte_sizeof(length), BytesPerWord) / BytesPerWord;
+    size_t bytes = align_size_up(byte_sizeof(length), BytesPerWord);
+    size_t words = bytes / BytesPerWord;
+
+    assert(words <= INT_MAX, "Overflow: " SIZE_FORMAT, words);
+
+    return (int)words;
   }
 
   int size() {
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/oops/constMethod.hpp
--- a/hotspot/src/share/vm/oops/constMethod.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/oops/constMethod.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -360,7 +360,7 @@
 
   // Sizing
   static int header_size() {
-    return align_size_up(sizeof(ConstMethod), wordSize) / wordSize;
+    return align_size_up((int)sizeof(ConstMethod), wordSize) / wordSize;
   }
 
   // Size needed
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/oops/constantPool.hpp
--- a/hotspot/src/share/vm/oops/constantPool.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPool.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -756,7 +756,7 @@
 
   // Sizing (in words)
   static int header_size() {
-    return align_size_up(sizeof(ConstantPool), wordSize) / wordSize;
+    return align_size_up((int)sizeof(ConstantPool), wordSize) / wordSize;
   }
   static int size(int length) { return align_metadata_size(header_size() + length); }
   int size() const { return size(length()); }
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/oops/cpCache.hpp
--- a/hotspot/src/share/vm/oops/cpCache.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCache.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -362,7 +362,7 @@
 
   // Code generation support
   static WordSize size() {
-    return in_WordSize(align_size_up(sizeof(ConstantPoolCacheEntry), wordSize) / wordSize);
+    return in_WordSize(align_size_up((int)sizeof(ConstantPoolCacheEntry), wordSize) / wordSize);
   }
   static ByteSize size_in_bytes()  { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
   static ByteSize indices_offset() { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/oops/instanceKlass.hpp
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -100,7 +100,7 @@
 
   // sizeof(OopMapBlock) in words.
   static const int size_in_words() {
-    return align_size_up(int(sizeof(OopMapBlock)), wordSize) >>
+    return align_size_up((int)sizeof(OopMapBlock), wordSize) >>
       LogBytesPerWord;
   }
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/oops/method.hpp
--- a/hotspot/src/share/vm/oops/method.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -667,7 +667,7 @@
 
   // sizing
   static int header_size() {
-    return align_size_up(sizeof(Method), wordSize) / wordSize;
+    return align_size_up((int)sizeof(Method), wordSize) / wordSize;
   }
   static int size(bool is_native);
   int size() const { return method_size(); }
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/oops/methodCounters.hpp
--- a/hotspot/src/share/vm/oops/methodCounters.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/oops/methodCounters.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -118,7 +118,7 @@
   AOT_ONLY(Method* method() const { return _method; })
 
   static int size() {
-    return align_size_up(sizeof(MethodCounters), wordSize) / wordSize;
+    return align_size_up((int)sizeof(MethodCounters), wordSize) / wordSize;
   }
 
   void clear_counters();
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/opto/compile.cpp
--- a/hotspot/src/share/vm/opto/compile.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -3903,7 +3903,7 @@
   // Align size up to the next section start (which is insts; see
   // CodeBuffer::align_at_start).
   assert(_size == -1, "already set?");
-  _size = align_size_up(offset, CodeEntryAlignment);
+  _size = align_size_up(offset, (int)CodeEntryAlignment);
 }
 
 void Compile::ConstantTable::emit(CodeBuffer& cb) {
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/prims/jvm.cpp
--- a/hotspot/src/share/vm/prims/jvm.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvm.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -663,7 +663,7 @@
   // variants, and of the code generated by the inline_native_clone intrinsic.
   assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
   Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj_oop,
-                               (size_t)align_object_size(size) / HeapWordsPerLong);
+                               align_object_size(size) / HeapWordsPerLong);
 
   // Clear the header
   new_obj_oop->init_mark();
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/runtime/atomic.hpp
--- a/hotspot/src/share/vm/runtime/atomic.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/runtime/atomic.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -153,7 +153,7 @@
                                          jbyte compare_value, cmpxchg_memory_order order) {
   STATIC_ASSERT(sizeof(jbyte) == 1);
   volatile jint* dest_int =
-      static_cast<volatile jint*>(align_ptr_down(dest, sizeof(jint)));
+      reinterpret_cast<volatile jint*>(align_ptr_down(dest, sizeof(jint)));
   size_t offset = pointer_delta(dest, dest_int, 1);
   jint cur = *dest_int;
   jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -607,7 +607,7 @@
 }
 
 Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
-  size_t aligned_max = (size_t)align_size_down(max_uintx/2, Metaspace::reserve_alignment_words());
+  size_t aligned_max = align_size_down(max_uintx/2, Metaspace::reserve_alignment_words());
   if (value > aligned_max) {
     CommandLineError::print(verbose,
                             "InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be "
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/runtime/synchronizer.cpp
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -1186,7 +1186,7 @@
     void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                       mtInternal);
     temp = (PaddedEnd<ObjectMonitor> *)
-             align_size_up((intptr_t)real_malloc_addr,
-                           DEFAULT_CACHE_LINE_SIZE);
+             align_ptr_up(real_malloc_addr,
+                          DEFAULT_CACHE_LINE_SIZE);
 
     // NOTE: (almost) no way to recover if allocation failed.
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/runtime/thread.cpp
--- a/hotspot/src/share/vm/runtime/thread.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -172,7 +172,7 @@
     void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
                                         : AllocateHeap(aligned_size, flags, CURRENT_PC,
                                                        AllocFailStrategy::RETURN_NULL);
-    void* aligned_addr = (void*) align_size_up((intptr_t) real_malloc_addr, alignment);
+    void* aligned_addr = align_ptr_up(real_malloc_addr, alignment);
     assert(((uintptr_t) aligned_addr + (uintptr_t) size) <=
            ((uintptr_t) real_malloc_addr + (uintptr_t) aligned_size),
            "JavaThread alignment code overflowed allocated storage");
@@ -286,7 +286,7 @@
   if (UseBiasedLocking) {
     assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
     assert(this == _real_malloc_address ||
-           this == (void*) align_size_up((intptr_t) _real_malloc_address, markOopDesc::biased_lock_alignment),
+           this == align_ptr_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
            "bug in forced alignment of thread objects");
   }
 #endif // ASSERT
diff -r 0330c5fc49ce -r d503911aa948 hotspot/src/share/vm/utilities/globalDefinitions.hpp
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Wed Apr 12 17:53:18 2017 +0200
@@ -518,51 +518,63 @@
 
 #define align_size_up_(size, alignment) (align_size_down_((size) + align_mask(alignment), (alignment)))
 
-inline bool is_size_aligned(size_t size, size_t alignment) {
-  return align_size_up_(size, alignment) == size;
-}
+#define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
 
-inline bool is_ptr_aligned(const void* ptr, size_t alignment) {
-  return align_size_up_((intptr_t)ptr, (intptr_t)alignment) == (intptr_t)ptr;
-}
+// Helpers to align sizes and check for alignment
 
-inline intptr_t align_size_up(intptr_t size, intptr_t alignment) {
+template <typename T, typename A>
+inline T align_size_up(T size, A alignment) {
   return align_size_up_(size, alignment);
 }
 
-inline intptr_t align_size_down(intptr_t size, intptr_t alignment) {
+template <typename T, typename A>
+inline T align_size_down(T size, A alignment) {
   return align_size_down_(size, alignment);
 }
 
-#define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
+template <typename T, typename A>
+inline bool is_size_aligned(T size, A alignment) {
+  return is_size_aligned_(size, alignment);
+}
 
-inline void* align_ptr_up(const void* ptr, size_t alignment) {
-  return (void*)align_size_up((intptr_t)ptr, (intptr_t)alignment);
+// Align down with a lower bound. If the aligning results in 0, return 'alignment'.
+template <typename T, typename A>
+inline T align_size_down_bounded(T size, A alignment) {
+  A aligned_size = align_size_down(size, alignment);
+  return aligned_size > 0 ? aligned_size : alignment;
 }
 
-inline void* align_ptr_down(void* ptr, size_t alignment) {
-  return (void*)align_size_down((intptr_t)ptr, (intptr_t)alignment);
+// Helpers to align pointers and check for alignment.
+
+template <typename T, typename A>
+inline T* align_ptr_up(T* ptr, A alignment) {
+  return (T*)align_size_up((uintptr_t)ptr, alignment);
 }
 
-inline volatile void* align_ptr_down(volatile void* ptr, size_t alignment) {
-  return (volatile void*)align_size_down((intptr_t)ptr, (intptr_t)alignment);
+template <typename T, typename A>
+inline T* align_ptr_down(T* ptr, A alignment) {
+  return (T*)align_size_down((uintptr_t)ptr, alignment);
+}
+
+template <typename T, typename A>
+inline bool is_ptr_aligned(T* ptr, A alignment) {
+  return is_size_aligned((uintptr_t)ptr, alignment);
 }
 
 // Align metaspace objects by rounding up to natural word boundary
-
-inline intptr_t align_metadata_size(intptr_t size) {
+template <typename T>
+inline T align_metadata_size(T size) {
   return align_size_up(size, 1);
 }
 
 // Align objects in the Java Heap by rounding up their size, in HeapWord units.
-// Since the size is given in words this is somewhat of a nop, but
-// distinguishes it from align_object_size.
-inline intptr_t align_object_size(intptr_t size) {
-  return align_size_up(size, MinObjAlignment);
+template <typename T>
+inline T align_object_size(T word_size) {
+  return align_size_up(word_size, MinObjAlignment);
 }
 
-inline bool is_object_aligned(intptr_t addr) {
-  return addr == align_object_size(addr);
+inline bool is_object_aligned(size_t word_size) {
+  return is_size_aligned(word_size, MinObjAlignment);
 }
 
 inline bool is_ptr_object_aligned(const void* addr) {
@@ -570,32 +582,26 @@
 }
 
 // Pad out certain offsets to jlong alignment, in HeapWord units.
-
-inline intptr_t align_object_offset(intptr_t offset) {
+template <typename T>
+inline T align_object_offset(T offset) {
   return align_size_up(offset, HeapWordsPerLong);
 }
 
-// Align down with a lower bound. If the aligning results in 0, return 'alignment'.
-
-inline size_t align_size_down_bounded(size_t size, size_t alignment) {
-  size_t aligned_size = align_size_down_(size, alignment);
-  return aligned_size > 0 ? aligned_size : alignment;
-}
-
 // Clamp an address to be within a specific page
 // 1. If addr is on the page it is returned as is
 // 2. If addr is above the page_address the start of the *next* page will be returned
 // 3. Otherwise, if addr is below the page_address the start of the page will be returned
-inline address clamp_address_in_page(address addr, address page_address, intptr_t page_size) {
-  if (align_size_down(intptr_t(addr), page_size) == align_size_down(intptr_t(page_address), page_size)) {
+template <typename T>
+inline T* clamp_address_in_page(T* addr, T* page_address, size_t page_size) {
+  if (align_ptr_down(addr, page_size) == align_ptr_down(page_address, page_size)) {
     // address is in the specified page, just return it as is
     return addr;
   } else if (addr > page_address) {
     // address is above specified page, return start of next page
-    return (address)align_size_down(intptr_t(page_address), page_size) + page_size;
+    return align_ptr_down(page_address, page_size) + page_size;
   } else {
     // address is below specified page, return start of page
-    return (address)align_size_down(intptr_t(page_address), page_size);
+    return align_ptr_down(page_address, page_size);
   }
 }
diff -r 0330c5fc49ce -r d503911aa948 hotspot/test/native/utilities/test_align.cpp
--- a/hotspot/test/native/utilities/test_align.cpp Wed Apr 12 13:05:59 2017 +0200
+++ b/hotspot/test/native/utilities/test_align.cpp Wed Apr 12 17:53:18 2017 +0200
@@ -45,8 +45,6 @@
       std::numeric_limits<T>::is_signed ? 's' : 'u', sizeof(T), (uint64_t)std::numeric_limits<T>::max(),
       std::numeric_limits<A>::is_signed ? 's' : 'u', sizeof(A), (uint64_t)std::numeric_limits<A>::max());
 
-  ASSERT_LE((uint64_t)std::numeric_limits<T>::max(), (uint64_t)std::numeric_limits<intptr_t>::max()) << "The test assumes that casting to intptr_t will not truncate bits";
-
   // Test all possible alignment values that fit in type A.
   for (A alignment = max_alignment<A>(); alignment > 0; alignment >>= 1) {
     log("=== Alignment: " UINT64_FORMAT " ===\n", (uint64_t)alignment);
@@ -62,11 +60,11 @@
       T value = T(values[i]);
 
       // Check against uint64_t version
-      ASSERT_EQ(align_size_up(value, alignment), (intptr_t)up);
+      ASSERT_EQ(align_size_up((uint64_t)value, alignment), up);
       // Check inline function vs macro
-      ASSERT_EQ(align_size_up(value, alignment), (intptr_t)align_size_up_(value, alignment));
+      ASSERT_EQ(align_size_up(value, alignment), align_size_up_(value, alignment));
       // Sanity check
-      ASSERT_GE(align_size_up(value, alignment), (intptr_t)value);
+      ASSERT_GE(align_size_up(value, alignment), value);
     }
 
     // Test align down
@@ -77,11 +75,11 @@
       T value = T(values[i]);
 
      // Check against uint64_t version
-      ASSERT_EQ(align_size_down(value, alignment), (intptr_t)down);
+      ASSERT_EQ((uint64_t)align_size_down(value, alignment), down);
       // Check inline function vs macro
-      ASSERT_EQ(align_size_down(value, alignment), (intptr_t)align_size_down_(value, alignment));
+      ASSERT_EQ(align_size_down(value, alignment), align_size_down_(value, alignment));
       // Sanity check
-      ASSERT_LE(align_size_down(value, alignment), (intptr_t)value);
+      ASSERT_LE(align_size_down(value, alignment), value);
     }
 
     // Test is aligned
@@ -103,10 +101,6 @@
 TEST(Align, functions_and_macros) {
   // Test the alignment functions with different type combinations.
 
-  // The current implementation of the alignment functions use intptr_t
-  // as return and input parameter type. Therefore, we restrict the tested
-  // types on 32-bit platforms.
-#ifdef _LP64
   test_alignments();
   test_alignments();
   test_alignments();
@@ -121,7 +115,6 @@
   test_alignments();
   test_alignments();
   test_alignments();
-#endif
 
   test_alignments();
   test_alignments();
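
For reference, the following is a minimal standalone sketch (not part of the changeset; the file name and the main() usage are hypothetical) of the pattern this patch introduces in globalDefinitions.hpp: align_size_up now returns its input type T instead of intptr_t, and align_ptr_up returns T* instead of void*, which is why the (char*), (address) and (HeapWord*) casts are deleted throughout the hunks above. The helper definitions below are simplified restatements of the patched code, not the full HotSpot headers.

// align_sketch.cpp -- hypothetical demo; compile with: g++ align_sketch.cpp
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

// Simplified restatements of the patched macros and templates.
#define align_mask(alignment) ((alignment) - 1)
#define align_size_down_(size, alignment) ((size) & ~align_mask(alignment))
#define align_size_up_(size, alignment) \
  (align_size_down_((size) + align_mask(alignment), (alignment)))

template <typename T, typename A>
inline T align_size_up(T size, A alignment) {
  return align_size_up_(size, alignment);  // result keeps the type of 'size'
}

template <typename T, typename A>
inline T* align_ptr_up(T* ptr, A alignment) {
  // The pointer keeps its pointee type, so call sites need no cast.
  return (T*)align_size_up((uintptr_t)ptr, alignment);
}

int main() {
  size_t sz = align_size_up((size_t)1000, 256);  // size_t in, size_t out: no cast
  assert(sz == 1024);

  char buf[64];
  char* p = align_ptr_up(buf + 1, 16);           // char* in, char* out: no cast
  assert(((uintptr_t)p & 15) == 0);
  return 0;
}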