8178499: Remove _ptr_ and _size_ infixes from align functions
author stefank
Tue, 04 Jul 2017 15:58:10 +0200
changeset 46619 a3919f5e8d2b
parent 46618 d503911aa948
child 46620 750c6edff33b
8178499: Remove _ptr_ and _size_ infixes from align functions Reviewed-by: rehn, tschatzl
hotspot/src/cpu/ppc/vm/c1_MacroAssembler_ppc.cpp
hotspot/src/cpu/ppc/vm/c1_Runtime1_ppc.cpp
hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp
hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp
hotspot/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp
hotspot/src/cpu/x86/vm/bytes_x86.hpp
hotspot/src/cpu/zero/vm/interpreterFrame_zero.hpp
hotspot/src/cpu/zero/vm/interpreterRT_zero.cpp
hotspot/src/cpu/zero/vm/stack_zero.cpp
hotspot/src/cpu/zero/vm/stack_zero.hpp
hotspot/src/os/aix/vm/misc_aix.cpp
hotspot/src/os/aix/vm/os_aix.cpp
hotspot/src/os/aix/vm/porting_aix.cpp
hotspot/src/os/bsd/vm/os_bsd.cpp
hotspot/src/os/linux/vm/os_linux.cpp
hotspot/src/os/posix/vm/os_posix.cpp
hotspot/src/os/solaris/vm/os_solaris.cpp
hotspot/src/os/windows/vm/os_windows.cpp
hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp
hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp
hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp
hotspot/src/share/vm/asm/codeBuffer.cpp
hotspot/src/share/vm/asm/codeBuffer.hpp
hotspot/src/share/vm/classfile/classFileParser.cpp
hotspot/src/share/vm/classfile/javaClasses.cpp
hotspot/src/share/vm/code/codeCache.cpp
hotspot/src/share/vm/compiler/oopMap.cpp
hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp
hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp
hotspot/src/share/vm/gc/g1/g1Allocator.cpp
hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp
hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.cpp
hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.hpp
hotspot/src/share/vm/gc/g1/g1RegionToSpaceMapper.cpp
hotspot/src/share/vm/gc/g1/g1RemSet.cpp
hotspot/src/share/vm/gc/g1/ptrQueue.hpp
hotspot/src/share/vm/gc/g1/sparsePRT.hpp
hotspot/src/share/vm/gc/parallel/adjoiningGenerations.cpp
hotspot/src/share/vm/gc/parallel/asPSOldGen.cpp
hotspot/src/share/vm/gc/parallel/asPSYoungGen.cpp
hotspot/src/share/vm/gc/parallel/cardTableExtension.cpp
hotspot/src/share/vm/gc/parallel/mutableSpace.cpp
hotspot/src/share/vm/gc/parallel/objectStartArray.cpp
hotspot/src/share/vm/gc/parallel/parMarkBitMap.cpp
hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp
hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.hpp
hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp
hotspot/src/share/vm/gc/parallel/psOldGen.cpp
hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp
hotspot/src/share/vm/gc/parallel/psParallelCompact.inline.hpp
hotspot/src/share/vm/gc/parallel/psPromotionLAB.hpp
hotspot/src/share/vm/gc/parallel/psPromotionLAB.inline.hpp
hotspot/src/share/vm/gc/parallel/psYoungGen.cpp
hotspot/src/share/vm/gc/serial/defNewGeneration.cpp
hotspot/src/share/vm/gc/serial/defNewGeneration.hpp
hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp
hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp
hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp
hotspot/src/share/vm/gc/shared/collectedHeap.cpp
hotspot/src/share/vm/gc/shared/collectedHeap.inline.hpp
hotspot/src/share/vm/gc/shared/collectorPolicy.cpp
hotspot/src/share/vm/gc/shared/generationSpec.hpp
hotspot/src/share/vm/gc/shared/space.cpp
hotspot/src/share/vm/gc/shared/space.hpp
hotspot/src/share/vm/memory/allocation.inline.hpp
hotspot/src/share/vm/memory/filemap.cpp
hotspot/src/share/vm/memory/filemap.hpp
hotspot/src/share/vm/memory/heap.cpp
hotspot/src/share/vm/memory/metachunk.cpp
hotspot/src/share/vm/memory/metaspace.cpp
hotspot/src/share/vm/memory/metaspaceShared.cpp
hotspot/src/share/vm/memory/padded.hpp
hotspot/src/share/vm/memory/padded.inline.hpp
hotspot/src/share/vm/memory/universe.cpp
hotspot/src/share/vm/memory/virtualspace.cpp
hotspot/src/share/vm/oops/array.hpp
hotspot/src/share/vm/oops/arrayOop.hpp
hotspot/src/share/vm/oops/constMethod.cpp
hotspot/src/share/vm/oops/constMethod.hpp
hotspot/src/share/vm/oops/constantPool.hpp
hotspot/src/share/vm/oops/cpCache.hpp
hotspot/src/share/vm/oops/instanceKlass.hpp
hotspot/src/share/vm/oops/method.cpp
hotspot/src/share/vm/oops/method.hpp
hotspot/src/share/vm/oops/methodCounters.hpp
hotspot/src/share/vm/oops/methodData.cpp
hotspot/src/share/vm/oops/methodData.hpp
hotspot/src/share/vm/oops/objArrayOop.hpp
hotspot/src/share/vm/opto/compile.cpp
hotspot/src/share/vm/opto/memnode.cpp
hotspot/src/share/vm/prims/whitebox.cpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/atomic.hpp
hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp
hotspot/src/share/vm/runtime/jniHandles.cpp
hotspot/src/share/vm/runtime/os.cpp
hotspot/src/share/vm/runtime/perfMemory.cpp
hotspot/src/share/vm/runtime/synchronizer.cpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/thread.hpp
hotspot/src/share/vm/services/nmtCommon.hpp
hotspot/src/share/vm/utilities/bitMap.hpp
hotspot/src/share/vm/utilities/copy.cpp
hotspot/src/share/vm/utilities/globalDefinitions.hpp
hotspot/src/share/vm/utilities/stack.inline.hpp
hotspot/test/native/gc/shared/test_collectorPolicy.cpp
hotspot/test/native/memory/test_metachunk.cpp
hotspot/test/native/runtime/test_arguments.cpp
hotspot/test/native/utilities/test_align.cpp
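The changeset mechanically renames the alignment helpers (declared in hotspot/src/share/vm/utilities/globalDefinitions.hpp, which is in the file list above) and updates every call site: align_size_up and align_ptr_up become align_up, align_size_down and align_ptr_down become align_down, is_size_aligned and is_ptr_aligned become is_aligned, and the preprocessor-friendly align_size_up_ / align_size_down_ become align_up_ / align_down_. The sketch below is illustration only and is not part of the changeset; the stand-in templates assume a power-of-two alignment and only approximate the real HotSpot definitions.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Stand-ins for the renamed helpers. The real definitions live in
// globalDefinitions.hpp and use separate overloads for pointers and
// integral sizes; here one template covers both, assuming the alignment
// is a power of two.
template <typename T>
T align_up(T value, uintptr_t alignment) {        // was align_size_up / align_ptr_up
  uintptr_t v = (uintptr_t)value;
  return (T)((v + alignment - 1) & ~(alignment - 1));
}

template <typename T>
T align_down(T value, uintptr_t alignment) {      // was align_size_down / align_ptr_down
  return (T)((uintptr_t)value & ~(alignment - 1));
}

template <typename T>
bool is_aligned(T value, uintptr_t alignment) {   // was is_size_aligned / is_ptr_aligned
  return ((uintptr_t)value & (alignment - 1)) == 0;
}

int main() {
  // One overload set now serves both sizes and pointers.
  size_t bytes = align_up((size_t)1000, 4096);    // rounds up to 4096
  char buffer[8192];
  char* base = align_up(&buffer[1], 64);          // next 64-byte boundary
  assert(is_aligned(bytes, 4096));
  assert(is_aligned(base, 64));
  assert(align_down(bytes + 1, 4096) == bytes);   // rounds back down to 4096
  return 0;
}

Collapsing the pointer and size variants into one overloaded family is what lets each hunk below substitute a single name regardless of whether the argument is a size_t, an address, or a HeapWord*.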
--- a/hotspot/src/cpu/ppc/vm/c1_MacroAssembler_ppc.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/c1_MacroAssembler_ppc.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -340,7 +340,7 @@
   // Check for negative or excessive length.
   size_t max_length = max_array_allocation_length >> log2_elt_size;
   if (UseTLAB) {
-    size_t max_tlab = align_size_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
+    size_t max_tlab = align_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
     if (max_tlab < max_length) { max_length = max_tlab; }
   }
   load_const_optimized(t1, max_length);
--- a/hotspot/src/cpu/ppc/vm/c1_Runtime1_ppc.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/c1_Runtime1_ppc.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -251,7 +251,7 @@
     fpu_reg_save_offsets[i] = sp_offset;
     sp_offset += BytesPerWord;
   }
-  frame_size_in_bytes = align_size_up(sp_offset, frame::alignment_in_bytes);
+  frame_size_in_bytes = align_up(sp_offset, frame::alignment_in_bytes);
 }
 
 
@@ -275,7 +275,7 @@
 static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm, address target,
                                                             int stack_parms) {
   // Make a frame and preserve the caller's caller-save registers.
-  const int parm_size_in_bytes = align_size_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
+  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
   const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
   OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);
 
@@ -325,7 +325,7 @@
 static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register result, address target,
                                              int stack_parms, bool do_return = true) {
   // Make a frame and preserve the caller's caller-save registers.
-  const int parm_size_in_bytes = align_size_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
+  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
   const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
   OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);
 
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -626,7 +626,7 @@
           int spill_slots = 3;
           if (preserve1 != noreg) { spill_slots++; }
           if (preserve2 != noreg) { spill_slots++; }
-          const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
+          const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
           Label filtered;
 
           // Is marking active?
@@ -687,7 +687,7 @@
       case BarrierSet::G1SATBCTLogging:
         {
           int spill_slots = (preserve != noreg) ? 1 : 0;
-          const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
+          const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
 
           __ save_LR_CR(R0);
           __ push_frame(frame_size, R0);
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -697,7 +697,7 @@
   // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
   VM_Version::_is_determine_features_test_running = true;
   // We must align the first argument to 16 bytes because of the lqarx check.
-  (*test)(align_ptr_up(mid_of_test_area, 16), (uint64_t)0);
+  (*test)(align_up(mid_of_test_area, 16), (uint64_t)0);
   VM_Version::_is_determine_features_test_running = false;
 
   // determine which instructions are legal.
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -272,7 +272,7 @@
   // this should match assembler::total_frame_size_in_bytes, which
   // isn't callable from this context.  It's checked by an assert when
   // it's used though.
-  frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);
+  frame_size_in_bytes = align_up(sp_offset * wordSize, 8);
 }
 
 
--- a/hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -90,7 +90,7 @@
       }
       TRACE_jvmci_3("relocating at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset);
   }else {
-    int const_size = align_size_up(_constants->end()-_constants->start(), CodeEntryAlignment);
+    int const_size = align_up(_constants->end()-_constants->start(), CodeEntryAlignment);
     NativeMovRegMem* load = nativeMovRegMem_at(pc);
     // This offset must match with SPARCLoadConstantTableBaseOp.emitCode
     load->set_offset(- (const_size - data_offset + Assembler::min_simm13()));
--- a/hotspot/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -86,7 +86,7 @@
   void* end = static_cast<char*>(to) + size;
   if (size >= (size_t)BytesPerWord) {
     // Fill any partial word prefix.
-    uintx* aligned_to = static_cast<uintx*>(align_ptr_up(to, BytesPerWord));
+    uintx* aligned_to = static_cast<uintx*>(align_up(to, BytesPerWord));
     fill_subword(to, aligned_to, value);
 
     // Compute fill word.
@@ -97,7 +97,7 @@
     xvalue |= (xvalue << 16);
     xvalue |= (xvalue << 32);
 
-    uintx* aligned_end = static_cast<uintx*>(align_ptr_down(end, BytesPerWord));
+    uintx* aligned_end = static_cast<uintx*>(align_down(end, BytesPerWord));
     assert(aligned_to <= aligned_end, "invariant");
 
     // for ( ; aligned_to < aligned_end; ++aligned_to) {
--- a/hotspot/src/cpu/x86/vm/bytes_x86.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/bytes_x86.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -43,7 +43,7 @@
 
     T x;
 
-    if (is_ptr_aligned(p, sizeof(T))) {
+    if (is_aligned(p, sizeof(T))) {
       x = *(T*)p;
     } else {
       memcpy(&x, p, sizeof(T));
@@ -56,7 +56,7 @@
   static inline void put_native(void* p, T x) {
     assert(p != NULL, "null pointer");
 
-    if (is_ptr_aligned(p, sizeof(T))) {
+    if (is_aligned(p, sizeof(T))) {
       *(T*)p = x;
     } else {
       memcpy(p, &x, sizeof(T));
--- a/hotspot/src/cpu/zero/vm/interpreterFrame_zero.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/interpreterFrame_zero.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -57,8 +57,8 @@
  protected:
   enum Layout {
     istate_off = jf_header_words +
-      (align_size_up_(sizeof(BytecodeInterpreter),
-                      wordSize) >> LogBytesPerWord) - 1,
+      (align_up_(sizeof(BytecodeInterpreter),
+                 wordSize) >> LogBytesPerWord) - 1,
     header_words
   };
 
--- a/hotspot/src/cpu/zero/vm/interpreterRT_zero.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/interpreterRT_zero.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -148,7 +148,7 @@
   ZeroStack *stack = thread->zero_stack();
 
   int required_words =
-    (align_size_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
+    (align_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
     (method->is_static() ? 2 : 1) + method->size_of_parameters() + 1;
 
   stack->overflow_check(required_words, CHECK_NULL);
--- a/hotspot/src/cpu/zero/vm/stack_zero.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/stack_zero.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -39,7 +39,7 @@
   assert(needs_setup(), "already set up");
   int abi_available = abi_stack_available(thread);
   assert(abi_available >= 0, "available abi stack must be >= 0");
-  return align_size_down(abi_available / 2, wordSize);
+  return align_down(abi_available / 2, wordSize);
 }
 
 void ZeroStack::handle_overflow(TRAPS) {
--- a/hotspot/src/cpu/zero/vm/stack_zero.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/stack_zero.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -88,7 +88,7 @@
   }
 
   void *alloc(size_t size) {
-    int count = align_size_up(size, wordSize) >> LogBytesPerWord;
+    int count = align_up(size, wordSize) >> LogBytesPerWord;
     assert(count <= available_words(), "stack overflow");
     return _sp -= count;
   }
--- a/hotspot/src/os/aix/vm/misc_aix.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os/aix/vm/misc_aix.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -53,7 +53,7 @@
   if (!CanUseSafeFetch32()) {
     return true;
   }
-  int* const aligned = (int*) align_ptr_down(p, 4);
+  int* const aligned = (int*) align_down(p, 4);
   int cafebabe = 0xcafebabe;
   int deadbeef = 0xdeadbeef;
   return (SafeFetch32(aligned, cafebabe) != cafebabe) ||
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -1936,7 +1936,7 @@
   }
 
   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
-  const size_t size = align_size_up(bytes, 64*K);
+  const size_t size = align_up(bytes, 64*K);
 
   // Reserve the shared segment.
   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
@@ -2077,7 +2077,7 @@
   }
 
   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
-  const size_t size = align_size_up(bytes, os::vm_page_size());
+  const size_t size = align_up(bytes, os::vm_page_size());
 
   // alignment: Allocate memory large enough to include an aligned range of the right size and
   // cut off the leading and trailing waste pages.
@@ -2110,7 +2110,7 @@
   }
 
   // Handle alignment.
-  char* const addr_aligned = align_ptr_up(addr, alignment_hint);
+  char* const addr_aligned = align_up(addr, alignment_hint);
   const size_t waste_pre = addr_aligned - addr;
   char* const addr_aligned_end = addr_aligned + size;
   const size_t waste_post = extra_size - waste_pre - size;
@@ -2336,9 +2336,9 @@
   assert0(requested_addr == NULL);
 
   // Always round to os::vm_page_size(), which may be larger than 4K.
-  bytes = align_size_up(bytes, os::vm_page_size());
+  bytes = align_up(bytes, os::vm_page_size());
   const size_t alignment_hint0 =
-    alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
+    alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
 
   // In 4K mode always use mmap.
   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
@@ -2360,8 +2360,8 @@
   guarantee0(vmi);
 
   // Always round to os::vm_page_size(), which may be larger than 4K.
-  size = align_size_up(size, os::vm_page_size());
-  addr = align_ptr_up(addr, os::vm_page_size());
+  size = align_up(size, os::vm_page_size());
+  addr = align_up(addr, os::vm_page_size());
 
   bool rc = false;
   bool remove_bookkeeping = false;
@@ -2527,7 +2527,7 @@
   char* addr = NULL;
 
   // Always round to os::vm_page_size(), which may be larger than 4K.
-  bytes = align_size_up(bytes, os::vm_page_size());
+  bytes = align_up(bytes, os::vm_page_size());
 
   // In 4K mode always use mmap.
   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
@@ -4312,7 +4312,7 @@
   // We need to do this because caller code will assume stack low address is
   // page aligned and will place guard pages without checking.
   address low = bounds.base - bounds.size;
-  address low_aligned = (address)align_ptr_up(low, os::vm_page_size());
+  address low_aligned = (address)align_up(low, os::vm_page_size());
   size_t s = bounds.base - low_aligned;
   return s;
 }
--- a/hotspot/src/os/aix/vm/porting_aix.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os/aix/vm/porting_aix.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -149,7 +149,7 @@
   codeptr_t pc2 = (codeptr_t) pc;
 
   // Make sure the pointer is word aligned.
-  pc2 = (codeptr_t) align_ptr_up((char*)pc2, 4);
+  pc2 = (codeptr_t) align_up((char*)pc2, 4);
   CHECK_POINTER_READABLE(pc2)
 
   // Find start of traceback table.
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -2272,7 +2272,7 @@
 
 static bool bsd_mprotect(char* addr, size_t size, int prot) {
   // Bsd wants the mprotect address argument to be page aligned.
-  char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());
+  char* bottom = (char*)align_down((intptr_t)addr, os::Bsd::page_size());
 
   // According to SUSv3, mprotect() should only be used with mappings
   // established by mmap(), and mmap() always maps whole pages. Unaligned
@@ -2281,7 +2281,7 @@
   // caller if you hit this assert.
   assert(addr == bottom, "sanity check");
 
-  size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
+  size = align_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
   return ::mprotect(bottom, size, prot) == 0;
 }
 
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -578,7 +578,7 @@
 
   // Adjust bottom to point to the largest address within the same page, it
   // gives us a one-page buffer if alloca() allocates slightly more memory.
-  bottom = (address)align_size_down((uintptr_t)bottom, os::Linux::page_size());
+  bottom = (address)align_down((uintptr_t)bottom, os::Linux::page_size());
   bottom += os::Linux::page_size() - 1;
 
   // sp might be slightly above current stack pointer; if that's the case, we
@@ -715,7 +715,7 @@
   if (stack_size <= SIZE_MAX - guard_size) {
     stack_size += guard_size;
   }
-  assert(is_size_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");
+  assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");
 
   int status = pthread_attr_setstacksize(&attr, stack_size);
   assert_status(status == 0, status, "pthread_attr_setstacksize");
@@ -1101,7 +1101,7 @@
   }
 
   // stack_top could be partially down the page so align it
-  stack_top = align_size_up(stack_top, page_size());
+  stack_top = align_up(stack_top, page_size());
 
   // Allowed stack value is minimum of max_size and what we derived from rlimit
   if (max_size > 0) {
@@ -1111,7 +1111,7 @@
     // clamp it at 8MB as we do on Solaris
     _initial_thread_stack_size = MIN2(stack_size, 8*M);
   }
-  _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
+  _initial_thread_stack_size = align_down(_initial_thread_stack_size, page_size());
   _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
 
   assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");
@@ -3170,7 +3170,7 @@
         start = NULL;
       }
     } else {
-      char* const start_aligned = align_ptr_up(start, alignment);
+      char* const start_aligned = align_up(start, alignment);
       char* const end_aligned = start_aligned + bytes;
       char* const end = start + extra_size;
       if (start_aligned > start) {
@@ -3200,7 +3200,7 @@
 
 static bool linux_mprotect(char* addr, size_t size, int prot) {
   // Linux wants the mprotect address argument to be page aligned.
-  char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());
+  char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());
 
   // According to SUSv3, mprotect() should only be used with mappings
   // established by mmap(), and mmap() always maps whole pages. Unaligned
@@ -3209,7 +3209,7 @@
   // caller if you hit this assert.
   assert(addr == bottom, "sanity check");
 
-  size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
+  size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
   return ::mprotect(bottom, size, prot) == 0;
 }
 
@@ -3244,7 +3244,7 @@
                  MAP_ANONYMOUS|MAP_PRIVATE,
                  -1, 0);
   if (p != MAP_FAILED) {
-    void *aligned_p = align_ptr_up(p, page_size);
+    void *aligned_p = align_up(p, page_size);
 
     result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
 
@@ -3487,9 +3487,9 @@
   } while (0)
 
 static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
-  assert(is_size_aligned(bytes, alignment), "Must be divisible by the alignment");
-
-  if (!is_size_aligned(alignment, SHMLBA)) {
+  assert(is_aligned(bytes, alignment), "Must be divisible by the alignment");
+
+  if (!is_aligned(alignment, SHMLBA)) {
     assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
     return NULL;
   }
@@ -3525,7 +3525,7 @@
 }
 
 static char* shmat_at_address(int shmid, char* req_addr) {
-  if (!is_ptr_aligned(req_addr, SHMLBA)) {
+  if (!is_aligned(req_addr, SHMLBA)) {
     assert(false, "Requested address needs to be SHMLBA aligned");
     return NULL;
   }
@@ -3543,8 +3543,8 @@
 static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
   // If a req_addr has been provided, we assume that the caller has already aligned the address.
   if (req_addr != NULL) {
-    assert(is_ptr_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
-    assert(is_ptr_aligned(req_addr, alignment), "Must be divisible by given alignment");
+    assert(is_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
+    assert(is_aligned(req_addr, alignment), "Must be divisible by given alignment");
     return shmat_at_address(shmid, req_addr);
   }
 
@@ -3553,7 +3553,7 @@
   // However, if the alignment is larger than the large page size, we have
   // to manually ensure that the memory returned is 'alignment' aligned.
   if (alignment > os::large_page_size()) {
-    assert(is_size_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
+    assert(is_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
     return shmat_with_alignment(shmid, bytes, alignment);
   } else {
     return shmat_at_address(shmid, NULL);
@@ -3565,10 +3565,10 @@
   // "exec" is passed in but not used.  Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
-  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
-  assert(is_ptr_aligned(req_addr, alignment), "Unaligned address");
-
-  if (!is_size_aligned(bytes, os::large_page_size())) {
+  assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
+  assert(is_aligned(req_addr, alignment), "Unaligned address");
+
+  if (!is_aligned(bytes, os::large_page_size())) {
     return NULL; // Fallback to small pages.
   }
 
@@ -3627,8 +3627,8 @@
                                                         char* req_addr,
                                                         bool exec) {
   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
-  assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
-  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+  assert(is_aligned(bytes, os::large_page_size()), "Unaligned size");
+  assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
 
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   char* addr = (char*)::mmap(req_addr, bytes, prot,
@@ -3640,7 +3640,7 @@
     return NULL;
   }
 
-  assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
+  assert(is_aligned(addr, os::large_page_size()), "Must be");
 
   return addr;
 }
@@ -3659,8 +3659,8 @@
   size_t large_page_size = os::large_page_size();
   assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
 
-  assert(is_ptr_aligned(req_addr, alignment), "Must be");
-  assert(is_size_aligned(bytes, alignment), "Must be");
+  assert(is_aligned(req_addr, alignment), "Must be");
+  assert(is_aligned(bytes, alignment), "Must be");
 
   // First reserve - but not commit - the address range in small pages.
   char* const start = anon_mmap_aligned(bytes, alignment, req_addr);
@@ -3669,17 +3669,17 @@
     return NULL;
   }
 
-  assert(is_ptr_aligned(start, alignment), "Must be");
+  assert(is_aligned(start, alignment), "Must be");
 
   char* end = start + bytes;
 
   // Find the regions of the allocated chunk that can be promoted to large pages.
-  char* lp_start = align_ptr_up(start, large_page_size);
-  char* lp_end   = align_ptr_down(end, large_page_size);
+  char* lp_start = align_up(start, large_page_size);
+  char* lp_end   = align_down(end, large_page_size);
 
   size_t lp_bytes = lp_end - lp_start;
 
-  assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
+  assert(is_aligned(lp_bytes, large_page_size), "Must be");
 
   if (lp_bytes == 0) {
     // The mapped region doesn't even span the start and the end of a large page.
@@ -3740,12 +3740,12 @@
                                                    char* req_addr,
                                                    bool exec) {
   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
-  assert(is_ptr_aligned(req_addr, alignment), "Must be");
-  assert(is_size_aligned(alignment, os::vm_allocation_granularity()), "Must be");
+  assert(is_aligned(req_addr, alignment), "Must be");
+  assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
   assert(is_power_of_2(os::large_page_size()), "Must be");
   assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
 
-  if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
+  if (is_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
     return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
   } else {
     return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
@@ -5967,12 +5967,12 @@
 
     for (int i = 0; i < num_sizes; i++) {
       const size_t size = sizes[i];
-      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
         char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
         test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " ->  " PTR_FORMAT " %s",
                  size, alignment, p2i(p), (p != NULL ? "" : "(failed)"));
         if (p != NULL) {
-          assert(is_ptr_aligned(p, alignment), "must be");
+          assert(is_aligned(p, alignment), "must be");
           small_page_write(p, size);
           os::Linux::release_memory_special_huge_tlbfs(p, size);
         }
@@ -5985,8 +5985,8 @@
 
     for (int i = 0; i < num_sizes; i++) {
       const size_t size = sizes[i];
-      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
-        char* const req_addr = align_ptr_up(mapping1, alignment);
+      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
+        char* const req_addr = align_up(mapping1, alignment);
         char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
         test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " ->  " PTR_FORMAT " %s",
                  size, alignment, p2i(req_addr), p2i(p),
@@ -6005,8 +6005,8 @@
 
     for (int i = 0; i < num_sizes; i++) {
       const size_t size = sizes[i];
-      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
-        char* const req_addr = align_ptr_up(mapping2, alignment);
+      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
+        char* const req_addr = align_up(mapping2, alignment);
         char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
         test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " ->  " PTR_FORMAT " %s",
                  size, alignment, p2i(req_addr), p2i(p), ((p != NULL ? "" : "(failed)")));
@@ -6039,8 +6039,8 @@
     char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
 
     if (addr != NULL) {
-      assert(is_ptr_aligned(addr, alignment), "Check");
-      assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
+      assert(is_aligned(addr, alignment), "Check");
+      assert(is_aligned(addr, os::large_page_size()), "Check");
 
       small_page_write(addr, size);
 
@@ -6053,7 +6053,7 @@
     size_t ag = os::vm_allocation_granularity();
 
     for (size_t size = ag; size < lp * 3; size += ag) {
-      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
         test_reserve_memory_special_shm(size, alignment);
       }
     }
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -158,7 +158,7 @@
   }
 
   // Do manual alignment
-  char* aligned_base = align_ptr_up(extra_base, alignment);
+  char* aligned_base = align_up(extra_base, alignment);
 
   // [  |                                       |  ]
   // ^ extra_base
@@ -322,7 +322,7 @@
     julong lower_limit = min_allocation_size;
     while ((upper_limit - lower_limit) > min_allocation_size) {
       julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
-      temp_limit = align_size_down_(temp_limit, min_allocation_size);
+      temp_limit = align_down_(temp_limit, min_allocation_size);
       if (is_allocatable(temp_limit)) {
         lower_limit = temp_limit;
       } else {
@@ -1180,7 +1180,7 @@
                                    JavaThread::stack_guard_zone_size() +
                                    JavaThread::stack_shadow_zone_size();
 
-  _java_thread_min_stack_allowed = align_size_up(_java_thread_min_stack_allowed, vm_page_size());
+  _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size());
   _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed);
 
   size_t stack_size_in_bytes = ThreadStackSize * K;
@@ -1204,7 +1204,7 @@
                                        JavaThread::stack_guard_zone_size() +
                                        JavaThread::stack_shadow_zone_size();
 
-  _compiler_thread_min_stack_allowed = align_size_up(_compiler_thread_min_stack_allowed, vm_page_size());
+  _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size());
   _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed);
 
   stack_size_in_bytes = CompilerThreadStackSize * K;
@@ -1216,7 +1216,7 @@
     return JNI_ERR;
   }
 
-  _vm_internal_thread_min_stack_allowed = align_size_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
+  _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
   _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, os_min_stack_allowed);
 
   stack_size_in_bytes = VMThreadStackSize * K;
@@ -1276,9 +1276,9 @@
   // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size.
   // Be careful not to round up to 0. Align down in that case.
   if (stack_size <= SIZE_MAX - vm_page_size()) {
-    stack_size = align_size_up(stack_size, vm_page_size());
+    stack_size = align_up(stack_size, vm_page_size());
   } else {
-    stack_size = align_size_down(stack_size, vm_page_size());
+    stack_size = align_down(stack_size, vm_page_size());
   }
 
   return stack_size;
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -235,7 +235,7 @@
   }
   // base may not be page aligned
   address base = current_stack_base();
-  address bottom = align_ptr_up(base - size, os::vm_page_size());;
+  address bottom = align_up(base - size, os::vm_page_size());;
   return (size_t)(base - bottom);
 }
 
@@ -1122,7 +1122,7 @@
       if (current_size == 0) current_size = 2 * K * K;
       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
     }
-    address bottom = align_ptr_up(base - stack_size, os::vm_page_size());;
+    address bottom = align_up(base - stack_size, os::vm_page_size());;
     stack_size = (size_t)(base - bottom);
 
     assert(stack_size > 0, "Stack size calculation problem");
@@ -2331,12 +2331,12 @@
 }
 
 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
-  assert(is_size_aligned(alignment, (size_t) vm_page_size()),
+  assert(is_aligned(alignment, (size_t) vm_page_size()),
          SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
          alignment, (size_t) vm_page_size());
 
   for (int i = 0; _page_sizes[i] != 0; i++) {
-    if (is_size_aligned(alignment, _page_sizes[i])) {
+    if (is_aligned(alignment, _page_sizes[i])) {
       return _page_sizes[i];
     }
   }
@@ -2348,7 +2348,7 @@
                                     size_t alignment_hint, bool exec) {
   int err = Solaris::commit_memory_impl(addr, bytes, exec);
   if (err == 0 && UseLargePages && alignment_hint > 0) {
-    assert(is_size_aligned(bytes, alignment_hint),
+    assert(is_aligned(bytes, alignment_hint),
            SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint);
 
     // The syscall memcntl requires an exact page size (see man memcntl for details).
@@ -2765,7 +2765,7 @@
 }
 
 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
-  assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
+  assert(addr == (char*)align_down((uintptr_t)addr, os::vm_page_size()),
          "addr must be page aligned");
   int retVal = mprotect(addr, bytes, prot);
   return retVal == 0;
@@ -2902,9 +2902,9 @@
 
 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
   assert(is_valid_page_size(align), SIZE_FORMAT " is not a valid page size", align);
-  assert(is_ptr_aligned((void*) start, align),
+  assert(is_aligned((void*) start, align),
          PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align);
-  assert(is_size_aligned(bytes, align),
+  assert(is_aligned(bytes, align),
          SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align);
 
   // Signal to OS that we want large pages for addresses
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -2386,7 +2386,7 @@
       bool pc_is_near_addr =
         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
       bool instr_spans_page_boundary =
-        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
+        (align_down((intptr_t) pc ^ (intptr_t) addr,
                          (intptr_t) page_size) > 0);
 
       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
@@ -2398,7 +2398,7 @@
             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
 
           // Set memory to RWX and retry
-          address page_start = align_ptr_down(addr, page_size);
+          address page_start = align_down(addr, page_size);
           bool res = os::protect_memory((char*) page_start, page_size,
                                         os::MEM_PROT_RWX);
 
@@ -2775,7 +2775,7 @@
 
   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
-  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
+  NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
 
   if (numa_node_list_holder.build()) {
     if (log_is_enabled(Debug, os, cpu)) {
@@ -2832,12 +2832,12 @@
   // we still need to round up to a page boundary (in case we are using large pages)
   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
   // instead we handle this in the bytes_to_rq computation below
-  p_buf = align_ptr_up(p_buf, page_size);
+  p_buf = align_up(p_buf, page_size);
 
   // now go through and allocate one chunk at a time until all bytes are
   // allocated
   size_t  bytes_remaining = bytes;
-  // An overflow of align_size_up() would have been caught above
+  // An overflow of align_up() would have been caught above
   // in the calculation of size_of_reserve.
   char * next_alloc_addr = p_buf;
   HANDLE hProc = GetCurrentProcess();
@@ -2996,7 +2996,7 @@
       return NULL;
     }
     // Do manual alignment
-    aligned_base = align_ptr_up(extra_base, alignment);
+    aligned_base = align_up(extra_base, alignment);
 
     os::release_memory(extra_base, extra_size);
 
@@ -3065,7 +3065,7 @@
                                  bool exec) {
   assert(UseLargePages, "only for large pages");
 
-  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+  if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
     return NULL; // Fallback to small pages.
   }
 
@@ -4066,7 +4066,7 @@
                      JavaThread::stack_shadow_zone_size() +
                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
 
-  min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
+  min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
 
   if (actual_reserve_size < min_stack_allowed) {
     tty->print_cr("\nThe Java thread stack size specified is too small. "
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -718,7 +718,7 @@
     bool pc_is_near_addr =
       (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
     bool instr_spans_page_boundary =
-      (align_size_down((intptr_t) pc ^ (intptr_t) addr,
+      (align_down((intptr_t) pc ^ (intptr_t) addr,
                        (intptr_t) page_size) > 0);
 
     if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
@@ -730,7 +730,7 @@
           (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
 
         // Set memory to RWX and retry
-        address page_start = align_ptr_down(addr, page_size);
+        address page_start = align_down(addr, page_size);
         bool res = os::protect_memory((char*) page_start, page_size,
                                       os::MEM_PROT_RWX);
 
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -534,7 +534,7 @@
     bool pc_is_near_addr =
       (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
     bool instr_spans_page_boundary =
-      (align_size_down((intptr_t) pc ^ (intptr_t) addr,
+      (align_down((intptr_t) pc ^ (intptr_t) addr,
                        (intptr_t) page_size) > 0);
 
     if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
@@ -546,7 +546,7 @@
           (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
 
         // Set memory to RWX and retry
-        address page_start = align_ptr_down(addr, page_size);
+        address page_start = align_down(addr, page_size);
         bool res = os::protect_memory((char*) page_start, page_size,
                                       os::MEM_PROT_RWX);
 
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -350,7 +350,7 @@
   if (res != 0) {
     fatal("pthread_attr_getguardsize failed with errno = %d", res);
   }
-  int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
+  int guard_pages = align_up(guard_bytes, page_bytes) / page_bytes;
   assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");
 
 #ifdef IA64
@@ -361,7 +361,7 @@
   // there's nothing to stop us allocating more to the normal stack
   // or more to the register stack if one or the other were found
   // to grow faster.
-  int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
+  int total_pages = align_down(stack_bytes, page_bytes) / page_bytes;
   stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
 #endif // IA64
 
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -635,7 +635,7 @@
     bool pc_is_near_addr =
       (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
     bool instr_spans_page_boundary =
-      (align_size_down((intptr_t) pc ^ (intptr_t) addr,
+      (align_down((intptr_t) pc ^ (intptr_t) addr,
                        (intptr_t) page_size) > 0);
 
     if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
@@ -647,7 +647,7 @@
           (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
 
         // Make memory rwx and retry
-        address page_start = align_ptr_down(addr, page_size);
+        address page_start = align_down(addr, page_size);
         bool res = os::protect_memory((char*) page_start, page_size,
                                       os::MEM_PROT_RWX);
 
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -624,7 +624,7 @@
 
 csize_t CodeBuffer::total_relocation_size() const {
   csize_t total = copy_relocations_to(NULL);  // dry run only
-  return (csize_t) align_size_up(total, HeapWordSize);
+  return (csize_t) align_up(total, HeapWordSize);
 }
 
 csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
@@ -769,7 +769,7 @@
     CodeSection* dest_cs = dest->code_section(n);
     assert(cs->size() == dest_cs->size(), "sanity");
     csize_t usize = dest_cs->size();
-    csize_t wsize = align_size_up(usize, HeapWordSize);
+    csize_t wsize = align_up(usize, HeapWordSize);
     assert(dest_cs->start() + wsize <= dest_end, "no overflow");
     // Copy the code as aligned machine words.
     // This may also include an uninitialized partial word at the end.
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -227,7 +227,7 @@
   // Slop between sections, used only when allocating temporary BufferBlob buffers.
   static csize_t end_slop()         { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }
 
-  csize_t align_at_start(csize_t off) const { return (csize_t) align_size_up(off, alignment()); }
+  csize_t align_at_start(csize_t off) const { return (csize_t) align_up(off, alignment()); }
 
   // Mark a section frozen.  Assign its remaining space to
   // the following section.  It will never expand after this point.
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -3714,7 +3714,7 @@
   if ( fac->count[STATIC_DOUBLE] &&
        (Universe::field_type_should_be_aligned(T_DOUBLE) ||
         Universe::field_type_should_be_aligned(T_LONG)) ) {
-    next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong);
+    next_static_double_offset = align_up(next_static_double_offset, BytesPerLong);
   }
 
   int next_static_word_offset   = next_static_double_offset +
@@ -3856,7 +3856,7 @@
   // long/double alignment.
   if (nonstatic_double_count > 0) {
     int offset = next_nonstatic_double_offset;
-    next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
+    next_nonstatic_double_offset = align_up(offset, BytesPerLong);
     if (compact_fields && offset != next_nonstatic_double_offset) {
       // Allocate available fields into the gap before double field.
       int length = next_nonstatic_double_offset - offset;
@@ -3906,7 +3906,7 @@
   if( allocation_style == 1 ) {
     next_nonstatic_oop_offset = next_nonstatic_padded_offset;
     if( nonstatic_oop_count > 0 ) {
-      next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize);
+      next_nonstatic_oop_offset = align_up(next_nonstatic_oop_offset, heapOopSize);
     }
     next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
   }
@@ -4061,31 +4061,31 @@
 
         switch (atype) {
           case NONSTATIC_BYTE:
-            next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, 1);
+            next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, 1);
             real_offset = next_nonstatic_padded_offset;
             next_nonstatic_padded_offset += 1;
             break;
 
           case NONSTATIC_SHORT:
-            next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerShort);
+            next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerShort);
             real_offset = next_nonstatic_padded_offset;
             next_nonstatic_padded_offset += BytesPerShort;
             break;
 
           case NONSTATIC_WORD:
-            next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerInt);
+            next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerInt);
             real_offset = next_nonstatic_padded_offset;
             next_nonstatic_padded_offset += BytesPerInt;
             break;
 
           case NONSTATIC_DOUBLE:
-            next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerLong);
+            next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerLong);
             real_offset = next_nonstatic_padded_offset;
             next_nonstatic_padded_offset += BytesPerLong;
             break;
 
           case NONSTATIC_OOP:
-            next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, heapOopSize);
+            next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, heapOopSize);
             real_offset = next_nonstatic_padded_offset;
             next_nonstatic_padded_offset += heapOopSize;
 
@@ -4147,9 +4147,9 @@
 
   int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset;
 
-  int nonstatic_fields_end      = align_size_up(notaligned_nonstatic_fields_end, heapOopSize);
-  int instance_end              = align_size_up(notaligned_nonstatic_fields_end, wordSize);
-  int static_fields_end         = align_size_up(next_static_byte_offset, wordSize);
+  int nonstatic_fields_end      = align_up(notaligned_nonstatic_fields_end, heapOopSize);
+  int instance_end              = align_up(notaligned_nonstatic_fields_end, wordSize);
+  int static_fields_end         = align_up(next_static_byte_offset, wordSize);
 
   int static_field_size         = (static_fields_end -
                                    InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
@@ -4158,7 +4158,7 @@
 
   int instance_size             = align_object_size(instance_end / wordSize);
 
-  assert(instance_size == align_object_size(align_size_up(
+  assert(instance_size == align_object_size(align_up(
          (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize),
           wordSize) / wordSize), "consistent layout helper value");
 
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -3815,7 +3815,7 @@
 
   // java_lang_boxing_object
   java_lang_boxing_object::value_offset = java_lang_boxing_object::hc_value_offset + header;
-  java_lang_boxing_object::long_value_offset = align_size_up((java_lang_boxing_object::hc_value_offset + header), BytesPerLong);
+  java_lang_boxing_object::long_value_offset = align_up((java_lang_boxing_object::hc_value_offset + header), BytesPerLong);
 
   // java_lang_ref_Reference:
   java_lang_ref_Reference::referent_offset = java_lang_ref_Reference::hc_referent_offset * x + header;
@@ -3827,7 +3827,7 @@
   java_lang_ref_Reference::number_of_fake_oop_fields = 1;
 
   // java_lang_ref_SoftReference Class
-  java_lang_ref_SoftReference::timestamp_offset = align_size_up((java_lang_ref_SoftReference::hc_timestamp_offset * x + header), BytesPerLong);
+  java_lang_ref_SoftReference::timestamp_offset = align_up((java_lang_ref_SoftReference::hc_timestamp_offset * x + header), BytesPerLong);
   // Don't multiply static fields because they are always in wordSize units
   java_lang_ref_SoftReference::static_clock_offset = java_lang_ref_SoftReference::hc_static_clock_offset * x;
 
--- a/hotspot/src/share/vm/code/codeCache.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -281,8 +281,8 @@
 
   // Align CodeHeaps
   size_t alignment = heap_alignment();
-  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
-  profiled_size   = align_size_down(profiled_size, alignment);
+  non_nmethod_size = align_up(non_nmethod_size, alignment);
+  profiled_size   = align_down(profiled_size, alignment);
 
   // Reserve one continuous chunk of memory for CodeHeaps and split it into
   // parts for the individual heaps. The memory layout looks like this:
@@ -322,7 +322,7 @@
           os::vm_page_size();
   const size_t granularity = os::vm_allocation_granularity();
   const size_t r_align = MAX2(page_size, granularity);
-  const size_t r_size = align_size_up(size, r_align);
+  const size_t r_size = align_up(size, r_align);
   const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
     MAX2(page_size, granularity);
 
--- a/hotspot/src/share/vm/compiler/oopMap.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/compiler/oopMap.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -612,16 +612,16 @@
 }
 
 int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
-  return align_size_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
+  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
 }
 
 int ImmutableOopMapBuilder::heap_size() {
   int base = sizeof(ImmutableOopMapSet);
-  base = align_size_up(base, 8);
+  base = align_up(base, 8);
 
   // all of ours pc / offset pairs
   int pairs = _set->size() * sizeof(ImmutableOopMapPair);
-  pairs = align_size_up(pairs, 8);
+  pairs = align_up(pairs, 8);
 
   for (int i = 0; i < _set->size(); ++i) {
     int size = 0;
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -64,7 +64,7 @@
 
   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
   // for chunks to contain a FreeChunk.
-  size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
+  size_t min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
 
   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
@@ -2873,8 +2873,7 @@
     if (span.contains(low)) {
       // Align low down to  a card boundary so that
       // we can use block_offset_careful() on span boundaries.
-      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
-                                 CardTableModRefBS::card_size);
+      HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size);
       // Clip span prefix at aligned_low
       span = span.intersection(MemRegion(aligned_low, span.end()));
     } else if (low > span.end()) {
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -3219,7 +3219,7 @@
   if (sp->used_region().contains(_restart_addr)) {
     // Align down to a card boundary for the start of 0th task
     // for this space.
-    aligned_start = align_ptr_down(_restart_addr, CardTableModRefBS::card_size);
+    aligned_start = align_down(_restart_addr, CardTableModRefBS::card_size);
   }
 
   size_t chunk_size = sp->marking_task_size();
--- a/hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -287,7 +287,7 @@
     // Determine how far we are from the next card boundary. If it is smaller than
     // the minimum object size we can allocate into, expand into the next card.
     HeapWord* top = cur->top();
-    HeapWord* aligned_top = align_ptr_up(top, BOTConstants::N_bytes);
+    HeapWord* aligned_top = align_up(top, BOTConstants::N_bytes);
 
     size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
 
--- a/hotspot/src/share/vm/gc/g1/g1Allocator.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1Allocator.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -425,7 +425,7 @@
                                           size_t end_alignment_in_bytes) {
   assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
          "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
-  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
+  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
          "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);
 
   // If we've allocated nothing, simply return.
@@ -436,7 +436,7 @@
   // If an end alignment was requested, insert filler objects.
   if (end_alignment_in_bytes != 0) {
     HeapWord* currtop = _allocation_region->top();
-    HeapWord* newtop = align_ptr_up(currtop, end_alignment_in_bytes);
+    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
     size_t fill_size = pointer_delta(newtop, currtop);
     if (fill_size != 0) {
       if (fill_size < CollectedHeap::min_fill_size()) {
@@ -445,8 +445,8 @@
         // region boundary because the max supported alignment is smaller than the min
         // region size, and because the allocation code never leaves space smaller than
         // the min_fill_size at the top of the current allocation region.
-        newtop = align_ptr_up(currtop + CollectedHeap::min_fill_size(),
-                              end_alignment_in_bytes);
+        newtop = align_up(currtop + CollectedHeap::min_fill_size(),
+                          end_alignment_in_bytes);
         fill_size = pointer_delta(newtop, currtop);
       }
       HeapWord* fill = archive_mem_allocate(fill_size);
--- a/hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CardLiveData.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -131,7 +131,7 @@
 
   void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
     BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_ptr_up(end, CardTableModRefBS::card_size));
+    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
 
     _card_bm.clear_range(start_idx, end_idx);
   }
@@ -139,7 +139,7 @@
   // Mark the card liveness bitmap for the object spanning from start to end.
   void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
     BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_ptr_up(end, CardTableModRefBS::card_size));
+    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
 
     assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
 
@@ -423,7 +423,7 @@
 void G1CardLiveData::clear(WorkGang* workers) {
   guarantee(Universe::is_fully_initialized(), "Should not call this during initialization.");
 
-  size_t const num_chunks = align_size_up(live_cards_bm().size_in_bytes(), G1ClearCardLiveDataTask::chunk_size()) / G1ClearCardLiveDataTask::chunk_size();
+  size_t const num_chunks = align_up(live_cards_bm().size_in_bytes(), G1ClearCardLiveDataTask::chunk_size()) / G1ClearCardLiveDataTask::chunk_size();
   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 
   G1ClearCardLiveDataTask cl(live_cards_bm(), num_chunks);
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -372,7 +372,7 @@
 
 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
   assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
-  return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
+  return align_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
 }
 
 // If could fit into free regions w/o expansion, try.
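
humongous_obj_size_in_regions above uses align_up_ with a trailing underscore; by convention that is the preprocessor-macro form of the helper (kept for places where a plain expression is required), while align_up without the underscore is the checked function. Rounding up and then dividing is ceiling division, here counting how many whole regions a humongous object spans. Roughly, under the assumed power-of-two definition:

  // Illustrative stand-in for the macro form:
  #define SKETCH_ALIGN_UP_(size, alignment) \
    (((size) + ((alignment) - 1)) & ~((alignment) - 1))

  // SKETCH_ALIGN_UP_(word_size, GrainWords) / GrainWords == ceil(word_size / GrainWords),
  // e.g. with hypothetical 1 MB regions (131072 words), word_size = 200000 -> 2 regions.
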
@@ -1606,7 +1606,7 @@
 
 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
-  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
+  aligned_expand_bytes = align_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
 
   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
@@ -1647,7 +1647,7 @@
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
   size_t aligned_shrink_bytes =
     ReservedSpace::page_align_size_down(shrink_bytes);
-  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
+  aligned_shrink_bytes = align_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
@@ -2435,7 +2435,7 @@
 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
 // must be equal to the humongous object limit.
 size_t G1CollectedHeap::max_tlab_size() const {
-  return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
+  return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
 }
 
 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -71,7 +71,7 @@
 HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                  const HeapWord* limit) const {
   // First we must round addr *up* to a possible object boundary.
-  addr = align_ptr_up(addr, HeapWordSize << _shifter);
+  addr = align_up(addr, HeapWordSize << _shifter);
   size_t addrOffset = heapWordToOffset(addr);
   assert(limit != NULL, "limit must not be NULL");
   size_t limitOffset = heapWordToOffset(limit);
@@ -170,8 +170,8 @@
 
   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 
-  _max_chunk_capacity = align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
-  size_t initial_chunk_capacity = align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
+  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
+  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 
   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
@@ -714,7 +714,7 @@
   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 
   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
-  size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
+  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 
   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
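
The same round-up-then-divide pattern sizes the parallel bitmap clearing here: num_chunks is the ceiling of the byte count over the task's chunk size, and MIN2 caps the worker count so no worker starts without a chunk. A compact restatement with hypothetical names:

  #include <algorithm>
  #include <cstddef>

  size_t sketch_num_chunks(size_t bytes_to_clear, size_t chunk_size) {
    return (bytes_to_clear + chunk_size - 1) / chunk_size;  // == align_up(bytes, chunk) / chunk
  }

  unsigned sketch_num_workers(size_t num_chunks, unsigned active_workers) {
    return (unsigned)std::min(num_chunks, (size_t)active_workers);
  }
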
 
--- a/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -44,13 +44,13 @@
   vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
   vmassert(page_size > 0, "Page size must be non-zero.");
 
-  guarantee(is_ptr_aligned(rs.base(), page_size),
+  guarantee(is_aligned(rs.base(), page_size),
             "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
-  guarantee(is_size_aligned(used_size, os::vm_page_size()),
+  guarantee(is_aligned(used_size, os::vm_page_size()),
             "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
   guarantee(used_size <= rs.size(),
             "Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size());
-  guarantee(is_size_aligned(rs.size(), page_size),
+  guarantee(is_aligned(rs.size(), page_size),
             "Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size);
 
   _low_boundary  = rs.base();
@@ -141,7 +141,7 @@
 void G1PageBasedVirtualSpace::commit_tail() {
   vmassert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");
 
-  char* const aligned_end_address = align_ptr_down(_high_boundary, _page_size);
+  char* const aligned_end_address = align_down(_high_boundary, _page_size);
   os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable,
                             err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
                             p2i(aligned_end_address), p2i(_high_boundary), _tail_size));
--- a/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -100,7 +100,7 @@
   // Is the given page index the first after last page?
   bool is_after_last_page(size_t index) const;
   // Is the last page only partially covered by this space?
-  bool is_last_page_partial() const { return !is_ptr_aligned(_high_boundary, _page_size); }
+  bool is_last_page_partial() const { return !is_aligned(_high_boundary, _page_size); }
   // Returns the end address of the given page bounded by the reserved space.
   char* bounded_end_addr(size_t end_page) const;
 
--- a/hotspot/src/share/vm/gc/g1/g1RegionToSpaceMapper.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1RegionToSpaceMapper.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -111,7 +111,7 @@
     _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
 
     guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
-    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
+    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_up(rs.size(), page_size)), page_size);
   }
 
   virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -256,7 +256,7 @@
       return;
     }
 
-    size_t const num_chunks = align_size_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
+    size_t const num_chunks = align_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
     uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
     size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion;
 
--- a/hotspot/src/share/vm/gc/g1/ptrQueue.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/ptrQueue.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -79,7 +79,7 @@
   }
 
   static size_t byte_index_to_index(size_t ind) {
-    assert(is_size_aligned(ind, _element_size), "precondition");
+    assert(is_aligned(ind, _element_size), "precondition");
     return ind / _element_size;
   }
 
--- a/hotspot/src/share/vm/gc/g1/sparsePRT.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/sparsePRT.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -68,7 +68,7 @@
   static size_t size() { return sizeof(SparsePRTEntry) + sizeof(card_elem_t) * (cards_num() - card_array_alignment); }
   // Returns the size of the card array.
   static int cards_num() {
-    return align_size_up(G1RSetSparseRegionEntries, card_array_alignment);
+    return align_up(G1RSetSparseRegionEntries, card_array_alignment);
   }
 
   // Set the region_ind to the given value, and delete all cards.
--- a/hotspot/src/share/vm/gc/parallel/adjoiningGenerations.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/adjoiningGenerations.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -161,7 +161,7 @@
   const size_t alignment = virtual_spaces()->alignment();
   size_t change_in_bytes = MIN3(young_gen_available,
                                 old_gen_available,
-                                align_size_up_(expand_in_bytes, alignment));
+                                align_up_(expand_in_bytes, alignment));
 
   if (change_in_bytes == 0) {
     return;
@@ -203,7 +203,7 @@
   const size_t alignment = virtual_spaces()->alignment();
   size_t change_in_bytes = MIN3(young_gen_available,
                                 old_gen_available,
-                                align_size_up_(expand_in_bytes, alignment));
+                                align_up_(expand_in_bytes, alignment));
 
   if (change_in_bytes == 0) {
     return false;
--- a/hotspot/src/share/vm/gc/parallel/asPSOldGen.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/asPSOldGen.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -91,7 +91,7 @@
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   size_t result =  gen_size_limit() - virtual_space()->committed_size();
-  size_t result_aligned = align_size_down(result, heap->generation_alignment());
+  size_t result_aligned = align_down(result, heap->generation_alignment());
   return result_aligned;
 }
 
@@ -106,7 +106,7 @@
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   const size_t working_size =
     used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
-  const size_t working_aligned = align_size_up(working_size, gen_alignment);
+  const size_t working_aligned = align_up(working_size, gen_alignment);
   const size_t working_or_min = MAX2(working_aligned, min_gen_size());
   if (working_or_min > reserved().byte_size()) {
     // If the used or minimum gen size (aligned up) is greater
@@ -124,7 +124,7 @@
 
   size_t result = policy->promo_increment_aligned_down(max_contraction);
   // Also adjust for inter-generational alignment
-  size_t result_aligned = align_size_down(result, gen_alignment);
+  size_t result_aligned = align_down(result, gen_alignment);
 
   Log(gc, ergo) log;
   if (log.is_trace()) {
--- a/hotspot/src/share/vm/gc/parallel/asPSYoungGen.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/asPSYoungGen.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -75,7 +75,7 @@
     "generation size limit is wrong");
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   size_t result =  gen_size_limit() - current_committed_size;
-  size_t result_aligned = align_size_down(result, heap->generation_alignment());
+  size_t result_aligned = align_down(result, heap->generation_alignment());
   return result_aligned;
 }
 
@@ -98,7 +98,7 @@
     assert(eden_space()->capacity_in_bytes() >= eden_alignment,
       "Alignment is wrong");
     size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
-    eden_avail = align_size_down(eden_avail, gen_alignment);
+    eden_avail = align_down(eden_avail, gen_alignment);
 
     assert(virtual_space()->committed_size() >= min_gen_size(),
       "minimum gen size is wrong");
@@ -110,7 +110,7 @@
     // for reasons the "increment" fraction is used.
     PSAdaptiveSizePolicy* policy = heap->size_policy();
     size_t result = policy->eden_increment_aligned_down(max_contraction);
-    size_t result_aligned = align_size_down(result, gen_alignment);
+    size_t result_aligned = align_down(result, gen_alignment);
 
     log_trace(gc, ergo)("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K", result_aligned/K);
     log_trace(gc, ergo)("  max_contraction " SIZE_FORMAT " K", max_contraction/K);
@@ -166,7 +166,7 @@
 
   // Adjust new generation size
   const size_t eden_plus_survivors =
-    align_size_up(eden_size + 2 * survivor_size, alignment);
+    align_up(eden_size + 2 * survivor_size, alignment);
   size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
                              min_gen_size());
   assert(desired_size <= gen_size_limit(), "just checking");
@@ -332,7 +332,7 @@
       if (from_size == 0) {
         from_size = alignment;
       } else {
-        from_size = align_size_up(from_size, alignment);
+        from_size = align_up(from_size, alignment);
       }
 
       from_end = from_start + from_size;
@@ -419,9 +419,9 @@
             "from start moved to the right");
   guarantee((HeapWord*)from_end >= from_space()->top(),
             "from end moved into live data");
-  assert(is_ptr_object_aligned(eden_start), "checking alignment");
-  assert(is_ptr_object_aligned(from_start), "checking alignment");
-  assert(is_ptr_object_aligned(to_start), "checking alignment");
+  assert(is_object_aligned(eden_start), "checking alignment");
+  assert(is_object_aligned(from_start), "checking alignment");
+  assert(is_object_aligned(to_start), "checking alignment");
 
   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
   MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
--- a/hotspot/src/share/vm/gc/parallel/cardTableExtension.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/cardTableExtension.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -504,14 +504,14 @@
   }
 #ifdef ASSERT
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  assert(cur_committed.start() == align_ptr_up(cur_committed.start(), os::vm_page_size()),
+  assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()),
     "Starts should have proper alignment");
 #endif
 
   jbyte* new_start = byte_for(new_region.start());
   // Round down because this is for the start address
   HeapWord* new_start_aligned =
-    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
+    (HeapWord*)align_down((uintptr_t)new_start, os::vm_page_size());
   // The guard page is always committed and should not be committed over.
   // This method is used in cases where the generation is growing toward
   // lower addresses but the guard region is still at the end of the
@@ -584,7 +584,7 @@
   jbyte* new_start = byte_for(new_region.start());
   // Set the new start of the committed region
   HeapWord* new_start_aligned =
-    (HeapWord*)align_ptr_down(new_start, os::vm_page_size());
+    (HeapWord*)align_down(new_start, os::vm_page_size());
   MemRegion new_committed = MemRegion(new_start_aligned,
     _committed[changed_region].end());
   _committed[changed_region] = new_committed;
--- a/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -177,7 +177,7 @@
   if (pointer_delta(end(), obj) >= size) {
     HeapWord* new_top = obj + size;
     set_top(new_top);
-    assert(is_ptr_object_aligned(obj) && is_ptr_object_aligned(new_top),
+    assert(is_object_aligned(obj) && is_object_aligned(new_top),
            "checking alignment");
     return obj;
   } else {
@@ -198,7 +198,7 @@
       if (result != obj) {
         continue; // another thread beat us to the allocation, try again
       }
-      assert(is_ptr_object_aligned(obj) && is_ptr_object_aligned(new_top),
+      assert(is_object_aligned(obj) && is_object_aligned(new_top),
              "checking alignment");
       return obj;
     } else {
--- a/hotspot/src/share/vm/gc/parallel/objectStartArray.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/objectStartArray.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -43,7 +43,7 @@
   assert(bytes_to_reserve > 0, "Sanity");
 
   bytes_to_reserve =
-    align_size_up(bytes_to_reserve, os::vm_allocation_granularity());
+    align_up(bytes_to_reserve, os::vm_allocation_granularity());
 
   // Do not use large-pages for the backing store. The one large page region
   // will be used for the heap proper.
@@ -89,7 +89,7 @@
 
   // Only commit memory in page sized chunks
   requested_blocks_size_in_bytes =
-    align_size_up(requested_blocks_size_in_bytes, os::vm_page_size());
+    align_up(requested_blocks_size_in_bytes, os::vm_page_size());
 
   _covered_region = mr;
 
--- a/hotspot/src/share/vm/gc/parallel/parMarkBitMap.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/parMarkBitMap.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -44,7 +44,7 @@
   const size_t raw_bytes = words * sizeof(idx_t);
   const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
   const size_t granularity = os::vm_allocation_granularity();
-  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
+  _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
 
   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
     MAX2(page_sz, granularity);
--- a/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -370,10 +370,10 @@
   }
 
   // Align everything and make a final limit check
-  desired_eden_size  = align_size_up(desired_eden_size, _space_alignment);
+  desired_eden_size  = align_up(desired_eden_size, _space_alignment);
   desired_eden_size  = MAX2(desired_eden_size, _space_alignment);
 
-  eden_limit  = align_size_down(eden_limit, _space_alignment);
+  eden_limit  = align_down(eden_limit, _space_alignment);
 
   // And one last limit check, now that we've aligned things.
   if (desired_eden_size > eden_limit) {
@@ -547,10 +547,10 @@
   }
 
   // Align everything and make a final limit check
-  desired_promo_size = align_size_up(desired_promo_size, _space_alignment);
+  desired_promo_size = align_up(desired_promo_size, _space_alignment);
   desired_promo_size = MAX2(desired_promo_size, _space_alignment);
 
-  promo_limit = align_size_down(promo_limit, _space_alignment);
+  promo_limit = align_down(promo_limit, _space_alignment);
 
   // And one last limit check, now that we've aligned things.
   desired_promo_size = MIN2(desired_promo_size, promo_limit);
@@ -925,24 +925,24 @@
 
 size_t PSAdaptiveSizePolicy::eden_increment_aligned_up(size_t cur_eden) {
   size_t result = eden_increment(cur_eden, YoungGenerationSizeIncrement);
-  return align_size_up(result, _space_alignment);
+  return align_up(result, _space_alignment);
 }
 
 size_t PSAdaptiveSizePolicy::eden_increment_aligned_down(size_t cur_eden) {
   size_t result = eden_increment(cur_eden);
-  return align_size_down(result, _space_alignment);
+  return align_down(result, _space_alignment);
 }
 
 size_t PSAdaptiveSizePolicy::eden_increment_with_supplement_aligned_up(
   size_t cur_eden) {
   size_t result = eden_increment(cur_eden,
     YoungGenerationSizeIncrement + _young_gen_size_increment_supplement);
-  return align_size_up(result, _space_alignment);
+  return align_up(result, _space_alignment);
 }
 
 size_t PSAdaptiveSizePolicy::eden_decrement_aligned_down(size_t cur_eden) {
   size_t eden_heap_delta = eden_decrement(cur_eden);
-  return align_size_down(eden_heap_delta, _space_alignment);
+  return align_down(eden_heap_delta, _space_alignment);
 }
 
 size_t PSAdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
@@ -964,24 +964,24 @@
 
 size_t PSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) {
   size_t result =  promo_increment(cur_promo, TenuredGenerationSizeIncrement);
-  return align_size_up(result, _space_alignment);
+  return align_up(result, _space_alignment);
 }
 
 size_t PSAdaptiveSizePolicy::promo_increment_aligned_down(size_t cur_promo) {
   size_t result =  promo_increment(cur_promo, TenuredGenerationSizeIncrement);
-  return align_size_down(result, _space_alignment);
+  return align_down(result, _space_alignment);
 }
 
 size_t PSAdaptiveSizePolicy::promo_increment_with_supplement_aligned_up(
   size_t cur_promo) {
   size_t result =  promo_increment(cur_promo,
     TenuredGenerationSizeIncrement + _old_gen_size_increment_supplement);
-  return align_size_up(result, _space_alignment);
+  return align_up(result, _space_alignment);
 }
 
 size_t PSAdaptiveSizePolicy::promo_decrement_aligned_down(size_t cur_promo) {
   size_t promo_heap_delta = promo_decrement(cur_promo);
-  return align_size_down(promo_heap_delta, _space_alignment);
+  return align_down(promo_heap_delta, _space_alignment);
 }
 
 size_t PSAdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
@@ -996,7 +996,7 @@
                                              size_t survivor_limit) {
   assert(survivor_limit >= _space_alignment,
          "survivor_limit too small");
-  assert(is_size_aligned(survivor_limit, _space_alignment),
+  assert(is_aligned(survivor_limit, _space_alignment),
          "survivor_limit not aligned");
 
   // This method is called even if the tenuring threshold and survivor
@@ -1059,7 +1059,7 @@
   // we use this to see how good of an estimate we have of what survived.
   // We're trying to pad the survivor size as little as possible without
   // overflowing the survivor spaces.
-  size_t target_size = align_size_up((size_t)_avg_survived->padded_average(),
+  size_t target_size = align_up((size_t)_avg_survived->padded_average(),
                                      _space_alignment);
   target_size = MAX2(target_size, _space_alignment);
 
--- a/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -374,7 +374,7 @@
     // large filler object at the bottom).
     const size_t sz = gen_size / MinSurvivorRatio;
     const size_t alignment = _space_alignment;
-    return sz > alignment ? align_size_down(sz, alignment) : alignment;
+    return sz > alignment ? align_down(sz, alignment) : alignment;
   }
 
   size_t live_at_last_full_gc() {
--- a/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -410,7 +410,7 @@
   const size_t alignment = old_gen->virtual_space()->alignment();
   const size_t eden_used = eden_space->used_in_bytes();
   const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
-  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
+  const size_t absorb_size = align_up(eden_used + promoted, alignment);
   const size_t eden_capacity = eden_space->capacity_in_bytes();
 
   if (absorb_size >= eden_capacity) {
--- a/hotspot/src/share/vm/gc/parallel/psOldGen.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psOldGen.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -229,8 +229,8 @@
   }
   MutexLocker x(ExpandHeap_lock);
   const size_t alignment = virtual_space()->alignment();
-  size_t aligned_bytes  = align_size_up(bytes, alignment);
-  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
+  size_t aligned_bytes  = align_up(bytes, alignment);
+  size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);
 
   if (UseNUMA) {
     // With NUMA we use round-robin page allocation for the old gen. Expand by at least
@@ -244,7 +244,7 @@
     // but not a guarantee.  Align down to give a best effort.  This is likely
     // the most that the generation can expand since it has some capacity to
     // start with.
-    aligned_bytes = align_size_down(bytes, alignment);
+    aligned_bytes = align_down(bytes, alignment);
   }
 
   bool success = false;
@@ -318,7 +318,7 @@
   assert_lock_strong(ExpandHeap_lock);
   assert_locked_or_safepoint(Heap_lock);
 
-  size_t size = align_size_down(bytes, virtual_space()->alignment());
+  size_t size = align_down(bytes, virtual_space()->alignment());
   if (size > 0) {
     assert_lock_strong(ExpandHeap_lock);
     virtual_space()->shrink_by(bytes);
@@ -343,7 +343,7 @@
   new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());
 
   assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
-  new_size = align_size_up(new_size, alignment);
+  new_size = align_up(new_size, alignment);
 
   const size_t current_size = capacity_in_bytes();
 
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -432,7 +432,7 @@
   const size_t raw_bytes = count * element_size;
   const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
   const size_t granularity = os::vm_allocation_granularity();
-  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
+  _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
 
   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
     MAX2(page_sz, granularity);
@@ -1984,7 +1984,7 @@
   const size_t alignment = old_gen->virtual_space()->alignment();
   const size_t eden_used = eden_space->used_in_bytes();
   const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
-  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
+  const size_t absorb_size = align_up(eden_used + promoted, alignment);
   const size_t eden_capacity = eden_space->capacity_in_bytes();
 
   if (absorb_size >= eden_capacity) {
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.inline.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -88,7 +88,7 @@
 inline void PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr) {
   assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
          "must move left or to a different space");
-  assert(is_ptr_object_aligned(old_addr) && is_ptr_object_aligned(new_addr),
+  assert(is_object_aligned(old_addr) && is_object_aligned(new_addr),
          "checking alignment");
 }
 #endif // ASSERT
--- a/hotspot/src/share/vm/gc/parallel/psPromotionLAB.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psPromotionLAB.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -122,7 +122,7 @@
     // The 'new_top>obj' check is needed to detect overflow of obj+size.
     if (new_top > obj && new_top <= end()) {
       set_top(new_top);
-      assert(is_ptr_object_aligned(obj) && is_ptr_object_aligned(new_top),
+      assert(is_object_aligned(obj) && is_object_aligned(new_top),
              "checking alignment");
       _start_array->allocate_block(obj);
       return obj;
--- a/hotspot/src/share/vm/gc/parallel/psPromotionLAB.inline.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psPromotionLAB.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -40,7 +40,7 @@
   // The 'new_top>obj' check is needed to detect overflow of obj+size.
   if (new_top > obj && new_top <= end()) {
     set_top(new_top);
-    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_ptr_object_aligned(new_top),
+    assert(is_aligned(obj, SurvivorAlignmentInBytes) && is_object_aligned(new_top),
            "checking alignment");
     return obj;
   } else {
--- a/hotspot/src/share/vm/gc/parallel/psYoungGen.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psYoungGen.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -116,7 +116,7 @@
 
     // round the survivor space size down to the nearest alignment
     // and make sure its size is greater than 0.
-    max_survivor_size = align_size_down(max_survivor_size, alignment);
+    max_survivor_size = align_down(max_survivor_size, alignment);
     max_survivor_size = MAX2(max_survivor_size, alignment);
 
     // set the maximum size of eden to be the size of the young gen
@@ -128,7 +128,7 @@
 
     // round the survivor space size down to the nearest alignment
     // and make sure its size is greater than 0.
-    max_survivor_size = align_size_down(max_survivor_size, alignment);
+    max_survivor_size = align_down(max_survivor_size, alignment);
     max_survivor_size = MAX2(max_survivor_size, alignment);
 
     // set the maximum size of eden to be the size of the young gen
@@ -162,7 +162,7 @@
   assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");
 
   size_t survivor_size = size / InitialSurvivorRatio;
-  survivor_size = align_size_down(survivor_size, alignment);
+  survivor_size = align_down(survivor_size, alignment);
   // ... but never less than an alignment
   survivor_size = MAX2(survivor_size, alignment);
 
@@ -193,9 +193,9 @@
   char *from_end   = from_start + survivor_size;
 
   assert(from_end == virtual_space()->high(), "just checking");
-  assert(is_ptr_object_aligned(eden_start), "checking alignment");
-  assert(is_ptr_object_aligned(to_start),   "checking alignment");
-  assert(is_ptr_object_aligned(from_start), "checking alignment");
+  assert(is_object_aligned(eden_start), "checking alignment");
+  assert(is_object_aligned(to_start),   "checking alignment");
+  assert(is_object_aligned(from_start), "checking alignment");
 
   MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start);
   MemRegion to_mr  ((HeapWord*)to_start, (HeapWord*)from_start);
@@ -294,7 +294,7 @@
 
   // Adjust new generation size
   const size_t eden_plus_survivors =
-          align_size_up(eden_size + 2 * survivor_size, alignment);
+          align_up(eden_size + 2 * survivor_size, alignment);
   size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
                              min_gen_size());
   assert(desired_size <= max_size(), "just checking");
@@ -528,7 +528,7 @@
       if (from_size == 0) {
         from_size = alignment;
       } else {
-        from_size = align_size_up(from_size, alignment);
+        from_size = align_up(from_size, alignment);
       }
 
       from_end = from_start + from_size;
@@ -611,9 +611,9 @@
             "from start moved to the right");
   guarantee((HeapWord*)from_end >= from_space()->top(),
             "from end moved into live data");
-  assert(is_ptr_object_aligned(eden_start), "checking alignment");
-  assert(is_ptr_object_aligned(from_start), "checking alignment");
-  assert(is_ptr_object_aligned(to_start), "checking alignment");
+  assert(is_object_aligned(eden_start), "checking alignment");
+  assert(is_object_aligned(from_start), "checking alignment");
+  assert(is_object_aligned(to_start), "checking alignment");
 
   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
   MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
@@ -815,7 +815,7 @@
   }
 
   size_t delta_in_bytes = unused_committed + delta_in_survivor;
-  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
+  delta_in_bytes = align_down(delta_in_bytes, gen_alignment);
   return delta_in_bytes;
 }
 
@@ -828,7 +828,7 @@
   // Allow shrinkage into the current eden but keep eden large enough
   // to maintain the minimum young gen size
   bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
-  return align_size_down(bytes, virtual_space()->alignment());
+  return align_down(bytes, virtual_space()->alignment());
 }
 
 void PSYoungGen::reset_after_change() {
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -257,10 +257,10 @@
 
   if (eden_size < minimum_eden_size) {
     // May happen due to 64Kb rounding, if so adjust eden size back up
-    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
+    minimum_eden_size = align_up(minimum_eden_size, alignment);
     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
     uintx unaligned_survivor_size =
-      align_size_down(maximum_survivor_size, alignment);
+      align_down(maximum_survivor_size, alignment);
     survivor_size = MAX2(unaligned_survivor_size, alignment);
     eden_size = size - (2*survivor_size);
     assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
@@ -386,10 +386,10 @@
       if (new_size_candidate <= max_uintx - thread_increase_size) {
         new_size_candidate += thread_increase_size;
 
-        // 3. Check an overflow at 'align_size_up'.
+        // 3. Check an overflow at 'align_up'.
         size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
         if (new_size_candidate <= aligned_max) {
-          desired_new_size = align_size_up(new_size_candidate, alignment);
+          desired_new_size = align_up(new_size_candidate, alignment);
         }
       }
     }
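
The reworded comment above ("Check an overflow at align_up") guards the one step in this sizing chain that can wrap: align_up adds alignment - 1 before masking. aligned_max is chosen so that any candidate at or below it cannot wrap; a sketch of that guard, assuming the usual power-of-two arithmetic:

  #include <cstdint>
  #include <limits>

  bool sketch_align_up_is_safe(uint64_t candidate, uint64_t alignment) {
    // Same bound as the hunk above: the largest aligned value that still leaves
    // room for the + (alignment - 1) performed inside align_up.
    uint64_t aligned_max =
        (std::numeric_limits<uint64_t>::max() - alignment) & ~(alignment - 1);
    return candidate <= aligned_max;  // safe to call align_up(candidate, alignment)
  }
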
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -143,7 +143,7 @@
   // gen_size.
   size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
     size_t n = gen_size / (SurvivorRatio + 2);
-    return n > alignment ? align_size_down(n, alignment) : alignment;
+    return n > alignment ? align_down(n, alignment) : alignment;
   }
 
  public:  // was "protected" but caused compile error on win32
--- a/hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -73,8 +73,8 @@
   // interface, so it is "exactly precise" (if i may be allowed the adverbial
   // redundancy for emphasis) and does not include narrow oop slots not
   // included in the original write interval.
-  HeapWord* aligned_start = align_ptr_down(start, HeapWordSize);
-  HeapWord* aligned_end   = align_ptr_up  (end,   HeapWordSize);
+  HeapWord* aligned_start = align_down(start, HeapWordSize);
+  HeapWord* aligned_end   = align_up  (end,   HeapWordSize);
   // If compressed oops were not being used, these should already be aligned
   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
          "Expected heap word alignment of start and end");
--- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -42,7 +42,7 @@
                                         "uninitialized, check declaration order");
   assert(_page_size != 0, "uninitialized, check declaration order");
   const size_t granularity = os::vm_allocation_granularity();
-  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
+  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
 }
 
 CardTableModRefBS::CardTableModRefBS(
@@ -110,7 +110,7 @@
   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
 
   jbyte* guard_card = &_byte_map[_guard_index];
-  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
+  uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size);
   _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
   os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                             !ExecMem, "card table last card");
@@ -152,7 +152,7 @@
   _covered[res].set_start(base);
   _covered[res].set_word_size(0);
   jbyte* ct_start = byte_for(base);
-  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
+  uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size);
   _committed[res].set_start((HeapWord*)ct_start_aligned);
   _committed[res].set_word_size(0);
   return res;
@@ -212,7 +212,7 @@
     }
     // Align the end up to a page size (starts are already aligned).
     jbyte* const new_end = byte_after(new_region.last());
-    HeapWord* new_end_aligned = (HeapWord*) align_ptr_up(new_end, _page_size);
+    HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size);
     assert((void*)new_end_aligned >= (void*) new_end, "align up, but less");
     // Check the other regions (excludes "ind") to ensure that
     // the new_end_aligned does not intrude onto the committed
@@ -368,8 +368,8 @@
 
 
 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
-  assert(align_ptr_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
-  assert(align_ptr_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
+  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   jbyte* cur  = byte_for(mr.start());
   jbyte* last = byte_after(mr.last());
   while (cur < last) {
@@ -379,8 +379,8 @@
 }
 
 void CardTableModRefBS::invalidate(MemRegion mr) {
-  assert(align_ptr_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
-  assert(align_ptr_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
+  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   for (int i = 0; i < _cur_covered_regions; i++) {
     MemRegion mri = mr.intersection(_covered[i]);
     if (!mri.is_empty()) dirty_MemRegion(mri);
--- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -168,7 +168,7 @@
   // in, um, words.
   inline size_t cards_required(size_t covered_words) {
     // Add one for a guard card, used to detect errors.
-    const size_t words = align_size_up(covered_words, card_size_in_words);
+    const size_t words = align_up(covered_words, card_size_in_words);
     return words / card_size_in_words + 1;
   }
 
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -374,7 +374,7 @@
   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
               sizeof(jint) *
               ((juint) max_jint / (size_t) HeapWordSize);
-  return align_size_down(max_int_size, MinObjAlignment);
+  return align_down(max_int_size, MinObjAlignment);
 }
 
 // Helper for ReduceInitialCardMarks. For performance,
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.inline.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -268,12 +268,12 @@
     return addr;
   }
 
-  assert(is_ptr_aligned(addr, HeapWordSize),
+  assert(is_aligned(addr, HeapWordSize),
          "Address " PTR_FORMAT " is not properly aligned.", p2i(addr));
-  assert(is_size_aligned(alignment_in_bytes, HeapWordSize),
+  assert(is_aligned(alignment_in_bytes, HeapWordSize),
          "Alignment size %u is incorrect.", alignment_in_bytes);
 
-  HeapWord* new_addr = align_ptr_up(addr, alignment_in_bytes);
+  HeapWord* new_addr = align_up(addr, alignment_in_bytes);
   size_t padding = pointer_delta(new_addr, addr);
 
   if (padding == 0) {
--- a/hotspot/src/share/vm/gc/shared/collectorPolicy.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/collectorPolicy.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -105,9 +105,9 @@
   }
 
   // User inputs from -Xmx and -Xms must be aligned
-  _min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment);
-  size_t aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment);
-  size_t aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment);
+  _min_heap_byte_size = align_up(_min_heap_byte_size, _heap_alignment);
+  size_t aligned_initial_heap_size = align_up(InitialHeapSize, _heap_alignment);
+  size_t aligned_max_heap_size = align_up(MaxHeapSize, _heap_alignment);
 
   // Write back to flags if the values changed
   if (aligned_initial_heap_size != InitialHeapSize) {
@@ -133,7 +133,7 @@
   _initial_heap_byte_size = InitialHeapSize;
   _max_heap_byte_size = MaxHeapSize;
 
-  FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment));
+  FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_up(MinHeapDeltaBytes, _space_alignment));
 
   DEBUG_ONLY(CollectorPolicy::assert_flags();)
 }
@@ -198,7 +198,7 @@
 {}
 
 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
-  return align_size_down_bounded(base_size / (NewRatio + 1), _gen_alignment);
+  return align_down_bounded(base_size / (NewRatio + 1), _gen_alignment);
 }
 
 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
@@ -221,11 +221,11 @@
 
 size_t GenCollectorPolicy::young_gen_size_lower_bound() {
   // The young generation must be aligned and have room for eden + two survivors
-  return align_size_up(3 * _space_alignment, _gen_alignment);
+  return align_up(3 * _space_alignment, _gen_alignment);
 }
 
 size_t GenCollectorPolicy::old_gen_size_lower_bound() {
-  return align_size_up(_space_alignment, _gen_alignment);
+  return align_up(_space_alignment, _gen_alignment);
 }
 
 #ifdef ASSERT
@@ -287,7 +287,7 @@
 
   // Make sure the heap is large enough for two generations
   size_t smallest_new_size = young_gen_size_lower_bound();
-  size_t smallest_heap_size = align_size_up(smallest_new_size + old_gen_size_lower_bound(),
+  size_t smallest_heap_size = align_up(smallest_new_size + old_gen_size_lower_bound(),
                                            _heap_alignment);
   if (MaxHeapSize < smallest_heap_size) {
     FLAG_SET_ERGO(size_t, MaxHeapSize, smallest_heap_size);
@@ -311,7 +311,7 @@
   // Now take the actual NewSize into account. We will silently increase NewSize
   // if the user specified a smaller or unaligned value.
   size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
-  bounded_new_size = MAX2(smallest_new_size, align_size_down(bounded_new_size, _gen_alignment));
+  bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, _gen_alignment));
   if (bounded_new_size != NewSize) {
     FLAG_SET_ERGO(size_t, NewSize, bounded_new_size);
   }
@@ -334,8 +334,8 @@
       }
     } else if (MaxNewSize < _initial_young_size) {
       FLAG_SET_ERGO(size_t, MaxNewSize, _initial_young_size);
-    } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
-      FLAG_SET_ERGO(size_t, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
+    } else if (!is_aligned(MaxNewSize, _gen_alignment)) {
+      FLAG_SET_ERGO(size_t, MaxNewSize, align_down(MaxNewSize, _gen_alignment));
     }
     _max_young_size = MaxNewSize;
   }
@@ -359,8 +359,8 @@
   if (OldSize < old_gen_size_lower_bound()) {
     FLAG_SET_ERGO(size_t, OldSize, old_gen_size_lower_bound());
   }
-  if (!is_size_aligned(OldSize, _gen_alignment)) {
-    FLAG_SET_ERGO(size_t, OldSize, align_size_down(OldSize, _gen_alignment));
+  if (!is_aligned(OldSize, _gen_alignment)) {
+    FLAG_SET_ERGO(size_t, OldSize, align_down(OldSize, _gen_alignment));
   }
 
   if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
@@ -370,7 +370,7 @@
     assert(NewRatio > 0, "NewRatio should have been set up earlier");
     size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
 
-    calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment);
+    calculated_heapsize = align_up(calculated_heapsize, _heap_alignment);
     FLAG_SET_ERGO(size_t, MaxHeapSize, calculated_heapsize);
     _max_heap_byte_size = MaxHeapSize;
     FLAG_SET_ERGO(size_t, InitialHeapSize, calculated_heapsize);
@@ -384,7 +384,7 @@
       // exceed it. Adjust New/OldSize as necessary.
       size_t calculated_size = NewSize + OldSize;
       double shrink_factor = (double) MaxHeapSize / calculated_size;
-      size_t smaller_new_size = align_size_down((size_t)(NewSize * shrink_factor), _gen_alignment);
+      size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), _gen_alignment);
       FLAG_SET_ERGO(size_t, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
       _initial_young_size = NewSize;
 
@@ -394,7 +394,7 @@
       // is a multiple of _gen_alignment.
       FLAG_SET_ERGO(size_t, OldSize, MaxHeapSize - NewSize);
     } else {
-      FLAG_SET_ERGO(size_t, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment));
+      FLAG_SET_ERGO(size_t, MaxHeapSize, align_up(NewSize + OldSize, _heap_alignment));
       _max_heap_byte_size = MaxHeapSize;
     }
   }
--- a/hotspot/src/share/vm/gc/shared/generationSpec.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/generationSpec.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -41,8 +41,8 @@
 public:
   GenerationSpec(Generation::Name name, size_t init_size, size_t max_size, size_t alignment) :
     _name(name),
-    _init_size(align_size_up(init_size, alignment)),
-    _max_size(align_size_up(max_size, alignment))
+    _init_size(align_up(init_size, alignment)),
+    _max_size(align_up(max_size, alignment))
   { }
 
   Generation* init(ReservedSpace rs, CardTableRS* remset);
--- a/hotspot/src/share/vm/gc/shared/space.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/space.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -656,7 +656,7 @@
   if (pointer_delta(end_value, obj) >= size) {
     HeapWord* new_top = obj + size;
     set_top(new_top);
-    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
+    assert(::is_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
       "checking alignment");
     return obj;
   } else {
--- a/hotspot/src/share/vm/gc/shared/space.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/space.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -154,7 +154,7 @@
 
   // Test whether p is double-aligned
   static bool is_aligned(void* p) {
-    return is_ptr_aligned(p, sizeof(double));
+    return ::is_aligned(p, sizeof(double));
   }
 
   // Size computations.  Sizes are in bytes.
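
The :: qualifier in the two space hunks is not cosmetic: the enclosing class in space.hpp declares its own static is_aligned(void*) member (the double-alignment test just above), and a member name hides the identically named global helper inside class code, so the two-argument call must be written ::is_aligned. A minimal repro of that lookup rule, with illustrative names:

  #include <cstdint>

  inline bool is_aligned(const void* p, uintptr_t alignment) {  // global helper
    return (reinterpret_cast<uintptr_t>(p) & (alignment - 1)) == 0;
  }

  struct SpaceLike {
    static bool is_aligned(void* p) {          // the member hides the global name entirely
      return ::is_aligned(p, sizeof(double));  // so the two-argument call needs ::
    }
  };
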
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -149,7 +149,7 @@
 size_t MmapArrayAllocator<E, F>::size_for(size_t length) {
   size_t size = length * sizeof(E);
   int alignment = os::vm_allocation_granularity();
-  return align_size_up(size, alignment);
+  return align_up(size, alignment);
 }
 
 template <class E, MEMFLAGS F>
--- a/hotspot/src/share/vm/memory/filemap.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -563,7 +563,7 @@
 // Align file position to an allocation unit boundary.
 
 void FileMapInfo::align_file_position() {
-  size_t new_file_offset = align_size_up(_file_offset,
+  size_t new_file_offset = align_up(_file_offset,
                                          os::vm_allocation_granularity());
   if (new_file_offset != _file_offset) {
     _file_offset = new_file_offset;
@@ -613,7 +613,7 @@
     return true;
   }
   size_t used = si->_used;
-  size_t size = align_size_up(used, os::vm_allocation_granularity());
+  size_t size = align_up(used, os::vm_allocation_granularity());
   if (!open_for_read()) {
     return false;
   }
@@ -664,7 +664,7 @@
   struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
   size_t used = si->_used;
   size_t alignment = os::vm_allocation_granularity();
-  size_t size = align_size_up(used, alignment);
+  size_t size = align_up(used, alignment);
   char *requested_addr = _header->region_addr(i);
 
   // If a tool agent is in use (debugging enabled), we must map the address space RW
@@ -831,7 +831,7 @@
   assert(!MetaspaceShared::is_string_region(i), "sanity");
   struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
   size_t used = si->_used;
-  size_t size = align_size_up(used, os::vm_allocation_granularity());
+  size_t size = align_up(used, os::vm_allocation_granularity());
 
   if (used == 0) {
     return;
--- a/hotspot/src/share/vm/memory/filemap.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/filemap.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -254,9 +254,9 @@
 
   // The ro+rw+md+mc spaces size
   static size_t core_spaces_size() {
-    return align_size_up((SharedReadOnlySize + SharedReadWriteSize +
-                          SharedMiscDataSize + SharedMiscCodeSize),
-                          os::vm_allocation_granularity());
+    return align_up((SharedReadOnlySize + SharedReadWriteSize +
+                     SharedMiscDataSize + SharedMiscCodeSize),
+                     os::vm_allocation_granularity());
   }
 
   // The estimated optional space size.
--- a/hotspot/src/share/vm/memory/heap.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/heap.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -112,7 +112,7 @@
   }
 
   const size_t granularity = os::vm_allocation_granularity();
-  const size_t c_size = align_size_up(committed_size, page_size);
+  const size_t c_size = align_up(committed_size, page_size);
 
   os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                        rs.base(), rs.size());
@@ -125,7 +125,7 @@
   _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
   const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
-  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
+  const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
   const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
 
   // reserve space for _segmap
--- a/hotspot/src/share/vm/memory/metachunk.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/metachunk.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -42,7 +42,7 @@
 }
 
 size_t Metachunk::overhead() {
-  return align_size_up(sizeof(Metachunk), object_alignment()) / BytesPerWord;
+  return align_up(sizeof(Metachunk), object_alignment()) / BytesPerWord;
 }
 
 // Metachunk methods
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -462,16 +462,10 @@
   void print_on(outputStream* st) const;
 };
 
-#define assert_is_ptr_aligned(ptr, alignment) \
-  assert(is_ptr_aligned(ptr, alignment),      \
-         PTR_FORMAT " is not aligned to "     \
-         SIZE_FORMAT, p2i(ptr), alignment)
-
-#define assert_is_size_aligned(size, alignment) \
-  assert(is_size_aligned(size, alignment),      \
-         SIZE_FORMAT " is not aligned to "      \
-         SIZE_FORMAT, size, alignment)
-
+#define assert_is_aligned(value, alignment)                  \
+  assert(is_aligned((value), (alignment)),                   \
+         SIZE_FORMAT_HEX " is not aligned to "               \
+         SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
 
 // Decide if large pages should be committed when the memory is reserved.
 static bool should_commit_large_pages_when_reserving(size_t bytes) {
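
The two metaspace assertion macros can collapse into one because is_aligned no longer cares whether its first argument is a pointer or a size; the (size_t)(uintptr_t) cast in the message is only there so SIZE_FORMAT_HEX can print either kind of value. Usage after the merge, as it appears in the hunks below:

  assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());  // pointer argument
  assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());  // size argument
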
@@ -489,7 +483,7 @@
 
   // byte_size is the size of the associated virtualspace.
 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
-  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
+  assert_is_aligned(bytes, Metaspace::reserve_alignment());
 
 #if INCLUDE_CDS
   // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
@@ -497,7 +491,7 @@
   // memory addresses don't conflict.
   if (DumpSharedSpaces) {
     bool large_pages = false; // No large pages when dumping the CDS archive.
-    char* shared_base = align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
+    char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 
     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
     if (_rs.is_reserved()) {
@@ -522,8 +516,8 @@
   if (_rs.is_reserved()) {
     assert(_rs.base() != NULL, "Catch if we get a NULL address");
     assert(_rs.size() != 0, "Catch if we get a 0 size");
-    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
-    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
+    assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
+    assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
 
     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
   }
@@ -863,7 +857,7 @@
     size_t byte_size = word_size * BytesPerWord;
 
     size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
-    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
+    raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
 
     size_t raw_word_size = raw_bytes_size / BytesPerWord;
     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
@@ -1068,8 +1062,8 @@
   // These are necessary restriction to make sure that the virtual space always
   // grows in steps of Metaspace::commit_alignment(). If both base and size are
   // aligned only the middle alignment of the VirtualSpace is used.
-  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
-  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
+  assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
+  assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
 
   // ReservedSpaces marked as special will have the entire memory
   // pre-committed. Setting a committed size will make sure that
@@ -1323,7 +1317,7 @@
 
   // Reserve the space
   size_t vs_byte_size = vs_word_size * BytesPerWord;
-  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
+  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
 
   // Allocate the meta virtual space and initialize it.
   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
@@ -1378,8 +1372,8 @@
 }
 
 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
-  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
-  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
+  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
+  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
   assert(min_words <= preferred_words, "Invalid arguments");
 
   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
@@ -1404,7 +1398,7 @@
 
   // Get another virtual space.
   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
-  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
+  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
 
   if (create_new_virtual_space(grow_vs_words)) {
     if (current_virtual_space()->is_pre_committed()) {
@@ -1435,8 +1429,8 @@
   // The expand amount is currently only determined by the requested sizes
   // and not how much committed memory is left in the current virtual space.
 
-  size_t min_word_size       = align_size_up(chunk_word_size,              Metaspace::commit_alignment_words());
-  size_t preferred_word_size = align_size_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
+  size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
+  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
   if (min_word_size >= preferred_word_size) {
     // Can happen when humongous chunks are allocated.
     preferred_word_size = min_word_size;
@@ -1488,7 +1482,7 @@
 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
   size_t min_delta = MinMetaspaceExpansion;
   size_t max_delta = MaxMetaspaceExpansion;
-  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
+  size_t delta = align_up(bytes, Metaspace::commit_alignment());
 
   if (delta <= min_delta) {
     delta = min_delta;
@@ -1503,7 +1497,7 @@
     delta = delta + min_delta;
   }
 
-  assert_is_size_aligned(delta, Metaspace::commit_alignment());
+  assert_is_aligned(delta, Metaspace::commit_alignment());
 
   return delta;
 }
@@ -1515,14 +1509,14 @@
 }
 
 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
-  assert_is_size_aligned(v, Metaspace::commit_alignment());
+  assert_is_aligned(v, Metaspace::commit_alignment());
 
   size_t capacity_until_GC = (size_t) _capacity_until_GC;
   size_t new_value = capacity_until_GC + v;
 
   if (new_value < capacity_until_GC) {
     // The addition wrapped around, set new_value to aligned max value.
-    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
+    new_value = align_down(max_uintx, Metaspace::commit_alignment());
   }
 
   intptr_t expected = (intptr_t) capacity_until_GC;
@@ -1542,7 +1536,7 @@
 }
 
 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
-  assert_is_size_aligned(v, Metaspace::commit_alignment());
+  assert_is_aligned(v, Metaspace::commit_alignment());
 
   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
 }
@@ -1628,7 +1622,7 @@
     // If we have less capacity below the metaspace HWM, then
     // increment the HWM.
     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
-    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
+    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
     // Don't expand unless it's significant
     if (expand_bytes >= MinMetaspaceExpansion) {
       size_t new_capacity_until_GC = 0;
@@ -1681,7 +1675,7 @@
       // size without shrinking, it goes back to 0%.
       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
 
-      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
+      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
 
       assert(shrink_bytes <= max_shrink_bytes,
              "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
@@ -2240,7 +2234,7 @@
   // humongous allocations sizes to be aligned up to
   // the smallest chunk size.
   size_t if_humongous_sized_chunk =
-    align_size_up(word_size + Metachunk::overhead(),
+    align_up(word_size + Metachunk::overhead(),
                   smallest_chunk_size());
   chunk_word_size =
     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
@@ -3099,9 +3093,9 @@
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
          "Metaspace size is too big");
-  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
-  assert_is_ptr_aligned(cds_base, _reserve_alignment);
-  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
+  assert_is_aligned(requested_addr, _reserve_alignment);
+  assert_is_aligned(cds_base, _reserve_alignment);
+  assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
 
   // Don't use large pages for the class space.
   bool large_pages = false;
@@ -3130,7 +3124,7 @@
     // Aix: Search for a place where we can find memory. If we need to load
     // the base, 4G alignment is helpful, too.
     size_t increment = AARCH64_ONLY(4*)G;
-    for (char *a = align_ptr_up(requested_addr, increment);
+    for (char *a = align_up(requested_addr, increment);
          a < (char*)(1024*G);
          a += increment) {
       if (a == (char *)(32*G)) {
@@ -3165,7 +3159,7 @@
   if (!metaspace_rs.is_reserved()) {
 #if INCLUDE_CDS
     if (UseSharedSpaces) {
-      size_t increment = align_size_up(1*G, _reserve_alignment);
+      size_t increment = align_up(1*G, _reserve_alignment);
 
       // Keep trying to allocate the metaspace, increasing the requested_addr
       // by 1GB each time, until we reach an address that will no longer allow
@@ -3269,20 +3263,20 @@
   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
   // globals.hpp to the aligned value, but this is not possible, since the
   // alignment depends on other flags being parsed.
-  MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
+  MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
 
   if (MetaspaceSize > MaxMetaspaceSize) {
     MetaspaceSize = MaxMetaspaceSize;
   }
 
-  MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
+  MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
 
   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
 
-  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
-  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
-
-  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
+  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
+  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
+
+  CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
   set_compressed_class_space_size(CompressedClassSpaceSize);
 }
 
@@ -3299,16 +3293,16 @@
 #if INCLUDE_CDS
     MetaspaceShared::estimate_regions_size();
 
-    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
-    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
-    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
-    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
+    SharedReadOnlySize  = align_up(SharedReadOnlySize,  max_alignment);
+    SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
+    SharedMiscDataSize  = align_up(SharedMiscDataSize,  max_alignment);
+    SharedMiscCodeSize  = align_up(SharedMiscCodeSize,  max_alignment);
 
     // Initialize with the sum of the shared space sizes.  The read-only
     // and read write metaspace chunks will be allocated out of this and the
     // remainder is the misc code and data chunks.
     cds_total = FileMapInfo::shared_spaces_size();
-    cds_total = align_size_up(cds_total, _reserve_alignment);
+    cds_total = align_up(cds_total, _reserve_alignment);
     _space_list = new VirtualSpaceList(cds_total/wordSize);
     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
 
@@ -3355,7 +3349,7 @@
 #ifdef _LP64
         if (using_class_space()) {
           char* cds_end = (char*)(cds_address + cds_total);
-          cds_end = align_ptr_up(cds_end, _reserve_alignment);
+          cds_end = align_up(cds_end, _reserve_alignment);
           // If UseCompressedClassPointers is set then allocate the metaspace area
           // above the heap and above the CDS area (if it exists).
           allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
@@ -3373,7 +3367,7 @@
 
 #ifdef _LP64
     if (!UseSharedSpaces && using_class_space()) {
-      char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
+      char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
       allocate_metaspace_compressed_klass_ptrs(base, 0);
     }
 #endif // _LP64
@@ -3390,7 +3384,7 @@
     // Arbitrarily set the initial virtual space to a multiple
     // of the boot class loader size.
     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
-    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
+    word_size = align_up(word_size, Metaspace::reserve_alignment_words());
 
     // Initialize the list of virtual spaces.
     _space_list = new VirtualSpaceList(word_size);
@@ -4147,7 +4141,7 @@
       return sizes[rand];
     } else {
       // Note: this affects the max. size of space (see _vsn initialization in ctor).
-      return align_size_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
+      return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
     }
   }
 
@@ -4294,7 +4288,7 @@
 public:
 
   ChunkManagerReturnTestImpl()
-    : _vsn(align_size_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
+    : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
     , _cm(SpecializedChunk, SmallChunk, MediumChunk)
     , _chunks_in_chunkmanager(0)
     , _words_in_chunkmanager(0)
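
For readers scanning the metaspace.cpp hunks above, the one recurring anchor is the assert_is_aligned macro introduced at the top of this section: a single name now checks both byte sizes and raw pointers, because the value is cast through uintptr_t before the check. The following is a minimal standalone sketch of that pattern; the sketch_-prefixed helpers, the 64 KB "reserve alignment", and the use of <cassert> are assumptions for illustration, not the HotSpot definitions.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for the renamed helpers (power-of-two alignments
    // assumed); HotSpot's real templates appear in the globalDefinitions.hpp
    // hunk further down in this change.
    static inline std::size_t sketch_align_up(std::size_t size, std::size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }
    static inline bool sketch_is_aligned(std::size_t size, std::size_t alignment) {
      return (size & (alignment - 1)) == 0;
    }

    // Same shape as assert_is_aligned above: one macro covers sizes and
    // pointers because the value is cast through uintptr_t before the check.
    #define sketch_assert_is_aligned(value, alignment) \
      assert(sketch_is_aligned((std::size_t)(std::uintptr_t)(value), (alignment)))

    int main() {
      const std::size_t reserve_alignment = 64 * 1024;      // assumed value for the example
      std::size_t bytes = sketch_align_up(100000, reserve_alignment);
      sketch_assert_is_aligned(bytes, reserve_alignment);   // size operand

      alignas(64) static char region[64];
      sketch_assert_is_aligned(&region[0], 64);             // pointer operand, same macro
      std::printf("reserved %zu bytes\n", bytes);
      return 0;
    }
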
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -83,8 +83,8 @@
 char* SharedMiscRegion::alloc(size_t num_bytes) {
   assert(DumpSharedSpaces, "dump time only");
   size_t alignment = sizeof(char*);
-  num_bytes = align_size_up(num_bytes, alignment);
-  _alloc_top = align_ptr_up(_alloc_top, alignment);
+  num_bytes = align_up(num_bytes, alignment);
+  _alloc_top = align_up(_alloc_top, alignment);
   if (_alloc_top + num_bytes > _vs.high()) {
     report_out_of_shared_space(_space_type);
   }
--- a/hotspot/src/share/vm/memory/padded.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/padded.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -33,7 +33,7 @@
 // when the start address is not a multiple of alignment; the second maintains
 // alignment of starting addresses that happen to be a multiple.
 #define PADDING_SIZE(type, alignment)                           \
-  ((alignment) + align_size_up_(sizeof(type), alignment))
+  ((alignment) + align_up_(sizeof(type), (alignment)))
 
 // Templates to create a subclass padded to avoid cache line sharing.  These are
 // effective only when applied to derived-most (leaf) classes.
@@ -68,7 +68,7 @@
   // No padding.
 };
 
-#define PADDED_END_SIZE(type, alignment) (align_size_up_(sizeof(type), alignment) - sizeof(type))
+#define PADDED_END_SIZE(type, alignment) (align_up_(sizeof(type), (alignment)) - sizeof(type))
 
 // More memory conservative implementation of Padded. The subclass adds the
 // minimal amount of padding needed to make the size of the objects be aligned.
--- a/hotspot/src/share/vm/memory/padded.inline.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/padded.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -35,13 +35,13 @@
 template <class T, MEMFLAGS flags, size_t alignment>
 PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
   // Check that the PaddedEnd class works as intended.
-  STATIC_ASSERT(is_size_aligned_(sizeof(PaddedEnd<T>), alignment));
+  STATIC_ASSERT(is_aligned_(sizeof(PaddedEnd<T>), alignment));
 
   // Allocate a chunk of memory large enough to allow for some alignment.
   void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, flags);
 
   // Make the initial alignment.
-  PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_ptr_up(chunk, alignment);
+  PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_up(chunk, alignment);
 
   // Call the default constructor for each element.
   for (uint i = 0; i < length; i++) {
@@ -54,9 +54,9 @@
 template <class T, MEMFLAGS flags, size_t alignment>
 T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
   // Calculate and align the size of the first dimension's table.
-  size_t table_size = align_size_up_(rows * sizeof(T*), alignment);
+  size_t table_size = align_up_(rows * sizeof(T*), alignment);
   // The size of the separate rows.
-  size_t row_size = align_size_up_(columns * sizeof(T), alignment);
+  size_t row_size = align_up_(columns * sizeof(T), alignment);
   // Total size consists of the indirection table plus the rows.
   size_t total_size = table_size + rows * row_size + alignment;
 
@@ -65,7 +65,7 @@
   // Clear the allocated memory.
   memset(chunk, 0, total_size);
   // Align the chunk of memory.
-  T** result = (T**)align_ptr_up(chunk, alignment);
+  T** result = (T**)align_up(chunk, alignment);
   void* data_start = (void*)((uintptr_t)result + table_size);
 
   // Fill in the row table.
@@ -87,7 +87,7 @@
 
   memset(chunk, 0, length * sizeof(T) + alignment);
 
-  return (T*)align_ptr_up(chunk, alignment);
+  return (T*)align_up(chunk, alignment);
 }
 
 #endif // SHARE_VM_MEMORY_PADDED_INLINE_HPP
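
The padded.hpp and padded.inline.hpp hunks above use the underscore macro align_up_ for constant padding arithmetic and the function align_up for the runtime pointer fix-up. As a worked example (values chosen for illustration): with rows = 4, sizeof(T*) = 8 and alignment = 64, the row-pointer table takes align_up_(4 * 8, 64) = 64 bytes; with columns = 10 and sizeof(T) = 4, each row takes align_up_(10 * 4, 64) = 64 bytes, so total_size = 64 + 4 * 64 + 64 = 384 bytes, the trailing 64 absorbing whatever misalignment the raw allocation has. A hedged sketch of that layout, with demo_-prefixed helpers and std::malloc standing in for AllocateHeap:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    // Hypothetical helpers standing in for align_up_ / align_up
    // (power-of-two alignments assumed).
    static inline std::size_t demo_align_up(std::size_t v, std::size_t a) {
      return (v + a - 1) & ~(a - 1);
    }
    template <typename T>
    static inline T* demo_align_up(T* p, std::size_t a) {
      return reinterpret_cast<T*>(demo_align_up(reinterpret_cast<std::uintptr_t>(p), a));
    }

    // Same layout idea as Padded2DArray::create_unfreeable above: an aligned
    // row-pointer table followed by aligned rows, carved out of one allocation.
    static int** demo_create_2d(unsigned rows, unsigned columns, std::size_t alignment) {
      std::size_t table_size = demo_align_up(rows * sizeof(int*), alignment);
      std::size_t row_size   = demo_align_up(columns * sizeof(int), alignment);
      std::size_t total_size = table_size + rows * row_size + alignment;

      void* chunk = std::malloc(total_size);
      if (chunk == nullptr) return nullptr;
      std::memset(chunk, 0, total_size);

      int** result = demo_align_up(static_cast<int**>(chunk), alignment);
      char* data_start = reinterpret_cast<char*>(result) + table_size;
      for (unsigned i = 0; i < rows; i++) {
        result[i] = reinterpret_cast<int*>(data_start + i * row_size);
      }
      return result;  // the sketch leaks 'chunk'; HotSpot's version is unfreeable by design
    }

    int main() {
      int** a = demo_create_2d(4, 10, 64);
      if (a == nullptr) return 1;
      a[3][9] = 42;   // last element of the last row, within the 384-byte chunk
      return a[3][9] == 42 ? 0 : 1;
    }
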
--- a/hotspot/src/share/vm/memory/universe.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/universe.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -552,7 +552,7 @@
 
 
 bool Universe::on_page_boundary(void* addr) {
-  return is_ptr_aligned(addr, os::vm_page_size());
+  return is_aligned(addr, os::vm_page_size());
 }
 
 
@@ -818,11 +818,11 @@
          "actual alignment " SIZE_FORMAT " must be within maximum heap alignment " SIZE_FORMAT,
          alignment, Arguments::conservative_max_heap_alignment());
 
-  size_t total_reserved = align_size_up(heap_size, alignment);
+  size_t total_reserved = align_up(heap_size, alignment);
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
       "heap size is too big for compressed oops");
 
-  bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
+  bool use_large_pages = UseLargePages && is_aligned(alignment, os::large_page_size());
   assert(!UseLargePages
       || UseParallelGC
       || use_large_pages, "Wrong alignment to use large pages");
--- a/hotspot/src/share/vm/memory/virtualspace.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/memory/virtualspace.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -47,7 +47,7 @@
     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
     // ReservedSpace initialization requires size to be aligned to the given
     // alignment. Align the size up.
-    size = align_size_up(size, alignment);
+    size = align_up(size, alignment);
   } else {
     // Don't force the alignment to be large page aligned,
     // since that will waste memory.
@@ -172,7 +172,7 @@
       // Base not aligned, retry
       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
       // Make sure that size is aligned
-      size = align_size_up(size, alignment);
+      size = align_up(size, alignment);
       base = os::reserve_memory_aligned(size, alignment);
 
       if (requested_address != 0 &&
@@ -227,22 +227,22 @@
 
 
 size_t ReservedSpace::page_align_size_up(size_t size) {
-  return align_size_up(size, os::vm_page_size());
+  return align_up(size, os::vm_page_size());
 }
 
 
 size_t ReservedSpace::page_align_size_down(size_t size) {
-  return align_size_down(size, os::vm_page_size());
+  return align_down(size, os::vm_page_size());
 }
 
 
 size_t ReservedSpace::allocation_align_size_up(size_t size) {
-  return align_size_up(size, os::vm_allocation_granularity());
+  return align_up(size, os::vm_allocation_granularity());
 }
 
 
 size_t ReservedSpace::allocation_align_size_down(size_t size) {
-  return align_size_down(size, os::vm_allocation_granularity());
+  return align_down(size, os::vm_allocation_granularity());
 }
 
 
@@ -383,7 +383,7 @@
   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 
   const size_t stepsize = (attach_range == 0) ? // Only one try.
-    (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
+    (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 
   // Try attach points from top to bottom.
   char* attach_point = highest_start;
@@ -463,7 +463,7 @@
     NOT_AIX(os::vm_allocation_granularity());
   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 
-  char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
+  char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
     noaccess_prefix_size(alignment) : 0;
 
@@ -492,8 +492,8 @@
     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 
       // Calc address range within we try to attach (range of possible start addresses).
-      char* const highest_start = align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
-      char* const lowest_start  = align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
+      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
+      char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
     }
@@ -502,7 +502,7 @@
     // But leave room for the compressed class pointers, which is allocated above
     // the heap.
     char *zerobased_max = (char *)OopEncodingHeapMax;
-    const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
+    const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
     // For small heaps, save some space for compressed class pointer
     // space so it can be decoded with no base.
     if (UseCompressedClassPointers && !UseSharedSpaces &&
@@ -517,7 +517,7 @@
          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 
       // Calc address range within we try to attach (range of possible start addresses).
-      char *const highest_start = align_ptr_down(zerobased_max - size, attach_point_alignment);
+      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
       // Need to be careful about size being guaranteed to be less
       // than UnscaledOopHeapMax due to type constraints.
       char *lowest_start = aligned_heap_base_min_address;
@@ -525,7 +525,7 @@
       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
       }
-      lowest_start = align_ptr_up(lowest_start, attach_point_alignment);
+      lowest_start = align_up(lowest_start, attach_point_alignment);
       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
     }
@@ -562,7 +562,7 @@
   }
 
   // Heap size should be aligned to alignment, too.
-  guarantee(is_size_aligned(size, alignment), "set by caller");
+  guarantee(is_aligned(size, alignment), "set by caller");
 
   if (UseCompressedOops) {
     initialize_compressed_heap(size, alignment, large);
@@ -751,8 +751,8 @@
 }
 
 static void pretouch_expanded_memory(void* start, void* end) {
-  assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
-  assert(is_ptr_aligned(end,   os::vm_page_size()), "Unexpected alignment");
+  assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
+  assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 
   os::pretouch_memory(start, end);
 }
@@ -1037,7 +1037,7 @@
   static void test_reserved_space1(size_t size, size_t alignment) {
     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
 
-    assert(is_size_aligned(size, alignment), "Incorrect input parameters");
+    assert(is_aligned(size, alignment), "Incorrect input parameters");
 
     ReservedSpace rs(size,          // size
                      alignment,     // alignment
@@ -1049,8 +1049,8 @@
     assert(rs.base() != NULL, "Must be");
     assert(rs.size() == size, "Must be");
 
-    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
-    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
+    assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
+    assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
 
     if (rs.special()) {
       small_page_write(rs.base(), size);
@@ -1062,7 +1062,7 @@
   static void test_reserved_space2(size_t size) {
     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
 
-    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+    assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
 
     ReservedSpace rs(size);
 
@@ -1088,8 +1088,8 @@
       return;
     }
 
-    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
-    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
+    assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+    assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
 
     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
 
@@ -1244,7 +1244,7 @@
   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                         TestLargePages mode = Default) {
     size_t granularity = os::vm_allocation_granularity();
-    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
+    size_t reserve_size_aligned = align_up(reserve_size, granularity);
 
     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
 
--- a/hotspot/src/share/vm/oops/array.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/array.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -64,14 +64,14 @@
   // Can't distinguish between array of length 0 and length 1,
   // will always return 0 in those cases.
   static int bytes_to_length(size_t bytes)       {
-    assert(is_size_aligned(bytes, BytesPerWord), "Must be, for now");
+    assert(is_aligned(bytes, BytesPerWord), "Must be, for now");
 
     if (sizeof(Array<T>) >= bytes) {
       return 0;
     }
 
     size_t left = bytes - sizeof(Array<T>);
-    assert(is_size_aligned(left, sizeof(T)), "Must be");
+    assert(is_aligned(left, sizeof(T)), "Must be");
 
     size_t elements = left / sizeof(T);
     assert(elements <= (size_t)INT_MAX, "number of elements " SIZE_FORMAT "doesn't fit into an int.", elements);
@@ -122,7 +122,7 @@
   void release_at_put(int which, T contents) { OrderAccess::release_store(adr_at(which), contents); }
 
   static int size(int length) {
-    size_t bytes = align_size_up(byte_sizeof(length), BytesPerWord);
+    size_t bytes = align_up(byte_sizeof(length), BytesPerWord);
     size_t words = bytes / BytesPerWord;
 
     assert(words <= INT_MAX, "Overflow: " SIZE_FORMAT, words);
--- a/hotspot/src/share/vm/oops/arrayOop.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/arrayOop.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -50,7 +50,7 @@
   // Returns the aligned header_size_in_bytes.  This is not equivalent to
   // sizeof(arrayOopDesc) which should not appear in the code.
   static int header_size_in_bytes() {
-    size_t hs = align_size_up(length_offset_in_bytes() + sizeof(int),
+    size_t hs = align_up(length_offset_in_bytes() + sizeof(int),
                               HeapWordSize);
 #ifdef ASSERT
     // make sure it isn't called before UseCompressedOops is initialized.
@@ -112,7 +112,7 @@
     assert(type2aelembytes(type) != 0, "wrong type");
 
     const size_t max_element_words_per_size_t =
-      align_size_down((SIZE_MAX/HeapWordSize - header_size(type)), MinObjAlignment);
+      align_down((SIZE_MAX/HeapWordSize - header_size(type)), MinObjAlignment);
     const size_t max_elements_per_size_t =
       HeapWordSize * max_element_words_per_size_t / type2aelembytes(type);
     if ((size_t)max_jint < max_elements_per_size_t) {
@@ -120,7 +120,7 @@
       // (CollectedHeap, Klass::oop_oop_iterate(), and more) uses an int for
       // passing around the size (in words) of an object. So, we need to avoid
       // overflowing an int when we add the header. See CRs 4718400 and 7110613.
-      return align_size_down(max_jint - header_size(type), MinObjAlignment);
+      return align_down(max_jint - header_size(type), MinObjAlignment);
     }
     return (int32_t)max_elements_per_size_t;
   }
--- a/hotspot/src/share/vm/oops/constMethod.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/constMethod.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -128,7 +128,7 @@
   }
 
   // Align sizes up to a word.
-  extra_bytes = align_size_up(extra_bytes, BytesPerWord);
+  extra_bytes = align_up(extra_bytes, BytesPerWord);
 
   // One pointer per annotation array
   if (sizes->method_annotations_length() > 0) {
@@ -144,7 +144,7 @@
     extra_bytes += sizeof(AnnotationArray*);
   }
 
-  int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord;
+  int extra_words = align_up(extra_bytes, BytesPerWord) / BytesPerWord;
   assert(extra_words == extra_bytes/BytesPerWord, "should already be aligned");
   return align_metadata_size(header_size() + extra_words);
 }
--- a/hotspot/src/share/vm/oops/constMethod.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/constMethod.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -360,7 +360,7 @@
 
   // Sizing
   static int header_size() {
-    return align_size_up((int)sizeof(ConstMethod), wordSize) / wordSize;
+    return align_up((int)sizeof(ConstMethod), wordSize) / wordSize;
   }
 
   // Size needed
--- a/hotspot/src/share/vm/oops/constantPool.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPool.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -756,7 +756,7 @@
 
   // Sizing (in words)
   static int header_size()             {
-    return align_size_up((int)sizeof(ConstantPool), wordSize) / wordSize;
+    return align_up((int)sizeof(ConstantPool), wordSize) / wordSize;
   }
   static int size(int length)          { return align_metadata_size(header_size() + length); }
   int size() const                     { return size(length()); }
--- a/hotspot/src/share/vm/oops/cpCache.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCache.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -362,7 +362,7 @@
 
   // Code generation support
   static WordSize size()                         {
-    return in_WordSize(align_size_up((int)sizeof(ConstantPoolCacheEntry), wordSize) / wordSize);
+    return in_WordSize(align_up((int)sizeof(ConstantPoolCacheEntry), wordSize) / wordSize);
   }
   static ByteSize size_in_bytes()                { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
   static ByteSize indices_offset()               { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -100,7 +100,7 @@
 
   // sizeof(OopMapBlock) in words.
   static const int size_in_words() {
-    return align_size_up((int)sizeof(OopMapBlock), wordSize) >>
+    return align_up((int)sizeof(OopMapBlock), wordSize) >>
       LogBytesPerWord;
   }
 
--- a/hotspot/src/share/vm/oops/method.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -294,7 +294,7 @@
 int Method::size(bool is_native) {
   // If native, then include pointers for native_function and signature_handler
   int extra_bytes = (is_native) ? 2*sizeof(address*) : 0;
-  int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord;
+  int extra_words = align_up(extra_bytes, BytesPerWord) / BytesPerWord;
   return align_metadata_size(header_size() + extra_words);
 }
 
--- a/hotspot/src/share/vm/oops/method.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -667,7 +667,7 @@
 
   // sizing
   static int header_size()                       {
-    return align_size_up((int)sizeof(Method), wordSize) / wordSize;
+    return align_up((int)sizeof(Method), wordSize) / wordSize;
   }
   static int size(bool is_native);
   int size() const                               { return method_size(); }
--- a/hotspot/src/share/vm/oops/methodCounters.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/methodCounters.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -118,7 +118,7 @@
   AOT_ONLY(Method* method() const { return _method; })
 
   static int size() {
-    return align_size_up((int)sizeof(MethodCounters), wordSize) / wordSize;
+    return align_up((int)sizeof(MethodCounters), wordSize) / wordSize;
   }
 
   void clear_counters();
--- a/hotspot/src/share/vm/oops/methodData.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -937,7 +937,7 @@
 // profiling information about a given method.  Size is in words
 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
   int byte_size = compute_allocation_size_in_bytes(method);
-  int word_size = align_size_up(byte_size, BytesPerWord) / BytesPerWord;
+  int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
   return align_metadata_size(word_size);
 }
 
--- a/hotspot/src/share/vm/oops/methodData.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/methodData.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -2338,7 +2338,7 @@
 
   // My size
   int size_in_bytes() const { return _size; }
-  int size() const    { return align_metadata_size(align_size_up(_size, BytesPerWord)/BytesPerWord); }
+  int size() const    { return align_metadata_size(align_up(_size, BytesPerWord)/BytesPerWord); }
 #if INCLUDE_SERVICES
   void collect_statistics(KlassSizeStats *sz) const;
 #endif
--- a/hotspot/src/share/vm/oops/objArrayOop.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/oops/objArrayOop.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -62,7 +62,7 @@
     if (HeapWordsPerOop > 0) {
       old_res = length * HeapWordsPerOop;
     } else {
-      old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
+      old_res = align_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
     }
     assert(res == old_res, "Inconsistency between old and new.");
 #endif  // ASSERT
--- a/hotspot/src/share/vm/opto/compile.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -3889,7 +3889,7 @@
 
     // Align offset for type.
     int typesize = type_to_size_in_bytes(con->type());
-    offset = align_size_up(offset, typesize);
+    offset = align_up(offset, typesize);
     con->set_offset(offset);   // set constant's offset
 
     if (con->type() == T_VOID) {
@@ -3903,7 +3903,7 @@
   // Align size up to the next section start (which is insts; see
   // CodeBuffer::align_at_start).
   assert(_size == -1, "already set?");
-  _size = align_size_up(offset, (int)CodeEntryAlignment);
+  _size = align_up(offset, (int)CodeEntryAlignment);
 }
 
 void Compile::ConstantTable::emit(CodeBuffer& cb) {
--- a/hotspot/src/share/vm/opto/memnode.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -3560,7 +3560,7 @@
   intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
   intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, ti_limit);
   size_limit = MIN2(size_limit, ti_limit);
-  size_limit = align_size_up(size_limit, BytesPerLong);
+  size_limit = align_up(size_limit, BytesPerLong);
   int num_tiles = size_limit / BytesPerLong;
 
   // allocate space for the tile map:
@@ -3791,7 +3791,7 @@
 
     // update the map:
 
-    intptr_t this_int_off = align_size_down(st_off, BytesPerInt);
+    intptr_t this_int_off = align_down(st_off, BytesPerInt);
     if (this_int_off != int_map_off) {
       // reset the map:
       int_map = 0;
@@ -3805,7 +3805,7 @@
     }
 
     // Did this store hit or cross the word boundary?
-    intptr_t next_int_off = align_size_down(st_off + st_size, BytesPerInt);
+    intptr_t next_int_off = align_down(st_off + st_size, BytesPerInt);
     if (next_int_off == this_int_off + BytesPerInt) {
       // We passed the current int, without fully initializing it.
       int_map_off = next_int_off;
@@ -3895,7 +3895,7 @@
         //   zsize          0   0   0   0     4   0     4
         if (next_full_store < 0) {
           // Conservative tack:  Zero to end of current word.
-          zeroes_needed = align_size_up(zeroes_needed, BytesPerInt);
+          zeroes_needed = align_up(zeroes_needed, BytesPerInt);
         } else {
           // Zero to beginning of next fully initialized word.
           // Or, don't zero at all, if we are already in that word.
@@ -3908,7 +3908,7 @@
       if (zeroes_needed > zeroes_done) {
         intptr_t zsize = zeroes_needed - zeroes_done;
         // Do some incremental zeroing on rawmem, in parallel with inits.
-        zeroes_done = align_size_down(zeroes_done, BytesPerInt);
+        zeroes_done = align_down(zeroes_done, BytesPerInt);
         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                               zeroes_done, zeroes_needed,
                                               phase);
@@ -3941,7 +3941,7 @@
       assert(st_off >= last_init_end, "tiles do not overwrite inits");
       last_tile_end = MAX2(last_tile_end, next_init_off);
     } else {
-      intptr_t st_tile_end = align_size_up(next_init_off, BytesPerLong);
+      intptr_t st_tile_end = align_up(next_init_off, BytesPerLong);
       assert(st_tile_end >= last_tile_end, "inits stay with tiles");
       assert(st_off      >= last_init_end, "inits do not overlap");
       last_init_end = next_init_off;  // it's a non-tile
@@ -3954,7 +3954,7 @@
 
   if (!(UseTLAB && ZeroTLAB)) {
     // If anything remains to be zeroed, zero it all now.
-    zeroes_done = align_size_down(zeroes_done, BytesPerInt);
+    zeroes_done = align_down(zeroes_done, BytesPerInt);
     // if it is the last unused 4 bytes of an instance, forget about it
     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
     if (zeroes_done + BytesPerLong >= size_limit) {
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -1543,7 +1543,7 @@
   }
 
   size_t new_cap_until_GC = 0;
-  size_t aligned_inc = align_size_down((size_t) inc, Metaspace::commit_alignment());
+  size_t aligned_inc = align_down((size_t) inc, Metaspace::commit_alignment());
   bool success = MetaspaceGC::inc_capacity_until_GC(aligned_inc, &new_cap_until_GC);
   if (!success) {
     THROW_MSG_0(vmSymbols::java_lang_IllegalStateException(),
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -1555,8 +1555,8 @@
 
   set_parnew_gc_flags();
 
-  size_t max_heap = align_size_down(MaxHeapSize,
-                                    CardTableRS::ct_max_alignment_constraint());
+  size_t max_heap = align_down(MaxHeapSize,
+                               CardTableRS::ct_max_alignment_constraint());
 
   // Now make adjustments for CMS
   intx   tenuring_default = (intx)6;
@@ -1567,7 +1567,7 @@
   const size_t preferred_max_new_size_unaligned =
     MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
   size_t preferred_max_new_size =
-    align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());
+    align_up(preferred_max_new_size_unaligned, os::vm_page_size());
 
   // Unless explicitly requested otherwise, size young gen
   // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
@@ -1681,8 +1681,8 @@
   // keeping alignment constraints of the heap. To guarantee the latter, as the
   // NULL page is located before the heap, we pad the NULL page to the conservative
   // maximum alignment that the GC may ever impose upon the heap.
-  size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
-                                                        _conservative_max_heap_alignment);
+  size_t displacement_due_to_null_page = align_up_(os::vm_page_size(),
+                                                   _conservative_max_heap_alignment);
 
   LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
   NOT_LP64(ShouldNotReachHere(); return 0);
@@ -2763,7 +2763,7 @@
   const julong min_size = min_ThreadStackSize * K;
   const julong max_size = max_ThreadStackSize * K;
 
-  assert(is_size_aligned_(max_size, (size_t)os::vm_page_size()), "Implementation assumption");
+  assert(is_aligned_(max_size, (size_t)os::vm_page_size()), "Implementation assumption");
 
   julong size = 0;
   ArgsRange errcode = parse_memory_size(tail, &size, min_size, max_size);
@@ -2778,7 +2778,7 @@
   }
 
   // Internally track ThreadStackSize in units of 1024 bytes.
-  const julong size_aligned = align_size_up_(size, K);
+  const julong size_aligned = align_up_(size, K);
   assert(size <= size_aligned,
          "Overflow: " JULONG_FORMAT " " JULONG_FORMAT,
          size, size_aligned);
@@ -2789,7 +2789,7 @@
          size_in_K);
 
   // Check that code expanding ThreadStackSize to a page aligned number of bytes won't overflow.
-  const julong max_expanded = align_size_up_(size_in_K * K, (size_t)os::vm_page_size());
+  const julong max_expanded = align_up_(size_in_K * K, (size_t)os::vm_page_size());
   assert(max_expanded < max_uintx && max_expanded >= size_in_K,
          "Expansion overflowed: " JULONG_FORMAT " " JULONG_FORMAT,
          max_expanded, size_in_K);
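
The ThreadStackSize handling above rounds the raw -Xss byte count up to whole units of K, records that count, and asserts that expanding it back to a page-aligned byte size cannot overflow. Under assumed values (4 KB pages, a 100000-byte request), align_up_(100000, K) is 100352, so 98 K is recorded, and the later page-aligned expansion is align_up_(98 * K, 4096) = 102400 bytes. A simplified stand-in for that arithmetic (demo_align_up_ is not the widened HotSpot macro, and uint64_t stands in for julong):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical copy of the align_up_ arithmetic for power-of-two alignments.
    #define demo_align_up_(size, alignment) \
      (((size) + ((alignment) - 1)) & ~((std::uint64_t)(alignment) - 1))

    int main() {
      const std::uint64_t K = 1024, page = 4096;
      std::uint64_t requested    = 100000;                             // bytes from -Xss
      std::uint64_t size_in_K    = demo_align_up_(requested, K) / K;   // 98
      std::uint64_t max_expanded = demo_align_up_(size_in_K * K, page); // 102400
      std::printf("%llu K rounds to %llu page-aligned bytes\n",
                  (unsigned long long)size_in_K, (unsigned long long)max_expanded);
      return 0;
    }
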
--- a/hotspot/src/share/vm/runtime/atomic.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/runtime/atomic.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -153,7 +153,7 @@
                              jbyte compare_value, cmpxchg_memory_order order) {
   STATIC_ASSERT(sizeof(jbyte) == 1);
   volatile jint* dest_int =
-      reinterpret_cast<volatile jint*>(align_ptr_down(dest, sizeof(jint)));
+      reinterpret_cast<volatile jint*>(align_down(dest, sizeof(jint)));
   size_t offset = pointer_delta(dest, dest_int, 1);
   jint cur = *dest_int;
   jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -607,7 +607,7 @@
 }
 
 Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
-  size_t aligned_max = align_size_down(max_uintx/2, Metaspace::reserve_alignment_words());
+  size_t aligned_max = align_down(max_uintx/2, Metaspace::reserve_alignment_words());
   if (value > aligned_max) {
     CommandLineError::print(verbose,
                             "InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be "
@@ -618,7 +618,7 @@
   return Flag::SUCCESS;
 }
 
-// To avoid an overflow by 'align_size_up(value, alignment)'.
+// To avoid an overflow by 'align_up(value, alignment)'.
 static Flag::Error MaxSizeForAlignment(const char* name, size_t value, size_t alignment, bool verbose) {
   size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
   if (value > aligned_max) {
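
The MaxSizeForAlignment helper above exists because align_up(value, alignment) wraps around when value is within alignment - 1 of the type's maximum; any value no larger than ((max_uintx - alignment) & ~(alignment - 1)) is guaranteed to round up without wrapping. A small demonstration of both facts using 8-bit arithmetic so the wrap is easy to see (illustrative only, names are hypothetical):

    #include <cstdint>
    #include <cstdio>

    // align_up computed in uint8_t so the overflow is visible at small numbers;
    // the same wrap-around happens for size_t near max_uintx.
    static inline std::uint8_t align_up8(std::uint8_t v, std::uint8_t a) {
      std::uint8_t sum  = (std::uint8_t)(v + (a - 1));  // wraps modulo 256, as size_t wraps modulo 2^64
      std::uint8_t mask = (std::uint8_t)~(a - 1);
      return (std::uint8_t)(sum & mask);
    }

    int main() {
      const std::uint8_t alignment = 16;
      const std::uint8_t max_value = 255;
      std::uint8_t aligned_max =
        (std::uint8_t)((max_value - alignment) & (std::uint8_t)~(alignment - 1));

      std::printf("aligned_max        = %u\n", (unsigned)aligned_max);                      // 224, the safe bound
      std::printf("align_up8(224, 16) = %u\n", (unsigned)align_up8(aligned_max, alignment)); // 224, no wrap
      std::printf("align_up8(250, 16) = %u\n", (unsigned)align_up8(250, alignment));         // 0, the sum wrapped
      return 0;
    }
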
--- a/hotspot/src/share/vm/runtime/jniHandles.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/runtime/jniHandles.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -101,7 +101,7 @@
       res = _weak_global_handles->allocate_handle(obj());
     }
     // Add weak tag.
-    assert(is_ptr_aligned(res, weak_tag_alignment), "invariant");
+    assert(is_aligned(res, weak_tag_alignment), "invariant");
     char* tptr = reinterpret_cast<char*>(res) + weak_tag_value;
     res = reinterpret_cast<jobject>(tptr);
   } else {
--- a/hotspot/src/share/vm/runtime/os.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/runtime/os.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -337,10 +337,10 @@
   // We need to adapt the configured number of stack protection pages given
   // in 4K pages to the actual os page size. We must do this before setting
   // up minimal stack sizes etc. in os::init_2().
-  JavaThread::set_stack_red_zone_size     (align_size_up(StackRedPages      * 4 * K, vm_page_size()));
-  JavaThread::set_stack_yellow_zone_size  (align_size_up(StackYellowPages   * 4 * K, vm_page_size()));
-  JavaThread::set_stack_reserved_zone_size(align_size_up(StackReservedPages * 4 * K, vm_page_size()));
-  JavaThread::set_stack_shadow_zone_size  (align_size_up(StackShadowPages   * 4 * K, vm_page_size()));
+  JavaThread::set_stack_red_zone_size     (align_up(StackRedPages      * 4 * K, vm_page_size()));
+  JavaThread::set_stack_yellow_zone_size  (align_up(StackYellowPages   * 4 * K, vm_page_size()));
+  JavaThread::set_stack_reserved_zone_size(align_up(StackReservedPages * 4 * K, vm_page_size()));
+  JavaThread::set_stack_shadow_zone_size  (align_up(StackShadowPages   * 4 * K, vm_page_size()));
 
   // VM version initialization identifies some characteristics of the
   // platform that are used during ergonomic decisions.
@@ -1344,7 +1344,7 @@
     for (size_t i = 0; _page_sizes[i] != 0; ++i) {
       const size_t page_size = _page_sizes[i];
       if (page_size <= max_page_size) {
-        if (!must_be_aligned || is_size_aligned(region_size, page_size)) {
+        if (!must_be_aligned || is_aligned(region_size, page_size)) {
           return page_size;
         }
       }
--- a/hotspot/src/share/vm/runtime/perfMemory.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/runtime/perfMemory.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -91,8 +91,8 @@
     // initialization already performed
     return;
 
-  size_t capacity = align_size_up(PerfDataMemorySize,
-                                  os::vm_allocation_granularity());
+  size_t capacity = align_up(PerfDataMemorySize,
+                             os::vm_allocation_granularity());
 
   log_debug(perf, memops)("PerfDataMemorySize = " SIZE_FORMAT ","
                           " os::vm_allocation_granularity = %d,"
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -1176,7 +1176,7 @@
     // In the current implementation objectMonitors are TSM - immortal.
     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE], but we want
     // each ObjectMonitor to start at the beginning of a cache line,
-    // so we use align_size_up().
+    // so we use align_up().
     // A better solution would be to use C++ placement-new.
     // BEWARE: As it stands currently, we don't run the ctors!
     assert(_BLOCKSIZE > 1, "invariant");
@@ -1186,8 +1186,7 @@
     void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                       mtInternal);
     temp = (PaddedEnd<ObjectMonitor> *)
-             align_ptr_up(real_malloc_addr,
-                           DEFAULT_CACHE_LINE_SIZE);
+             align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
 
     // NOTE: (almost) no way to recover if allocation failed.
     // We might be able to induce a STW safepoint and scavenge enough
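
The synchronizer.cpp hunk above (and the thread.cpp hunk that follows) over-allocates by the alignment and then applies align_up to the returned pointer so the block starts on a cache-line boundary. A hedged sketch of that pattern, with std::malloc in place of NEW_C_HEAP_ARRAY, a demo_ helper instead of the HotSpot template, and an assumed 64-byte cache line:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    static const std::size_t DEMO_CACHE_LINE_SIZE = 64;  // assumed, not queried from the CPU

    // Stand-in for align_up on pointers (power-of-two alignment assumed).
    static inline void* demo_align_up(void* p, std::size_t a) {
      return reinterpret_cast<void*>(
          (reinterpret_cast<std::uintptr_t>(p) + (a - 1)) & ~(std::uintptr_t)(a - 1));
    }

    struct DemoMonitor { char payload[40]; };

    int main() {
      const std::size_t count = 128;
      // Over-allocate by one cache line so the aligned start still leaves room
      // for 'count' objects, mirroring the _BLOCKSIZE allocation above.
      std::size_t aligned_size = count * sizeof(DemoMonitor) + DEMO_CACHE_LINE_SIZE;
      void* real_malloc_addr = std::malloc(aligned_size);
      assert(real_malloc_addr != nullptr);

      DemoMonitor* block =
        static_cast<DemoMonitor*>(demo_align_up(real_malloc_addr, DEMO_CACHE_LINE_SIZE));
      assert((reinterpret_cast<std::uintptr_t>(block) & (DEMO_CACHE_LINE_SIZE - 1)) == 0);

      std::free(real_malloc_addr);  // free the original pointer, not the aligned one
      return 0;
    }
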
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -172,7 +172,7 @@
     void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
                                           : AllocateHeap(aligned_size, flags, CURRENT_PC,
                                                          AllocFailStrategy::RETURN_NULL);
-    void* aligned_addr     = align_ptr_up(real_malloc_addr, alignment);
+    void* aligned_addr     = align_up(real_malloc_addr, alignment);
     assert(((uintptr_t) aligned_addr + (uintptr_t) size) <=
            ((uintptr_t) real_malloc_addr + (uintptr_t) aligned_size),
            "JavaThread alignment code overflowed allocated storage");
@@ -286,7 +286,7 @@
   if (UseBiasedLocking) {
     assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
     assert(this == _real_malloc_address ||
-           this == align_ptr_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
+           this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
            "bug in forced alignment of thread objects");
   }
 #endif // ASSERT
--- a/hotspot/src/share/vm/runtime/thread.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -1412,7 +1412,7 @@
     return _stack_red_zone_size;
   }
   static void set_stack_red_zone_size(size_t s) {
-    assert(is_size_aligned(s, os::vm_page_size()),
+    assert(is_aligned(s, os::vm_page_size()),
            "We can not protect if the red zone size is not page aligned.");
     assert(_stack_red_zone_size == 0, "This should be called only once.");
     _stack_red_zone_size = s;
@@ -1429,7 +1429,7 @@
     return _stack_yellow_zone_size;
   }
   static void set_stack_yellow_zone_size(size_t s) {
-    assert(is_size_aligned(s, os::vm_page_size()),
+    assert(is_aligned(s, os::vm_page_size()),
            "We can not protect if the yellow zone size is not page aligned.");
     assert(_stack_yellow_zone_size == 0, "This should be called only once.");
     _stack_yellow_zone_size = s;
@@ -1440,7 +1440,7 @@
     return _stack_reserved_zone_size;
   }
   static void set_stack_reserved_zone_size(size_t s) {
-    assert(is_size_aligned(s, os::vm_page_size()),
+    assert(is_aligned(s, os::vm_page_size()),
            "We can not protect if the reserved zone size is not page aligned.");
     assert(_stack_reserved_zone_size == 0, "This should be called only once.");
     _stack_reserved_zone_size = s;
@@ -1480,7 +1480,7 @@
     // the page size is a multiple of 4K, banging in 4K steps
     // suffices to touch all pages. (Some pages are banged
     // several times, though.)
-    assert(is_size_aligned(s, os::vm_page_size()),
+    assert(is_aligned(s, os::vm_page_size()),
            "Stack bang assumes multiple of page size.");
     assert(_stack_shadow_zone_size == 0, "This should be called only once.");
     _stack_shadow_zone_size = s;
--- a/hotspot/src/share/vm/services/nmtCommon.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/services/nmtCommon.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -28,7 +28,7 @@
 #include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-#define CALC_OBJ_SIZE_IN_TYPE(obj, type) (align_size_up_(sizeof(obj), sizeof(type))/sizeof(type))
+#define CALC_OBJ_SIZE_IN_TYPE(obj, type) (align_up_(sizeof(obj), sizeof(type))/sizeof(type))
 
 // Native memory tracking level
 enum NMT_TrackingLevel {
--- a/hotspot/src/share/vm/utilities/bitMap.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/utilities/bitMap.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -189,10 +189,10 @@
   // Align bit index up or down to the next bitmap word boundary, or check
   // alignment.
   static idx_t word_align_up(idx_t bit) {
-    return align_size_up(bit, BitsPerWord);
+    return align_up(bit, BitsPerWord);
   }
   static idx_t word_align_down(idx_t bit) {
-    return align_size_down(bit, BitsPerWord);
+    return align_down(bit, BitsPerWord);
   }
   static bool is_word_aligned(idx_t bit) {
     return word_align_up(bit) == bit;
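
As a worked example for the bit-index helpers above, assuming a 64-bit bitmap word (BitsPerWord = 64): word_align_up(70) = 128 and word_align_down(70) = 64, while an already-aligned index such as 64 is returned unchanged. A tiny sketch with demo_ stand-ins rather than the BitMap methods:

    #include <cassert>
    #include <cstddef>

    static const std::size_t demo_bits_per_word = 64;  // assumed word width

    static inline std::size_t demo_word_align_up(std::size_t bit) {
      return (bit + demo_bits_per_word - 1) & ~(demo_bits_per_word - 1);
    }
    static inline std::size_t demo_word_align_down(std::size_t bit) {
      return bit & ~(demo_bits_per_word - 1);
    }

    int main() {
      assert(demo_word_align_up(70)   == 128);  // next word boundary above bit 70
      assert(demo_word_align_down(70) == 64);   // word boundary at or below bit 70
      assert(demo_word_align_up(64)   == 64);   // already aligned, unchanged
      return 0;
    }
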
--- a/hotspot/src/share/vm/utilities/copy.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/utilities/copy.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -71,7 +71,7 @@
     assert(dst != NULL, "address must not be NULL");
     assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
            "incorrect element size: " SIZE_FORMAT, elem_size);
-    assert(is_size_aligned(byte_count, elem_size),
+    assert(is_aligned(byte_count, elem_size),
            "byte_count " SIZE_FORMAT " must be multiple of element size " SIZE_FORMAT, byte_count, elem_size);
 
     address src_end = (address)src + byte_count;
@@ -189,14 +189,14 @@
    */
   template <typename T, CopyDirection direction, bool swap>
   static void do_conjoint_swap(const void* src, void* dst, size_t byte_count) {
-    if (is_ptr_aligned(src, sizeof(T))) {
-      if (is_ptr_aligned(dst, sizeof(T))) {
+    if (is_aligned(src, sizeof(T))) {
+      if (is_aligned(dst, sizeof(T))) {
         do_conjoint_swap<T,direction,swap,true,true>(src, dst, byte_count);
       } else {
         do_conjoint_swap<T,direction,swap,true,false>(src, dst, byte_count);
       }
     } else {
-      if (is_ptr_aligned(dst, sizeof(T))) {
+      if (is_aligned(dst, sizeof(T))) {
         do_conjoint_swap<T,direction,swap,false,true>(src, dst, byte_count);
       } else {
         do_conjoint_swap<T,direction,swap,false,false>(src, dst, byte_count);
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -206,7 +206,7 @@
 // parts of the memory system may require additional alignment
 // and are responsible for those alignments.
 #ifdef _LP64
-#define ScaleForWordSize(x) align_size_down_((x) * 13 / 10, HeapWordSize)
+#define ScaleForWordSize(x) align_down_((x) * 13 / 10, HeapWordSize)
 #else
 #define ScaleForWordSize(x) (x)
 #endif
@@ -514,77 +514,77 @@
 #define widen_to_type_of(what, type_carrier) (true ? (what) : (type_carrier))
 #define align_mask_widened(alignment, type_carrier) widen_to_type_of(align_mask(alignment), (type_carrier))
 
-#define align_size_down_(size, alignment) ((size) & ~align_mask_widened((alignment), (size)))
+#define align_down_(size, alignment) ((size) & ~align_mask_widened((alignment), (size)))
 
-#define align_size_up_(size, alignment) (align_size_down_((size) + align_mask(alignment), (alignment)))
+#define align_up_(size, alignment) (align_down_((size) + align_mask(alignment), (alignment)))
 
-#define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
+#define is_aligned_(size, alignment) ((size) == (align_up_(size, alignment)))
 
 // Helpers to align sizes and check for alignment
 
 template <typename T, typename A>
-inline T align_size_up(T size, A alignment) {
-  return align_size_up_(size, alignment);
+inline T align_up(T size, A alignment) {
+  return align_up_(size, alignment);
 }
 
 template <typename T, typename A>
-inline T align_size_down(T size, A alignment) {
-  return align_size_down_(size, alignment);
+inline T align_down(T size, A alignment) {
+  return align_down_(size, alignment);
 }
 
 template <typename T, typename A>
-inline bool is_size_aligned(T size, A alignment) {
-  return is_size_aligned_(size, alignment);
+inline bool is_aligned(T size, A alignment) {
+  return is_aligned_(size, alignment);
 }
 
 // Align down with a lower bound. If the aligning results in 0, return 'alignment'.
 template <typename T, typename A>
-inline T align_size_down_bounded(T size, A alignment) {
-  A aligned_size = align_size_down(size, alignment);
+inline T align_down_bounded(T size, A alignment) {
+  A aligned_size = align_down(size, alignment);
   return aligned_size > 0 ? aligned_size : alignment;
 }
 
 // Helpers to align pointers and check for alignment.
 
 template <typename T, typename A>
-inline T* align_ptr_up(T* ptr, A alignment) {
-  return (T*)align_size_up((uintptr_t)ptr, alignment);
+inline T* align_up(T* ptr, A alignment) {
+  return (T*)align_up((uintptr_t)ptr, alignment);
 }
 
 template <typename T, typename A>
-inline T* align_ptr_down(T* ptr, A alignment) {
-  return (T*)align_size_down((uintptr_t)ptr, alignment);
+inline T* align_down(T* ptr, A alignment) {
+  return (T*)align_down((uintptr_t)ptr, alignment);
 }
 
 template <typename T, typename A>
-inline bool is_ptr_aligned(T* ptr, A alignment) {
-  return is_size_aligned((uintptr_t)ptr, alignment);
+inline bool is_aligned(T* ptr, A alignment) {
+  return is_aligned((uintptr_t)ptr, alignment);
 }
 
 // Align metaspace objects by rounding up to natural word boundary
 template <typename T>
 inline T align_metadata_size(T size) {
-  return align_size_up(size, 1);
+  return align_up(size, 1);
 }
 
 // Align objects in the Java Heap by rounding up their size, in HeapWord units.
 template <typename T>
 inline T align_object_size(T word_size) {
-  return align_size_up(word_size, MinObjAlignment);
+  return align_up(word_size, MinObjAlignment);
 }
 
 inline bool is_object_aligned(size_t word_size) {
-  return is_size_aligned(word_size, MinObjAlignment);
+  return is_aligned(word_size, MinObjAlignment);
 }
 
-inline bool is_ptr_object_aligned(const void* addr) {
-  return is_ptr_aligned(addr, MinObjAlignmentInBytes);
+inline bool is_object_aligned(const void* addr) {
+  return is_aligned(addr, MinObjAlignmentInBytes);
 }
 
 // Pad out certain offsets to jlong alignment, in HeapWord units.
 template <typename T>
 inline T align_object_offset(T offset) {
-  return align_size_up(offset, HeapWordsPerLong);
+  return align_up(offset, HeapWordsPerLong);
 }
 
 // Clamp an address to be within a specific page
@@ -593,15 +593,15 @@
 // 3. Otherwise, if addr is below the page_address the start of the page will be returned
 template <typename T>
 inline T* clamp_address_in_page(T* addr, T* page_address, size_t page_size) {
-  if (align_ptr_down(addr, page_size) == align_ptr_down(page_address, page_size)) {
+  if (align_down(addr, page_size) == align_down(page_address, page_size)) {
     // address is in the specified page, just return it as is
     return addr;
   } else if (addr > page_address) {
     // address is above specified page, return start of next page
-    return align_ptr_down(page_address, page_size) + page_size;
+    return align_down(page_address, page_size) + page_size;
   } else {
     // address is below specified page, return start of page
-    return align_ptr_down(page_address, page_size);
+    return align_down(page_address, page_size);
   }
 }
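
The globalDefinitions.hpp hunk above is the core of this change: align_up, align_down and is_aligned become single names with an integral overload and a pointer overload, so call sites no longer choose between _size_ and _ptr_ variants. A simplified sketch of that overload shape (power-of-two alignments, no mask widening, demo_-prefixed names rather than the HotSpot templates), showing that ordinary overload resolution picks the right version:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Simplified copies of the renamed helpers; HotSpot's real versions build
    // on the align_up_/align_down_ macros with mask widening shown above.
    template <typename T, typename A>
    inline T demo_align_down(T size, A alignment) {
      return (T)(size & ~((T)alignment - 1));
    }
    template <typename T, typename A>
    inline T demo_align_up(T size, A alignment) {
      return demo_align_down((T)(size + ((T)alignment - 1)), alignment);
    }
    template <typename T, typename A>
    inline bool demo_is_aligned(T size, A alignment) {
      return (size & ((T)alignment - 1)) == 0;
    }

    // Pointer overloads route through the integral versions via uintptr_t,
    // the same shape as the new align_up(T*, A) / align_down(T*, A) above.
    template <typename T, typename A>
    inline T* demo_align_up(T* ptr, A alignment) {
      return (T*)demo_align_up((std::uintptr_t)ptr, alignment);
    }
    template <typename T, typename A>
    inline T* demo_align_down(T* ptr, A alignment) {
      return (T*)demo_align_down((std::uintptr_t)ptr, alignment);
    }
    template <typename T, typename A>
    inline bool demo_is_aligned(T* ptr, A alignment) {
      return demo_is_aligned((std::uintptr_t)ptr, alignment);
    }

    int main() {
      std::size_t sz = demo_align_up((std::size_t)1000, 256);  // integral overload: 1024
      char buf[512];
      char* p = demo_align_down(buf + 100, 64);                // pointer overload
      std::printf("%zu %d %d\n", sz,
                  (int)demo_is_aligned(sz, 256),               // 1
                  (int)demo_is_aligned(p, 64));                // 1
      return 0;
    }

The pointer overloads win over the generic integral template by partial ordering, which is why a char* argument never instantiates the integral version; this is the mechanism that lets one name serve both the old _size_ and _ptr_ call sites.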
 
--- a/hotspot/src/share/vm/utilities/stack.inline.hpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/src/share/vm/utilities/stack.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
@@ -92,7 +92,7 @@
   const size_t ptr_sz = sizeof(E*);
   assert(elem_sz % ptr_sz == 0 || ptr_sz % elem_sz == 0, "bad element size");
   if (elem_sz < ptr_sz) {
-    return align_size_up(seg_size * elem_sz, ptr_sz) / elem_sz;
+    return align_up(seg_size * elem_sz, ptr_sz) / elem_sz;
   }
   return seg_size;
 }
@@ -100,7 +100,7 @@
 template <class E, MEMFLAGS F>
 size_t Stack<E, F>::link_offset() const
 {
-  return align_size_up(this->_seg_size * sizeof(E), sizeof(E*));
+  return align_up(this->_seg_size * sizeof(E), sizeof(E*));
 }
 
 template <class E, MEMFLAGS F>
--- a/hotspot/test/native/gc/shared/test_collectorPolicy.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/test/native/gc/shared/test_collectorPolicy.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -161,7 +161,7 @@
     SetMaxNewSizeCmd(size_t param1, size_t param2) : BinaryExecutor(param1, param2) { }
     void execute() {
       size_t heap_alignment = CollectorPolicy::compute_heap_alignment();
-      size_t new_size_value = align_size_up(MaxHeapSize, heap_alignment)
+      size_t new_size_value = align_up(MaxHeapSize, heap_alignment)
               - param1 + param2;
       FLAG_SET_CMDLINE(size_t, MaxNewSize, new_size_value);
     }
@@ -185,7 +185,7 @@
       MarkSweepPolicy msp;
       msp.initialize_all();
 
-      size_t expected_old_initial = align_size_up(InitialHeapSize, heap_alignment)
+      size_t expected_old_initial = align_up(InitialHeapSize, heap_alignment)
               - MaxNewSize;
 
       ASSERT_EQ(expected_old_initial, msp.initial_old_size());
@@ -197,13 +197,13 @@
     CheckOldInitialMaxNewSize(size_t param1, size_t param2) : BinaryExecutor(param1, param2) { }
     void execute() {
       size_t heap_alignment = CollectorPolicy::compute_heap_alignment();
-      size_t new_size_value = align_size_up(MaxHeapSize, heap_alignment)
+      size_t new_size_value = align_up(MaxHeapSize, heap_alignment)
               - param1 + param2;
 
       MarkSweepPolicy msp;
       msp.initialize_all();
 
-      size_t expected_old_initial = align_size_up(MaxHeapSize, heap_alignment)
+      size_t expected_old_initial = align_up(MaxHeapSize, heap_alignment)
               - new_size_value;
 
       ASSERT_EQ(expected_old_initial, msp.initial_old_size());
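
The arithmetic these fixtures expect is a straightforward consequence of the heap
alignment; an illustrative set of numbers (not values the test actually passes):

    // Illustrative numbers only: MaxHeapSize = 100*M, heap_alignment = 32*M,
    // param1 = 20*M, param2 = 0.
    //   new_size_value       = align_up(100*M, 32*M) - 20*M + 0 = 128*M - 20*M  = 108*M
    //   expected_old_initial = align_up(100*M, 32*M) - 108*M    = 128*M - 108*M =  20*M
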
--- a/hotspot/test/native/memory/test_metachunk.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/test/native/memory/test_metachunk.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -64,7 +64,7 @@
 
   // Allocate
   size_t alloc_size = 64; // Words
-  EXPECT_TRUE(is_size_aligned(alloc_size, Metachunk::object_alignment()));
+  EXPECT_TRUE(is_aligned(alloc_size, Metachunk::object_alignment()));
 
   MetaWord* mem = metachunk->allocate(alloc_size);
 
--- a/hotspot/test/native/runtime/test_arguments.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/test/native/runtime/test_arguments.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -152,7 +152,7 @@
   assert(small_xss_input <= max_julong / 2, "Sanity");
 
   // Match code in arguments.cpp
-  julong julong_ret = align_size_up_(small_xss_input, K) / K;
+  julong julong_ret = align_up_(small_xss_input, K) / K;
   assert(julong_ret <= (julong)max_intx, "Overflow: " JULONG_FORMAT, julong_ret);
   return (intx)julong_ret;
 }
@@ -186,8 +186,8 @@
 
   // Test value aligned both to K and vm_page_size.
   {
-    EXPECT_TRUE(is_size_aligned(32 * M, K));
-    EXPECT_TRUE(is_size_aligned(32 * M, (size_t)os::vm_page_size()));
+    EXPECT_TRUE(is_aligned(32 * M, K));
+    EXPECT_TRUE(is_aligned(32 * M, (size_t)os::vm_page_size()));
     EXPECT_EQ(parse_xss_inner(to_string(32 * M), JNI_OK), (intx)(32 * M / K));
   }
 
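The stack-size helper mirrors arguments.cpp by rounding the -Xss byte value up to
whole kilobytes before converting; a worked example with a made-up input:

    // Illustrative only: an -Xss request of 100000 bytes rounds up to whole KB,
    //   align_up_(100000, K) / K == 100352 / 1024 == 98
    // so the parsed value handed back as an intx is expected to be 98.
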
--- a/hotspot/test/native/utilities/test_align.cpp	Wed Apr 12 17:53:18 2017 +0200
+++ b/hotspot/test/native/utilities/test_align.cpp	Tue Jul 04 15:58:10 2017 +0200
@@ -53,46 +53,46 @@
       log("--- Value: " UINT64_FORMAT "\n", values[i]);
 
       // Test align up
-      const uint64_t up = align_size_up_(values[i], (uint64_t)alignment);
+      const uint64_t up = align_up_(values[i], (uint64_t)alignment);
       if (0 < up && up <= (uint64_t)std::numeric_limits<T>::max()) {
         log("Testing align_up:   alignment: 0x" UINT64_FORMAT_X " value: 0x" UINT64_FORMAT_X " expected: 0x" UINT64_FORMAT_X "\n", (uint64_t)alignment, values[i], up);
 
         T value = T(values[i]);
 
         // Check against uint64_t version
-        ASSERT_EQ(align_size_up((uint64_t)value, alignment), up);
+        ASSERT_EQ(align_up((uint64_t)value, alignment), up);
         // Check inline function vs macro
-        ASSERT_EQ(align_size_up(value, alignment), align_size_up_(value, alignment));
+        ASSERT_EQ(align_up(value, alignment), align_up_(value, alignment));
         // Sanity check
-        ASSERT_GE(align_size_up(value, alignment), value);
+        ASSERT_GE(align_up(value, alignment), value);
       }
 
       // Test align down
-      const uint64_t down = align_size_down_(values[i], (uint64_t)alignment);
+      const uint64_t down = align_down_(values[i], (uint64_t)alignment);
       if (down <= (uint64_t)std::numeric_limits<T>::max()) {
-        log("Testing align_size_down: alignment: 0x" UINT64_FORMAT_X " value: 0x" UINT64_FORMAT_X " expected: 0x" UINT64_FORMAT_X "\n", (uint64_t)alignment, values[i], down);
+        log("Testing align_down: alignment: 0x" UINT64_FORMAT_X " value: 0x" UINT64_FORMAT_X " expected: 0x" UINT64_FORMAT_X "\n", (uint64_t)alignment, values[i], down);
 
         T value = T(values[i]);
 
         // Check against uint64_t version
-        ASSERT_EQ((uint64_t)align_size_down(value, alignment), down);
+        ASSERT_EQ((uint64_t)align_down(value, alignment), down);
         // Check inline function vs macro
-        ASSERT_EQ(align_size_down(value, alignment), align_size_down_(value, alignment));
+        ASSERT_EQ(align_down(value, alignment), align_down_(value, alignment));
         // Sanity check
-        ASSERT_LE(align_size_down(value, alignment), value);
+        ASSERT_LE(align_down(value, alignment), value);
       }
 
       // Test is aligned
-      const bool is = is_size_aligned_(values[i], (uint64_t)alignment);
+      const bool is = is_aligned_(values[i], (uint64_t)alignment);
       if (values[i] <= (uint64_t)std::numeric_limits<T>::max()) {
         log("Testing is_aligned: alignment: 0x" UINT64_FORMAT_X " value: 0x" UINT64_FORMAT_X " expected: %s\n", (uint64_t)alignment, values[i], is ? "true" : "false");
 
         T value = T(values[i]);
 
         // Check against uint64_t version
-        ASSERT_EQ(is_size_aligned(value, alignment), is);
+        ASSERT_EQ(is_aligned(value, alignment), is);
         // Check inline function vs macro
-        ASSERT_EQ(is_size_aligned(value, alignment), is_size_aligned_(value, alignment));
+        ASSERT_EQ(is_aligned(value, alignment), is_aligned_(value, alignment));
       }
     }
   }
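
The macro-vs-inline assertions above compare each renamed function with its
trailing-underscore macro twin. As a hedged reminder, the macros usually have the
following power-of-two shape; the exact definitions live in the alignment header
and may carry additional type handling, so treat this as a sketch, not the
verbatim HotSpot source:

    // Sketch only, not the verbatim definitions.
    #define align_down_(value, alignment) ((value) & ~((alignment) - 1))
    #define align_up_(value, alignment)   (((value) + ((alignment) - 1)) & ~((alignment) - 1))
    #define is_aligned_(value, alignment) (((value) & ((alignment) - 1)) == 0)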