8178500: Replace usages of round_to and round_down with align_up and align_down
author stefank
date Thu, 13 Apr 2017 09:57:51 +0200
changeset 46620 750c6edff33b
parent 46619 a3919f5e8d2b
child 46621 b93c4446e59e
child 46623 261b9d05b79d
8178500: Replace usages of round_to and round_down with align_up and align_down Reviewed-by: rehn, tschatzl
hotspot/src/cpu/aarch64/vm/aarch64.ad
hotspot/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp
hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
hotspot/src/cpu/arm/vm/abstractInterpreter_arm.cpp
hotspot/src/cpu/arm/vm/arm.ad
hotspot/src/cpu/arm/vm/c1_Runtime1_arm.cpp
hotspot/src/cpu/arm/vm/sharedRuntime_arm.cpp
hotspot/src/cpu/arm/vm/stubGenerator_arm.cpp
hotspot/src/cpu/arm/vm/templateInterpreterGenerator_arm.cpp
hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp
hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
hotspot/src/cpu/s390/vm/c1_CodeStubs_s390.cpp
hotspot/src/cpu/s390/vm/frame_s390.inline.hpp
hotspot/src/cpu/s390/vm/sharedRuntime_s390.cpp
hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp
hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp
hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp
hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp
hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
hotspot/src/cpu/x86/vm/x86_32.ad
hotspot/src/cpu/x86/vm/x86_64.ad
hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java
hotspot/src/os/posix/vm/os_posix.cpp
hotspot/src/os/windows/vm/os_windows.cpp
hotspot/src/share/vm/asm/codeBuffer.cpp
hotspot/src/share/vm/c1/c1_FrameMap.cpp
hotspot/src/share/vm/c1/c1_LinearScan.hpp
hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp
hotspot/src/share/vm/classfile/verifier.cpp
hotspot/src/share/vm/code/codeBlob.cpp
hotspot/src/share/vm/code/codeBlob.hpp
hotspot/src/share/vm/code/codeCache.cpp
hotspot/src/share/vm/code/exceptionHandlerTable.hpp
hotspot/src/share/vm/code/icBuffer.hpp
hotspot/src/share/vm/code/nmethod.cpp
hotspot/src/share/vm/code/stubs.cpp
hotspot/src/share/vm/code/vtableStubs.cpp
hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp
hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp
hotspot/src/share/vm/gc/g1/sparsePRT.hpp
hotspot/src/share/vm/gc/parallel/generationSizer.cpp
hotspot/src/share/vm/gc/parallel/mutableNUMASpace.cpp
hotspot/src/share/vm/gc/parallel/mutableSpace.cpp
hotspot/src/share/vm/interpreter/bytecode.hpp
hotspot/src/share/vm/interpreter/bytecodeTracer.cpp
hotspot/src/share/vm/interpreter/bytecodes.cpp
hotspot/src/share/vm/interpreter/interpreter.hpp
hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
hotspot/src/share/vm/jvmci/jvmciCodeInstaller.cpp
hotspot/src/share/vm/memory/virtualspace.cpp
hotspot/src/share/vm/oops/oop.inline.hpp
hotspot/src/share/vm/opto/buildOopMap.cpp
hotspot/src/share/vm/opto/chaitin.cpp
hotspot/src/share/vm/opto/graphKit.cpp
hotspot/src/share/vm/opto/macroArrayCopy.cpp
hotspot/src/share/vm/opto/matcher.cpp
hotspot/src/share/vm/prims/unsafe.cpp
hotspot/src/share/vm/prims/whitebox.cpp
hotspot/src/share/vm/runtime/icache.cpp
hotspot/src/share/vm/runtime/stubRoutines.cpp
hotspot/src/share/vm/utilities/copy.hpp
hotspot/src/share/vm/utilities/globalDefinitions.hpp
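The patch replaces the old round_to/round_down helpers with align_up, align_down, and is_aligned throughout. For orientation while reading the hunks below, here is a minimal self-contained sketch of the semantics the new names carry, assuming power-of-two alignments; these are stand-ins, not the exact definitions from globalDefinitions.hpp:

#include <cassert>
#include <cstdint>

// Integral and pointer helpers in the spirit of the new API; 'alignment'
// must be a power of two. Not the exact HotSpot definitions.
template <typename T>
inline T align_down(T value, uintptr_t alignment) {
  return (T)((uintptr_t)value & ~(alignment - 1));
}

template <typename T>
inline T align_up(T value, uintptr_t alignment) {
  return align_down((T)((uintptr_t)value + alignment - 1), alignment);
}

template <typename T>
inline bool is_aligned(T value, uintptr_t alignment) {
  return ((uintptr_t)value & (alignment - 1)) == 0;
}

int main() {
  assert(align_up(13, 8) == 16);
  assert(align_down(13, 8) == 8);
  assert(is_aligned(16, 8) && !is_aligned(13, 8));

  // Pointer arguments work without the old (intptr_t) casts:
  char buf[64];
  char* p = align_up(buf + 1, 16);
  assert(is_aligned(p, 16) && p >= buf + 1);
  return 0;
}

The pointer-friendly templates are what allow many hunks below to drop the old (intptr_t) cast pairs.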
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad	Thu Apr 13 09:57:51 2017 +0200
@@ -5218,7 +5218,7 @@
   // ppc port uses 0 but we definitely need to allow for fixed_slots
   // which folds in the space used for monitors
   return_addr(STACK - 2 +
-              round_to((Compile::current()->in_preserve_stack_slots() +
+              align_up((Compile::current()->in_preserve_stack_slots() +
                         Compile::current()->fixed_slots()),
                        stack_alignment_in_slots()));
 
--- a/hotspot/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -100,7 +100,7 @@
 
   // On AArch64 we always keep the stack pointer 16-aligned, so we
   // must round up here.
-  size = round_to(size, 2);
+  size = align_up(size, 2);
 
   return size;
 }
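A quick worked check of the hunk above: the size here is counted in words, so rounding the word count up to an even number is exactly what keeps SP 16-byte aligned, as the comment says. Sketch with a local stand-in helper, assuming 8-byte words:

#include <cassert>

static int align_up_int(int x, int a) { return (x + a - 1) & ~(a - 1); }

int main() {
  const int wordSize = 8;                // assumption: 64-bit words
  int size = 7;                          // odd word count
  size = align_up_int(size, 2);          // mirrors: size = align_up(size, 2);
  assert(size == 8);
  assert((size * wordSize) % 16 == 0);   // frame size is 16-byte aligned
  return 0;
}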
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -123,7 +123,7 @@
   assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
 #endif
 
-  int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
+  int frame_size_in_bytes = align_up(additional_frame_words*wordSize +
                                      reg_save_size*BytesPerInt, 16);
   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
@@ -190,7 +190,7 @@
   __ ldr(r0, Address(sp, r0_offset_in_bytes()));
 
   // Pop all of the register save are off the stack
-  __ add(sp, sp, round_to(return_offset_in_bytes(), 16));
+  __ add(sp, sp, align_up(return_offset_in_bytes(), 16));
 }
 
 // Is vector's size (in bytes) bigger than a size saved by default?
@@ -317,7 +317,7 @@
     }
   }
 
-  return round_to(stk_args, 2);
+  return align_up(stk_args, 2);
 }
 
 // Patch the callers callsite with entry to compiled code if it exists.
@@ -375,7 +375,7 @@
   __ mov(r13, sp);
 
   // stack is aligned, keep it that way
-  extraspace = round_to(extraspace, 2*wordSize);
+  extraspace = align_up(extraspace, 2*wordSize);
 
   if (extraspace)
     __ sub(sp, sp, extraspace);
@@ -547,7 +547,7 @@
   }
 
   // Cut-out for having no stack args.
-  int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+  int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
   if (comp_args_on_stack) {
     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
     __ andr(sp, rscratch1, -16);
@@ -1486,7 +1486,7 @@
     total_save_slots = double_slots * 2 + single_slots;
     // align the save area
     if (double_slots != 0) {
-      stack_slots = round_to(stack_slots, 2);
+      stack_slots = align_up(stack_slots, 2);
     }
   }
 
@@ -1543,7 +1543,7 @@
 
   // Now compute actual number of stack words we need rounding to make
   // stack properly aligned.
-  stack_slots = round_to(stack_slots, StackAlignmentInSlots);
+  stack_slots = align_up(stack_slots, StackAlignmentInSlots);
 
   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
 
@@ -2293,7 +2293,7 @@
     return 0;                   // No adjustment for negative locals
   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
   // diff is counted in stack words
-  return round_to(diff, 2);
+  return align_up(diff, 2);
 }
 
 
--- a/hotspot/src/cpu/arm/vm/abstractInterpreter_arm.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/arm/vm/abstractInterpreter_arm.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -108,7 +108,7 @@
          tempcount*Interpreter::stackElementWords + extra_args;
 
 #ifdef AARCH64
-  size = round_to(size, StackAlignmentInBytes/BytesPerWord);
+  size = align_up(size, StackAlignmentInBytes/BytesPerWord);
 #endif // AARCH64
 
   return size;
@@ -189,7 +189,7 @@
   }
   if (caller->is_interpreted_frame()) {
     intptr_t* locals_base = (locals - method->max_locals()*Interpreter::stackElementWords + 1);
-    locals_base = (intptr_t*)round_down((intptr_t)locals_base, StackAlignmentInBytes);
+    locals_base = align_down(locals_base, StackAlignmentInBytes);
     assert(interpreter_frame->sender_sp() <= locals_base, "interpreter-to-interpreter frame chaining");
 
   } else if (caller->is_compiled_frame()) {
@@ -227,7 +227,7 @@
   intptr_t* extended_sp = (intptr_t*) monbot  -
     (max_stack * Interpreter::stackElementWords) -
     popframe_extra_args;
-  extended_sp = (intptr_t*)round_down((intptr_t)extended_sp, StackAlignmentInBytes);
+  extended_sp = align_down(extended_sp, StackAlignmentInBytes);
   interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);
 #else
   interpreter_frame->interpreter_frame_set_last_sp(stack_top);
@@ -239,7 +239,7 @@
 
 #ifdef AARCH64
   if (caller->is_interpreted_frame()) {
-    intptr_t* sender_sp = (intptr_t*)round_down((intptr_t)caller->interpreter_frame_tos_address(), StackAlignmentInBytes);
+    intptr_t* sender_sp = align_down(caller->interpreter_frame_tos_address(), StackAlignmentInBytes);
     interpreter_frame->set_interpreter_frame_sender_sp(sender_sp);
 
   } else {
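Note how the arm hunks above drop the (intptr_t*)/(intptr_t) cast pairs: align_down evidently accepts a pointer and returns the same pointer type. A hedged sketch of such an overload (the exact HotSpot signature may differ):

#include <cassert>
#include <cstdint>

// Pointer overload sketch: align the address down, keep the pointee type.
template <typename T>
inline T* align_down(T* ptr, uintptr_t alignment) {
  return (T*)((uintptr_t)ptr & ~(alignment - 1));
}

int main() {
  // Mirrors the patched line
  //   locals_base = align_down(locals_base, StackAlignmentInBytes);
  // which replaces the cast-heavy
  //   (intptr_t*)round_down((intptr_t)locals_base, StackAlignmentInBytes);
  intptr_t stack[32];
  intptr_t* locals_base = stack + 5;
  locals_base = align_down(locals_base, 16);
  assert(((uintptr_t)locals_base & 15) == 0);
  return 0;
}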
--- a/hotspot/src/cpu/arm/vm/arm.ad	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/arm/vm/arm.ad	Thu Apr 13 09:57:51 2017 +0200
@@ -1881,7 +1881,7 @@
   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
   // Otherwise, it is above the locks and verification slot and alignment word
   return_addr(STACK - 1*VMRegImpl::slots_per_word +
-              round_to((Compile::current()->in_preserve_stack_slots() +
+              align_up((Compile::current()->in_preserve_stack_slots() +
                         Compile::current()->fixed_slots()),
                        stack_alignment_in_slots()));
 
--- a/hotspot/src/cpu/arm/vm/c1_Runtime1_arm.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/arm/vm/c1_Runtime1_arm.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -250,7 +250,7 @@
 
   __ sub(SP, SP, (reg_save_size - 2) * wordSize);
 
-  for (int i = 0; i < round_down(number_of_saved_gprs, 2); i += 2) {
+  for (int i = 0; i < align_down((int)number_of_saved_gprs, 2); i += 2) {
     __ stp(as_Register(i), as_Register(i+1), Address(SP, (R0_offset + i) * wordSize));
   }
 
--- a/hotspot/src/cpu/arm/vm/sharedRuntime_arm.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/arm/vm/sharedRuntime_arm.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -747,7 +747,7 @@
   assert_different_registers(tmp, R0, R1, R2, R3, R4, R5, R6, R7, Rsender_sp, Rparams);
 
   if (comp_args_on_stack) {
-    __ sub_slow(SP, SP, round_to(comp_args_on_stack * VMRegImpl::stack_slot_size, StackAlignmentInBytes));
+    __ sub_slow(SP, SP, align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, StackAlignmentInBytes));
   }
 
   for (int i = 0; i < total_args_passed; i++) {
@@ -870,7 +870,7 @@
 
 #ifdef AARCH64
 
-  int extraspace = round_to(total_args_passed * Interpreter::stackElementSize, StackAlignmentInBytes);
+  int extraspace = align_up(total_args_passed * Interpreter::stackElementSize, StackAlignmentInBytes);
   if (extraspace) {
     __ sub(SP, SP, extraspace);
   }
@@ -1181,7 +1181,7 @@
   stack_slots += 2 * VMRegImpl::slots_per_word;
 
   // Calculate the final stack size taking account of alignment
-  stack_slots = round_to(stack_slots, StackAlignmentInBytes / VMRegImpl::stack_slot_size);
+  stack_slots = align_up(stack_slots, StackAlignmentInBytes / VMRegImpl::stack_slot_size);
   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
   int lock_slot_fp_offset = stack_size - 2 * wordSize -
     lock_slot_offset * VMRegImpl::stack_slot_size;
@@ -1851,7 +1851,7 @@
 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
   int extra_locals_size = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
 #ifdef AARCH64
-  extra_locals_size = round_to(extra_locals_size, StackAlignmentInBytes/BytesPerWord);
+  extra_locals_size = align_up(extra_locals_size, StackAlignmentInBytes/BytesPerWord);
 #endif // AARCH64
   return extra_locals_size;
 }
--- a/hotspot/src/cpu/arm/vm/stubGenerator_arm.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/arm/vm/stubGenerator_arm.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -2876,7 +2876,7 @@
       BLOCK_COMMENT("PreBarrier");
 
 #ifdef AARCH64
-      callee_saved_regs = round_to(callee_saved_regs, 2);
+      callee_saved_regs = align_up(callee_saved_regs, 2);
       for (int i = 0; i < callee_saved_regs; i += 2) {
         __ raw_push(as_Register(i), as_Register(i+1));
       }
--- a/hotspot/src/cpu/arm/vm/templateInterpreterGenerator_arm.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/arm/vm/templateInterpreterGenerator_arm.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -675,7 +675,7 @@
   // Rstack_top & RextendedSP
   __ sub(Rstack_top, SP, 10*wordSize);
   if (native_call) {
-    __ sub(RextendedSP, Rstack_top, round_to(wordSize, StackAlignmentInBytes));    // reserve 1 slot for exception handling
+    __ sub(RextendedSP, Rstack_top, align_up(wordSize, StackAlignmentInBytes));    // reserve 1 slot for exception handling
   } else {
     __ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
     __ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes);
@@ -1095,7 +1095,7 @@
   // Allocate more stack space to accomodate all arguments passed on GP and FP registers:
   // 8 * wordSize for GPRs
   // 8 * wordSize for FPRs
-  int reg_arguments = round_to(8*wordSize + 8*wordSize, StackAlignmentInBytes);
+  int reg_arguments = align_up(8*wordSize + 8*wordSize, StackAlignmentInBytes);
 #else
 
   // C functions need aligned stack
@@ -1108,7 +1108,7 @@
   // Allocate more stack space to accomodate all GP as well as FP registers:
   // 4 * wordSize
   // 8 * BytesPerLong
-  int reg_arguments = round_to((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
+  int reg_arguments = align_up((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
 #else
   // Reserve at least 4 words on the stack for loading
   // of parameters passed on registers (R0-R3).
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -193,7 +193,7 @@
 
 inline int frame::interpreter_frame_monitor_size() {
   // Number of stack slots for a monitor.
-  return round_to(BasicObjectLock::size(),  // number of stack slots
+  return align_up(BasicObjectLock::size(),  // number of stack slots
                   WordsPerLong);            // number of stack slots for a Java long
 }
 
--- a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -221,7 +221,7 @@
   const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                    sizeof(RegisterSaver::LiveRegType);
   const int register_save_size   = regstosave_num * reg_size;
-  const int frame_size_in_bytes  = round_to(register_save_size, frame::alignment_in_bytes)
+  const int frame_size_in_bytes  = align_up(register_save_size, frame::alignment_in_bytes)
                                    + frame::abi_reg_args_size;
   *out_frame_size_in_bytes       = frame_size_in_bytes;
   const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
@@ -658,7 +658,7 @@
       ShouldNotReachHere();
     }
   }
-  return round_to(stk, 2);
+  return align_up(stk, 2);
 }
 
 #if defined(COMPILER1) || defined(COMPILER2)
@@ -845,7 +845,7 @@
     }
   }
 
-  return round_to(stk, 2);
+  return align_up(stk, 2);
 }
 #endif // COMPILER2
 
@@ -873,7 +873,7 @@
 
   // Adapter needs TOP_IJAVA_FRAME_ABI.
   const int adapter_size = frame::top_ijava_frame_abi_size +
-                           round_to(total_args_passed * wordSize, frame::alignment_in_bytes);
+      align_up(total_args_passed * wordSize, frame::alignment_in_bytes);
 
   // regular (verified) c2i entry point
   c2i_entrypoint = __ pc();
@@ -1022,9 +1022,9 @@
     // number (all values in registers) or the maximum stack slot accessed.
 
     // Convert 4-byte c2 stack slots to words.
-    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
     // Round up to miminum stack alignment, in wordSize.
-    comp_words_on_stack = round_to(comp_words_on_stack, 2);
+    comp_words_on_stack = align_up(comp_words_on_stack, 2);
     __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
   }
 
@@ -1918,7 +1918,7 @@
         }
       }
     }
-    total_save_slots = double_slots * 2 + round_to(single_slots, 2); // round to even
+    total_save_slots = double_slots * 2 + align_up(single_slots, 2); // round to even
   }
 
   int oop_handle_slot_offset = stack_slots;
@@ -1945,7 +1945,7 @@
 
   // Now compute actual number of stack words we need.
   // Rounding to make stack properly aligned.
-  stack_slots = round_to(stack_slots,                                             // 7)
+  stack_slots = align_up(stack_slots,                                             // 7)
                          frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
   int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
 
@@ -2204,7 +2204,7 @@
 
     // Save argument registers and leave room for C-compatible ABI_REG_ARGS.
     int frame_size = frame::abi_reg_args_size +
-                     round_to(total_c_args * wordSize, frame::alignment_in_bytes);
+        align_up(total_c_args * wordSize, frame::alignment_in_bytes);
     __ mr(R11_scratch1, R1_SP);
     RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);
 
@@ -2570,7 +2570,7 @@
 // This function returns the adjust size (in number of words) to a c2i adapter
 // activation for use during deoptimization.
 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
-  return round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
+  return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
 }
 
 uint SharedRuntime::out_preserve_stack_slots() {
--- a/hotspot/src/cpu/s390/vm/c1_CodeStubs_s390.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/s390/vm/c1_CodeStubs_s390.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -284,7 +284,7 @@
   masm->block_comment(bc);
 #endif
 
-  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
+  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
 }
 
 void PatchingStub::emit_code(LIR_Assembler* ce) {
--- a/hotspot/src/cpu/s390/vm/frame_s390.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/s390/vm/frame_s390.inline.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -241,7 +241,7 @@
 
 inline int frame::interpreter_frame_monitor_size() {
   // Number of stack slots for a monitor
-  return round_to(BasicObjectLock::size() /* number of stack slots */,
+  return align_up(BasicObjectLock::size() /* number of stack slots */,
                   WordsPerLong /* Number of stack slots for a Java long. */);
 }
 
--- a/hotspot/src/cpu/s390/vm/sharedRuntime_s390.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/s390/vm/sharedRuntime_s390.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -744,7 +744,7 @@
         ShouldNotReachHere();
     }
   }
-  return round_to(stk, 2);
+  return align_up(stk, 2);
 }
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
@@ -840,7 +840,7 @@
         ShouldNotReachHere();
     }
   }
-  return round_to(stk, 2);
+  return align_up(stk, 2);
 }
 
 ////////////////////////////////////////////////////////////////////////
@@ -1734,7 +1734,7 @@
         }
       }
     }  // for
-    total_save_slots = double_slots * 2 + round_to(single_slots, 2); // Round to even.
+    total_save_slots = double_slots * 2 + align_up(single_slots, 2); // Round to even.
   }
 
   int oop_handle_slot_offset = stack_slots;
@@ -1761,7 +1761,7 @@
 
   // Now compute actual number of stack words we need.
   // Round to align stack properly.
-  stack_slots = round_to(stack_slots,                                     // 7)
+  stack_slots = align_up(stack_slots,                                     // 7)
                          frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
   int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
 
@@ -2395,7 +2395,7 @@
   // it has already been allocated.
 
   const int abi_scratch = frame::z_top_ijava_frame_abi_size;
-  int       extraspace  = round_to(total_args_passed, 2)*wordSize + abi_scratch;
+  int       extraspace  = align_up(total_args_passed, 2)*wordSize + abi_scratch;
   Register  sender_SP   = Z_R10;
   Register  value       = Z_R12;
 
@@ -2525,9 +2525,9 @@
     // registers are below. By subtracting stack0, we either get a negative
     // number (all values in registers) or the maximum stack slot accessed.
     // Convert VMRegImpl (4 byte) stack slots to words.
-    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+    int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
     // Round up to miminum stack alignment, in wordSize
-    comp_words_on_stack = round_to(comp_words_on_stack, 2);
+    comp_words_on_stack = align_up(comp_words_on_stack, 2);
 
     __ resize_frame(-comp_words_on_stack*wordSize, Z_R0_scratch);
   }
--- a/hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -66,12 +66,12 @@
   // the caller so we must ensure that it is properly aligned for our callee.
   //
   const int rounded_vm_local_words =
-       round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
+      align_up((int)frame::interpreter_frame_vm_local_words,WordsPerLong);
   // callee_locals and max_stack are counts, not the size in frame.
   const int locals_size =
-       round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
+      align_up(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
   const int max_stack_words = max_stack * Interpreter::stackElementWords;
-  return (round_to((max_stack_words
+  return (align_up((max_stack_words
                    + rounded_vm_local_words
                    + frame::memory_parameter_word_sp_offset), WordsPerLong)
                    // already rounded
@@ -82,7 +82,7 @@
 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
 
   // See call_stub code
-  int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
+  int call_stub_size  = align_up(7 + frame::memory_parameter_word_sp_offset,
                                  WordsPerLong);    // 7 + register save area
 
   // Save space for one monitor to get into the interpreted method in case
@@ -105,7 +105,7 @@
 
   int monitor_size           = monitors * frame::interpreter_frame_monitor_size();
 
-  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
+  assert(is_aligned(monitor_size, WordsPerLong), "must align");
 
   //
   // Note: if you look closely this appears to be doing something much different
@@ -131,8 +131,8 @@
   // there is no sense in messing working code.
   //
 
-  int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
-  assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
+  int rounded_cls = align_up((callee_locals - callee_params), WordsPerLong);
+  assert(is_aligned(rounded_cls, WordsPerLong), "must align");
 
   int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);
 
@@ -166,9 +166,9 @@
   // even if not fully filled out.
   assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
 
-  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
+  int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words,WordsPerLong);
   int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
-  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
+  assert(is_aligned(monitor_size, WordsPerLong), "must align");
 
   intptr_t* fp = interpreter_frame->fp();
 
@@ -198,7 +198,7 @@
     int parm_words  = caller_actual_parameters * Interpreter::stackElementWords;
     locals = Lesp_ptr + parm_words;
     int delta = local_words - parm_words;
-    int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
+    int computed_sp_adjustment = (delta > 0) ? align_up(delta, WordsPerLong) : 0;
     *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
     if (!is_bottom_frame) {
       // Llast_SP is set below for the current frame to SP (with the
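Several sparc asserts above change shape from x == round_to(x, N) to is_aligned(x, N). The two forms are equivalent for power-of-two N, which a quick exhaustive check confirms (local stand-ins for the HotSpot helpers):

#include <cassert>

static int align_up_int(int x, int a)    { return (x + a - 1) & ~(a - 1); }
static bool is_aligned_int(int x, int a) { return (x & (a - 1)) == 0; }

int main() {
  // x == align_up(x, a) exactly when x is already a multiple of a.
  for (int x = 0; x < 256; x++) {
    assert((x == align_up_int(x, 2)) == is_aligned_int(x, 2));
    assert((x == align_up_int(x, 8)) == is_aligned_int(x, 8));
  }
  return 0;
}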
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -251,7 +251,7 @@
   // SP -> ---------------
   //
   int i;
-  int sp_offset = round_to(frame::register_save_words, 2); //  start doubleword aligned
+  int sp_offset = align_up((int)frame::register_save_words, 2); //  start doubleword aligned
 
   // only G int registers are saved explicitly; others are found in register windows
   for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -134,7 +134,7 @@
 // Also begin is one past last monitor.
 
 inline BasicObjectLock* frame::interpreter_frame_monitor_begin()       const  {
-  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
+  int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words, WordsPerLong);
   return (BasicObjectLock *)fp_addr_at(-rounded_vm_local_words);
 }
 
@@ -148,7 +148,7 @@
 }
 
 inline int frame::interpreter_frame_monitor_size() {
-  return round_to(BasicObjectLock::size(), WordsPerLong);
+  return align_up(BasicObjectLock::size(), WordsPerLong);
 }
 
 inline Method** frame::interpreter_frame_method_addr() const {
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -65,7 +65,7 @@
   br(Assembler::negative, true, Assembler::pt, skip_move);
   delayed()->mov(G0, delta);
   bind(skip_move);
-  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
+  align_up(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
   sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
 }
 
@@ -2309,7 +2309,7 @@
 
 int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
   const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
-  int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
+  int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words, WordsPerLong);
   return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;
 }
 
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -1088,7 +1088,7 @@
 }
 
 void RegistersForDebugging::save_registers(MacroAssembler* a) {
-  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
+  a->sub(FP, align_up(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
   a->flushw();
   int i;
   for (i = 0; i < 8; ++i) {
@@ -1310,7 +1310,7 @@
 
   wrccr( O5_save_flags ); // Restore CCR's
 
-  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
+  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
 
   // stop_subroutine expects message pointer in I1.
   mov(I1, O1);
@@ -1339,7 +1339,7 @@
   // add one word to size in case struct is odd number of words long
   // It must be doubleword-aligned for storing doubles into it.
 
-    save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
+    save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
 
     // stop_subroutine expects message pointer in I1.
     // Size of set() should stay the same
@@ -1362,7 +1362,7 @@
 
 
 void MacroAssembler::warn(const char* msg) {
-  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
+  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
   RegistersForDebugging::save_registers(this);
   mov(O0, L0);
   // Size of set() should stay the same
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -77,7 +77,7 @@
     call_args_area = frame::register_save_words_sp_offset +
                      frame::memory_parameter_word_sp_offset*wordSize,
     // Make sure save locations are always 8 byte aligned.
-    // can't use round_to because it doesn't produce compile time constant
+    // can't use align_up because it doesn't produce compile time constant
     start_of_extra_save_area = ((call_args_area + 7) & ~7),
     g1_offset = start_of_extra_save_area, // g-regs needing saving
     g3_offset = g1_offset+8,
@@ -119,7 +119,7 @@
   // (as the stub's I's) when the runtime routine called by the stub creates its frame.
   int i;
   // Always make the frame size 16 byte aligned.
-  int frame_size = round_to(additional_frame_words + register_save_size, 16);
+  int frame_size = align_up(additional_frame_words + register_save_size, 16);
   // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
   int frame_size_in_slots = frame_size / sizeof(jint);
   // CodeBlob frame size is in words.
@@ -322,7 +322,7 @@
         Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
         regs[i].set2(r->as_VMReg());
       } else {
-        slot = round_to(slot, 2);  // align
+        slot = align_up(slot, 2);  // align
         regs[i].set2(VMRegImpl::stack2reg(slot));
         slot += 2;
       }
@@ -339,13 +339,13 @@
 
     case T_DOUBLE:
       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
-      if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
-        flt_reg = round_to(flt_reg, 2);  // align
+      if (align_up(flt_reg, 2) + 1 < flt_reg_max) {
+        flt_reg = align_up(flt_reg, 2);  // align
         FloatRegister r = as_FloatRegister(flt_reg);
         regs[i].set2(r->as_VMReg());
         flt_reg += 2;
       } else {
-        slot = round_to(slot, 2);  // align
+        slot = align_up(slot, 2);  // align
         regs[i].set2(VMRegImpl::stack2reg(slot));
         slot += 2;
       }
@@ -531,7 +531,7 @@
   const int arg_size = total_args_passed * Interpreter::stackElementSize;
   const int varargs_area =
                  (frame::varargs_offset - frame::register_save_words)*wordSize;
-  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
+  const int extraspace = align_up(arg_size + varargs_area, 2*wordSize);
 
   const int bias = STACK_BIAS;
   const int interp_arg_offset = frame::varargs_offset*wordSize +
@@ -753,9 +753,9 @@
   // in registers, we will commonly have no stack args.
   if (comp_args_on_stack > 0) {
     // Convert VMReg stack slots to words.
-    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+    int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
     // Round up to miminum stack alignment, in wordSize
-    comp_words_on_stack = round_to(comp_words_on_stack, 2);
+    comp_words_on_stack = align_up(comp_words_on_stack, 2);
     // Now compute the distance from Lesp to SP.  This calculation does not
     // include the space for total_args_passed because Lesp has not yet popped
     // the arguments.
@@ -1068,7 +1068,7 @@
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }
-  return round_to(max_stack_slots + 1, 2);
+  return align_up(max_stack_slots + 1, 2);
 
 }
 
@@ -1989,7 +1989,7 @@
 
   // Now the space for the inbound oop handle area
 
-  int oop_handle_offset = round_to(stack_slots, 2);
+  int oop_handle_offset = align_up(stack_slots, 2);
   stack_slots += total_save_slots;
 
   // Now any space we need for handlizing a klass if static method
@@ -2043,7 +2043,7 @@
 
   // Now compute actual number of stack words we need rounding to make
   // stack properly aligned.
-  stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
+  stack_slots = align_up(stack_slots, 2 * VMRegImpl::slots_per_word);
 
   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
 
@@ -2591,7 +2591,7 @@
   if (callee_locals < callee_parameters)
     return 0;                   // No adjustment for negative locals
   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
-  return round_to(diff, WordsPerLong);
+  return align_up(diff, WordsPerLong);
 }
 
 // "Top of Stack" slots that may be unused by the calling convention but must
--- a/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -711,7 +711,7 @@
   // (gri - 2/25/2000)
 
 
-  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
+  int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words, WordsPerLong );
 
   const int extra_space =
     rounded_vm_local_words +                   // frame local scratch space
--- a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -290,7 +290,7 @@
   // very hard to make a guess about what code might be in the icache.
   // Force the instruction to be double word aligned so that it
   // doesn't span a cache line.
-  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
+  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
 }
 
 void PatchingStub::emit_code(LIR_Assembler* ce) {
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -506,7 +506,7 @@
   }
 
   // return value can be odd number of VMRegImpl stack slots make multiple of 2
-  return round_to(stack, 2);
+  return align_up(stack, 2);
 }
 
 // Patch the callers callsite with entry to compiled code if it exists.
@@ -782,9 +782,9 @@
     // number (all values in registers) or the maximum stack slot accessed.
     // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
     // Convert 4-byte stack slots to words.
-    comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
+    comp_words_on_stack = align_up(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
     // Round up to miminum stack alignment, in wordSize
-    comp_words_on_stack = round_to(comp_words_on_stack, 2);
+    comp_words_on_stack = align_up(comp_words_on_stack, 2);
     __ subptr(rsp, comp_words_on_stack * wordSize);
   }
 
@@ -1670,7 +1670,7 @@
     total_save_slots = double_slots * 2 + single_slots;
     // align the save area
     if (double_slots != 0) {
-      stack_slots = round_to(stack_slots, 2);
+      stack_slots = align_up(stack_slots, 2);
     }
   }
 
@@ -1733,7 +1733,7 @@
 
   // Now compute actual number of stack words we need rounding to make
   // stack properly aligned.
-  stack_slots = round_to(stack_slots, StackAlignmentInSlots);
+  stack_slots = align_up(stack_slots, StackAlignmentInSlots);
 
   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
 
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -160,7 +160,7 @@
 #endif
 
   // Always make the frame size 16-byte aligned, both vector and non vector stacks are always allocated
-  int frame_size_in_bytes = round_to(reg_save_size*BytesPerInt, num_xmm_regs);
+  int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
   // CodeBlob frame size is in words.
@@ -513,7 +513,7 @@
     }
   }
 
-  return round_to(stk_args, 2);
+  return align_up(stk_args, 2);
 }
 
 // Patch the callers callsite with entry to compiled code if it exists.
@@ -582,7 +582,7 @@
   int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
 
   // stack is aligned, keep it that way
-  extraspace = round_to(extraspace, 2*wordSize);
+  extraspace = align_up(extraspace, 2*wordSize);
 
   // Get return address
   __ pop(rax);
@@ -782,9 +782,9 @@
     // number (all values in registers) or the maximum stack slot accessed.
 
     // Convert 4-byte c2 stack slots to words.
-    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
     // Round up to miminum stack alignment, in wordSize
-    comp_words_on_stack = round_to(comp_words_on_stack, 2);
+    comp_words_on_stack = align_up(comp_words_on_stack, 2);
     __ subptr(rsp, comp_words_on_stack * wordSize);
   }
 
@@ -1982,7 +1982,7 @@
     total_save_slots = double_slots * 2 + single_slots;
     // align the save area
     if (double_slots != 0) {
-      stack_slots = round_to(stack_slots, 2);
+      stack_slots = align_up(stack_slots, 2);
     }
   }
 
@@ -2039,7 +2039,7 @@
 
   // Now compute actual number of stack words we need rounding to make
   // stack properly aligned.
-  stack_slots = round_to(stack_slots, StackAlignmentInSlots);
+  stack_slots = align_up(stack_slots, StackAlignmentInSlots);
 
   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
 
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Thu Apr 13 09:57:51 2017 +0200
@@ -329,7 +329,7 @@
 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
   current_offset += pre_call_resets_size();  // skip fldcw, if any
   current_offset += 1;      // skip call opcode byte
-  return round_to(current_offset, alignment_required()) - current_offset;
+  return align_up(current_offset, alignment_required()) - current_offset;
 }
 
 // The address of the call instruction needs to be 4-byte aligned to
@@ -338,7 +338,7 @@
   current_offset += pre_call_resets_size();  // skip fldcw, if any
   current_offset += 5;      // skip MOV instruction
   current_offset += 1;      // skip call opcode byte
-  return round_to(current_offset, alignment_required()) - current_offset;
+  return align_up(current_offset, alignment_required()) - current_offset;
 }
 
 // EMIT_RM()
@@ -3275,7 +3275,7 @@
   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
   // Otherwise, it is above the locks and verification slot and alignment word
   return_addr(STACK - 1 +
-              round_to((Compile::current()->in_preserve_stack_slots() +
+              align_up((Compile::current()->in_preserve_stack_slots() +
                         Compile::current()->fixed_slots()),
                        stack_alignment_in_slots()));
 
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Thu Apr 13 09:57:51 2017 +0200
@@ -579,7 +579,7 @@
 {
   current_offset += clear_avx_size(); // skip vzeroupper
   current_offset += 1; // skip call opcode byte
-  return round_to(current_offset, alignment_required()) - current_offset;
+  return align_up(current_offset, alignment_required()) - current_offset;
 }
 
 // The address of the call instruction needs to be 4-byte aligned to
@@ -588,7 +588,7 @@
 {
   current_offset += clear_avx_size(); // skip vzeroupper
   current_offset += 11; // skip movq instruction + call opcode byte
-  return round_to(current_offset, alignment_required()) - current_offset;
+  return align_up(current_offset, alignment_required()) - current_offset;
 }
 
 // EMIT_RM()
@@ -2807,7 +2807,7 @@
   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
   // Otherwise, it is above the locks and verification slot and alignment word
   return_addr(STACK - 2 +
-              round_to((Compile::current()->in_preserve_stack_slots() +
+              align_up((Compile::current()->in_preserve_stack_slots() +
                         Compile::current()->fixed_slots()),
                        stack_alignment_in_slots()));
 
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java	Thu Apr 13 09:57:51 2017 +0200
@@ -845,7 +845,7 @@
   // // Also begin is one past last monitor.
   //
   // inline BasicObjectLock* frame::interpreter_frame_monitor_begin()       const  {
-  //   int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
+  //   int rounded_vm_local_words = align_up(frame::interpreter_frame_vm_local_words, WordsPerLong);
   //   return (BasicObjectLock *)fp_addr_at(-rounded_vm_local_words);
   // }
   //
@@ -860,7 +860,7 @@
   //
   //
   // inline int frame::interpreter_frame_monitor_size() {
-  //   return round_to(BasicObjectLock::size(), WordsPerLong);
+  //   return align_up(BasicObjectLock::size(), WordsPerLong);
   // }
 
   public Address addressOfInterpreterFrameMethod() {
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -1197,7 +1197,7 @@
 
   // Make the stack size a multiple of the page size so that
   // the yellow/red zones can be guarded.
-  JavaThread::set_stack_size_at_create(round_to(stack_size_in_bytes, vm_page_size()));
+  JavaThread::set_stack_size_at_create(align_up(stack_size_in_bytes, vm_page_size()));
 
   // Reminder: a compiler thread is a Java thread.
   _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed +
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -4048,7 +4048,7 @@
 
   // If stack_commit_size is 0, windows will reserve the default size,
   // but only commit a small portion of it.
-  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
+  size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
   size_t default_reserve_size = os::win32::default_stack_size();
   size_t actual_reserve_size = stack_commit_size;
   if (stack_commit_size < default_reserve_size) {
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -748,7 +748,7 @@
   dest_blob->set_strings(_code_strings);
 
   // Done moving code bytes; were they the right size?
-  assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");
+  assert((int)align_up(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");
 
   // Flush generated code
   ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
--- a/hotspot/src/share/vm/c1/c1_FrameMap.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_FrameMap.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -187,9 +187,9 @@
   assert(_num_spills == -1, "can only be set once");
   _num_spills = nof_slots;
   assert(_framesize == -1, "should only be calculated once");
-  _framesize =  round_to(in_bytes(sp_offset_for_monitor_base(0)) +
-                         _num_monitors * sizeof(BasicObjectLock) +
-                         sizeof(intptr_t) +                        // offset of deopt orig pc
+  _framesize =  align_up(in_bytes(sp_offset_for_monitor_base(0)) +
+                         _num_monitors * (int)sizeof(BasicObjectLock) +
+                         (int)sizeof(intptr_t) +                        // offset of deopt orig pc
                          frame_pad_in_bytes,
                          StackAlignmentInBytes) / 4;
   int java_index = 0;
@@ -270,15 +270,15 @@
 
 ByteSize FrameMap::sp_offset_for_spill(const int index) const {
   assert(index >= 0 && index < _num_spills, "out of range");
-  int offset = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
+  int offset = align_up(first_available_sp_in_frame + _reserved_argument_area_size, (int)sizeof(double)) +
     index * spill_slot_size_in_bytes;
   return in_ByteSize(offset);
 }
 
 ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const {
-  int end_of_spills = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
+  int end_of_spills = align_up(first_available_sp_in_frame + _reserved_argument_area_size, (int)sizeof(double)) +
     _num_spills * spill_slot_size_in_bytes;
-  int offset = (int) round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock);
+  int offset = align_up(end_of_spills, HeapWordSize) + index * (int)sizeof(BasicObjectLock);
   return in_ByteSize(offset);
 }
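A recurring pattern in the shared-code hunks (here and in exceptionHandlerTable, icBuffer, and nmethod below) is a new (int) cast in front of sizeof or an unsigned constant. That suggests the templated helpers are stricter about mixing signed and unsigned operand types than the old macros were. A sketch of the failure mode, assuming a same-type template signature (an assumption; the real signature may differ):

#include <cstddef>

// Assumed shape: one type parameter for both operands, so deduction fails
// when an int meets the size_t produced by sizeof.
template <typename T>
inline T align_up(T value, T alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int sp_offset_for_spill_sketch(int base, int index) {
  // int offset = align_up(base, sizeof(double));     // ill-formed:
  //                                                  // T = int vs size_t
  int offset = align_up(base, (int)sizeof(double));   // deduces T = int
  return offset + index * (int)sizeof(double);
}

int main() { return sp_offset_for_spill_sketch(4, 0) == 8 ? 0 : 1; }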
 
--- a/hotspot/src/share/vm/c1/c1_LinearScan.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -171,7 +171,7 @@
 
   int           num_virtual_regs() const         { return _num_virtual_regs; }
   // size of live_in and live_out sets of BasicBlocks (BitMap needs rounded size for iteration)
-  int           live_set_size() const            { return round_to(_num_virtual_regs, BitsPerWord); }
+  int           live_set_size() const            { return align_up(_num_virtual_regs, BitsPerWord); }
   bool          has_fpu_registers() const        { return _has_fpu_registers; }
   int           num_loops() const                { return ir()->num_loops(); }
   bool          is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }
--- a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -218,7 +218,7 @@
   assert(arg >= 0 && arg < _arg_size, "must be an argument.");
   bool modified = false;
   int l = offset / HeapWordSize;
-  int h = round_to(offset + size_in_bytes, HeapWordSize) / HeapWordSize;
+  int h = align_up(offset + size_in_bytes, HeapWordSize) / HeapWordSize;
   if (l > ARG_OFFSET_MAX)
     l = ARG_OFFSET_MAX;
   if (h > ARG_OFFSET_MAX+1)
@@ -236,7 +236,7 @@
   }
   assert(arg >= 0 && arg < _arg_size, "must be an argument.");
   int l = offset / HeapWordSize;
-  int h = round_to(offset + size_in_bytes, HeapWordSize) / HeapWordSize;
+  int h = align_up(offset + size_in_bytes, HeapWordSize) / HeapWordSize;
   if (l > ARG_OFFSET_MAX)
     l = ARG_OFFSET_MAX;
   if (h > ARG_OFFSET_MAX+1)
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -2105,7 +2105,7 @@
     StackMapFrame* current_frame, StackMapTable* stackmap_table, TRAPS) {
   int bci = bcs->bci();
   address bcp = bcs->bcp();
-  address aligned_bcp = (address) round_to((intptr_t)(bcp + 1), jintSize);
+  address aligned_bcp = align_up(bcp + 1, jintSize);
 
   if (_klass->major_version() < NONZERO_PADDING_BYTES_IN_SWITCH_MAJOR_VERSION) {
     // 4639449 & 4647081: padding bytes must be 0
@@ -2162,7 +2162,7 @@
   for (int i = 0; i < keys; i++) {
     // Because check_jump_target() may safepoint, the bytecode could have
     // moved, which means 'aligned_bcp' is no good and needs to be recalculated.
-    aligned_bcp = (address)round_to((intptr_t)(bcs->bcp() + 1), jintSize);
+    aligned_bcp = align_up(bcs->bcp() + 1, jintSize);
     target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
     stackmap_table->check_jump_target(
       current_frame, target, CHECK_VERIFY(this));
@@ -2449,7 +2449,7 @@
       case Bytecodes::_lookupswitch:
       case Bytecodes::_tableswitch:
         {
-          address aligned_bcp = (address) round_to((intptr_t)(bcs.bcp() + 1), jintSize);
+          address aligned_bcp = align_up(bcs.bcp() + 1, jintSize);
           u4 default_offset = Bytes::get_Java_u4(aligned_bcp) + bci;
           int keys, delta;
           if (opcode == Bytecodes::_tableswitch) {
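The verifier hunks also show the pointer-friendly API paying off: align_up(bcp + 1, jintSize) replaces a double cast through intptr_t. A self-contained sketch of what that alignment computes for a switch bytecode (jintSize is 4 by definition of a Java int; the helper here is a local stand-in):

#include <cassert>
#include <cstdint>

typedef unsigned char* address;

static address align_up_addr(address p, uintptr_t a) {
  return (address)(((uintptr_t)p + a - 1) & ~(a - 1));
}

int main() {
  const uintptr_t jintSize = 4;
  unsigned char code[16] = {0};
  address bcp = code + 2;                          // switch opcode position
  // Operands start at the first 4-byte boundary after the opcode:
  address aligned_bcp = align_up_addr(bcp + 1, jintSize);
  assert(((uintptr_t)aligned_bcp % jintSize) == 0);
  assert(aligned_bcp >= bcp + 1 && aligned_bcp <= bcp + 4);
  return 0;
}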
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -60,12 +60,12 @@
 // This must be consistent with the CodeBlob constructor's layout actions.
 unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
   unsigned int size = header_size;
-  size += round_to(cb->total_relocation_size(), oopSize);
+  size += align_up(cb->total_relocation_size(), oopSize);
   // align the size to CodeEntryAlignment
   size = align_code_offset(size);
-  size += round_to(cb->total_content_size(), oopSize);
-  size += round_to(cb->total_oop_size(), oopSize);
-  size += round_to(cb->total_metadata_size(), oopSize);
+  size += align_up(cb->total_content_size(), oopSize);
+  size += align_up(cb->total_oop_size(), oopSize);
+  size += align_up(cb->total_metadata_size(), oopSize);
   return size;
 }
 
@@ -87,9 +87,9 @@
   _content_begin(layout.content_begin()),
   _type(type)
 {
-  assert(layout.size()        == round_to(layout.size(),        oopSize), "unaligned size");
-  assert(layout.header_size() == round_to(layout.header_size(), oopSize), "unaligned size");
-  assert(layout.relocation_size() == round_to(layout.relocation_size(), oopSize), "unaligned size");
+  assert(is_aligned(layout.size(),            oopSize), "unaligned size");
+  assert(is_aligned(layout.header_size(),     oopSize), "unaligned size");
+  assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size");
   assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
 #ifdef COMPILER1
   // probably wrong for tiered
@@ -114,8 +114,8 @@
   _content_begin(layout.content_begin()),
   _type(type)
 {
-  assert(_size        == round_to(_size,        oopSize), "unaligned size");
-  assert(_header_size == round_to(_header_size, oopSize), "unaligned size");
+  assert(is_aligned(_size,        oopSize), "unaligned size");
+  assert(is_aligned(_header_size, oopSize), "unaligned size");
   assert(_data_offset <= _size, "codeBlob is too small");
   assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
 
@@ -131,7 +131,7 @@
 RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
   : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
 {
-  assert(locs_size   == round_to(locs_size,   oopSize), "unaligned size");
+  assert(is_aligned(locs_size, oopSize), "unaligned size");
 }
 
 
@@ -221,7 +221,7 @@
   unsigned int size = sizeof(BufferBlob);
   // align the size to CodeEntryAlignment
   size = CodeBlob::align_code_offset(size);
-  size += round_to(buffer_size, oopSize);
+  size += align_up(buffer_size, oopSize);
   assert(name != NULL, "must provide a name");
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@@ -304,7 +304,7 @@
   unsigned int size = sizeof(MethodHandlesAdapterBlob);
   // align the size to CodeEntryAlignment
   size = CodeBlob::align_code_offset(size);
-  size += round_to(buffer_size, oopSize);
+  size += align_up(buffer_size, oopSize);
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     blob = new (size) MethodHandlesAdapterBlob(size);
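The allocation_size change above keeps its invariant: each component is padded to oopSize individually, so the running total stays oop-aligned regardless of the raw section sizes. A worked check (oopSize of 8 is an assumption for illustration; the CodeEntryAlignment step is omitted):

#include <cassert>

static unsigned align_up_u(unsigned x, unsigned a) { return (x + a - 1) & ~(a - 1); }

int main() {
  const unsigned oopSize = 8;
  unsigned size = 128;                      // header size, already aligned
  size += align_up_u(37,   oopSize);        // relocation bytes -> 40
  size += align_up_u(1001, oopSize);        // content bytes    -> 1008
  size += align_up_u(12,   oopSize);        // oop bytes        -> 16
  size += align_up_u(20,   oopSize);        // metadata bytes   -> 24
  assert(size == 128 + 40 + 1008 + 16 + 24);
  assert(size % oopSize == 0);
  return 0;
}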
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -263,7 +263,7 @@
     _code_offset(_content_offset),
     _data_offset(data_offset)
   {
-    assert(_relocation_size == round_to(_relocation_size, oopSize), "unaligned size");
+    assert(is_aligned(_relocation_size, oopSize), "unaligned size");
 
     _code_begin = (address) start + _code_offset;
     _code_end = (address) start + _data_offset;
@@ -279,12 +279,12 @@
   CodeBlobLayout(const address start, int size, int header_size, const CodeBuffer* cb) :
     _size(size),
     _header_size(header_size),
-    _relocation_size(round_to(cb->total_relocation_size(), oopSize)),
+    _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
     _content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)),
     _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
-    _data_offset(_content_offset + round_to(cb->total_content_size(), oopSize))
+    _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize))
   {
-    assert(_relocation_size == round_to(_relocation_size, oopSize), "unaligned size");
+    assert(is_aligned(_relocation_size, oopSize), "unaligned size");
 
     _code_begin = (address) start + _code_offset;
     _code_end = (address) start + _data_offset;
--- a/hotspot/src/share/vm/code/codeCache.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -406,7 +406,7 @@
 
   // Reserve Space
   size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
-  size_initial = round_to(size_initial, os::vm_page_size());
+  size_initial = align_up(size_initial, os::vm_page_size());
   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
     vm_exit_during_initialization("Could not reserve enough space for code cache");
   }
@@ -1041,7 +1041,7 @@
   // This was originally just a check of the alignment, causing failure, instead, round
   // the code cache to the page size.  In particular, Solaris is moving to a larger
   // default page size.
-  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
+  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());
 
   if (SegmentedCodeCache) {
     // Use multiple code heaps
--- a/hotspot/src/share/vm/code/exceptionHandlerTable.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/code/exceptionHandlerTable.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -114,7 +114,7 @@
   );
 
   // nmethod support
-  int  size_in_bytes() const { return round_to(_length * sizeof(HandlerTableEntry), oopSize); }
+  int  size_in_bytes() const { return align_up(_length * (int)sizeof(HandlerTableEntry), oopSize); }
   void copy_to(CompiledMethod* nm);
   void copy_bytes_to(address addr);
 
--- a/hotspot/src/share/vm/code/icBuffer.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/code/icBuffer.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -55,14 +55,14 @@
 
   // General info
   int     size() const                           { return _size; }
-  static  int code_size_to_size(int code_size)   { return round_to(sizeof(ICStub), CodeEntryAlignment) + code_size; }
+  static  int code_size_to_size(int code_size)   { return align_up((int)sizeof(ICStub), CodeEntryAlignment) + code_size; }
 
  public:
   // Creation
   void set_stub(CompiledIC *ic, void* cached_value, address dest_addr);
 
   // Code info
-  address code_begin() const                     { return (address)this + round_to(sizeof(ICStub), CodeEntryAlignment); }
+  address code_begin() const                     { return (address)this + align_up(sizeof(ICStub), CodeEntryAlignment); }
   address code_end() const                       { return (address)this + size(); }
 
   // Call site info
@@ -84,7 +84,7 @@
 
 // ICStub Creation
 inline ICStub* ICStub_from_destination_address(address destination_address) {
-  ICStub* stub = (ICStub*) (destination_address - round_to(sizeof(ICStub), CodeEntryAlignment));
+  ICStub* stub = (ICStub*) (destination_address - align_up(sizeof(ICStub), CodeEntryAlignment));
   #ifdef ASSERT
   stub->verify();
   #endif
--- a/hotspot/src/share/vm/code/nmethod.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -368,7 +368,7 @@
 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
 // of oopSize, then 2*sizeof(PcDesc) is)
 static int adjust_pcs_size(int pcs_size) {
-  int nsize = round_to(pcs_size,   oopSize);
+  int nsize = align_up(pcs_size,   oopSize);
   if ((nsize % sizeof(PcDesc)) != 0) {
     nsize = pcs_size + sizeof(PcDesc);
   }
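
A worked example of the contract documented above, under the stated assumption
that 2 * sizeof(PcDesc) is a multiple of oopSize even when sizeof(PcDesc) itself
is not (the sizes 16 and 24 below are illustrative only, and pcs_size is assumed
to be a whole number of PcDescs):

    #include <cassert>
    #include <cstddef>

    size_t align_up_sz(size_t x, size_t a) { return (x + a - 1) & ~(a - 1); }

    // Mirror of adjust_pcs_size() with oop_size = 16, pcdesc_size = 24
    // (24 is not a multiple of 16, but 2 * 24 = 48 is).
    size_t adjust(size_t pcs_size, size_t oop_size, size_t pcdesc_size) {
      size_t nsize = align_up_sz(pcs_size, oop_size);
      if (nsize % pcdesc_size != 0) {
        nsize = pcs_size + pcdesc_size;  // pad with one whole PcDesc instead
      }
      return nsize;
    }

    int main() {
      // 3 PcDescs = 72 bytes; align_up(72, 16) = 80 is not a PcDesc multiple,
      // so one extra PcDesc is added: 72 + 24 = 96 = 6 * 16 = 4 * 24.
      assert(adjust(3 * 24, 16, 24) == 96);
      return 0;
    }
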
@@ -487,10 +487,10 @@
     int nmethod_size =
       CodeBlob::allocation_size(code_buffer, sizeof(nmethod))
       + adjust_pcs_size(debug_info->pcs_size())
-      + round_to(dependencies->size_in_bytes() , oopSize)
-      + round_to(handler_table->size_in_bytes(), oopSize)
-      + round_to(nul_chk_table->size_in_bytes(), oopSize)
-      + round_to(debug_info->data_size()       , oopSize);
+      + align_up((int)dependencies->size_in_bytes(), oopSize)
+      + align_up(handler_table->size_in_bytes()    , oopSize)
+      + align_up(nul_chk_table->size_in_bytes()    , oopSize)
+      + align_up(debug_info->data_size()           , oopSize);
 
     nm = new (nmethod_size, comp_level)
     nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets,
@@ -575,8 +575,8 @@
     _consts_offset           = data_offset();
     _stub_offset             = data_offset();
     _oops_offset             = data_offset();
-    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
-    scopes_data_offset       = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
+    _metadata_offset         = _oops_offset         + align_up(code_buffer->total_oop_size(), oopSize);
+    scopes_data_offset       = _metadata_offset     + align_up(code_buffer->total_metadata_size(), wordSize);
     _scopes_pcs_offset       = scopes_data_offset;
     _dependencies_offset     = _scopes_pcs_offset;
     _handler_table_offset    = _dependencies_offset;
@@ -730,14 +730,14 @@
     }
 
     _oops_offset             = data_offset();
-    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
-    int scopes_data_offset   = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);
+    _metadata_offset         = _oops_offset          + align_up(code_buffer->total_oop_size(), oopSize);
+    int scopes_data_offset   = _metadata_offset      + align_up(code_buffer->total_metadata_size(), wordSize);
 
-    _scopes_pcs_offset       = scopes_data_offset    + round_to(debug_info->data_size       (), oopSize);
+    _scopes_pcs_offset       = scopes_data_offset    + align_up(debug_info->data_size       (), oopSize);
     _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
-    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
-    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
-    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
+    _handler_table_offset    = _dependencies_offset  + align_up((int)dependencies->size_in_bytes (), oopSize);
+    _nul_chk_table_offset    = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
+    _nmethod_end_offset      = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize);
     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
     _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
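
The offsets above implement a simple packed layout: each data section starts at
the previous section's offset plus the predecessor's size rounded up to its
required alignment. A compact sketch with invented section sizes (oopSize is
assumed to be 8 here):

    #include <cstdio>

    int align_up_i(int x, int a) { return (x + a - 1) & ~(a - 1); }

    int main() {
      const int oop_size = 8;
      const int oops = 20, metadata = 12, scopes_data = 30;  // invented sizes
      int oops_offset        = 0;
      int metadata_offset    = oops_offset        + align_up_i(oops, oop_size);        // 24
      int scopes_data_offset = metadata_offset    + align_up_i(metadata, oop_size);    // 40
      int scopes_pcs_offset  = scopes_data_offset + align_up_i(scopes_data, oop_size); // 72
      printf("%d %d %d\n", metadata_offset, scopes_data_offset, scopes_pcs_offset);
      return 0;
    }
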
--- a/hotspot/src/share/vm/code/stubs.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/code/stubs.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -64,7 +64,7 @@
 
 StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
                      Mutex* lock, const char* name) : _mutex(lock) {
-  intptr_t size = round_to(buffer_size, 2*BytesPerWord);
+  intptr_t size = align_up(buffer_size, 2*BytesPerWord);
   BufferBlob* blob = BufferBlob::create(name, size);
   if( blob == NULL) {
     vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", name);
@@ -111,7 +111,7 @@
   assert(requested_code_size > 0, "requested_code_size must be > 0");
   if (_mutex != NULL) _mutex->lock();
   Stub* s = current_stub();
-  int requested_size = round_to(stub_code_size_to_size(requested_code_size), CodeEntryAlignment);
+  int requested_size = align_up(stub_code_size_to_size(requested_code_size), CodeEntryAlignment);
   if (requested_size <= available_space()) {
     if (is_contiguous()) {
       // Queue: |...|XXXXXXX|.............|
@@ -149,7 +149,7 @@
 
 void StubQueue::commit(int committed_code_size, CodeStrings& strings) {
   assert(committed_code_size > 0, "committed_code_size must be > 0");
-  int committed_size = round_to(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
+  int committed_size = align_up(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
   Stub* s = current_stub();
   assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
   stub_initialize(s, committed_size, strings);
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -51,7 +51,7 @@
 void* VtableStub::operator new(size_t size, int code_size) throw() {
   assert(size == sizeof(VtableStub), "mismatched size");
  // compute real VtableStub size (rounded up to the nearest word)
-  const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
+  const int real_size = align_up(code_size + (int)sizeof(VtableStub), wordSize);
   // malloc them in chunks to minimize header overhead
   const int chunk_factor = 32;
   if (_chunk == NULL || _chunk + real_size > _chunk_end) {
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -797,7 +797,7 @@
   // because the two are not necessarily equal for some kinds of
   // spaces, in particular, certain kinds of free list spaces.
   // We could use the more complicated but more precise:
-  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
+  // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
   // but the slight imprecision seems acceptable in the assertion check.
   assert(MemRegion(bottom(), end()).contains(mr),
          "Should be within used space");
@@ -858,7 +858,7 @@
   assert_lock_strong(freelistLock());
   // Can't use used_region() below because it may not necessarily
   // be the same as [bottom(),end()); although we could
-  // use [used_region().start(),round_to(used_region().end(),CardSize)),
+  // use [used_region().start(),align_up(used_region().end(),CardSize)),
   // that appears too cumbersome, so we just do the simpler check
   // in the assertion below.
   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -896,7 +896,7 @@
         // in the heap. In the case of the MUT below, that's a
         // card size.
         MemRegion mr(start,
-                     (HeapWord*)round_to((intptr_t)(start + obj_size),
+                     align_up(start + obj_size,
                         CardTableModRefBS::card_size /* bytes */));
         if (par) {
           _modUnionTable.par_mark_range(mr);
@@ -4576,13 +4576,10 @@
   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
   MemRegion span = sp->used_region();
   HeapWord* start_addr = span.start();
-  HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
-                                           alignment);
+  HeapWord* end_addr = align_up(span.end(), alignment);
   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
-  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
-         start_addr, "Check alignment");
-  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
-         chunk_size, "Check alignment");
+  assert(is_aligned(start_addr, alignment), "Check alignment");
+  assert(is_aligned(chunk_size, alignment), "Check alignment");
 
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
     // Having claimed the nth_task, compute corresponding mem-region,
@@ -4928,7 +4925,7 @@
       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
       MemRegion ur = _cmsGen->used_region();
       HeapWord* lb = ur.start();
-      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
+      HeapWord* ub = align_up(ur.end(), alignment);
       MemRegion cms_span(lb, ub);
       _modUnionTable.dirty_range_iterate_clear(cms_span,
                                                &markFromDirtyCardsClosure);
@@ -5625,10 +5622,9 @@
   }
   assert(sz > 0, "size must be nonzero");
   HeapWord* next_block = addr + sz;
-  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
-                                             CardTableModRefBS::card_size);
-  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
-         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
+  HeapWord* next_card  = align_up(next_block, CardTableModRefBS::card_size);
+  assert(align_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
+         align_down((uintptr_t)next_card, CardTableModRefBS::card_size),
          "must be different cards");
   return next_card;
 }
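
next_card() above returns the start of the first card at or beyond the end of
the current block, and the rewritten assert checks "different cards" by
comparing the align_down'd card bases. A sketch (512 is HotSpot's card size;
the addresses are invented):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kCardSize = 512;

    uintptr_t align_up_p(uintptr_t x, uintptr_t a)   { return (x + a - 1) & ~(a - 1); }
    uintptr_t align_down_p(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }

    int main() {
      uintptr_t addr = 0x1000 + 100;     // inside some card
      uintptr_t next_block = addr + 40;  // block ends inside the same card
      uintptr_t next_card = align_up_p(next_block, kCardSize);
      // The card containing addr must lie strictly below next_card's card.
      assert(align_down_p(addr, kCardSize) < align_down_p(next_card, kCardSize));
      return 0;
    }
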
@@ -5732,8 +5728,7 @@
   // convert address range into offset range
   size_t start_ofs = heapWordToOffset(mr.start());
   // Make sure that end() is appropriately aligned
-  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
-                        (1 << (_shifter+LogHeapWordSize))),
+  assert(mr.end() == align_up(mr.end(), (1 << (_shifter+LogHeapWordSize))),
          "Misaligned mr.end()");
   size_t end_ofs   = heapWordToOffset(mr.end());
   assert(end_ofs > start_ofs, "Should mark at least one bit");
@@ -6287,8 +6282,7 @@
   assert(_markStack->isEmpty(), "would cause duplicates on stack");
   assert(_span.contains(addr), "Out of bounds _finger?");
   _finger = addr;
-  _threshold = (HeapWord*)round_to(
-                 (intptr_t)_finger, CardTableModRefBS::card_size);
+  _threshold = align_up(_finger, CardTableModRefBS::card_size);
 }
 
 // Should revisit to see if this should be restructured for
@@ -6315,8 +6309,7 @@
         // during the preclean or remark phase. (CMSCleanOnEnter)
         if (CMSCleanOnEnter) {
           size_t sz = _collector->block_size_using_printezis_bits(addr);
-          HeapWord* end_card_addr   = (HeapWord*)round_to(
-                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
+          HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
           MemRegion redirty_range = MemRegion(addr, end_card_addr);
           assert(!redirty_range.is_empty(), "Arithmetical tautology");
           // Bump _threshold to end_card_addr; note that
@@ -6403,11 +6396,9 @@
       // _threshold is always kept card-aligned but _finger isn't
       // always card-aligned.
       HeapWord* old_threshold = _threshold;
-      assert(old_threshold == (HeapWord*)round_to(
-              (intptr_t)old_threshold, CardTableModRefBS::card_size),
+      assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
              "_threshold should always be card-aligned");
-      _threshold = (HeapWord*)round_to(
-                     (intptr_t)_finger, CardTableModRefBS::card_size);
+      _threshold = align_up(_finger, CardTableModRefBS::card_size);
       MemRegion mr(old_threshold, _threshold);
       assert(!mr.is_empty(), "Control point invariant");
       assert(_span.contains(mr), "Should clear within span");
@@ -6517,11 +6508,9 @@
     // _threshold is always kept card-aligned but _finger isn't
     // always card-aligned.
     HeapWord* old_threshold = _threshold;
-    assert(old_threshold == (HeapWord*)round_to(
-            (intptr_t)old_threshold, CardTableModRefBS::card_size),
+    assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
            "_threshold should always be card-aligned");
-    _threshold = (HeapWord*)round_to(
-                   (intptr_t)_finger, CardTableModRefBS::card_size);
+    _threshold = align_up(_finger, CardTableModRefBS::card_size);
     MemRegion mr(old_threshold, _threshold);
     assert(!mr.is_empty(), "Control point invariant");
     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
@@ -6888,8 +6877,7 @@
          // are required.
          if (obj->is_objArray()) {
            size_t sz = obj->size();
-           HeapWord* end_card_addr = (HeapWord*)round_to(
-                                        (intptr_t)(addr+sz), CardTableModRefBS::card_size);
+           HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
            MemRegion redirty_range = MemRegion(addr, end_card_addr);
            assert(!redirty_range.is_empty(), "Arithmetical tautology");
            _mod_union_table->mark_range(redirty_range);
@@ -7618,8 +7606,7 @@
         // table.
         if (obj->is_objArray()) {
           size_t sz = obj->size();
-          HeapWord* end_card_addr =
-            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
+          HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
           MemRegion redirty_range = MemRegion(addr, end_card_addr);
           assert(!redirty_range.is_empty(), "Arithmetical tautology");
           _collector->_modUnionTable.mark_range(redirty_range);
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -446,7 +446,7 @@
   // Align the end of mr so it's at a card boundary.
   // This is superfluous except at the end of the space;
   // we should do better than this XXX
-  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
+  MemRegion mr2(mr.start(), align_up(mr.end(),
                  CardTableModRefBS::card_size /* bytes */));
   _t->mark_range(mr2);
 }
@@ -455,7 +455,7 @@
   // Align the end of mr so it's at a card boundary.
   // This is superfluous except at the end of the space;
   // we should do better than this XXX
-  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
+  MemRegion mr2(mr.start(), align_up(mr.end(),
                  CardTableModRefBS::card_size /* bytes */));
   _t->par_mark_range(mr2);
 }
--- a/hotspot/src/share/vm/gc/g1/sparsePRT.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/sparsePRT.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -68,7 +68,7 @@
   static size_t size() { return sizeof(SparsePRTEntry) + sizeof(card_elem_t) * (cards_num() - card_array_alignment); }
   // Returns the size of the card array.
   static int cards_num() {
-    return align_up(G1RSetSparseRegionEntries, card_array_alignment);
+    return align_up((int)G1RSetSparseRegionEntries, (int)card_array_alignment);
   }
 
   // Set the region_ind to the given value, and delete all cards.
--- a/hotspot/src/share/vm/gc/parallel/generationSizer.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/generationSizer.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -57,7 +57,7 @@
 
  // Can a page size be anything other than a power of two?
   assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
-  size_t new_alignment = round_to(page_sz, _gen_alignment);
+  size_t new_alignment = align_up(page_sz, _gen_alignment);
   if (new_alignment != _gen_alignment) {
     _gen_alignment = new_alignment;
     _space_alignment = new_alignment;
--- a/hotspot/src/share/vm/gc/parallel/mutableNUMASpace.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/mutableNUMASpace.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -106,13 +106,13 @@
             }
 #endif
             MemRegion invalid;
-            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
-            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
+            HeapWord *crossing_start = align_up((HeapWord*)cur_top, os::vm_page_size());
+            HeapWord *crossing_end = align_down((HeapWord*)(cur_top + touched_words), os::vm_page_size());
             if (crossing_start != crossing_end) {
               // If object header crossed a small page boundary we mark the area
               // as invalid rounding it to a page_size().
-              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
-              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
+              HeapWord *start = MAX2(align_down((HeapWord*)cur_top, page_size()), s->bottom());
+              HeapWord *end = MIN2(align_up((HeapWord*)(cur_top + touched_words), page_size()), s->end());
               invalid = MemRegion(start, end);
             }
 
@@ -297,8 +297,8 @@
 
 // Bias region towards the first-touching lgrp. Set the right page sizes.
 void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
-  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
-  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
+  HeapWord *start = align_up(mr.start(), page_size());
+  HeapWord *end = align_down(mr.end(), page_size());
   if (end > start) {
     MemRegion aligned_region(start, end);
     assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
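
bias_region() and free_region() can only act on whole pages, so the region is
shrunk to its page-aligned interior: the start is aligned up, the end is aligned
down, and the end > start guard skips regions that do not cover a full page. A
sketch with an invented 4 KiB page and addresses:

    #include <cstdint>
    #include <cstdio>

    uintptr_t align_up_p(uintptr_t x, uintptr_t a)   { return (x + a - 1) & ~(a - 1); }
    uintptr_t align_down_p(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }

    int main() {
      const uintptr_t page = 4096;
      uintptr_t lo = 0x10000 + 100;  // region straddles three pages
      uintptr_t hi = 0x13000 - 100;
      uintptr_t start = align_up_p(lo, page);    // 0x11000
      uintptr_t end   = align_down_p(hi, page);  // 0x12000
      if (end > start) {  // only the fully covered page(s) remain
        printf("whole pages: [%#llx, %#llx)\n",
               (unsigned long long)start, (unsigned long long)end);
      }
      return 0;
    }
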
@@ -316,8 +316,8 @@
 
 // Free all pages in the region.
 void MutableNUMASpace::free_region(MemRegion mr) {
-  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
-  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
+  HeapWord *start = align_up(mr.start(), page_size());
+  HeapWord *end = align_down(mr.end(), page_size());
   if (end > start) {
     MemRegion aligned_region(start, end);
     assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
@@ -437,7 +437,7 @@
 size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
   size_t pages_available = base_space_size();
   for (int j = 0; j < i; j++) {
-    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
+    pages_available -= align_down(current_chunk_size(j), page_size()) / page_size();
   }
   pages_available -= lgrp_spaces()->length() - i - 1;
   assert(pages_available > 0, "No pages left");
@@ -453,7 +453,7 @@
   chunk_size = MAX2(chunk_size, page_size());
 
   if (limit > 0) {
-    limit = round_down(limit, page_size());
+    limit = align_down(limit, page_size());
     if (chunk_size > current_chunk_size(i)) {
       size_t upper_bound = pages_available * page_size();
       if (upper_bound > limit &&
@@ -485,7 +485,7 @@
   if (new_region.start() < intersection.start()) { // Yes
     // Try to coalesce small pages into a large one.
     if (UseLargePages && page_size() >= alignment()) {
-      HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
+      HeapWord* p = align_up(intersection.start(), alignment());
       if (new_region.contains(p)
           && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
         if (intersection.contains(p)) {
@@ -504,7 +504,7 @@
   if (intersection.end() < new_region.end()) { // Yes
     // Try to coalesce small pages into a large one.
     if (UseLargePages && page_size() >= alignment()) {
-      HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
+      HeapWord* p = align_down(intersection.end(), alignment());
       if (new_region.contains(p)
           && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
         if (intersection.contains(p)) {
@@ -546,11 +546,11 @@
             HeapWord* start = invalid_region->start();
             HeapWord* end = invalid_region->end();
             if (UseLargePages && page_size() >= alignment()) {
-              HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
+              HeapWord *p = align_down(start, alignment());
               if (new_region.contains(p)) {
                 start = p;
               }
-              p = (HeapWord*)round_to((intptr_t) end, alignment());
+              p = align_up(end, alignment());
               if (new_region.contains(end)) {
                 end = p;
               }
@@ -581,8 +581,8 @@
   // Compute chunk sizes
   size_t prev_page_size = page_size();
   set_page_size(UseLargePages ? alignment() : os::vm_page_size());
-  HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
-  HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
+  HeapWord* rounded_bottom = align_up(bottom(), page_size());
+  HeapWord* rounded_end = align_down(end(), page_size());
   size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
 
   // Try small pages if the chunk size is too small
@@ -593,8 +593,8 @@
       vm_exit_during_initialization("Failed initializing NUMA with large pages. Too small heap size");
     }
     set_page_size(os::vm_page_size());
-    rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
-    rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
+    rounded_bottom = align_up(bottom(), page_size());
+    rounded_end = align_down(end(), page_size());
     base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
   }
   guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
@@ -725,7 +725,7 @@
   for (int i = 0; i < lgrp_spaces()->length();) {
     LGRPSpace *ls = lgrp_spaces()->at(i);
     MutableSpace *s = ls->space();
-    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
+    HeapWord *top = MAX2(align_down(s->top(), page_size()), s->bottom());
 
     if (s->contains(value)) {
       // Check if setting the chunk's top to a given value would create a hole less than
@@ -926,8 +926,8 @@
 // Scan pages and gather stats about page placement and size.
 void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
   clear_space_stats();
-  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
-  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
+  char *start = (char*)align_up(space()->bottom(), page_size);
+  char* end = (char*)align_down(space()->end(), page_size);
   if (start < end) {
     for (char *p = start; p < end;) {
       os::page_info info;
@@ -963,8 +963,8 @@
 // will be more successful.
 void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
 {
-  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
-  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);
+  char* range_start = (char*)align_up(space()->bottom(), page_size);
+  char* range_end = (char*)align_down(space()->end(), page_size);
 
   if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
     set_last_page_scanned(range_start);
--- a/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -44,8 +44,8 @@
 void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
   if (!mr.is_empty()) {
     size_t page_size = UseLargePages ? alignment() : os::vm_page_size();
-    HeapWord *start = (HeapWord*)round_to((intptr_t) mr.start(), page_size);
-    HeapWord *end =  (HeapWord*)round_down((intptr_t) mr.end(), page_size);
+    HeapWord *start = align_up(mr.start(), page_size);
+    HeapWord *end =   align_down(mr.end(), page_size);
     if (end > start) {
       size_t size = pointer_delta(end, start, sizeof(char));
       if (clear_space) {
--- a/hotspot/src/share/vm/interpreter/bytecode.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecode.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -44,7 +44,7 @@
   // Address computation
   address addr_at            (int offset)        const     { return (address)_bcp + offset; }
   u_char byte_at(int offset) const               { return *addr_at(offset); }
-  address aligned_addr_at    (int offset)        const     { return (address)round_to((intptr_t)addr_at(offset), jintSize); }
+  address aligned_addr_at    (int offset)        const     { return align_up(addr_at(offset), jintSize); }
 
   // Word access:
   int     get_Java_u2_at     (int offset)        const     { return Bytes::get_Java_u2(addr_at(offset)); }
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -50,7 +50,7 @@
   Bytecodes::Code _code;
   address   _next_pc;                // current decoding position
 
-  void      align()                  { _next_pc = (address)round_to((intptr_t)_next_pc, sizeof(jint)); }
+  void      align()                  { _next_pc = align_up(_next_pc, sizeof(jint)); }
   int       get_byte()               { return *(jbyte*) _next_pc++; }  // signed
   short     get_short()              { short i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
   int       get_int()                { int i=Bytes::get_Java_u4(_next_pc); _next_pc+=4; return i; }
--- a/hotspot/src/share/vm/interpreter/bytecodes.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodes.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -93,7 +93,7 @@
     }
     return wide_length_for(cast(*(bcp + 1)));
   case _tableswitch:
-    { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize);
+    { address aligned_bcp = align_up(bcp + 1, jintSize);
       if (end != NULL && aligned_bcp + 3*jintSize >= end) {
         return -1; // don't read past end of code buffer
       }
@@ -108,7 +108,7 @@
   case _lookupswitch:      // fall through
   case _fast_binaryswitch: // fall through
   case _fast_linearswitch:
-    { address aligned_bcp = (address)round_to((intptr_t)bcp + 1, jintSize);
+    { address aligned_bcp = align_up(bcp + 1, jintSize);
       if (end != NULL && aligned_bcp + 2*jintSize >= end) {
         return -1; // don't read past end of code buffer
       }
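
For the switch bytecodes, the operands start at the next 4-byte boundary after
the opcode, so up to three padding bytes may follow bcp. A sketch of the
aligned_bcp computation (the code base address is invented and assumed to be
jint-aligned, as method bytecodes are in HotSpot):

    #include <cassert>
    #include <cstdint>

    uintptr_t align_up_p(uintptr_t x, uintptr_t a) { return (x + a - 1) & ~(a - 1); }

    int main() {
      const uintptr_t jint_size = 4;
      uintptr_t code_base = 0x4000;  // assumed 4-byte aligned
      for (uintptr_t off = 0; off < 4; off++) {
        uintptr_t bcp = code_base + 10 + off;  // position of a tableswitch opcode
        uintptr_t aligned_bcp = align_up_p(bcp + 1, jint_size);
        assert(aligned_bcp % jint_size == 0);
        assert(aligned_bcp - (bcp + 1) <= 3);  // 0..3 padding bytes
      }
      return 0;
    }
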
--- a/hotspot/src/share/vm/interpreter/interpreter.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreter.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -59,10 +59,10 @@
 
   // General info/converters
   int     size() const                           { return _size; }
-  static  int code_size_to_size(int code_size)   { return round_to(sizeof(InterpreterCodelet), CodeEntryAlignment) + code_size; }
+  static  int code_size_to_size(int code_size)   { return align_up((int)sizeof(InterpreterCodelet), CodeEntryAlignment) + code_size; }
 
   // Code info
-  address code_begin() const                     { return (address)this + round_to(sizeof(InterpreterCodelet), CodeEntryAlignment); }
+  address code_begin() const                     { return (address)this + align_up(sizeof(InterpreterCodelet), CodeEntryAlignment); }
   address code_end() const                       { return (address)this + size(); }
 
   // Debugging
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -1228,8 +1228,7 @@
       // create handler if necessary
       if (handler_index < 0) {
         ResourceMark rm;
-        ptrdiff_t align_offset = (address)
-          round_to((intptr_t)_buffer, CodeEntryAlignment) - (address)_buffer;
+        ptrdiff_t align_offset = align_up(_buffer, CodeEntryAlignment) - (address)_buffer;
         CodeBuffer buffer((address)(_buffer + align_offset),
                           SignatureHandlerLibrary::buffer_size - align_offset);
         InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
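
align_offset above is the number of padding bytes needed to bring _buffer up to
a CodeEntryAlignment boundary; with the pointer-aware align_up, the intptr_t
round-trip casts disappear. A standalone sketch (the alignment value and the
local align_up_ptr helper are illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    template <typename T>
    T* align_up_ptr(T* p, uintptr_t a) {
      return (T*)(((uintptr_t)p + a - 1) & ~(a - 1));
    }

    int main() {
      const uintptr_t kCodeEntryAlignment = 32;  // illustrative value
      alignas(64) unsigned char storage[128];
      unsigned char* buffer = storage + 5;       // deliberately misaligned
      ptrdiff_t align_offset = align_up_ptr(buffer, kCodeEntryAlignment) - buffer;
      assert(align_offset == 27);                // 5 + 27 == 32
      assert((uintptr_t)(buffer + align_offset) % kCodeEntryAlignment == 0);
      return 0;
    }
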
--- a/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -743,7 +743,7 @@
   // section itself so they don't need to be accounted for in the
   // locs_buffer above.
   int stubs_size = estimate_stubs_size(CHECK_OK);
-  int total_size = round_to(_code_size, buffer.insts()->alignment()) + round_to(_constants_size, buffer.consts()->alignment()) + round_to(stubs_size, buffer.stubs()->alignment());
+  int total_size = align_up(_code_size, buffer.insts()->alignment()) + align_up(_constants_size, buffer.consts()->alignment()) + align_up(stubs_size, buffer.stubs()->alignment());
 
   if (check_size && total_size > JVMCINMethodSizeLimit) {
     return JVMCIEnv::code_too_large;
--- a/hotspot/src/share/vm/memory/virtualspace.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/memory/virtualspace.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -650,8 +650,8 @@
   _upper_alignment  = os::vm_page_size();
 
   // End of each region
-  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
-  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
+  _lower_high_boundary = align_up(low_boundary(), middle_alignment());
+  _middle_high_boundary = align_down(high_boundary(), middle_alignment());
   _upper_high_boundary = high_boundary();
 
   // High address of each region
@@ -812,9 +812,9 @@
   // alignment will always be default page size.  middle alignment will be
   // LargePageSizeInBytes if the actual size of the virtual space is in
   // fact larger than LargePageSizeInBytes.
-  char* aligned_lower_new_high =  (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
-  char* aligned_middle_new_high = (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
-  char* aligned_upper_new_high =  (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
+  char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
+  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
+  char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 
   // Determine which regions need to grow in this expand_by call.
   // If you are growing in the lower region, high() must be in that
@@ -898,12 +898,9 @@
     MAX2(unaligned_new_high, low_boundary());
 
   // Align address to region's alignment
-  char* aligned_upper_new_high =
-    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
-  char* aligned_middle_new_high =
-    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
-  char* aligned_lower_new_high =
-    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
+  char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
+  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
+  char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 
   // Determine which regions need to shrink
   size_t upper_needs = 0;
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -238,10 +238,8 @@
 
       // This code could be simplified, but by keeping array_header_in_bytes
       // in units of bytes and doing it this way we can round up just once,
-      // skipping the intermediate round to HeapWordSize.  Cast the result
-      // of round_to to size_t to guarantee unsigned division == right shift.
-      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
-        HeapWordSize);
+      // skipping the intermediate round to HeapWordSize.
+      s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);
 
       // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
       // of an "old copy" of an object array in the young gen so it indicates
--- a/hotspot/src/share/vm/opto/buildOopMap.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/opto/buildOopMap.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -566,7 +566,7 @@
   Arena *A = Thread::current()->resource_area();
   Block_List worklist;          // Worklist of pending blocks
 
-  int max_reg_ints = round_to(max_reg, BitsPerInt)>>LogBitsPerInt;
+  int max_reg_ints = align_up(max_reg, BitsPerInt)>>LogBitsPerInt;
   Dict *safehash = NULL;        // Used for assert only
   // Compute a backwards liveness per register.  Needs a bitarray of
   // #blocks x (#registers, rounded up to ints)
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -607,7 +607,7 @@
   assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
 
   // This frame must preserve the required fp alignment
-  _framesize = round_to(_framesize, Matcher::stack_alignment_in_slots());
+  _framesize = align_up(_framesize, Matcher::stack_alignment_in_slots());
   assert(_framesize <= 1000000, "sanity check");
 #ifndef PRODUCT
   _total_framesize += _framesize;
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -3562,8 +3562,8 @@
 
   // --- Size Computation ---
   // array_size = round_to_heap(array_header + (length << elem_shift));
-  // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
-  // and round_to(x, y) == ((x + y-1) & ~(y-1))
+  // where round_to_heap(x) == align_up(x, MinObjAlignmentInBytes)
+  // and align_up(x, y) == ((x + y-1) & ~(y-1))
   // The rounding mask is strength-reduced, if possible.
   int round_mask = MinObjAlignmentInBytes - 1;
   Node* header_size = NULL;
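
The identity in the comment is the usual strength reduction: for a power-of-two
y, adding y-1 and masking off the low bits equals a true round-up. A quick
exhaustive check over small values (y = 8 is a stand-in for
MinObjAlignmentInBytes):

    #include <cassert>

    int main() {
      const int y = 8;  // must be a power of two for the mask form to be valid
      for (int x = 0; x < 64; x++) {
        int masked = (x + y - 1) & ~(y - 1);  // strength-reduced form
        int naive  = ((x + y - 1) / y) * y;   // round up via divide/multiply
        assert(masked == naive);
      }
      return 0;
    }
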
--- a/hotspot/src/share/vm/opto/macroArrayCopy.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/opto/macroArrayCopy.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -788,7 +788,7 @@
   intptr_t end_con   = _igvn.find_intptr_t_con(dest_size, -1);
   if (slice_idx_con >= 0 && slice_len_con >= 0) {
     assert(end_con < 0, "not two cons");
-    end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
+    end_con = align_up(abase + ((slice_idx_con + slice_len_con) << scale),
                        BytesPerLong);
   }
 
--- a/hotspot/src/share/vm/opto/matcher.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -139,7 +139,7 @@
 OptoReg::Name Compile::compute_old_SP() {
   int fixed    = fixed_slots();
   int preserve = in_preserve_stack_slots();
-  return OptoReg::stack2reg(round_to(fixed + preserve, Matcher::stack_alignment_in_slots()));
+  return OptoReg::stack2reg(align_up(fixed + preserve, (int)Matcher::stack_alignment_in_slots()));
 }
 
 
@@ -286,7 +286,7 @@
   // particular, in the spill area) which look aligned will in fact be
   // aligned relative to the stack pointer in the target machine.  Double
   // stack slots will always be allocated aligned.
-  _new_SP = OptoReg::Name(round_to(_in_arg_limit, RegMask::SlotsPerLong));
+  _new_SP = OptoReg::Name(align_up(_in_arg_limit, (int)RegMask::SlotsPerLong));
 
   // Compute highest outgoing stack argument as
   //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -502,7 +502,7 @@
 UNSAFE_ENTRY(jlong, Unsafe_AllocateMemory0(JNIEnv *env, jobject unsafe, jlong size)) {
   size_t sz = (size_t)size;
 
-  sz = round_to(sz, HeapWordSize);
+  sz = align_up(sz, HeapWordSize);
   void* x = os::malloc(sz, mtInternal);
 
   return addr_to_java(x);
@@ -511,7 +511,7 @@
 UNSAFE_ENTRY(jlong, Unsafe_ReallocateMemory0(JNIEnv *env, jobject unsafe, jlong addr, jlong size)) {
   void* p = addr_from_java(addr);
   size_t sz = (size_t)size;
-  sz = round_to(sz, HeapWordSize);
+  sz = align_up(sz, HeapWordSize);
 
   void* x = os::realloc(p, sz, mtInternal);
 
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -1340,7 +1340,7 @@
   BufferBlob* blob;
   int full_size = CodeBlob::align_code_offset(sizeof(BufferBlob));
   if (full_size < size) {
-    full_size += round_to(size - full_size, oopSize);
+    full_size += align_up(size - full_size, oopSize);
   }
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
--- a/hotspot/src/share/vm/runtime/icache.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/runtime/icache.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -99,7 +99,7 @@
     start -= line_offset;
     nbytes += line_offset;
   }
-  call_flush_stub(start, round_to(nbytes, ICache::line_size) >>
+  call_flush_stub(start, align_up(nbytes, (int)ICache::line_size) >>
                          ICache::log2_line_size);
 }
 
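The flush path first extends the range downward to a cache-line boundary, then
rounds the byte count up to whole lines and shifts to get a line count. A
sketch with an invented 64-byte line (line_size and log2_line_size are
assumptions, not a particular ICache geometry):

    #include <cassert>

    int main() {
      const int line_size = 64, log2_line_size = 6;
      int start = 0x1000 + 24;                    // not line-aligned
      int nbytes = 100;
      int line_offset = start & (line_size - 1);  // 24
      start  -= line_offset;                      // back up to the line start
      nbytes += line_offset;                      // now 124 bytes to cover
      int lines = ((nbytes + line_size - 1) & ~(line_size - 1)) >> log2_line_size;
      assert(lines == 2);                         // 124 -> 128 bytes -> 2 lines
      return 0;
    }
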
--- a/hotspot/src/share/vm/runtime/stubRoutines.cpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp	Thu Apr 13 09:57:51 2017 +0200
@@ -216,8 +216,8 @@
   }
   // C++ does not guarantee jlong[] array alignment to 8 bytes.
   // Use middle of array to check that memory before it is not modified.
-  address buffer  = (address) round_to((intptr_t)&lbuffer[4], BytesPerLong);
-  address buffer2 = (address) round_to((intptr_t)&lbuffer2[4], BytesPerLong);
+  address buffer  = align_up((address)&lbuffer[4], BytesPerLong);
+  address buffer2 = align_up((address)&lbuffer2[4], BytesPerLong);
   // do an aligned copy
   ((arraycopy_fn)func)(buffer, buffer2, 0);
   for (i = 0; i < sizeof(lbuffer); i++) {
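
The trick above: since C++ only guarantees jlong alignment for a jlong[], the
test takes the middle of an oversized array and aligns it up, leaving valid
slack on both sides of the aligned pointer. A sketch of that invariant:

    #include <cassert>
    #include <cstdint>

    int main() {
      long long lbuffer[8];  // stand-in for the jlong[] test buffers
      uintptr_t mid = (uintptr_t)&lbuffer[4];
      unsigned char* buffer = (unsigned char*)((mid + 7) & ~(uintptr_t)7);
      assert((uintptr_t)buffer % 8 == 0);
      assert(buffer >= (unsigned char*)&lbuffer[4] &&
             buffer <  (unsigned char*)&lbuffer[6]);  // slack on both sides
      return 0;
    }
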
--- a/hotspot/src/share/vm/utilities/copy.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/utilities/copy.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -204,7 +204,7 @@
     assert_params_ok(from, to, LogHeapWordSize);
     assert_byte_count_ok(byte_count, HeapWordSize);
 
-    size_t count = (size_t)round_to(byte_count, HeapWordSize) >> LogHeapWordSize;
+    size_t count = align_up(byte_count, HeapWordSize) >> LogHeapWordSize;
     assert(to <= from || from + count <= to, "do not overwrite source data");
 
     while (count-- > 0) {
@@ -218,7 +218,7 @@
     assert_params_ok(from, to, LogHeapWordSize);
     assert_byte_count_ok(byte_count, HeapWordSize);
 
-    size_t count = (size_t)round_to(byte_count, HeapWordSize) >> LogHeapWordSize;
+    size_t count = align_up(byte_count, HeapWordSize) >> LogHeapWordSize;
     assert(from <= to || to + count <= from, "do not overwrite source data");
 
     from += count - 1;
@@ -353,7 +353,7 @@
 
   static void assert_byte_count_ok(size_t byte_count, size_t unit_size) {
 #ifdef ASSERT
-    if ((size_t)round_to(byte_count, unit_size) != byte_count) {
+    if (!is_aligned(byte_count, unit_size)) {
       basic_fatal("byte count must be aligned");
     }
 #endif
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Tue Jul 04 15:58:10 2017 +0200
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Apr 13 09:57:51 2017 +0200
@@ -520,20 +520,38 @@
 
 #define is_aligned_(size, alignment) ((size) == (align_up_(size, alignment)))
 
+// Temporary declaration until this file has been restructured.
+template <typename T>
+bool is_power_of_2_t(T x) {
+  return (x != T(0)) && ((x & (x - 1)) == T(0));
+}
+
 // Helpers to align sizes and check for alignment
 
 template <typename T, typename A>
 inline T align_up(T size, A alignment) {
-  return align_up_(size, alignment);
+  assert(is_power_of_2_t(alignment), "must be a power of 2: " UINT64_FORMAT, (uint64_t)alignment);
+
+  T ret = align_up_(size, alignment);
+  assert(is_aligned_(ret, alignment), "must be aligned: " UINT64_FORMAT, (uint64_t)ret);
+
+  return ret;
 }
 
 template <typename T, typename A>
 inline T align_down(T size, A alignment) {
-  return align_down_(size, alignment);
+  assert(is_power_of_2_t(alignment), "must be a power of 2: " UINT64_FORMAT, (uint64_t)alignment);
+
+  T ret = align_down_(size, alignment);
+  assert(is_aligned_(ret, alignment), "must be aligned: " UINT64_FORMAT, (uint64_t)ret);
+
+  return ret;
 }
 
 template <typename T, typename A>
 inline bool is_aligned(T size, A alignment) {
+  assert(is_power_of_2_t(alignment), "must be a power of 2: " UINT64_FORMAT, (uint64_t)alignment);
+
   return is_aligned_(size, alignment);
 }
 
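Unlike the removed round_to/round_down below, which were pinned to intptr_t and
forced a cast at every pointer call site, these templates deduce T from the
first argument, so integer types keep their width and pointers can be aligned
directly. A standalone sketch of the semantics (simplified stand-ins; the real
templates also assert the power-of-two and alignment postconditions shown
above):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    template <typename T, typename A>
    T my_align_up(T size, A alignment) {
      return (size + (alignment - 1)) & ~(T(alignment) - 1);
    }

    template <typename T, typename A>
    T my_align_down(T size, A alignment) {
      return size & ~(T(alignment) - 1);
    }

    template <typename T, typename A>
    T* my_align_up(T* ptr, A alignment) {  // pointer form, no manual casts
      return (T*)my_align_up((uintptr_t)ptr, alignment);
    }

    int main() {
      assert(my_align_up(100, 16) == 112);           // int stays int
      assert(my_align_down(size_t(100), 16) == 96);  // size_t stays size_t
      char buf[64];
      char* p = my_align_up(&buf[1], 8);
      assert((uintptr_t)p % 8 == 0);
      return 0;
    }
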
@@ -1205,22 +1223,6 @@
   return log2_long(x);
 }
 
-
-// returns integer round-up to the nearest multiple of s (s must be a power of two)
-inline intptr_t round_to(intptr_t x, uintx s) {
-  assert(is_power_of_2(s), "s must be a power of 2: " UINTX_FORMAT, s);
-  const uintx m = s - 1;
-  return mask_bits(x + m, ~m);
-}
-
-// returns integer round-down to the nearest multiple of s (s must be a power of two)
-inline intptr_t round_down(intptr_t x, uintx s) {
-  assert(is_power_of_2(s), "s must be a power of 2: " UINTX_FORMAT, s);
-  const uintx m = s - 1;
-  return mask_bits(x, ~m);
-}
-
-
 inline bool is_odd (intx x) { return x & 1;      }
 inline bool is_even(intx x) { return !is_odd(x); }