8146410: Interpreter functions are declared and defined in the wrong files
author coleenp
Tue, 12 Jan 2016 13:14:41 -0500
changeset 35479 62c12ca7a45e
parent 35474 8333d76c7fee
child 35480 6ed8e1b70803
8146410: Interpreter functions are declared and defined in the wrong files
Summary: Moved functions to the correct files.
Reviewed-by: goetz, aph, twisti, mockner
hotspot/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp
hotspot/src/cpu/aarch64/vm/interpreter_aarch64.cpp
hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp
hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
hotspot/src/cpu/ppc/vm/abstractInterpreter_ppc.cpp
hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp
hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp
hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp
hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp
hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp
hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
hotspot/src/cpu/x86/vm/abstractInterpreter_x86.cpp
hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp
hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp
hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp
hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp
hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_32.cpp
hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_64.cpp
hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp
hotspot/src/cpu/zero/vm/abstractInterpreter_zero.cpp
hotspot/src/cpu/zero/vm/bytecodeInterpreter_zero.cpp
hotspot/src/cpu/zero/vm/cppInterpreterGenerator_zero.cpp
hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
hotspot/src/cpu/zero/vm/interp_masm_zero.cpp
hotspot/src/cpu/zero/vm/interpreter_zero.cpp
hotspot/src/cpu/zero/vm/register_definitions_zero.cpp
hotspot/src/cpu/zero/vm/stack_zero.cpp
hotspot/src/cpu/zero/vm/stack_zero.hpp
hotspot/src/cpu/zero/vm/stack_zero.inline.hpp
hotspot/src/os/aix/vm/os_aix.inline.hpp
hotspot/src/os/bsd/vm/os_bsd.inline.hpp
hotspot/src/os/linux/vm/os_linux.inline.hpp
hotspot/src/os/solaris/vm/os_solaris.inline.hpp
hotspot/src/os/windows/vm/os_windows.inline.hpp
hotspot/src/share/vm/interpreter/abstractInterpreter.cpp
hotspot/src/share/vm/interpreter/abstractInterpreter.hpp
hotspot/src/share/vm/interpreter/cppInterpreter.cpp
hotspot/src/share/vm/interpreter/cppInterpreterGenerator.cpp
hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp
hotspot/src/share/vm/interpreter/interpreter.cpp
hotspot/src/share/vm/interpreter/templateInterpreter.cpp
hotspot/src/share/vm/interpreter/templateInterpreter.hpp
hotspot/src/share/vm/interpreter/templateInterpreterGenerator.cpp
hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp
hotspot/src/share/vm/runtime/javaCalls.cpp
hotspot/src/share/vm/runtime/os.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/interpreter.hpp"
+#include "oops/constMethod.hpp"
+#include "oops/method.hpp"
+#include "runtime/frame.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
+         "index out of bounds");
+  return i;
+}
+
+// These should never be compiled since the interpreter will prefer
+// the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  switch (method_kind(m)) {
+    case Interpreter::java_lang_math_sin     : // fall thru
+    case Interpreter::java_lang_math_cos     : // fall thru
+    case Interpreter::java_lang_math_tan     : // fall thru
+    case Interpreter::java_lang_math_abs     : // fall thru
+    case Interpreter::java_lang_math_log     : // fall thru
+    case Interpreter::java_lang_math_log10   : // fall thru
+    case Interpreter::java_lang_math_sqrt    : // fall thru
+    case Interpreter::java_lang_math_pow     : // fall thru
+    case Interpreter::java_lang_math_exp     :
+      return false;
+    default:
+      return true;
+  }
+}
+
+// How much stack a method activation needs in words.
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
+  const int entry_size = frame::interpreter_frame_monitor_size();
+
+  // total overhead size: entry_size + (saved rfp thru expr stack
+  // bottom).  be sure to change this if you add/subtract anything
+  // to/from the overhead area
+  const int overhead_size =
+    -(frame::interpreter_frame_initial_sp_offset) + entry_size;
+
+  const int stub_code = frame::entry_frame_after_call_words;
+  const int method_stack = (method->max_locals() + method->max_stack()) *
+                           Interpreter::stackElementWords;
+  return (overhead_size + method_stack + stub_code);
+}
+
+// asm based interpreter deoptimization helpers
+int AbstractInterpreter::size_activation(int max_stack,
+                                         int temps,
+                                         int extra_args,
+                                         int monitors,
+                                         int callee_params,
+                                         int callee_locals,
+                                         bool is_top_frame) {
+  // Note: This calculation must exactly parallel the frame setup
+  // in TemplateInterpreterGenerator::generate_method_entry.
+
+  // fixed size of an interpreter frame:
+  int overhead = frame::sender_sp_offset -
+                 frame::interpreter_frame_initial_sp_offset;
+  // Our locals were accounted for by the caller (or last_frame_adjust
+  // on the transition). Since the callee parameters already account
+  // for the callee's params we only need to account for the extra
+  // locals.
+  int size = overhead +
+         (callee_locals - callee_params)*Interpreter::stackElementWords +
+         monitors * frame::interpreter_frame_monitor_size() +
+         temps* Interpreter::stackElementWords + extra_args;
+
+  // On AArch64 we always keep the stack pointer 16-aligned, so we
+  // must round up here.
+  size = round_to(size, 2);
+
+  return size;
+}
+
+void AbstractInterpreter::layout_activation(Method* method,
+                                            int tempcount,
+                                            int popframe_extra_args,
+                                            int moncount,
+                                            int caller_actual_parameters,
+                                            int callee_param_count,
+                                            int callee_locals,
+                                            frame* caller,
+                                            frame* interpreter_frame,
+                                            bool is_top_frame,
+                                            bool is_bottom_frame) {
+  // The frame interpreter_frame is guaranteed to be the right size,
+  // as determined by a previous call to the size_activation() method.
+  // It is also guaranteed to be walkable even though it is in a
+  // skeletal state
+
+  int max_locals = method->max_locals() * Interpreter::stackElementWords;
+  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
+    Interpreter::stackElementWords;
+
+#ifdef ASSERT
+  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
+#endif
+
+  interpreter_frame->interpreter_frame_set_method(method);
+  // NOTE the difference in using sender_sp and
+  // interpreter_frame_sender_sp interpreter_frame_sender_sp is
+  // the original sp of the caller (the unextended_sp) and
+  // sender_sp is fp+8/16 (32bit/64bit) XXX
+  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
+
+#ifdef ASSERT
+  if (caller->is_interpreted_frame()) {
+    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
+  }
+#endif
+
+  interpreter_frame->interpreter_frame_set_locals(locals);
+  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
+  BasicObjectLock* monbot = montop - moncount;
+  interpreter_frame->interpreter_frame_set_monitor_end(monbot);
+
+  // Set last_sp
+  intptr_t*  esp = (intptr_t*) monbot -
+    tempcount*Interpreter::stackElementWords -
+    popframe_extra_args;
+  interpreter_frame->interpreter_frame_set_last_sp(esp);
+
+  // All frames but the initial (oldest) interpreter frame we fill in have
+  // a value for sender_sp that allows walking the stack but isn't
+  // truly correct. Correct the value here.
+  if (extra_locals != 0 &&
+      interpreter_frame->sender_sp() ==
+      interpreter_frame->interpreter_frame_sender_sp()) {
+    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
+                                                       extra_locals);
+  }
+  *interpreter_frame->interpreter_frame_cache_addr() =
+    method->constants()->cache();
+}
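
The new abstractInterpreter_aarch64.cpp now carries only the platform sizing and layout helpers that the shared AbstractInterpreter code calls. As a standalone illustration of the arithmetic in size_activation() above, the sketch below redoes the same sum with placeholder frame constants (the real values come from frame_aarch64.hpp); round_to(size, 2) is written out as an explicit round-up to an even word count.

// Standalone sketch of the size_activation() arithmetic above.  The frame
// constants are illustrative placeholders, not the real AArch64 offsets.
#include <cstdio>

static const int kOverheadWords     = 8;  // placeholder for sender_sp_offset - interpreter_frame_initial_sp_offset
static const int kMonitorWords      = 2;  // placeholder for frame::interpreter_frame_monitor_size()
static const int kStackElementWords = 1;  // placeholder for Interpreter::stackElementWords

static int size_activation_sketch(int temps, int extra_args, int monitors,
                                  int callee_params, int callee_locals) {
  int size = kOverheadWords +
             (callee_locals - callee_params) * kStackElementWords +
             monitors * kMonitorWords +
             temps * kStackElementWords +
             extra_args;
  // AArch64 keeps sp 16-byte aligned, so round up to an even number of
  // 8-byte words (what round_to(size, 2) does in the real code).
  size = (size + 1) & ~1;
  return size;
}

int main() {
  // e.g. 3 expression-stack temps, no extra args, 1 monitor, 2 params, 5 locals
  printf("frame size: %d words\n", size_activation_sketch(3, 0, 1, 2, 5));
  return 0;
}
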
--- a/hotspot/src/cpu/aarch64/vm/interpreter_aarch64.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,260 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateInterpreterGenerator.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "prims/methodHandles.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-
-#define __ _masm->
-
-
-address AbstractInterpreterGenerator::generate_slow_signature_handler() {
-  address entry = __ pc();
-
-  __ andr(esp, esp, -16);
-  __ mov(c_rarg3, esp);
-  // rmethod
-  // rlocals
-  // c_rarg3: first stack arg - wordSize
-
-  // adjust sp
-  __ sub(sp, c_rarg3, 18 * wordSize);
-  __ str(lr, Address(__ pre(sp, -2 * wordSize)));
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::slow_signature_handler),
-             rmethod, rlocals, c_rarg3);
-
-  // r0: result handler
-
-  // Stack layout:
-  // rsp: return address           <- sp
-  //      1 garbage
-  //      8 integer args (if static first is unused)
-  //      1 float/double identifiers
-  //      8 double args
-  //        stack args              <- esp
-  //        garbage
-  //        expression stack bottom
-  //        bcp (NULL)
-  //        ...
-
-  // Restore LR
-  __ ldr(lr, Address(__ post(sp, 2 * wordSize)));
-
-  // Do FP first so we can use c_rarg3 as temp
-  __ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers
-
-  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
-    const FloatRegister r = as_FloatRegister(i);
-
-    Label d, done;
-
-    __ tbnz(c_rarg3, i, d);
-    __ ldrs(r, Address(sp, (10 + i) * wordSize));
-    __ b(done);
-    __ bind(d);
-    __ ldrd(r, Address(sp, (10 + i) * wordSize));
-    __ bind(done);
-  }
-
-  // c_rarg0 contains the result from the call of
-  // InterpreterRuntime::slow_signature_handler so we don't touch it
-  // here.  It will be loaded with the JNIEnv* later.
-  __ ldr(c_rarg1, Address(sp, 1 * wordSize));
-  for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
-    Register rm = as_Register(i), rn = as_Register(i+1);
-    __ ldp(rm, rn, Address(sp, i * wordSize));
-  }
-
-  __ add(sp, sp, 18 * wordSize);
-  __ ret(lr);
-
-  return entry;
-}
-
-
-//
-// Various method entries
-//
-
-address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
-  // rmethod: Method*
-  // r13: sender sp
-  // esp: args
-
-  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
-
-  // These don't need a safepoint check because they aren't virtually
-  // callable. We won't enter these intrinsics from compiled code.
-  // If in the future we added an intrinsic which was virtually callable
-  // we'd have to worry about how to safepoint so that this code is used.
-
-  // mathematical functions inlined by compiler
-  // (interpreter must provide identical implementation
-  // in order to avoid monotonicity bugs when switching
-  // from interpreter to compiler in the middle of some
-  // computation)
-  //
-  // stack:
-  //        [ arg ] <-- esp
-  //        [ arg ]
-  // retaddr in lr
-
-  address entry_point = NULL;
-  Register continuation = lr;
-  switch (kind) {
-  case Interpreter::java_lang_math_abs:
-    entry_point = __ pc();
-    __ ldrd(v0, Address(esp));
-    __ fabsd(v0, v0);
-    __ mov(sp, r13); // Restore caller's SP
-    break;
-  case Interpreter::java_lang_math_sqrt:
-    entry_point = __ pc();
-    __ ldrd(v0, Address(esp));
-    __ fsqrtd(v0, v0);
-    __ mov(sp, r13);
-    break;
-  case Interpreter::java_lang_math_sin :
-  case Interpreter::java_lang_math_cos :
-  case Interpreter::java_lang_math_tan :
-  case Interpreter::java_lang_math_log :
-  case Interpreter::java_lang_math_log10 :
-  case Interpreter::java_lang_math_exp :
-    entry_point = __ pc();
-    __ ldrd(v0, Address(esp));
-    __ mov(sp, r13);
-    __ mov(r19, lr);
-    continuation = r19;  // The first callee-saved register
-    generate_transcendental_entry(kind, 1);
-    break;
-  case Interpreter::java_lang_math_pow :
-    entry_point = __ pc();
-    __ mov(r19, lr);
-    continuation = r19;
-    __ ldrd(v0, Address(esp, 2 * Interpreter::stackElementSize));
-    __ ldrd(v1, Address(esp));
-    __ mov(sp, r13);
-    generate_transcendental_entry(kind, 2);
-    break;
-  default:
-    ;
-  }
-  if (entry_point) {
-    __ br(continuation);
-  }
-
-  return entry_point;
-}
-
-  // double trigonometrics and transcendentals
-  // static jdouble dsin(jdouble x);
-  // static jdouble dcos(jdouble x);
-  // static jdouble dtan(jdouble x);
-  // static jdouble dlog(jdouble x);
-  // static jdouble dlog10(jdouble x);
-  // static jdouble dexp(jdouble x);
-  // static jdouble dpow(jdouble x, jdouble y);
-
-void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
-  address fn;
-  switch (kind) {
-  case Interpreter::java_lang_math_sin :
-    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
-    break;
-  case Interpreter::java_lang_math_cos :
-    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
-    break;
-  case Interpreter::java_lang_math_tan :
-    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
-    break;
-  case Interpreter::java_lang_math_log :
-    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
-    break;
-  case Interpreter::java_lang_math_log10 :
-    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
-    break;
-  case Interpreter::java_lang_math_exp :
-    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
-    break;
-  case Interpreter::java_lang_math_pow :
-    fpargs = 2;
-    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
-    break;
-  default:
-    ShouldNotReachHere();
-  }
-  const int gpargs = 0, rtype = 3;
-  __ mov(rscratch1, fn);
-  __ blrt(rscratch1, gpargs, fpargs, rtype);
-}
-
-// Abstract method entry
-// Attempt to execute abstract method. Throw exception
-address TemplateInterpreterGenerator::generate_abstract_entry(void) {
-  // rmethod: Method*
-  // r13: sender SP
-
-  address entry_point = __ pc();
-
-  // abstract method entry
-
-  //  pop return address, reset last_sp to NULL
-  __ empty_expression_stack();
-  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
-  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
-
-  // throw exception
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
-                             InterpreterRuntime::throw_AbstractMethodError));
-  // the call_VM checks for exception, so we should never return here.
-  __ should_not_reach_here();
-
-  return entry_point;
-}
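
The file removed here is not dropped outright: generate_slow_signature_handler(), the math entries, and generate_abstract_entry() reappear, re-homed on TemplateInterpreterGenerator, in the next diff. The one piece of the signature handler that rewards a second look is the float/double-identifier word: bit i tells the stub whether FP argument slot i holds a float (the ldrs case) or a double (the ldrd case). Below is a minimal C++ sketch of that selection; the slot layout and names are invented for illustration, and the real stub keeps a float single-precision in the register rather than widening it as the sketch does.

// Sketch of the per-register float/double choice made by the
// tbnz/ldrs/ldrd loop in generate_slow_signature_handler().
#include <cstdint>
#include <cstring>

static double load_fp_arg_sketch(const uint64_t* fp_slots,
                                 uint32_t fp_ident_bits, int i) {
  double d;
  if (fp_ident_bits & (1u << i)) {
    // bit i set: slot i holds a full 8-byte double (ldrd)
    std::memcpy(&d, &fp_slots[i], sizeof(double));
  } else {
    // bit i clear: slot i holds a 4-byte float (ldrs); widened here only so
    // the sketch has a single return type
    float f;
    std::memcpy(&f, &fp_slots[i], sizeof(float));
    d = static_cast<double>(f);
  }
  return d;
}
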
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -57,6 +57,13 @@
 #include "../../../../../../simulator/simulator.hpp"
 #endif
 
+// Size of interpreter code.  Increase if too small.  Interpreter will
+// fail with a guarantee ("not enough space for interpreter generation");
+// if too small.
+// Run with +PrintInterpreter to get the VM to print out the size.
+// Max size with JVMTI
+int TemplateInterpreter::InterpreterCodeSize = 200 * 1024;
+
 #define __ _masm->
 
 //-----------------------------------------------------------------------------
@@ -65,6 +72,212 @@
 
 //-----------------------------------------------------------------------------
 
+address TemplateInterpreterGenerator::generate_slow_signature_handler() {
+  address entry = __ pc();
+
+  __ andr(esp, esp, -16);
+  __ mov(c_rarg3, esp);
+  // rmethod
+  // rlocals
+  // c_rarg3: first stack arg - wordSize
+
+  // adjust sp
+  __ sub(sp, c_rarg3, 18 * wordSize);
+  __ str(lr, Address(__ pre(sp, -2 * wordSize)));
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::slow_signature_handler),
+             rmethod, rlocals, c_rarg3);
+
+  // r0: result handler
+
+  // Stack layout:
+  // rsp: return address           <- sp
+  //      1 garbage
+  //      8 integer args (if static first is unused)
+  //      1 float/double identifiers
+  //      8 double args
+  //        stack args              <- esp
+  //        garbage
+  //        expression stack bottom
+  //        bcp (NULL)
+  //        ...
+
+  // Restore LR
+  __ ldr(lr, Address(__ post(sp, 2 * wordSize)));
+
+  // Do FP first so we can use c_rarg3 as temp
+  __ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers
+
+  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
+    const FloatRegister r = as_FloatRegister(i);
+
+    Label d, done;
+
+    __ tbnz(c_rarg3, i, d);
+    __ ldrs(r, Address(sp, (10 + i) * wordSize));
+    __ b(done);
+    __ bind(d);
+    __ ldrd(r, Address(sp, (10 + i) * wordSize));
+    __ bind(done);
+  }
+
+  // c_rarg0 contains the result from the call of
+  // InterpreterRuntime::slow_signature_handler so we don't touch it
+  // here.  It will be loaded with the JNIEnv* later.
+  __ ldr(c_rarg1, Address(sp, 1 * wordSize));
+  for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
+    Register rm = as_Register(i), rn = as_Register(i+1);
+    __ ldp(rm, rn, Address(sp, i * wordSize));
+  }
+
+  __ add(sp, sp, 18 * wordSize);
+  __ ret(lr);
+
+  return entry;
+}
+
+
+//
+// Various method entries
+//
+
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+  // rmethod: Method*
+  // r13: sender sp
+  // esp: args
+
+  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
+
+  // These don't need a safepoint check because they aren't virtually
+  // callable. We won't enter these intrinsics from compiled code.
+  // If in the future we added an intrinsic which was virtually callable
+  // we'd have to worry about how to safepoint so that this code is used.
+
+  // mathematical functions inlined by compiler
+  // (interpreter must provide identical implementation
+  // in order to avoid monotonicity bugs when switching
+  // from interpreter to compiler in the middle of some
+  // computation)
+  //
+  // stack:
+  //        [ arg ] <-- esp
+  //        [ arg ]
+  // retaddr in lr
+
+  address entry_point = NULL;
+  Register continuation = lr;
+  switch (kind) {
+  case Interpreter::java_lang_math_abs:
+    entry_point = __ pc();
+    __ ldrd(v0, Address(esp));
+    __ fabsd(v0, v0);
+    __ mov(sp, r13); // Restore caller's SP
+    break;
+  case Interpreter::java_lang_math_sqrt:
+    entry_point = __ pc();
+    __ ldrd(v0, Address(esp));
+    __ fsqrtd(v0, v0);
+    __ mov(sp, r13);
+    break;
+  case Interpreter::java_lang_math_sin :
+  case Interpreter::java_lang_math_cos :
+  case Interpreter::java_lang_math_tan :
+  case Interpreter::java_lang_math_log :
+  case Interpreter::java_lang_math_log10 :
+  case Interpreter::java_lang_math_exp :
+    entry_point = __ pc();
+    __ ldrd(v0, Address(esp));
+    __ mov(sp, r13);
+    __ mov(r19, lr);
+    continuation = r19;  // The first callee-saved register
+    generate_transcendental_entry(kind, 1);
+    break;
+  case Interpreter::java_lang_math_pow :
+    entry_point = __ pc();
+    __ mov(r19, lr);
+    continuation = r19;
+    __ ldrd(v0, Address(esp, 2 * Interpreter::stackElementSize));
+    __ ldrd(v1, Address(esp));
+    __ mov(sp, r13);
+    generate_transcendental_entry(kind, 2);
+    break;
+  default:
+    ;
+  }
+  if (entry_point) {
+    __ br(continuation);
+  }
+
+  return entry_point;
+}
+
+  // double trigonometrics and transcendentals
+  // static jdouble dsin(jdouble x);
+  // static jdouble dcos(jdouble x);
+  // static jdouble dtan(jdouble x);
+  // static jdouble dlog(jdouble x);
+  // static jdouble dlog10(jdouble x);
+  // static jdouble dexp(jdouble x);
+  // static jdouble dpow(jdouble x, jdouble y);
+
+void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
+  address fn;
+  switch (kind) {
+  case Interpreter::java_lang_math_sin :
+    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
+    break;
+  case Interpreter::java_lang_math_cos :
+    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
+    break;
+  case Interpreter::java_lang_math_tan :
+    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
+    break;
+  case Interpreter::java_lang_math_log :
+    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
+    break;
+  case Interpreter::java_lang_math_log10 :
+    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
+    break;
+  case Interpreter::java_lang_math_exp :
+    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
+    break;
+  case Interpreter::java_lang_math_pow :
+    fpargs = 2;
+    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+  const int gpargs = 0, rtype = 3;
+  __ mov(rscratch1, fn);
+  __ blrt(rscratch1, gpargs, fpargs, rtype);
+}
+
+// Abstract method entry
+// Attempt to execute abstract method. Throw exception
+address TemplateInterpreterGenerator::generate_abstract_entry(void) {
+  // rmethod: Method*
+  // r13: sender SP
+
+  address entry_point = __ pc();
+
+  // abstract method entry
+
+  //  pop return address, reset last_sp to NULL
+  __ empty_expression_stack();
+  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
+  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
+
+  // throw exception
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                             InterpreterRuntime::throw_AbstractMethodError));
+  // the call_VM checks for exception, so we should never return here.
+  __ should_not_reach_here();
+
+  return entry_point;
+}
+
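
The math entries added above hand the heavy lifting to SharedRuntime via generate_transcendental_entry(), whose switch simply picks the runtime routine for the requested kind, with pow as the only two-argument case. The sketch below restates that dispatch in plain C++; the enum and the use of <cmath> are stand-ins for Interpreter::MethodKind and the SharedRuntime::d* functions, chosen only to keep the sketch self-contained.

// Plain-C++ restatement of the kind -> runtime-function dispatch in
// generate_transcendental_entry().  MathKindSketch and <cmath> stand in
// for Interpreter::MethodKind and SharedRuntime::dsin/dcos/... here.
#include <cmath>
#include <cstdlib>

enum MathKindSketch { kSin, kCos, kTan, kLog, kLog10, kExp, kPow };

static double transcendental_sketch(MathKindSketch kind, double x, double y = 0.0) {
  switch (kind) {
  case kSin:   return std::sin(x);
  case kCos:   return std::cos(x);
  case kTan:   return std::tan(x);
  case kLog:   return std::log(x);
  case kLog10: return std::log10(x);
  case kExp:   return std::exp(x);
  case kPow:   return std::pow(x, y);  // the only kind with fpargs == 2
  }
  std::abort();  // mirrors ShouldNotReachHere()
}
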
 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
   address entry = __ pc();
 
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,186 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "interpreter/interpreter.hpp"
-#include "oops/constMethod.hpp"
-#include "oops/method.hpp"
-#include "runtime/frame.inline.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-
-// Size of interpreter code.  Increase if too small.  Interpreter will
-// fail with a guarantee ("not enough space for interpreter generation");
-// if too small.
-// Run with +PrintInterpreter to get the VM to print out the size.
-// Max size with JVMTI
-int TemplateInterpreter::InterpreterCodeSize = 200 * 1024;
-
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
-         "index out of bounds");
-  return i;
-}
-
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  switch (method_kind(m)) {
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     :
-      return false;
-    default:
-      return true;
-  }
-}
-
-// How much stack a method activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-  const int entry_size = frame::interpreter_frame_monitor_size();
-
-  // total overhead size: entry_size + (saved rfp thru expr stack
-  // bottom).  be sure to change this if you add/subtract anything
-  // to/from the overhead area
-  const int overhead_size =
-    -(frame::interpreter_frame_initial_sp_offset) + entry_size;
-
-  const int stub_code = frame::entry_frame_after_call_words;
-  const int method_stack = (method->max_locals() + method->max_stack()) *
-                           Interpreter::stackElementWords;
-  return (overhead_size + method_stack + stub_code);
-}
-
-// asm based interpreter deoptimization helpers
-int AbstractInterpreter::size_activation(int max_stack,
-                                         int temps,
-                                         int extra_args,
-                                         int monitors,
-                                         int callee_params,
-                                         int callee_locals,
-                                         bool is_top_frame) {
-  // Note: This calculation must exactly parallel the frame setup
-  // in TemplateInterpreterGenerator::generate_method_entry.
-
-  // fixed size of an interpreter frame:
-  int overhead = frame::sender_sp_offset -
-                 frame::interpreter_frame_initial_sp_offset;
-  // Our locals were accounted for by the caller (or last_frame_adjust
-  // on the transistion) Since the callee parameters already account
-  // for the callee's params we only need to account for the extra
-  // locals.
-  int size = overhead +
-         (callee_locals - callee_params)*Interpreter::stackElementWords +
-         monitors * frame::interpreter_frame_monitor_size() +
-         temps* Interpreter::stackElementWords + extra_args;
-
-  // On AArch64 we always keep the stack pointer 16-aligned, so we
-  // must round up here.
-  size = round_to(size, 2);
-
-  return size;
-}
-
-void AbstractInterpreter::layout_activation(Method* method,
-                                            int tempcount,
-                                            int popframe_extra_args,
-                                            int moncount,
-                                            int caller_actual_parameters,
-                                            int callee_param_count,
-                                            int callee_locals,
-                                            frame* caller,
-                                            frame* interpreter_frame,
-                                            bool is_top_frame,
-                                            bool is_bottom_frame) {
-  // The frame interpreter_frame is guaranteed to be the right size,
-  // as determined by a previous call to the size_activation() method.
-  // It is also guaranteed to be walkable even though it is in a
-  // skeletal state
-
-  int max_locals = method->max_locals() * Interpreter::stackElementWords;
-  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
-    Interpreter::stackElementWords;
-
-#ifdef ASSERT
-  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
-#endif
-
-  interpreter_frame->interpreter_frame_set_method(method);
-  // NOTE the difference in using sender_sp and
-  // interpreter_frame_sender_sp interpreter_frame_sender_sp is
-  // the original sp of the caller (the unextended_sp) and
-  // sender_sp is fp+8/16 (32bit/64bit) XXX
-  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
-
-#ifdef ASSERT
-  if (caller->is_interpreted_frame()) {
-    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
-  }
-#endif
-
-  interpreter_frame->interpreter_frame_set_locals(locals);
-  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
-  BasicObjectLock* monbot = montop - moncount;
-  interpreter_frame->interpreter_frame_set_monitor_end(monbot);
-
-  // Set last_sp
-  intptr_t*  esp = (intptr_t*) monbot -
-    tempcount*Interpreter::stackElementWords -
-    popframe_extra_args;
-  interpreter_frame->interpreter_frame_set_last_sp(esp);
-
-  // All frames but the initial (oldest) interpreter frame we fill in have
-  // a value for sender_sp that allows walking the stack but isn't
-  // truly correct. Correct the value here.
-  if (extra_locals != 0 &&
-      interpreter_frame->sender_sp() ==
-      interpreter_frame->interpreter_frame_sender_sp()) {
-    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
-                                                       extra_locals);
-  }
-  *interpreter_frame->interpreter_frame_cache_addr() =
-    method->constants()->cache();
-}
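
Everything deleted from templateInterpreter_aarch64.cpp above survives elsewhere: InterpreterCodeSize moves into templateInterpreterGenerator_aarch64.cpp, and the AbstractInterpreter helpers move into the new abstractInterpreter_aarch64.cpp. For reference, the BasicType_as_index() switch amounts to the small lookup table below; this is a sketch, and BasicTypeSketch is a renamed placeholder for the real BasicType enum in utilities/globalDefinitions.hpp.

// The BasicType -> result-handler-index mapping from BasicType_as_index(),
// written as a table.
enum BasicTypeSketch {
  kBoolean, kChar, kByte, kShort, kInt, kLong, kVoid,
  kFloat, kDouble, kObject, kArray
};

static int basictype_as_index_sketch(BasicTypeSketch t) {
  static const int index_for[] = {
    0,  // T_BOOLEAN
    1,  // T_CHAR
    2,  // T_BYTE
    3,  // T_SHORT
    4,  // T_INT
    5,  // T_LONG
    6,  // T_VOID
    7,  // T_FLOAT
    8,  // T_DOUBLE
    9,  // T_OBJECT
    9   // T_ARRAY shares the object result handler
  };
  return index_for[t];
}
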
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/ppc/vm/abstractInterpreter_ppc.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/interpreter.hpp"
+#include "oops/constMethod.hpp"
+#include "oops/method.hpp"
+#include "runtime/frame.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
+}
+
+// Support abs and sqrt like in compiler.
+// For others we can use a normal (native) entry.
+bool AbstractInterpreter::math_entry_available(AbstractInterpreter::MethodKind kind) {
+  if (!InlineIntrinsics) return false;
+
+  return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
+          (kind==Interpreter::java_lang_math_abs));
+}
+
+// These should never be compiled since the interpreter will prefer
+// the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  return !math_entry_available(method_kind(m));
+}
+
+// How much stack a method activation needs in stack slots.
+// We must calc this exactly like in generate_fixed_frame.
+// Note: This returns the conservative size assuming maximum alignment.
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
+  const int max_alignment_size = 2;
+  const int abi_scratch = frame::abi_reg_args_size;
+  return method->max_locals() + method->max_stack() +
+         frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
+}
+
+// Returns number of stackElementWords needed for the interpreter frame with the
+// given sections.
+// This overestimates the stack by one slot in case of alignments.
+int AbstractInterpreter::size_activation(int max_stack,
+                                         int temps,
+                                         int extra_args,
+                                         int monitors,
+                                         int callee_params,
+                                         int callee_locals,
+                                         bool is_top_frame) {
+  // Note: This calculation must exactly parallel the frame setup
+  // in TemplateInterpreterGenerator::generate_fixed_frame.
+  assert(Interpreter::stackElementWords == 1, "sanity");
+  const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
+  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
+                                         (frame::abi_minframe_size / Interpreter::stackElementSize);
+  const int size =
+    max_stack                                                +
+    (callee_locals - callee_params)                          +
+    monitors * frame::interpreter_frame_monitor_size()       +
+    max_alignment_space                                      +
+    abi_scratch                                              +
+    frame::ijava_state_size / Interpreter::stackElementSize;
+
+  // Fixed size of an interpreter frame, align to 16-byte.
+  return (size & -2);
+}
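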
+
+// Fills a skeletal interpreter frame generated during deoptimizations.
+//
+// Parameters:
+//
+// interpreter_frame != NULL:
+//   set up the method, locals, and monitors.
+//   The frame interpreter_frame, if not NULL, is guaranteed to be the
+//   right size, as determined by a previous call to this method.
+//   It is also guaranteed to be walkable even though it is in a skeletal state
+//
+// is_top_frame == true:
+//   We're processing the *oldest* interpreter frame!
+//
+// pop_frame_extra_args:
+//   If this is != 0 we are returning to a deoptimized frame by popping
+//   off the callee frame. We want to re-execute the call that called the
+//   callee interpreted, but since the return to the interpreter would pop
+//   the arguments off advance the esp by dummy popframe_extra_args slots.
+//   Popping off those will establish the stack layout as it was before the call.
+//
+void AbstractInterpreter::layout_activation(Method* method,
+                                            int tempcount,
+                                            int popframe_extra_args,
+                                            int moncount,
+                                            int caller_actual_parameters,
+                                            int callee_param_count,
+                                            int callee_locals_count,
+                                            frame* caller,
+                                            frame* interpreter_frame,
+                                            bool is_top_frame,
+                                            bool is_bottom_frame) {
+
+  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
+                                         (frame::abi_minframe_size / Interpreter::stackElementSize);
+
+  intptr_t* locals_base  = (caller->is_interpreted_frame()) ?
+    caller->interpreter_frame_esp() + caller_actual_parameters :
+    caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
+
+  intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ;
+  intptr_t* monitor      = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
+  intptr_t* esp_base     = monitor - 1;
+  intptr_t* esp          = esp_base - tempcount - popframe_extra_args;
+  intptr_t* sp           = (intptr_t *) (((intptr_t) (esp_base - callee_locals_count + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
+  intptr_t* sender_sp    = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
+  intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
+
+  interpreter_frame->interpreter_frame_set_method(method);
+  interpreter_frame->interpreter_frame_set_locals(locals_base);
+  interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
+  interpreter_frame->interpreter_frame_set_esp(esp);
+  interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
+  interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
+  if (!is_bottom_frame) {
+    interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
+  }
+}
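
On PPC the frame size is computed in 8-byte stack slots, so 16-byte alignment just means an even slot count: size & -2 clears the low bit and rounds down, which is safe only because max_alignment_space was added into the sum first. A tiny numeric illustration, with the raw count invented for the example:

// Numeric illustration of the "size & -2" alignment at the end of
// size_activation() above: with 8-byte stack elements an even slot count
// is a 16-byte multiple.
#include <cstdio>

int main() {
  int raw_slots     = 37;              // hypothetical unaligned slot count
  int aligned_slots = raw_slots & -2;  // 36 slots
  printf("raw=%d aligned=%d bytes=%d\n",
         raw_slots, aligned_slots, aligned_slots * 8);  // 288 bytes, divisible by 16
  return 0;
}
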
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,555 +0,0 @@
-/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2015 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateInterpreterGenerator.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "prims/methodHandles.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-
-#define __ _masm->
-
-#ifdef PRODUCT
-#define BLOCK_COMMENT(str) // nothing
-#else
-#define BLOCK_COMMENT(str) __ block_comment(str)
-#endif
-
-#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
-
-address AbstractInterpreterGenerator::generate_slow_signature_handler() {
-  // Slow_signature handler that respects the PPC C calling conventions.
-  //
-  // We get called by the native entry code with our output register
-  // area == 8. First we call InterpreterRuntime::get_result_handler
-  // to copy the pointer to the signature string temporarily to the
-  // first C-argument and to return the result_handler in
-  // R3_RET. Since native_entry will copy the jni-pointer to the
-  // first C-argument slot later on, it is OK to occupy this slot
-  // temporarilly. Then we copy the argument list on the java
-  // expression stack into native varargs format on the native stack
-  // and load arguments into argument registers. Integer arguments in
-  // the varargs vector will be sign-extended to 8 bytes.
-  //
-  // On entry:
-  //   R3_ARG1        - intptr_t*     Address of java argument list in memory.
-  //   R15_prev_state - BytecodeInterpreter* Address of interpreter state for
-  //     this method
-  //   R19_method
-  //
-  // On exit (just before return instruction):
-  //   R3_RET            - contains the address of the result_handler.
-  //   R4_ARG2           - is not updated for static methods and contains "this" otherwise.
-  //   R5_ARG3-R10_ARG8: - When the (i-2)th Java argument is not of type float or double,
-  //                       ARGi contains this argument. Otherwise, ARGi is not updated.
-  //   F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.
-
-  const int LogSizeOfTwoInstructions = 3;
-
-  // FIXME: use Argument:: GL: Argument names different numbers!
-  const int max_fp_register_arguments  = 13;
-  const int max_int_register_arguments = 6;  // first 2 are reserved
-
-  const Register arg_java       = R21_tmp1;
-  const Register arg_c          = R22_tmp2;
-  const Register signature      = R23_tmp3;  // is string
-  const Register sig_byte       = R24_tmp4;
-  const Register fpcnt          = R25_tmp5;
-  const Register argcnt         = R26_tmp6;
-  const Register intSlot        = R27_tmp7;
-  const Register target_sp      = R28_tmp8;
-  const FloatRegister floatSlot = F0;
-
-  address entry = __ function_entry();
-
-  __ save_LR_CR(R0);
-  __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
-  // We use target_sp for storing arguments in the C frame.
-  __ mr(target_sp, R1_SP);
-  __ push_frame_reg_args_nonvolatiles(0, R11_scratch1);
-
-  __ mr(arg_java, R3_ARG1);
-
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);
-
-  // Signature is in R3_RET. Signature is callee saved.
-  __ mr(signature, R3_RET);
-
-  // Get the result handler.
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
-
-  {
-    Label L;
-    // test if static
-    // _access_flags._flags must be at offset 0.
-    // TODO PPC port: requires change in shared code.
-    //assert(in_bytes(AccessFlags::flags_offset()) == 0,
-    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
-    // _access_flags must be a 32 bit value.
-    assert(sizeof(AccessFlags) == 4, "wrong size");
-    __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
-    // testbit with condition register.
-    __ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
-    __ btrue(CCR0, L);
-    // For non-static functions, pass "this" in R4_ARG2 and copy it
-    // to 2nd C-arg slot.
-    // We need to box the Java object here, so we use arg_java
-    // (address of current Java stack slot) as argument and don't
-    // dereference it as in case of ints, floats, etc.
-    __ mr(R4_ARG2, arg_java);
-    __ addi(arg_java, arg_java, -BytesPerWord);
-    __ std(R4_ARG2, _abi(carg_2), target_sp);
-    __ bind(L);
-  }
-
-  // Will be incremented directly after loop_start. argcnt=0
-  // corresponds to 3rd C argument.
-  __ li(argcnt, -1);
-  // arg_c points to 3rd C argument
-  __ addi(arg_c, target_sp, _abi(carg_3));
-  // no floating-point args parsed so far
-  __ li(fpcnt, 0);
-
-  Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
-  Label loop_start, loop_end;
-  Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;
-
-  // signature points to '(' at entry
-#ifdef ASSERT
-  __ lbz(sig_byte, 0, signature);
-  __ cmplwi(CCR0, sig_byte, '(');
-  __ bne(CCR0, do_dontreachhere);
-#endif
-
-  __ bind(loop_start);
-
-  __ addi(argcnt, argcnt, 1);
-  __ lbzu(sig_byte, 1, signature);
-
-  __ cmplwi(CCR0, sig_byte, ')'); // end of signature
-  __ beq(CCR0, loop_end);
-
-  __ cmplwi(CCR0, sig_byte, 'B'); // byte
-  __ beq(CCR0, do_int);
-
-  __ cmplwi(CCR0, sig_byte, 'C'); // char
-  __ beq(CCR0, do_int);
-
-  __ cmplwi(CCR0, sig_byte, 'D'); // double
-  __ beq(CCR0, do_double);
-
-  __ cmplwi(CCR0, sig_byte, 'F'); // float
-  __ beq(CCR0, do_float);
-
-  __ cmplwi(CCR0, sig_byte, 'I'); // int
-  __ beq(CCR0, do_int);
-
-  __ cmplwi(CCR0, sig_byte, 'J'); // long
-  __ beq(CCR0, do_long);
-
-  __ cmplwi(CCR0, sig_byte, 'S'); // short
-  __ beq(CCR0, do_int);
-
-  __ cmplwi(CCR0, sig_byte, 'Z'); // boolean
-  __ beq(CCR0, do_int);
-
-  __ cmplwi(CCR0, sig_byte, 'L'); // object
-  __ beq(CCR0, do_object);
-
-  __ cmplwi(CCR0, sig_byte, '['); // array
-  __ beq(CCR0, do_array);
-
-  //  __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
-  //  __ beq(CCR0, do_void);
-
-  __ bind(do_dontreachhere);
-
-  __ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);
-
-  __ bind(do_array);
-
-  {
-    Label start_skip, end_skip;
-
-    __ bind(start_skip);
-    __ lbzu(sig_byte, 1, signature);
-    __ cmplwi(CCR0, sig_byte, '[');
-    __ beq(CCR0, start_skip); // skip further brackets
-    __ cmplwi(CCR0, sig_byte, '9');
-    __ bgt(CCR0, end_skip);   // no optional size
-    __ cmplwi(CCR0, sig_byte, '0');
-    __ bge(CCR0, start_skip); // skip optional size
-    __ bind(end_skip);
-
-    __ cmplwi(CCR0, sig_byte, 'L');
-    __ beq(CCR0, do_object);  // for arrays of objects, the name of the object must be skipped
-    __ b(do_boxed);          // otherwise, go directly to do_boxed
-  }
-
-  __ bind(do_object);
-  {
-    Label L;
-    __ bind(L);
-    __ lbzu(sig_byte, 1, signature);
-    __ cmplwi(CCR0, sig_byte, ';');
-    __ bne(CCR0, L);
-   }
-  // Need to box the Java object here, so we use arg_java (address of
-  // current Java stack slot) as argument and don't dereference it as
-  // in case of ints, floats, etc.
-  Label do_null;
-  __ bind(do_boxed);
-  __ ld(R0,0, arg_java);
-  __ cmpdi(CCR0, R0, 0);
-  __ li(intSlot,0);
-  __ beq(CCR0, do_null);
-  __ mr(intSlot, arg_java);
-  __ bind(do_null);
-  __ std(intSlot, 0, arg_c);
-  __ addi(arg_java, arg_java, -BytesPerWord);
-  __ addi(arg_c, arg_c, BytesPerWord);
-  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
-  __ blt(CCR0, move_intSlot_to_ARG);
-  __ b(loop_start);
-
-  __ bind(do_int);
-  __ lwa(intSlot, 0, arg_java);
-  __ std(intSlot, 0, arg_c);
-  __ addi(arg_java, arg_java, -BytesPerWord);
-  __ addi(arg_c, arg_c, BytesPerWord);
-  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
-  __ blt(CCR0, move_intSlot_to_ARG);
-  __ b(loop_start);
-
-  __ bind(do_long);
-  __ ld(intSlot, -BytesPerWord, arg_java);
-  __ std(intSlot, 0, arg_c);
-  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
-  __ addi(arg_c, arg_c, BytesPerWord);
-  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
-  __ blt(CCR0, move_intSlot_to_ARG);
-  __ b(loop_start);
-
-  __ bind(do_float);
-  __ lfs(floatSlot, 0, arg_java);
-#if defined(LINUX)
-  // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
-  // in the least significant word of an argument slot.
-#if defined(VM_LITTLE_ENDIAN)
-  __ stfs(floatSlot, 0, arg_c);
-#else
-  __ stfs(floatSlot, 4, arg_c);
-#endif
-#elif defined(AIX)
-  // Although AIX runs on big endian CPU, float is in most significant
-  // word of an argument slot.
-  __ stfs(floatSlot, 0, arg_c);
-#else
-#error "unknown OS"
-#endif
-  __ addi(arg_java, arg_java, -BytesPerWord);
-  __ addi(arg_c, arg_c, BytesPerWord);
-  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
-  __ blt(CCR0, move_floatSlot_to_FARG);
-  __ b(loop_start);
-
-  __ bind(do_double);
-  __ lfd(floatSlot, - BytesPerWord, arg_java);
-  __ stfd(floatSlot, 0, arg_c);
-  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
-  __ addi(arg_c, arg_c, BytesPerWord);
-  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
-  __ blt(CCR0, move_floatSlot_to_FARG);
-  __ b(loop_start);
-
-  __ bind(loop_end);
-
-  __ pop_frame();
-  __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
-  __ restore_LR_CR(R0);
-
-  __ blr();
-
-  Label move_int_arg, move_float_arg;
-  __ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
-  __ mr(R5_ARG3, intSlot);  __ b(loop_start);
-  __ mr(R6_ARG4, intSlot);  __ b(loop_start);
-  __ mr(R7_ARG5, intSlot);  __ b(loop_start);
-  __ mr(R8_ARG6, intSlot);  __ b(loop_start);
-  __ mr(R9_ARG7, intSlot);  __ b(loop_start);
-  __ mr(R10_ARG8, intSlot); __ b(loop_start);
-
-  __ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
-  __ fmr(F1_ARG1, floatSlot);   __ b(loop_start);
-  __ fmr(F2_ARG2, floatSlot);   __ b(loop_start);
-  __ fmr(F3_ARG3, floatSlot);   __ b(loop_start);
-  __ fmr(F4_ARG4, floatSlot);   __ b(loop_start);
-  __ fmr(F5_ARG5, floatSlot);   __ b(loop_start);
-  __ fmr(F6_ARG6, floatSlot);   __ b(loop_start);
-  __ fmr(F7_ARG7, floatSlot);   __ b(loop_start);
-  __ fmr(F8_ARG8, floatSlot);   __ b(loop_start);
-  __ fmr(F9_ARG9, floatSlot);   __ b(loop_start);
-  __ fmr(F10_ARG10, floatSlot); __ b(loop_start);
-  __ fmr(F11_ARG11, floatSlot); __ b(loop_start);
-  __ fmr(F12_ARG12, floatSlot); __ b(loop_start);
-  __ fmr(F13_ARG13, floatSlot); __ b(loop_start);
-
-  __ bind(move_intSlot_to_ARG);
-  __ sldi(R0, argcnt, LogSizeOfTwoInstructions);
-  __ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
-  __ add(R11_scratch1, R0, R11_scratch1);
-  __ mtctr(R11_scratch1/*branch_target*/);
-  __ bctr();
-  __ bind(move_floatSlot_to_FARG);
-  __ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
-  __ addi(fpcnt, fpcnt, 1);
-  __ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
-  __ add(R11_scratch1, R0, R11_scratch1);
-  __ mtctr(R11_scratch1/*branch_target*/);
-  __ bctr();
-
-  return entry;
-}
-
-address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type) {
-  //
-  // Registers alive
-  //   R3_RET
-  //   LR
-  //
-  // Registers updated
-  //   R3_RET
-  //
-
-  Label done;
-  address entry = __ pc();
-
-  switch (type) {
-  case T_BOOLEAN:
-    // convert !=0 to 1
-    __ neg(R0, R3_RET);
-    __ orr(R0, R3_RET, R0);
-    __ srwi(R3_RET, R0, 31);
-    break;
-  case T_BYTE:
-     // sign extend 8 bits
-     __ extsb(R3_RET, R3_RET);
-     break;
-  case T_CHAR:
-     // zero extend 16 bits
-     __ clrldi(R3_RET, R3_RET, 48);
-     break;
-  case T_SHORT:
-     // sign extend 16 bits
-     __ extsh(R3_RET, R3_RET);
-     break;
-  case T_INT:
-     // sign extend 32 bits
-     __ extsw(R3_RET, R3_RET);
-     break;
-  case T_LONG:
-     break;
-  case T_OBJECT:
-    // unbox result if not null
-    __ cmpdi(CCR0, R3_RET, 0);
-    __ beq(CCR0, done);
-    __ ld(R3_RET, 0, R3_RET);
-    __ verify_oop(R3_RET);
-    break;
-  case T_FLOAT:
-     break;
-  case T_DOUBLE:
-     break;
-  case T_VOID:
-     break;
-  default: ShouldNotReachHere();
-  }
-
-  __ BIND(done);
-  __ blr();
-
-  return entry;
-}
-
-// Abstract method entry.
-//
-address TemplateInterpreterGenerator::generate_abstract_entry(void) {
-  address entry = __ pc();
-
-  //
-  // Registers alive
-  //   R16_thread     - JavaThread*
-  //   R19_method     - callee's method (method to be invoked)
-  //   R1_SP          - SP prepared such that caller's outgoing args are near top
-  //   LR             - return address to caller
-  //
-  // Stack layout at this point:
-  //
-  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
-  //           alignment (optional)
-  //           [outgoing Java arguments]
-  //           ...
-  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
-  //            ...
-  //
-
-  // Can't use call_VM here because we have not set up a new
-  // interpreter state. Make the call to the vm and make it look like
-  // our caller set up the JavaFrameAnchor.
-  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
-
-  // Push a new C frame and save LR.
-  __ save_LR_CR(R0);
-  __ push_frame_reg_args(0, R11_scratch1);
-
-  // This is not a leaf but we have a JavaFrameAnchor now and we will
-  // check (create) exceptions afterward so this is ok.
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError),
-                  R16_thread);
-
-  // Pop the C frame and restore LR.
-  __ pop_frame();
-  __ restore_LR_CR(R0);
-
-  // Reset JavaFrameAnchor from call_VM_leaf above.
-  __ reset_last_Java_frame();
-
-  // We don't know our caller, so jump to the general forward exception stub,
-  // which will also pop our full frame off. Satisfy the interface of
-  // SharedRuntime::generate_forward_exception()
-  __ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
-  __ mtctr(R11_scratch1);
-  __ bctr();
-
-  return entry;
-}
-
-// Interpreter intrinsic for WeakReference.get().
-// 1. Don't push a full blown frame and go on dispatching, but fetch the value
-//    into R8 and return quickly
-// 2. If G1 is active we *must* execute this intrinsic for corrrectness:
-//    It contains a GC barrier which puts the reference into the satb buffer
-//    to indicate that someone holds a strong reference to the object the
-//    weak ref points to!
-address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
-  // Code: _aload_0, _getfield, _areturn
-  // parameter size = 1
-  //
-  // The code that gets generated by this routine is split into 2 parts:
-  //    1. the "intrinsified" code for G1 (or any SATB based GC),
-  //    2. the slow path - which is an expansion of the regular method entry.
-  //
-  // Notes:
-  // * In the G1 code we do not check whether we need to block for
-  //   a safepoint. If G1 is enabled then we must execute the specialized
-  //   code for Reference.get (except when the Reference object is null)
-  //   so that we can log the value in the referent field with an SATB
-  //   update buffer.
-  //   If the code for the getfield template is modified so that the
-  //   G1 pre-barrier code is executed when the current method is
-  //   Reference.get() then going through the normal method entry
-  //   will be fine.
-  // * The G1 code can, however, check the receiver object (the instance
-  //   of java.lang.Reference) and jump to the slow path if null. If the
-  //   Reference object is null then we obviously cannot fetch the referent
-  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
-  //   regular method entry code to generate the NPE.
-  //
-
-  if (UseG1GC) {
-    address entry = __ pc();
-
-    const int referent_offset = java_lang_ref_Reference::referent_offset;
-    guarantee(referent_offset > 0, "referent offset not initialized");
-
-    Label slow_path;
-
-    // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);
-
-    // In the G1 code we don't check if we need to reach a safepoint. We
-    // continue and the thread will safepoint at the next bytecode dispatch.
-
-    // If the receiver is null then it is OK to jump to the slow path.
-    __ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver
-
-    // Check if receiver == NULL and go the slow path.
-    __ cmpdi(CCR0, R3_RET, 0);
-    __ beq(CCR0, slow_path);
-
-    // Load the value of the referent field.
-    __ load_heap_oop(R3_RET, referent_offset, R3_RET);
-
-    // Generate the G1 pre-barrier code to log the value of
-    // the referent field in an SATB buffer. Note with
-    // these parameters the pre-barrier does not generate
-    // the load of the previous value.
-
-    // Restore caller sp for c2i case.
-#ifdef ASSERT
-      __ ld(R9_ARG7, 0, R1_SP);
-      __ ld(R10_ARG8, 0, R21_sender_SP);
-      __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-      __ asm_assert_eq("backlink", 0x544);
-#endif // ASSERT
-    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-    __ g1_write_barrier_pre(noreg,         // obj
-                            noreg,         // offset
-                            R3_RET,        // pre_val
-                            R11_scratch1,  // tmp
-                            R12_scratch2,  // tmp
-                            true);         // needs_frame
-
-    __ blr();
-
-    // Generate regular method entry.
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
-    return entry;
-  }
-
-  return NULL;
-}
--- a/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -51,6 +51,13 @@
 #undef __
 #define __ _masm->
 
+// Size of interpreter code.  Increase if too small.  Interpreter will
+// fail with a guarantee ("not enough space for interpreter generation")
+// if too small.
+// Run with +PrintInterpreter to get the VM to print out the size.
+// Max size with JVMTI
+int TemplateInterpreter::InterpreterCodeSize = 230*K;
+
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
 #else
@@ -61,6 +68,500 @@
 
 //-----------------------------------------------------------------------------
 
+address TemplateInterpreterGenerator::generate_slow_signature_handler() {
+  // Slow signature handler that respects the PPC C calling conventions.
+  //
+  // We get called by the native entry code with our output register
+  // area == 8. First we call InterpreterRuntime::get_result_handler
+  // to copy the pointer to the signature string temporarily to the
+  // first C-argument and to return the result_handler in
+  // R3_RET. Since native_entry will copy the jni-pointer to the
+  // first C-argument slot later on, it is OK to occupy this slot
+  // temporarily. Then we copy the argument list on the java
+  // expression stack into native varargs format on the native stack
+  // and load arguments into argument registers. Integer arguments in
+  // the varargs vector will be sign-extended to 8 bytes.
+  //
+  // On entry:
+  //   R3_ARG1        - intptr_t*     Address of java argument list in memory.
+  //   R15_prev_state - BytecodeInterpreter* Address of interpreter state for
+  //     this method
+  //   R19_method
+  //
+  // On exit (just before return instruction):
+  //   R3_RET            - contains the address of the result_handler.
+  //   R4_ARG2           - is not updated for static methods and contains "this" otherwise.
+  //   R5_ARG3-R10_ARG8  - When the (i-2)th Java argument is not of type float or double,
+  //                       ARGi contains this argument. Otherwise, ARGi is not updated.
+  //   F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.
+
+  const int LogSizeOfTwoInstructions = 3;
+
+  // FIXME: use Argument:: GL: Argument names different numbers!
+  const int max_fp_register_arguments  = 13;
+  const int max_int_register_arguments = 6;  // first 2 are reserved
+
+  const Register arg_java       = R21_tmp1;
+  const Register arg_c          = R22_tmp2;
+  const Register signature      = R23_tmp3;  // is string
+  const Register sig_byte       = R24_tmp4;
+  const Register fpcnt          = R25_tmp5;
+  const Register argcnt         = R26_tmp6;
+  const Register intSlot        = R27_tmp7;
+  const Register target_sp      = R28_tmp8;
+  const FloatRegister floatSlot = F0;
+
+  address entry = __ function_entry();
+
+  __ save_LR_CR(R0);
+  __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
+  // We use target_sp for storing arguments in the C frame.
+  __ mr(target_sp, R1_SP);
+  __ push_frame_reg_args_nonvolatiles(0, R11_scratch1);
+
+  __ mr(arg_java, R3_ARG1);
+
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);
+
+  // Signature is in R3_RET. Signature is callee saved.
+  __ mr(signature, R3_RET);
+
+  // Get the result handler.
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
+
+  {
+    Label L;
+    // test if static
+    // _access_flags._flags must be at offset 0.
+    // TODO PPC port: requires change in shared code.
+    //assert(in_bytes(AccessFlags::flags_offset()) == 0,
+    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
+    // _access_flags must be a 32 bit value.
+    assert(sizeof(AccessFlags) == 4, "wrong size");
+    __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
+    // testbit with condition register.
+    __ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
+    __ btrue(CCR0, L);
+    // For non-static functions, pass "this" in R4_ARG2 and copy it
+    // to 2nd C-arg slot.
+    // We need to box the Java object here, so we use arg_java
+    // (address of current Java stack slot) as argument and don't
+    // dereference it as in case of ints, floats, etc.
+    __ mr(R4_ARG2, arg_java);
+    __ addi(arg_java, arg_java, -BytesPerWord);
+    __ std(R4_ARG2, _abi(carg_2), target_sp);
+    __ bind(L);
+  }
+
+  // Will be incremented directly after loop_start. argcnt=0
+  // corresponds to 3rd C argument.
+  __ li(argcnt, -1);
+  // arg_c points to 3rd C argument
+  __ addi(arg_c, target_sp, _abi(carg_3));
+  // no floating-point args parsed so far
+  __ li(fpcnt, 0);
+
+  Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
+  Label loop_start, loop_end;
+  Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;
+
+  // signature points to '(' at entry
+#ifdef ASSERT
+  __ lbz(sig_byte, 0, signature);
+  __ cmplwi(CCR0, sig_byte, '(');
+  __ bne(CCR0, do_dontreachhere);
+#endif
+
+  __ bind(loop_start);
+
+  __ addi(argcnt, argcnt, 1);
+  __ lbzu(sig_byte, 1, signature);
+
+  __ cmplwi(CCR0, sig_byte, ')'); // end of signature
+  __ beq(CCR0, loop_end);
+
+  __ cmplwi(CCR0, sig_byte, 'B'); // byte
+  __ beq(CCR0, do_int);
+
+  __ cmplwi(CCR0, sig_byte, 'C'); // char
+  __ beq(CCR0, do_int);
+
+  __ cmplwi(CCR0, sig_byte, 'D'); // double
+  __ beq(CCR0, do_double);
+
+  __ cmplwi(CCR0, sig_byte, 'F'); // float
+  __ beq(CCR0, do_float);
+
+  __ cmplwi(CCR0, sig_byte, 'I'); // int
+  __ beq(CCR0, do_int);
+
+  __ cmplwi(CCR0, sig_byte, 'J'); // long
+  __ beq(CCR0, do_long);
+
+  __ cmplwi(CCR0, sig_byte, 'S'); // short
+  __ beq(CCR0, do_int);
+
+  __ cmplwi(CCR0, sig_byte, 'Z'); // boolean
+  __ beq(CCR0, do_int);
+
+  __ cmplwi(CCR0, sig_byte, 'L'); // object
+  __ beq(CCR0, do_object);
+
+  __ cmplwi(CCR0, sig_byte, '['); // array
+  __ beq(CCR0, do_array);
+
+  //  __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
+  //  __ beq(CCR0, do_void);
+
+  __ bind(do_dontreachhere);
+
+  __ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);
+
+  __ bind(do_array);
+
+  {
+    Label start_skip, end_skip;
+
+    __ bind(start_skip);
+    __ lbzu(sig_byte, 1, signature);
+    __ cmplwi(CCR0, sig_byte, '[');
+    __ beq(CCR0, start_skip); // skip further brackets
+    __ cmplwi(CCR0, sig_byte, '9');
+    __ bgt(CCR0, end_skip);   // no optional size
+    __ cmplwi(CCR0, sig_byte, '0');
+    __ bge(CCR0, start_skip); // skip optional size
+    __ bind(end_skip);
+
+    __ cmplwi(CCR0, sig_byte, 'L');
+    __ beq(CCR0, do_object);  // for arrays of objects, the name of the object must be skipped
+    __ b(do_boxed);          // otherwise, go directly to do_boxed
+  }
+
+  __ bind(do_object);
+  {
+    Label L;
+    __ bind(L);
+    __ lbzu(sig_byte, 1, signature);
+    __ cmplwi(CCR0, sig_byte, ';');
+    __ bne(CCR0, L);
+  }
+  // Need to box the Java object here, so we use arg_java (address of
+  // current Java stack slot) as argument and don't dereference it as
+  // in case of ints, floats, etc.
+  Label do_null;
+  __ bind(do_boxed);
+  __ ld(R0, 0, arg_java);
+  __ cmpdi(CCR0, R0, 0);
+  __ li(intSlot, 0);
+  __ beq(CCR0, do_null);
+  __ mr(intSlot, arg_java);
+  __ bind(do_null);
+  __ std(intSlot, 0, arg_c);
+  __ addi(arg_java, arg_java, -BytesPerWord);
+  __ addi(arg_c, arg_c, BytesPerWord);
+  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
+  __ blt(CCR0, move_intSlot_to_ARG);
+  __ b(loop_start);
+
+  __ bind(do_int);
+  __ lwa(intSlot, 0, arg_java);
+  __ std(intSlot, 0, arg_c);
+  __ addi(arg_java, arg_java, -BytesPerWord);
+  __ addi(arg_c, arg_c, BytesPerWord);
+  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
+  __ blt(CCR0, move_intSlot_to_ARG);
+  __ b(loop_start);
+
+  __ bind(do_long);
+  __ ld(intSlot, -BytesPerWord, arg_java);
+  __ std(intSlot, 0, arg_c);
+  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
+  __ addi(arg_c, arg_c, BytesPerWord);
+  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
+  __ blt(CCR0, move_intSlot_to_ARG);
+  __ b(loop_start);
+
+  __ bind(do_float);
+  __ lfs(floatSlot, 0, arg_java);
+#if defined(LINUX)
+  // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
+  // in the least significant word of an argument slot.
+#if defined(VM_LITTLE_ENDIAN)
+  __ stfs(floatSlot, 0, arg_c);
+#else
+  __ stfs(floatSlot, 4, arg_c);
+#endif
+#elif defined(AIX)
+  // Although AIX runs on a big endian CPU, the float is in the most
+  // significant word of an argument slot.
+  __ stfs(floatSlot, 0, arg_c);
+#else
+#error "unknown OS"
+#endif
+  __ addi(arg_java, arg_java, -BytesPerWord);
+  __ addi(arg_c, arg_c, BytesPerWord);
+  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
+  __ blt(CCR0, move_floatSlot_to_FARG);
+  __ b(loop_start);
+
+  __ bind(do_double);
+  __ lfd(floatSlot, - BytesPerWord, arg_java);
+  __ stfd(floatSlot, 0, arg_c);
+  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
+  __ addi(arg_c, arg_c, BytesPerWord);
+  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
+  __ blt(CCR0, move_floatSlot_to_FARG);
+  __ b(loop_start);
+
+  __ bind(loop_end);
+
+  __ pop_frame();
+  __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
+  __ restore_LR_CR(R0);
+
+  __ blr();
+
+  Label move_int_arg, move_float_arg;
+  __ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
+  __ mr(R5_ARG3, intSlot);  __ b(loop_start);
+  __ mr(R6_ARG4, intSlot);  __ b(loop_start);
+  __ mr(R7_ARG5, intSlot);  __ b(loop_start);
+  __ mr(R8_ARG6, intSlot);  __ b(loop_start);
+  __ mr(R9_ARG7, intSlot);  __ b(loop_start);
+  __ mr(R10_ARG8, intSlot); __ b(loop_start);
+
+  __ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
+  __ fmr(F1_ARG1, floatSlot);   __ b(loop_start);
+  __ fmr(F2_ARG2, floatSlot);   __ b(loop_start);
+  __ fmr(F3_ARG3, floatSlot);   __ b(loop_start);
+  __ fmr(F4_ARG4, floatSlot);   __ b(loop_start);
+  __ fmr(F5_ARG5, floatSlot);   __ b(loop_start);
+  __ fmr(F6_ARG6, floatSlot);   __ b(loop_start);
+  __ fmr(F7_ARG7, floatSlot);   __ b(loop_start);
+  __ fmr(F8_ARG8, floatSlot);   __ b(loop_start);
+  __ fmr(F9_ARG9, floatSlot);   __ b(loop_start);
+  __ fmr(F10_ARG10, floatSlot); __ b(loop_start);
+  __ fmr(F11_ARG11, floatSlot); __ b(loop_start);
+  __ fmr(F12_ARG12, floatSlot); __ b(loop_start);
+  __ fmr(F13_ARG13, floatSlot); __ b(loop_start);
+
+  __ bind(move_intSlot_to_ARG);
+  __ sldi(R0, argcnt, LogSizeOfTwoInstructions);
+  __ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
+  __ add(R11_scratch1, R0, R11_scratch1);
+  __ mtctr(R11_scratch1/*branch_target*/);
+  __ bctr();
+  __ bind(move_floatSlot_to_FARG);
+  __ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
+  __ addi(fpcnt, fpcnt, 1);
+  __ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
+  __ add(R11_scratch1, R0, R11_scratch1);
+  __ mtctr(R11_scratch1/*branch_target*/);
+  __ bctr();
+
+  return entry;
+}
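The two jump tables just above (move_int_arg, move_float_arg) are computed-goto tables: every case is exactly two fixed-width 4-byte PPC instructions, so the branch target is the bound label address plus argcnt (or fpcnt) shifted left by LogSizeOfTwoInstructions == 3. A minimal standalone sketch of that address arithmetic, with a hypothetical label address (not HotSpot code):

    #include <cassert>
    #include <cstdint>

    // Each dispatch case is two 4-byte instructions, i.e. 8 bytes, so log2 == 3.
    const int LogSizeOfTwoInstructions = 3;

    // Hypothetical address at which the move_int_arg label was bound.
    const uintptr_t move_int_arg_base = 0x4000;

    // Mirrors: sldi R0, argcnt, LogSizeOfTwoInstructions; add; mtctr; bctr.
    uintptr_t int_case_target(uintptr_t argcnt) {
      return move_int_arg_base + (argcnt << LogSizeOfTwoInstructions);
    }

    int main() {
      assert(int_case_target(0) == 0x4000);  // mr R5_ARG3, intSlot; b loop_start
      assert(int_case_target(3) == 0x4018);  // mr R8_ARG6, intSlot; b loop_start
      return 0;
    }

This is also why the comments in the tables insist that each case stay exactly two instructions.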
+
+address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
+  //
+  // Registers alive
+  //   R3_RET
+  //   LR
+  //
+  // Registers updated
+  //   R3_RET
+  //
+
+  Label done;
+  address entry = __ pc();
+
+  switch (type) {
+  case T_BOOLEAN:
+    // convert !=0 to 1
+    __ neg(R0, R3_RET);
+    __ orr(R0, R3_RET, R0);
+    __ srwi(R3_RET, R0, 31);
+    break;
+  case T_BYTE:
+     // sign extend 8 bits
+     __ extsb(R3_RET, R3_RET);
+     break;
+  case T_CHAR:
+     // zero extend 16 bits
+     __ clrldi(R3_RET, R3_RET, 48);
+     break;
+  case T_SHORT:
+     // sign extend 16 bits
+     __ extsh(R3_RET, R3_RET);
+     break;
+  case T_INT:
+     // sign extend 32 bits
+     __ extsw(R3_RET, R3_RET);
+     break;
+  case T_LONG:
+     break;
+  case T_OBJECT:
+    // unbox result if not null
+    __ cmpdi(CCR0, R3_RET, 0);
+    __ beq(CCR0, done);
+    __ ld(R3_RET, 0, R3_RET);
+    __ verify_oop(R3_RET);
+    break;
+  case T_FLOAT:
+     break;
+  case T_DOUBLE:
+     break;
+  case T_VOID:
+     break;
+  default: ShouldNotReachHere();
+  }
+
+  BIND(done);
+  __ blr();
+
+  return entry;
+}
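The T_BOOLEAN case normalizes any non-zero native return value to exactly 1 without a branch: neg produces -x, orr folds x and -x together, and srwi by 31 extracts bit 31, which is set iff x != 0. A standalone restatement of the same trick on 32-bit values (my paraphrase, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    // Branch-free (x != 0) ? 1 : 0, mirroring neg / orr / srwi 31.
    uint32_t normalize_boolean(uint32_t x) {
      return (x | (0u - x)) >> 31;   // bit 31 of (x | -x) is set exactly when x != 0
    }

    int main() {
      assert(normalize_boolean(0) == 0);
      assert(normalize_boolean(1) == 1);
      assert(normalize_boolean(123) == 1);
      assert(normalize_boolean(0x80000000u) == 1);
      return 0;
    }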
+
+// Abstract method entry.
+//
+address TemplateInterpreterGenerator::generate_abstract_entry(void) {
+  address entry = __ pc();
+
+  //
+  // Registers alive
+  //   R16_thread     - JavaThread*
+  //   R19_method     - callee's method (method to be invoked)
+  //   R1_SP          - SP prepared such that caller's outgoing args are near top
+  //   LR             - return address to caller
+  //
+  // Stack layout at this point:
+  //
+  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
+  //           alignment (optional)
+  //           [outgoing Java arguments]
+  //           ...
+  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
+  //            ...
+  //
+
+  // Can't use call_VM here because we have not set up a new
+  // interpreter state. Make the call to the vm and make it look like
+  // our caller set up the JavaFrameAnchor.
+  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
+
+  // Push a new C frame and save LR.
+  __ save_LR_CR(R0);
+  __ push_frame_reg_args(0, R11_scratch1);
+
+  // This is not a leaf but we have a JavaFrameAnchor now and we will
+  // check (create) exceptions afterward so this is ok.
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError),
+                  R16_thread);
+
+  // Pop the C frame and restore LR.
+  __ pop_frame();
+  __ restore_LR_CR(R0);
+
+  // Reset JavaFrameAnchor from call_VM_leaf above.
+  __ reset_last_Java_frame();
+
+  // We don't know our caller, so jump to the general forward exception stub,
+  // which will also pop our full frame off. Satisfy the interface of
+  // SharedRuntime::generate_forward_exception()
+  __ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
+  __ mtctr(R11_scratch1);
+  __ bctr();
+
+  return entry;
+}
+
+// Interpreter intrinsic for WeakReference.get().
+// 1. Don't push a full-blown frame and go on dispatching, but fetch the value
+//    into R8 and return quickly.
+// 2. If G1 is active we *must* execute this intrinsic for correctness:
+//    It contains a GC barrier which puts the reference into the SATB buffer
+//    to indicate that someone holds a strong reference to the object the
+//    weak ref points to!
+address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. the "intrinsified" code for G1 (or any SATB based GC),
+  //    2. the slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
+
+  if (UseG1GC) {
+    address entry = __ pc();
+
+    const int referent_offset = java_lang_ref_Reference::referent_offset;
+    guarantee(referent_offset > 0, "referent offset not initialized");
+
+    Label slow_path;
+
+    // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);
+
+    // In the G1 code we don't check if we need to reach a safepoint. We
+    // continue and the thread will safepoint at the next bytecode dispatch.
+
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver
+
+    // Check if receiver == NULL and go to the slow path.
+    __ cmpdi(CCR0, R3_RET, 0);
+    __ beq(CCR0, slow_path);
+
+    // Load the value of the referent field.
+    __ load_heap_oop(R3_RET, referent_offset, R3_RET);
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer. Note with
+    // these parameters the pre-barrier does not generate
+    // the load of the previous value.
+
+    // Restore caller sp for c2i case.
+#ifdef ASSERT
+      __ ld(R9_ARG7, 0, R1_SP);
+      __ ld(R10_ARG8, 0, R21_sender_SP);
+      __ cmpd(CCR0, R9_ARG7, R10_ARG8);
+      __ asm_assert_eq("backlink", 0x544);
+#endif // ASSERT
+    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+
+    __ g1_write_barrier_pre(noreg,         // obj
+                            noreg,         // offset
+                            R3_RET,        // pre_val
+                            R11_scratch1,  // tmp
+                            R12_scratch2,  // tmp
+                            true);         // needs_frame
+
+    __ blr();
+
+    // Generate regular method entry.
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
+    return entry;
+  }
+
+  return NULL;
+}
+
 // Actually we should never reach here since we do stack overflow checks before pushing any frame.
 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
   address entry = __ pc();
@@ -222,12 +723,6 @@
   return entry;
 }
 
-// A result handler converts the native result into java format.
-// Use the shared code between c++ and template interpreter.
-address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
-  return AbstractInterpreterGenerator::generate_result_handler_for(type);
-}
-
 address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
   address entry = __ pc();
 
@@ -606,7 +1101,7 @@
 // End of helpers
 
 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
-  if (!TemplateInterpreter::math_entry_available(kind)) {
+  if (!Interpreter::math_entry_available(kind)) {
     NOT_PRODUCT(__ should_not_reach_here();)
     return NULL;
   }
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,173 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2015 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "interpreter/interpreter.hpp"
-#include "oops/constMethod.hpp"
-#include "oops/method.hpp"
-#include "runtime/frame.inline.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-
-// Size of interpreter code.  Increase if too small.  Interpreter will
-// fail with a guarantee ("not enough space for interpreter generation");
-// if too small.
-// Run with +PrintInterpreter to get the VM to print out the size.
-// Max size with JVMTI
-int TemplateInterpreter::InterpreterCodeSize = 230*K;
-
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
-  return i;
-}
-
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  return !TemplateInterpreter::math_entry_available(method_kind(m));
-}
-
-// How much stack a method activation needs in stack slots.
-// We must calc this exactly like in generate_fixed_frame.
-// Note: This returns the conservative size assuming maximum alignment.
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-  const int max_alignment_size = 2;
-  const int abi_scratch = frame::abi_reg_args_size;
-  return method->max_locals() + method->max_stack() +
-         frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
-}
-
-// Returns number of stackElementWords needed for the interpreter frame with the
-// given sections.
-// This overestimates the stack by one slot in case of alignments.
-int AbstractInterpreter::size_activation(int max_stack,
-                                         int temps,
-                                         int extra_args,
-                                         int monitors,
-                                         int callee_params,
-                                         int callee_locals,
-                                         bool is_top_frame) {
-  // Note: This calculation must exactly parallel the frame setup
-  // in TemplateInterpreterGenerator::generate_fixed_frame.
-  assert(Interpreter::stackElementWords == 1, "sanity");
-  const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
-  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
-                                         (frame::abi_minframe_size / Interpreter::stackElementSize);
-  const int size =
-    max_stack                                                +
-    (callee_locals - callee_params)                          +
-    monitors * frame::interpreter_frame_monitor_size()       +
-    max_alignment_space                                      +
-    abi_scratch                                              +
-    frame::ijava_state_size / Interpreter::stackElementSize;
-
-  // Fixed size of an interpreter frame, align to 16-byte.
-  return (size & -2);
-}
-
-// Fills a sceletal interpreter frame generated during deoptimizations.
-//
-// Parameters:
-//
-// interpreter_frame != NULL:
-//   set up the method, locals, and monitors.
-//   The frame interpreter_frame, if not NULL, is guaranteed to be the
-//   right size, as determined by a previous call to this method.
-//   It is also guaranteed to be walkable even though it is in a skeletal state
-//
-// is_top_frame == true:
-//   We're processing the *oldest* interpreter frame!
-//
-// pop_frame_extra_args:
-//   If this is != 0 we are returning to a deoptimized frame by popping
-//   off the callee frame. We want to re-execute the call that called the
-//   callee interpreted, but since the return to the interpreter would pop
-//   the arguments off advance the esp by dummy popframe_extra_args slots.
-//   Popping off those will establish the stack layout as it was before the call.
-//
-void AbstractInterpreter::layout_activation(Method* method,
-                                            int tempcount,
-                                            int popframe_extra_args,
-                                            int moncount,
-                                            int caller_actual_parameters,
-                                            int callee_param_count,
-                                            int callee_locals_count,
-                                            frame* caller,
-                                            frame* interpreter_frame,
-                                            bool is_top_frame,
-                                            bool is_bottom_frame) {
-
-  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
-                                         (frame::abi_minframe_size / Interpreter::stackElementSize);
-
-  intptr_t* locals_base  = (caller->is_interpreted_frame()) ?
-    caller->interpreter_frame_esp() + caller_actual_parameters :
-    caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
-
-  intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ;
-  intptr_t* monitor      = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
-  intptr_t* esp_base     = monitor - 1;
-  intptr_t* esp          = esp_base - tempcount - popframe_extra_args;
-  intptr_t* sp           = (intptr_t *) (((intptr_t) (esp_base - callee_locals_count + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
-  intptr_t* sender_sp    = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
-  intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
-
-  interpreter_frame->interpreter_frame_set_method(method);
-  interpreter_frame->interpreter_frame_set_locals(locals_base);
-  interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
-  interpreter_frame->interpreter_frame_set_esp(esp);
-  interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
-  interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
-  if (!is_bottom_frame) {
-    interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
-  }
-}
-
-// Support abs and sqrt like in compiler.
-// For others we can use a normal (native) entry.
-
-bool TemplateInterpreter::math_entry_available(AbstractInterpreter::MethodKind kind) {
-  if (!InlineIntrinsics) return false;
-
-  return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
-          (kind==Interpreter::java_lang_math_abs));
-}
-
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/interpreter.hpp"
+#include "oops/constMethod.hpp"
+#include "oops/method.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/synchronizer.hpp"
+#include "utilities/macros.hpp"
+
+
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
+}
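BasicType_as_index exists because the BasicType enum values are not a dense 0-based sequence in this order, yet the per-type result handlers live in a small fixed table; note that T_OBJECT and T_ARRAY deliberately share slot 9, since arrays are returned like any other oop. A standalone restatement (toy enum and table, not HotSpot code) of the lookup this index enables:

    #include <cassert>

    // Toy enum ordered so the index falls out directly; the real BasicType
    // enum is ordered differently, which is why the switch above is explicit.
    enum ToyBasicType { T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT, T_LONG,
                        T_VOID, T_FLOAT, T_DOUBLE, T_OBJECT, T_ARRAY };

    const int number_of_result_handlers = 10;   // size of the hypothetical handler table

    int toy_basic_type_as_index(ToyBasicType type) {
      return (type == T_ARRAY) ? T_OBJECT : int(type);   // arrays share the object slot
    }

    int main() {
      const char* handlers[number_of_result_handlers] = {
        "bool", "char", "byte", "short", "int", "long", "void", "float", "double", "object"
      };
      assert(toy_basic_type_as_index(T_ARRAY) == toy_basic_type_as_index(T_OBJECT));
      assert(handlers[toy_basic_type_as_index(T_INT)][0] == 'i');
      return 0;
    }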
+
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  // No special entry points that preclude compilation
+  return true;
+}
+
+static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
+
+  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
+  // expression stack, the callee will have callee_extra_locals (so we can account for
+  // frame extension) and monitor_size for monitors. Basically we need to calculate
+  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
+  //
+  //
+  // The big complicating thing here is that we must ensure that the stack stays properly
+  // aligned. This would be even uglier if the monitor size weren't a multiple of the stack
+  // alignment. We are given that the sp (fp) is already aligned by
+  // the caller, so we must ensure that it is properly aligned for our callee.
+  //
+  const int rounded_vm_local_words =
+       round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
+  // callee_locals and max_stack are counts, not the size in frame.
+  const int locals_size =
+       round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
+  const int max_stack_words = max_stack * Interpreter::stackElementWords;
+  return (round_to((max_stack_words
+                   + rounded_vm_local_words
+                   + frame::memory_parameter_word_sp_offset), WordsPerLong)
+                   // already rounded
+                   + locals_size + monitor_size);
+}
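To make the rounding structure concrete, here is a worked restatement of size_activation_helper with illustrative constants; the real values of frame::interpreter_frame_vm_local_words and frame::memory_parameter_word_sp_offset are platform-defined, and the numbers below are assumptions, not HotSpot's:

    #include <cassert>

    const int WordsPerLong           = 2;   // illustrative: 32-bit words per 64-bit long
    const int stackElementWords      = 1;   // illustrative
    const int vm_local_words         = 10;  // stand-in for frame::interpreter_frame_vm_local_words
    const int memory_param_sp_offset = 23;  // stand-in for frame::memory_parameter_word_sp_offset

    int round_to_words(int x, int unit) { return (x + unit - 1) / unit * unit; }   // round up

    int toy_size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
      int rounded_vm_local_words = round_to_words(vm_local_words, WordsPerLong);
      int locals_size            = round_to_words(callee_extra_locals * stackElementWords,
                                                  WordsPerLong);
      int max_stack_words        = max_stack * stackElementWords;
      return round_to_words(max_stack_words + rounded_vm_local_words + memory_param_sp_offset,
                            WordsPerLong)
             + locals_size + monitor_size;
    }

    int main() {
      // 3 extra locals round up to 4 words; the middle term 5 + 10 + 23 = 38 is already even.
      assert(toy_size_activation_helper(3, 5, 0) == 38 + 4);
      return 0;
    }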
+
+// How much stack a top interpreter activation for a method needs, in words.
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
+
+  // See call_stub code
+  int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
+                                 WordsPerLong);    // 7 + register save area
+
+  // Save space for one monitor to get into the interpreted method in case
+  // the method is synchronized
+  int monitor_size    = method->is_synchronized() ?
+                                1*frame::interpreter_frame_monitor_size() : 0;
+  return size_activation_helper(method->max_locals(), method->max_stack(),
+                                monitor_size) + call_stub_size;
+}
+
+int AbstractInterpreter::size_activation(int max_stack,
+                                         int temps,
+                                         int extra_args,
+                                         int monitors,
+                                         int callee_params,
+                                         int callee_locals,
+                                         bool is_top_frame) {
+  // Note: This calculation must exactly parallel the frame setup
+  // in TemplateInterpreterGenerator::generate_fixed_frame.
+
+  int monitor_size           = monitors * frame::interpreter_frame_monitor_size();
+
+  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
+
+  //
+  // Note: if you look closely this appears to be doing something much different
+  // than generate_fixed_frame. What is happening is this. On sparc we have to do
+  // this dance with interpreter_sp_adjustment because the window save area would
+  // appear just below the bottom (tos) of the caller's java expression stack. Because
+  // the interpreter wants to have the locals completely contiguous, generate_fixed_frame
+  // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
+  // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
+  // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
+  // This is mostly ok but it does cause a problem when we get to the initial frame (the oldest),
+  // because the oldest frame would have adjusted its caller's frame and yet that frame
+  // already exists and isn't part of this array of frames we are unpacking. So at first
+  // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
+  // after it calculates all of the frames' on_stack_size()s, will then figure out the
+  // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
+  // add up. It does seem like it would be simpler to account for the adjustment here (and remove the
+  // callee... parameters here). However, this would mean that this routine would have to take
+  // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment),
+  // and run the calling loop in the reverse order. This would also appear to mean making
+  // this code aware of what the interactions are when that initial caller frame was an osr or
+  // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
+  // there is no sense in messing with working code.
+  //
+
+  int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
+  assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
+
+  int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);
+
+  return raw_frame_size;
+}
+
+void AbstractInterpreter::layout_activation(Method* method,
+                                            int tempcount,
+                                            int popframe_extra_args,
+                                            int moncount,
+                                            int caller_actual_parameters,
+                                            int callee_param_count,
+                                            int callee_local_count,
+                                            frame* caller,
+                                            frame* interpreter_frame,
+                                            bool is_top_frame,
+                                            bool is_bottom_frame) {
+  // Set up the following variables:
+  //   - Lmethod
+  //   - Llocals
+  //   - Lmonitors (to the indicated number of monitors)
+  //   - Lesp (to the indicated number of temps)
+  // The frame caller on entry is a description of the caller of the
+  // frame we are about to lay out. We are guaranteed that we will be
+  // able to fill in a new interpreter frame as its callee (i.e. the
+  // stack space is allocated and the amount was determined by an
+  // earlier call to the size_activation() method).  On return, caller
+  // will describe the interpreter frame we just laid out.
+
+  // The skeleton frame must already look like an interpreter frame
+  // even if not fully filled out.
+  assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
+
+  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
+  int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
+  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
+
+  intptr_t* fp = interpreter_frame->fp();
+
+  JavaThread* thread = JavaThread::current();
+  RegisterMap map(thread, false);
+  // More verification that skeleton frame is properly walkable
+  assert(fp == caller->sp(), "fp must match");
+
+  intptr_t* montop     = fp - rounded_vm_local_words;
+
+  // preallocate monitors (cf. __ add_monitor_to_stack)
+  intptr_t* monitors = montop - monitor_size;
+
+  // preallocate stack space
+  intptr_t*  esp = monitors - 1 -
+    (tempcount * Interpreter::stackElementWords) -
+    popframe_extra_args;
+
+  int local_words = method->max_locals() * Interpreter::stackElementWords;
+  NEEDS_CLEANUP;
+  intptr_t* locals;
+  if (caller->is_interpreted_frame()) {
+    // Can force the locals area to end up properly overlapping the top of the expression stack.
+    intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
+    // Note that this computation means we replace size_of_parameters() values from the caller
+    // interpreter frame's expression stack with our argument locals
+    int parm_words  = caller_actual_parameters * Interpreter::stackElementWords;
+    locals = Lesp_ptr + parm_words;
+    int delta = local_words - parm_words;
+    int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
+    *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
+    if (!is_bottom_frame) {
+      // Llast_SP is set below for the current frame to SP (with the
+      // extra space for the callee's locals). Here we adjust
+      // Llast_SP for the caller's frame, removing the extra space
+      // for the current method's locals.
+      *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
+    } else {
+      assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
+    }
+  } else {
+    assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
+    // Don't have Lesp available; lay out locals block in the caller
+    // adjacent to the register window save area.
+    //
+    // Compiled frames do not allocate a varargs area which is why this if
+    // statement is needed.
+    //
+    if (caller->is_compiled_frame()) {
+      locals = fp + frame::register_save_words + local_words - 1;
+    } else {
+      locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
+    }
+    if (!caller->is_entry_frame()) {
+      // Caller wants his own SP back
+      int caller_frame_size = caller->cb()->frame_size();
+      *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
+    }
+  }
+  if (TraceDeoptimization) {
+    if (caller->is_entry_frame()) {
+      // make sure I5_savedSP and the entry frame's notion of saved SP
+      // agree.  This assertion duplicates a check in entry frame code
+      // but catches the failure earlier.
+      assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
+             "would change callers SP");
+    }
+    if (caller->is_entry_frame()) {
+      tty->print("entry ");
+    }
+    if (caller->is_compiled_frame()) {
+      tty->print("compiled ");
+      if (caller->is_deoptimized_frame()) {
+        tty->print("(deopt) ");
+      }
+    }
+    if (caller->is_interpreted_frame()) {
+      tty->print("interpreted ");
+    }
+    tty->print_cr("caller fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(caller->fp()), p2i(caller->sp()));
+    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(caller->sp()), p2i(caller->sp() + 16));
+    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(caller->fp()), p2i(caller->fp() + 16));
+    tty->print_cr("interpreter fp=" INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->fp()), p2i(interpreter_frame->sp()));
+    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->sp()), p2i(interpreter_frame->sp() + 16));
+    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->fp()), p2i(interpreter_frame->fp() + 16));
+    tty->print_cr("Llocals = " INTPTR_FORMAT, p2i(locals));
+    tty->print_cr("Lesp = " INTPTR_FORMAT, p2i(esp));
+    tty->print_cr("Lmonitors = " INTPTR_FORMAT, p2i(monitors));
+  }
+
+  if (method->max_locals() > 0) {
+    assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
+    assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
+    assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
+    assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
+  }
+#ifdef _LP64
+  assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
+#endif
+
+  *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
+  *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
+  *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
+  *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
+  // Llast_SP will be same as SP as there is no adapter space
+  *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
+  *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
+#ifdef FAST_DISPATCH
+  *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
+#endif
+
+
+#ifdef ASSERT
+  BasicObjectLock* mp = (BasicObjectLock*)monitors;
+
+  assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
+  assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
+  assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
+  assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
+  assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
+
+  // check bounds
+  intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
+  intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
+  assert(lo < monitors && montop <= hi, "monitors in bounds");
+  assert(lo <= esp && esp < monitors, "esp in bounds");
+#endif // ASSERT
+}
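The pointer arithmetic in layout_activation builds the frame top-down from fp: the interpreter's VM locals sit directly below fp, then the monitor area, then the expression stack growing toward lower addresses. A toy recomputation of just that ordering (illustrative sizes, stackElementWords == 1 assumed, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int rounded_vm_local_words = 10;  // illustrative
      const int monitor_size           = 4;   // illustrative: one monitor's worth of words
      const int tempcount              = 3;   // expression-stack temps
      const int popframe_extra_args    = 0;

      intptr_t words[64] = {0};                              // pretend stack memory
      intptr_t* fp       = words + 60;                       // stack grows toward lower addresses
      intptr_t* montop   = fp - rounded_vm_local_words;      // VM locals live just below fp
      intptr_t* monitors = montop - monitor_size;            // preallocated monitors
      intptr_t* esp      = monitors - 1 - tempcount - popframe_extra_args;

      assert(esp < monitors && monitors < montop && montop < fp);
      return 0;
    }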
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateInterpreterGenerator.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "prims/methodHandles.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-
-
-
-// Generation of Interpreter
-//
-// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.
-
-
-#define __ _masm->
-
-
-//----------------------------------------------------------------------------------------------------
-
-#ifndef _LP64
-address AbstractInterpreterGenerator::generate_slow_signature_handler() {
-  address entry = __ pc();
-  Argument argv(0, true);
-
-  // We are in the jni transition frame. Save the last_java_frame corresponding to the
-  // outer interpreter frame
-  //
-  __ set_last_Java_frame(FP, noreg);
-  // make sure the interpreter frame we've pushed has a valid return pc
-  __ mov(O7, I7);
-  __ mov(Lmethod, G3_scratch);
-  __ mov(Llocals, G4_scratch);
-  __ save_frame(0);
-  __ mov(G2_thread, L7_thread_cache);
-  __ add(argv.address_in_frame(), O3);
-  __ mov(G2_thread, O0);
-  __ mov(G3_scratch, O1);
-  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
-  __ delayed()->mov(G4_scratch, O2);
-  __ mov(L7_thread_cache, G2_thread);
-  __ reset_last_Java_frame();
-
-  // load the register arguments (the C code packed them as varargs)
-  for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
-      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
-  }
-  __ ret();
-  __ delayed()->
-     restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
-  return entry;
-}
-
-
-#else
-// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
-// O0, O1, O2 etc..
-// Doubles are passed in D0, D2, D4
-// We store the signature of the first 16 arguments in the first argument
-// slot because it will be overwritten prior to calling the native
-// function, with the pointer to the JNIEnv.
-// If LP64 there can be up to 16 floating point arguments in registers
-// or 6 integer registers.
-address AbstractInterpreterGenerator::generate_slow_signature_handler() {
-
-  enum {
-    non_float  = 0,
-    float_sig  = 1,
-    double_sig = 2,
-    sig_mask   = 3
-  };
-
-  address entry = __ pc();
-  Argument argv(0, true);
-
-  // We are in the jni transition frame. Save the last_java_frame corresponding to the
-  // outer interpreter frame
-  //
-  __ set_last_Java_frame(FP, noreg);
-  // make sure the interpreter frame we've pushed has a valid return pc
-  __ mov(O7, I7);
-  __ mov(Lmethod, G3_scratch);
-  __ mov(Llocals, G4_scratch);
-  __ save_frame(0);
-  __ mov(G2_thread, L7_thread_cache);
-  __ add(argv.address_in_frame(), O3);
-  __ mov(G2_thread, O0);
-  __ mov(G3_scratch, O1);
-  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
-  __ delayed()->mov(G4_scratch, O2);
-  __ mov(L7_thread_cache, G2_thread);
-  __ reset_last_Java_frame();
-
-
-  // load the register arguments (the C code packed them as varargs)
-  Address Sig = argv.address_in_frame();        // Argument 0 holds the signature
-  __ ld_ptr( Sig, G3_scratch );                   // Get register argument signature word into G3_scratch
-  __ mov( G3_scratch, G4_scratch);
-  __ srl( G4_scratch, 2, G4_scratch);             // Skip Arg 0
-  Label done;
-  for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
-    Label NonFloatArg;
-    Label LoadFloatArg;
-    Label LoadDoubleArg;
-    Label NextArg;
-    Address a = ldarg.address_in_frame();
-    __ andcc(G4_scratch, sig_mask, G3_scratch);
-    __ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
-    __ delayed()->nop();
-
-    __ cmp(G3_scratch, float_sig );
-    __ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
-    __ delayed()->nop();
-
-    __ cmp(G3_scratch, double_sig );
-    __ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
-    __ delayed()->nop();
-
-    __ bind(NonFloatArg);
-    // There are only 6 integer register arguments!
-    if ( ldarg.is_register() )
-      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
-    else {
-    // Optimization, see if there are any more args and get out prior to checking
-    // all 16 float registers.  My guess is that this is rare.
-    // If is_register is false, then we are done the first six integer args.
-      __ br_null_short(G4_scratch, Assembler::pt, done);
-    }
-    __ ba(NextArg);
-    __ delayed()->srl( G4_scratch, 2, G4_scratch );
-
-    __ bind(LoadFloatArg);
-    __ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
-    __ ba(NextArg);
-    __ delayed()->srl( G4_scratch, 2, G4_scratch );
-
-    __ bind(LoadDoubleArg);
-    __ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
-    __ ba(NextArg);
-    __ delayed()->srl( G4_scratch, 2, G4_scratch );
-
-    __ bind(NextArg);
-
-  }
-
-  __ bind(done);
-  __ ret();
-  __ delayed()->
-     restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
-  return entry;
-}
-#endif
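The LP64 handler above (re-added into templateInterpreterGenerator_sparc.cpp further down) consumes a packed signature word: two bits per argument, using the tag values from the enum at the top of the function, and advancing by a right shift of two bits per argument. A standalone decode sketch under those assumptions (the exact packing order shown here is my reading of the comments, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    // Tags as in the handler's enum; sig_mask selects one 2-bit tag.
    enum { non_float = 0, float_sig = 1, double_sig = 2, sig_mask = 3 };

    // Assumed packing: argument i occupies bits [2*i, 2*i + 1] of the word.
    int tag_of_argument(uint64_t sig_word, int i) {
      // The generated code shifts the word itself right by 2 each iteration instead.
      return int((sig_word >> (2 * i)) & sig_mask);
    }

    int main() {
      // Hypothetical word for arguments tagged (int, float, double).
      uint64_t sig = uint64_t(non_float)
                   | (uint64_t(float_sig)  << 2)
                   | (uint64_t(double_sig) << 4);
      assert(tag_of_argument(sig, 0) == non_float);
      assert(tag_of_argument(sig, 1) == float_sig);
      assert(tag_of_argument(sig, 2) == double_sig);
      return 0;
    }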
-
-void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
-
-  // Generate code to initiate compilation on the counter overflow.
-
-  // InterpreterRuntime::frequency_counter_overflow takes two arguments,
-  // the first indicates if the counter overflow occurs at a backwards branch (NULL bcp)
-  // and the second is only used when the first is true.  We pass zero for both.
-  // The call returns the address of the verified entry point for the method or NULL
-  // if the compilation did not complete (either went background or bailed out).
-  __ set((int)false, O2);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
-  // returns verified_entry_point or NULL
-  // we ignore it in any case
-  __ ba_short(Lcontinue);
-
-}
-
-
-// End of helpers
-
-// Various method entries
-
-// Abstract method entry
-// Attempt to execute abstract method. Throw exception
-//
-address TemplateInterpreterGenerator::generate_abstract_entry(void) {
-  address entry = __ pc();
-  // abstract method entry
-  // throw exception
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
-  // the call_VM checks for exception, so we should never return here.
-  __ should_not_reach_here();
-  return entry;
-
-}
--- a/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,18 @@
 #endif
 #undef FAST_DISPATCH
 
+// Size of interpreter code.  Increase if too small.  Interpreter will
+// fail with a guarantee ("not enough space for interpreter generation")
+// if too small.
+// Run with +PrintInterpreter to get the VM to print out the size.
+// Max size with JVMTI
+#ifdef _LP64
+  // The sethi() instruction generates many more instructions when the shell
+  // stack limit is unlimited, which is why this size is much bigger.
+int TemplateInterpreter::InterpreterCodeSize = 260 * K;
+#else
+int TemplateInterpreter::InterpreterCodeSize = 230 * K;
+#endif
 
 // Generation of Interpreter
 //
@@ -63,6 +75,174 @@
 
 //----------------------------------------------------------------------------------------------------
 
+#ifndef _LP64
+address TemplateInterpreterGenerator::generate_slow_signature_handler() {
+  address entry = __ pc();
+  Argument argv(0, true);
+
+  // We are in the jni transition frame. Save the last_java_frame corresponding to the
+  // outer interpreter frame
+  //
+  __ set_last_Java_frame(FP, noreg);
+  // make sure the interpreter frame we've pushed has a valid return pc
+  __ mov(O7, I7);
+  __ mov(Lmethod, G3_scratch);
+  __ mov(Llocals, G4_scratch);
+  __ save_frame(0);
+  __ mov(G2_thread, L7_thread_cache);
+  __ add(argv.address_in_frame(), O3);
+  __ mov(G2_thread, O0);
+  __ mov(G3_scratch, O1);
+  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
+  __ delayed()->mov(G4_scratch, O2);
+  __ mov(L7_thread_cache, G2_thread);
+  __ reset_last_Java_frame();
+
+  // load the register arguments (the C code packed them as varargs)
+  for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
+      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
+  }
+  __ ret();
+  __ delayed()->
+     restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
+  return entry;
+}
+
+
+#else
+// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
+// O0, O1, O2, etc.
+// Doubles are passed in D0, D2, D4, etc.
+// We store the signature of the first 16 arguments in the first argument
+// slot because it will be overwritten with the pointer to the JNIEnv
+// prior to calling the native function.
+// On LP64 up to 16 floating point arguments can be passed in registers,
+// but only 6 integer arguments.
+address TemplateInterpreterGenerator::generate_slow_signature_handler() {
+
+  enum {
+    non_float  = 0,
+    float_sig  = 1,
+    double_sig = 2,
+    sig_mask   = 3
+  };
+
+  address entry = __ pc();
+  Argument argv(0, true);
+
+  // We are in the jni transition frame. Save the last_java_frame corresponding to the
+  // outer interpreter frame
+  //
+  __ set_last_Java_frame(FP, noreg);
+  // make sure the interpreter frame we've pushed has a valid return pc
+  __ mov(O7, I7);
+  __ mov(Lmethod, G3_scratch);
+  __ mov(Llocals, G4_scratch);
+  __ save_frame(0);
+  __ mov(G2_thread, L7_thread_cache);
+  __ add(argv.address_in_frame(), O3);
+  __ mov(G2_thread, O0);
+  __ mov(G3_scratch, O1);
+  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
+  __ delayed()->mov(G4_scratch, O2);
+  __ mov(L7_thread_cache, G2_thread);
+  __ reset_last_Java_frame();
+
+
+  // load the register arguments (the C code packed them as varargs)
+  Address Sig = argv.address_in_frame();        // Argument 0 holds the signature
+  __ ld_ptr( Sig, G3_scratch );                   // Get register argument signature word into G3_scratch
+  __ mov( G3_scratch, G4_scratch);
+  __ srl( G4_scratch, 2, G4_scratch);             // Skip Arg 0
+  Label done;
+  for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
+    Label NonFloatArg;
+    Label LoadFloatArg;
+    Label LoadDoubleArg;
+    Label NextArg;
+    Address a = ldarg.address_in_frame();
+    __ andcc(G4_scratch, sig_mask, G3_scratch);
+    __ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
+    __ delayed()->nop();
+
+    __ cmp(G3_scratch, float_sig );
+    __ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
+    __ delayed()->nop();
+
+    __ cmp(G3_scratch, double_sig );
+    __ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
+    __ delayed()->nop();
+
+    __ bind(NonFloatArg);
+    // There are only 6 integer register arguments!
+    if ( ldarg.is_register() )
+      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
+    else {
+      // Optimization: see if there are any more args and get out before checking
+      // all 16 float registers.  My guess is that this is rare.
+      // If is_register is false, then we are done with the first six integer args.
+      __ br_null_short(G4_scratch, Assembler::pt, done);
+    }
+    __ ba(NextArg);
+    __ delayed()->srl( G4_scratch, 2, G4_scratch );
+
+    __ bind(LoadFloatArg);
+    __ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
+    __ ba(NextArg);
+    __ delayed()->srl( G4_scratch, 2, G4_scratch );
+
+    __ bind(LoadDoubleArg);
+    __ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
+    __ ba(NextArg);
+    __ delayed()->srl( G4_scratch, 2, G4_scratch );
+
+    __ bind(NextArg);
+
+  }
+
+  __ bind(done);
+  __ ret();
+  __ delayed()->
+     restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
+  return entry;
+}
+#endif
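For readers unfamiliar with the tag word that the LP64 loop above decodes: argument slot 0 holds a packed signature with two bits per outgoing argument (non_float, float_sig or double_sig), and the generated code masks with sig_mask and shifts right by two as it walks the arguments. Below is a minimal host-side model of that encoding, inferred from the decode loop; the helper names (pack_sig_word, next_sig) are made up for illustration and are not part of HotSpot.

#include <cstdint>
#include <vector>

enum SigTag { non_float = 0, float_sig = 1, double_sig = 2, sig_mask = 3 };

// Pack one 2-bit tag per outgoing argument, least-significant bits first,
// modelling the word the runtime leaves in argument slot 0 (sketch only).
static uint64_t pack_sig_word(const std::vector<SigTag>& args) {
  uint64_t word = 0;
  for (size_t i = 0; i < args.size(); i++) {
    word |= (uint64_t)(args[i] & sig_mask) << (2 * i);
  }
  return word;
}

// Decoding mirrors the assembly loop: and with sig_mask, then shift right by 2.
static SigTag next_sig(uint64_t& word) {
  SigTag t = (SigTag)(word & sig_mask);
  word >>= 2;
  return t;
}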
+
+void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
+
+  // Generate code to initiate compilation on the counter overflow.
+
+  // InterpreterRuntime::frequency_counter_overflow takes two arguments,
+  // the first indicates if the counter overflow occurs at a backwards branch (NULL bcp)
+  // and the second is only used when the first is true.  We pass zero for both.
+  // The call returns the address of the verified entry point for the method or NULL
+  // if the compilation did not complete (either went background or bailed out).
+  __ set((int)false, O2);
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
+  // returns verified_entry_point or NULL
+  // we ignore it in any case
+  __ ba_short(Lcontinue);
+
+}
+
+
+// End of helpers
+
+// Various method entries
+
+// Abstract method entry
+// Attempt to execute abstract method. Throw exception
+//
+address TemplateInterpreterGenerator::generate_abstract_entry(void) {
+  address entry = __ pc();
+  // abstract method entry
+  // throw exception
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+  // the call_VM checks for exception, so we should never return here.
+  __ should_not_reach_here();
+  return entry;
+
+}
 
 void TemplateInterpreterGenerator::save_native_result(void) {
   // result potentially in O0/O1: save it across calls
@@ -911,6 +1091,31 @@
 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
   return NULL;
 }
+
+// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
+// generate exception
+void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
+  // Quick & dirty stack overflow checking: bang the stack & handle trap.
+  // Note that we do the banging after the frame is set up, since the exception
+  // handling code expects to find a valid interpreter frame on the stack.
+  // Doing the banging earlier fails if the caller frame is not an interpreter
+  // frame.
+  // (Also, the exception throwing code expects to unlock any synchronized
+  // method receiver, so do the banging after locking the receiver.)
+
+  // Bang each page in the shadow zone. We can't assume it's been done for
+  // an interpreter frame with greater than a page of locals, so each page
+  // needs to be checked.  Only true for non-native.
+  if (UseStackBanging) {
+    const int page_size = os::vm_page_size();
+    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
+    const int start_page = native_call ? n_shadow_pages : 1;
+    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
+      __ bang_stack_with_offset(pages*page_size);
+    }
+  }
+}
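As an aside on what the generated banging amounts to: each iteration emits one store at a page-sized offset below the stack pointer, so every page of the shadow zone is touched (and any guard trap taken) while a walkable interpreter frame is on the stack; native frames only bang the far end. A rough C++ rendering of the idea follows. The page size and shadow-page count are assumptions rather than the real os:: / JavaThread:: values, and the VM emits this as generated code instead of running a C++ loop.

#include <cstddef>

// Illustrative only: touch one word on each page of an assumed shadow zone
// below 'sp'.  In the VM this loop is emitted via bang_stack_with_offset();
// the constants below are assumptions, not the real platform values.
static void bang_shadow_pages_model(volatile char* sp, bool native_call) {
  const int page_size      = 4096;  // assumed os::vm_page_size()
  const int n_shadow_pages = 20;    // assumed shadow zone size / page_size
  const int start_page     = native_call ? n_shadow_pages : 1;
  for (int pages = start_page; pages <= n_shadow_pages; pages++) {
    sp[-(ptrdiff_t)pages * page_size] = 0;  // one write per page is enough to fault the guard page
  }
}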
+
 //
 // Interpreter stub for calling a native method. (asm interpreter)
 // This sets up a somewhat different looking stack for calling the native method
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,316 +0,0 @@
-/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "interpreter/interpreter.hpp"
-#include "oops/constMethod.hpp"
-#include "oops/method.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/synchronizer.hpp"
-#include "utilities/macros.hpp"
-
-// Size of interpreter code.  Increase if too small.  Interpreter will
-// fail with a guarantee ("not enough space for interpreter generation");
-// if too small.
-// Run with +PrintInterpreter to get the VM to print out the size.
-// Max size with JVMTI
-#ifdef _LP64
-  // The sethi() instruction generates lots more instructions when shell
-  // stack limit is unlimited, so that's why this is much bigger.
-int TemplateInterpreter::InterpreterCodeSize = 260 * K;
-#else
-int TemplateInterpreter::InterpreterCodeSize = 230 * K;
-#endif
-
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
-  return i;
-}
-
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  // No special entry points that preclude compilation
-  return true;
-}
-
-static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
-
-  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
-  // expression stack, the callee will have callee_extra_locals (so we can account for
-  // frame extension) and monitor_size for monitors. Basically we need to calculate
-  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
-  //
-  //
-  // The big complicating thing here is that we must ensure that the stack stays properly
-  // aligned. This would be even uglier if monitor size wasn't modulo what the stack
-  // needs to be aligned for). We are given that the sp (fp) is already aligned by
-  // the caller so we must ensure that it is properly aligned for our callee.
-  //
-  const int rounded_vm_local_words =
-       round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
-  // callee_locals and max_stack are counts, not the size in frame.
-  const int locals_size =
-       round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
-  const int max_stack_words = max_stack * Interpreter::stackElementWords;
-  return (round_to((max_stack_words
-                   + rounded_vm_local_words
-                   + frame::memory_parameter_word_sp_offset), WordsPerLong)
-                   // already rounded
-                   + locals_size + monitor_size);
-}
-
-// How much stack a method top interpreter activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-
-  // See call_stub code
-  int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
-                                 WordsPerLong);    // 7 + register save area
-
-  // Save space for one monitor to get into the interpreted method in case
-  // the method is synchronized
-  int monitor_size    = method->is_synchronized() ?
-                                1*frame::interpreter_frame_monitor_size() : 0;
-  return size_activation_helper(method->max_locals(), method->max_stack(),
-                                monitor_size) + call_stub_size;
-}
-
-int AbstractInterpreter::size_activation(int max_stack,
-                                         int temps,
-                                         int extra_args,
-                                         int monitors,
-                                         int callee_params,
-                                         int callee_locals,
-                                         bool is_top_frame) {
-  // Note: This calculation must exactly parallel the frame setup
-  // in TemplateInterpreterGenerator::generate_fixed_frame.
-
-  int monitor_size           = monitors * frame::interpreter_frame_monitor_size();
-
-  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
-
-  //
-  // Note: if you look closely this appears to be doing something much different
-  // than generate_fixed_frame. What is happening is this. On sparc we have to do
-  // this dance with interpreter_sp_adjustment because the window save area would
-  // appear just below the bottom (tos) of the caller's java expression stack. Because
-  // the interpreter want to have the locals completely contiguous generate_fixed_frame
-  // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
-  // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
-  // In this code the opposite occurs the caller adjusts it's own stack base on the callee.
-  // This is mostly ok but it does cause a problem when we get to the initial frame (the oldest)
-  // because the oldest frame would have adjust its callers frame and yet that frame
-  // already exists and isn't part of this array of frames we are unpacking. So at first
-  // glance this would seem to mess up that frame. However Deoptimization::fetch_unroll_info_helper()
-  // will after it calculates all of the frame's on_stack_size()'s will then figure out the
-  // amount to adjust the caller of the initial (oldest) frame and the calculation will all
-  // add up. It does seem like it simpler to account for the adjustment here (and remove the
-  // callee... parameters here). However this would mean that this routine would have to take
-  // the caller frame as input so we could adjust its sp (and set it's interpreter_sp_adjustment)
-  // and run the calling loop in the reverse order. This would also would appear to mean making
-  // this code aware of what the interactions are when that initial caller fram was an osr or
-  // other adapter frame. deoptimization is complicated enough and  hard enough to debug that
-  // there is no sense in messing working code.
-  //
-
-  int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
-  assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
-
-  int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);
-
-  return raw_frame_size;
-}
-
-void AbstractInterpreter::layout_activation(Method* method,
-                                            int tempcount,
-                                            int popframe_extra_args,
-                                            int moncount,
-                                            int caller_actual_parameters,
-                                            int callee_param_count,
-                                            int callee_local_count,
-                                            frame* caller,
-                                            frame* interpreter_frame,
-                                            bool is_top_frame,
-                                            bool is_bottom_frame) {
-  // Set up the following variables:
-  //   - Lmethod
-  //   - Llocals
-  //   - Lmonitors (to the indicated number of monitors)
-  //   - Lesp (to the indicated number of temps)
-  // The frame caller on entry is a description of the caller of the
-  // frame we are about to layout. We are guaranteed that we will be
-  // able to fill in a new interpreter frame as its callee (i.e. the
-  // stack space is allocated and the amount was determined by an
-  // earlier call to the size_activation() method).  On return caller
-  // while describe the interpreter frame we just layed out.
-
-  // The skeleton frame must already look like an interpreter frame
-  // even if not fully filled out.
-  assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
-
-  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
-  int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
-  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
-
-  intptr_t* fp = interpreter_frame->fp();
-
-  JavaThread* thread = JavaThread::current();
-  RegisterMap map(thread, false);
-  // More verification that skeleton frame is properly walkable
-  assert(fp == caller->sp(), "fp must match");
-
-  intptr_t* montop     = fp - rounded_vm_local_words;
-
-  // preallocate monitors (cf. __ add_monitor_to_stack)
-  intptr_t* monitors = montop - monitor_size;
-
-  // preallocate stack space
-  intptr_t*  esp = monitors - 1 -
-    (tempcount * Interpreter::stackElementWords) -
-    popframe_extra_args;
-
-  int local_words = method->max_locals() * Interpreter::stackElementWords;
-  NEEDS_CLEANUP;
-  intptr_t* locals;
-  if (caller->is_interpreted_frame()) {
-    // Can force the locals area to end up properly overlapping the top of the expression stack.
-    intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
-    // Note that this computation means we replace size_of_parameters() values from the caller
-    // interpreter frame's expression stack with our argument locals
-    int parm_words  = caller_actual_parameters * Interpreter::stackElementWords;
-    locals = Lesp_ptr + parm_words;
-    int delta = local_words - parm_words;
-    int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
-    *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
-    if (!is_bottom_frame) {
-      // Llast_SP is set below for the current frame to SP (with the
-      // extra space for the callee's locals). Here we adjust
-      // Llast_SP for the caller's frame, removing the extra space
-      // for the current method's locals.
-      *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
-    } else {
-      assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
-    }
-  } else {
-    assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
-    // Don't have Lesp available; lay out locals block in the caller
-    // adjacent to the register window save area.
-    //
-    // Compiled frames do not allocate a varargs area which is why this if
-    // statement is needed.
-    //
-    if (caller->is_compiled_frame()) {
-      locals = fp + frame::register_save_words + local_words - 1;
-    } else {
-      locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
-    }
-    if (!caller->is_entry_frame()) {
-      // Caller wants his own SP back
-      int caller_frame_size = caller->cb()->frame_size();
-      *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
-    }
-  }
-  if (TraceDeoptimization) {
-    if (caller->is_entry_frame()) {
-      // make sure I5_savedSP and the entry frames notion of saved SP
-      // agree.  This assertion duplicate a check in entry frame code
-      // but catches the failure earlier.
-      assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
-             "would change callers SP");
-    }
-    if (caller->is_entry_frame()) {
-      tty->print("entry ");
-    }
-    if (caller->is_compiled_frame()) {
-      tty->print("compiled ");
-      if (caller->is_deoptimized_frame()) {
-        tty->print("(deopt) ");
-      }
-    }
-    if (caller->is_interpreted_frame()) {
-      tty->print("interpreted ");
-    }
-    tty->print_cr("caller fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(caller->fp()), p2i(caller->sp()));
-    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(caller->sp()), p2i(caller->sp() + 16));
-    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(caller->fp()), p2i(caller->fp() + 16));
-    tty->print_cr("interpreter fp=" INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->fp()), p2i(interpreter_frame->sp()));
-    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->sp()), p2i(interpreter_frame->sp() + 16));
-    tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->fp()), p2i(interpreter_frame->fp() + 16));
-    tty->print_cr("Llocals = " INTPTR_FORMAT, p2i(locals));
-    tty->print_cr("Lesp = " INTPTR_FORMAT, p2i(esp));
-    tty->print_cr("Lmonitors = " INTPTR_FORMAT, p2i(monitors));
-  }
-
-  if (method->max_locals() > 0) {
-    assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
-    assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
-    assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
-    assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
-  }
-#ifdef _LP64
-  assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
-#endif
-
-  *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
-  *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
-  *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
-  *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
-  // Llast_SP will be same as SP as there is no adapter space
-  *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
-  *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
-#ifdef FAST_DISPATCH
-  *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
-#endif
-
-
-#ifdef ASSERT
-  BasicObjectLock* mp = (BasicObjectLock*)monitors;
-
-  assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
-  assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
-  assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
-  assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
-  assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
-
-  // check bounds
-  intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
-  intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
-  assert(lo < monitors && montop <= hi, "monitors in bounds");
-  assert(lo <= esp && esp < monitors, "esp in bounds");
-#endif // ASSERT
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/abstractInterpreter_x86.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "ci/ciMethod.hpp"
+#include "interpreter/interpreter.hpp"
+#include "runtime/frame.inline.hpp"
+
+
+// asm based interpreter deoptimization helpers
+int AbstractInterpreter::size_activation(int max_stack,
+                                         int temps,
+                                         int extra_args,
+                                         int monitors,
+                                         int callee_params,
+                                         int callee_locals,
+                                         bool is_top_frame) {
+  // Note: This calculation must exactly parallel the frame setup
+  // in TemplateInterpreterGenerator::generate_fixed_frame.
+
+  // fixed size of an interpreter frame:
+  int overhead = frame::sender_sp_offset -
+                 frame::interpreter_frame_initial_sp_offset;
+  // Our locals were accounted for by the caller (or last_frame_adjust
+  // on the transition).  Since the callee parameters already account
+  // for the callee's params, we only need to account for the extra
+  // locals.
+  int size = overhead +
+         (callee_locals - callee_params)*Interpreter::stackElementWords +
+         monitors * frame::interpreter_frame_monitor_size() +
+         temps * Interpreter::stackElementWords + extra_args;
+
+  return size;
+}
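To make the units in this formula concrete, here is the same calculation as a standalone function with one worked example. Every constant in it is hypothetical and stands in for the real frame offsets; it is a sketch, not the VM's code.

// Hypothetical rendering of size_activation(); the values in the example
// below are made up purely to show how the terms combine.
static int size_activation_words(int overhead_words,      // sender_sp_offset - initial_sp_offset
                                 int callee_locals, int callee_params,
                                 int monitors, int monitor_words,
                                 int temps, int extra_args,
                                 int words_per_slot) {     // Interpreter::stackElementWords
  return overhead_words
       + (callee_locals - callee_params) * words_per_slot
       + monitors * monitor_words
       + temps * words_per_slot
       + extra_args;
}
// Example: overhead 6, 3 extra locals, 1 monitor of 2 words, 4 expression-stack
// temps, no extra args, 1 word per slot  ->  6 + 3 + 2 + 4 + 0 = 15 words.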
+
+void AbstractInterpreter::layout_activation(Method* method,
+                                            int tempcount,
+                                            int popframe_extra_args,
+                                            int moncount,
+                                            int caller_actual_parameters,
+                                            int callee_param_count,
+                                            int callee_locals,
+                                            frame* caller,
+                                            frame* interpreter_frame,
+                                            bool is_top_frame,
+                                            bool is_bottom_frame) {
+  // The frame interpreter_frame is guaranteed to be the right size,
+  // as determined by a previous call to the size_activation() method.
+  // It is also guaranteed to be walkable even though it is in a
+  // skeletal state.
+
+  int max_locals = method->max_locals() * Interpreter::stackElementWords;
+  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
+    Interpreter::stackElementWords;
+
+#ifdef ASSERT
+  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
+#endif
+
+  interpreter_frame->interpreter_frame_set_method(method);
+  // NOTE the difference between sender_sp and
+  // interpreter_frame_sender_sp: interpreter_frame_sender_sp is
+  // the original sp of the caller (the unextended_sp), while
+  // sender_sp is fp+8/16 (32bit/64bit).  XXX
+  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
+
+#ifdef ASSERT
+  if (caller->is_interpreted_frame()) {
+    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
+  }
+#endif
+
+  interpreter_frame->interpreter_frame_set_locals(locals);
+  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
+  BasicObjectLock* monbot = montop - moncount;
+  interpreter_frame->interpreter_frame_set_monitor_end(monbot);
+
+  // Set last_sp
+  intptr_t*  esp = (intptr_t*) monbot -
+    tempcount*Interpreter::stackElementWords -
+    popframe_extra_args;
+  interpreter_frame->interpreter_frame_set_last_sp(esp);
+
+  // All frames but the initial (oldest) interpreter frame we fill in have
+  // a value for sender_sp that allows walking the stack but isn't
+  // truly correct. Correct the value here.
+  if (extra_locals != 0 &&
+      interpreter_frame->sender_sp() ==
+      interpreter_frame->interpreter_frame_sender_sp()) {
+    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
+                                                       extra_locals);
+  }
+  *interpreter_frame->interpreter_frame_cache_addr() =
+    method->constants()->cache();
+}
+
+#ifndef _LP64
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : // fall through
+    case T_LONG   : // fall through
+    case T_VOID   : i = 4; break;
+    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
+    case T_DOUBLE : i = 6; break;
+    case T_OBJECT : // fall through
+    case T_ARRAY  : i = 7; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
+}
+#else
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
+         "index out of bounds");
+  return i;
+}
+#endif // _LP64
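For context, a sketch of how such an index is typically consumed (not the actual HotSpot declarations): as the assert suggests, it serves as a subscript into a table of at most number_of_result_handlers native result handlers, one per distinct return shape, which is why T_OBJECT and T_ARRAY deliberately share a slot: both return an oop and use the same handler. The table and function names below are hypothetical.

typedef unsigned char* address;                       // stand-in for HotSpot's address
static const int number_of_result_handlers = 10;      // consistent with the assert bound above
static address native_result_handlers[number_of_result_handlers];  // filled at interpreter init (hypothetical name)

// Dispatch to the per-type handler that converts the raw native return
// value into the interpreter's TOS state (sketch only).
static address result_handler_for(int basic_type_index) {
  return native_result_handlers[basic_type_index];
}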
+
+// These should never be compiled since the interpreter will prefer
+// the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  switch (method_kind(m)) {
+    case Interpreter::java_lang_math_sin     : // fall thru
+    case Interpreter::java_lang_math_cos     : // fall thru
+    case Interpreter::java_lang_math_tan     : // fall thru
+    case Interpreter::java_lang_math_abs     : // fall thru
+    case Interpreter::java_lang_math_log     : // fall thru
+    case Interpreter::java_lang_math_log10   : // fall thru
+    case Interpreter::java_lang_math_sqrt    : // fall thru
+    case Interpreter::java_lang_math_pow     : // fall thru
+    case Interpreter::java_lang_math_exp     :
+      return false;
+    default:
+      return true;
+  }
+}
+
+// How much stack a method activation needs in words.
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
+  const int entry_size = frame::interpreter_frame_monitor_size();
+
+  // Total overhead size: entry_size + (saved rbp through expr stack
+  // bottom).  Be sure to change this if you add/subtract anything
+  // to/from the overhead area.
+  const int overhead_size =
+    -(frame::interpreter_frame_initial_sp_offset) + entry_size;
+
+#ifndef _LP64
+  const int stub_code = 4;  // see generate_call_stub
+#else
+  const int stub_code = frame::entry_frame_after_call_words;
+#endif
+
+  const int method_stack = (method->max_locals() + method->max_stack()) *
+                           Interpreter::stackElementWords;
+  return (overhead_size + method_stack + stub_code);
+}
--- a/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateInterpreterGenerator.hpp"
-
-#define __ _masm->
-
-// Abstract method entry
-// Attempt to execute abstract method. Throw exception
-address TemplateInterpreterGenerator::generate_abstract_entry(void) {
-
-  address entry_point = __ pc();
-
-  // abstract method entry
-
-  //  pop return address, reset last_sp to NULL
-  __ empty_expression_stack();
-  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
-  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
-
-  // throw exception
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
-  // the call_VM checks for exception, so we should never return here.
-  __ should_not_reach_here();
-
-  return entry_point;
-}
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,185 +0,0 @@
-/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateInterpreterGenerator.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "prims/methodHandles.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-
-#define __ _masm->
-
-//------------------------------------------------------------------------------------------------------------------------
-
-address AbstractInterpreterGenerator::generate_slow_signature_handler() {
-  address entry = __ pc();
-  // rbx,: method
-  // rcx: temporary
-  // rdi: pointer to locals
-  // rsp: end of copied parameters area
-  __ mov(rcx, rsp);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx);
-  __ ret(0);
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
-
-  // rbx,: Method*
-  // rcx: scratrch
-  // rsi: sender sp
-
-  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
-
-  address entry_point = __ pc();
-
-  // These don't need a safepoint check because they aren't virtually
-  // callable. We won't enter these intrinsics from compiled code.
-  // If in the future we added an intrinsic which was virtually callable
-  // we'd have to worry about how to safepoint so that this code is used.
-
-  // mathematical functions inlined by compiler
-  // (interpreter must provide identical implementation
-  // in order to avoid monotonicity bugs when switching
-  // from interpreter to compiler in the middle of some
-  // computation)
-  //
-  // stack: [ ret adr ] <-- rsp
-  //        [ lo(arg) ]
-  //        [ hi(arg) ]
-  //
-
-  // Note: For JDK 1.2 StrictMath doesn't exist and Math.sin/cos/sqrt are
-  //       native methods. Interpreter::method_kind(...) does a check for
-  //       native methods first before checking for intrinsic methods and
-  //       thus will never select this entry point. Make sure it is not
-  //       called accidentally since the SharedRuntime entry points will
-  //       not work for JDK 1.2.
-  //
-  // We no longer need to check for JDK 1.2 since it's EOL'ed.
-  // The following check existed in pre 1.6 implementation,
-  //    if (Universe::is_jdk12x_version()) {
-  //      __ should_not_reach_here();
-  //    }
-  // Universe::is_jdk12x_version() always returns false since
-  // the JDK version is not yet determined when this method is called.
-  // This method is called during interpreter_init() whereas
-  // JDK version is only determined when universe2_init() is called.
-
-  // Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are
-  //       java methods.  Interpreter::method_kind(...) will select
-  //       this entry point for the corresponding methods in JDK 1.3.
-  // get argument
-  __ fld_d(Address(rsp, 1*wordSize));
-  switch (kind) {
-    case Interpreter::java_lang_math_sin :
-        __ trigfunc('s');
-        break;
-    case Interpreter::java_lang_math_cos :
-        __ trigfunc('c');
-        break;
-    case Interpreter::java_lang_math_tan :
-        __ trigfunc('t');
-        break;
-    case Interpreter::java_lang_math_sqrt:
-        __ fsqrt();
-        break;
-    case Interpreter::java_lang_math_abs:
-        __ fabs();
-        break;
-    case Interpreter::java_lang_math_log:
-        __ subptr(rsp, 2 * wordSize);
-        __ fstp_d(Address(rsp, 0));
-        if (VM_Version::supports_sse2()) {
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
-        }
-        else {
-          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dlog)));
-        }
-        __ addptr(rsp, 2 * wordSize);
-        break;
-    case Interpreter::java_lang_math_log10:
-        __ flog10();
-        // Store to stack to convert 80bit precision back to 64bits
-        __ push_fTOS();
-        __ pop_fTOS();
-        break;
-    case Interpreter::java_lang_math_pow:
-      __ fld_d(Address(rsp, 3*wordSize)); // second argument
-      __ pow_with_fallback(0);
-      // Store to stack to convert 80bit precision back to 64bits
-      __ push_fTOS();
-      __ pop_fTOS();
-      break;
-    case Interpreter::java_lang_math_exp:
-      __ subptr(rsp, 2*wordSize);
-      __ fstp_d(Address(rsp, 0));
-      if (VM_Version::supports_sse2()) {
-        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
-      } else {
-        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dexp)));
-      }
-      __ addptr(rsp, 2*wordSize);
-    break;
-    default                              :
-        ShouldNotReachHere();
-  }
-
-  // return double result in xmm0 for interpreter and compilers.
-  if (UseSSE >= 2) {
-    __ subptr(rsp, 2*wordSize);
-    __ fstp_d(Address(rsp, 0));
-    __ movdbl(xmm0, Address(rsp, 0));
-    __ addptr(rsp, 2*wordSize);
-  }
-
-  // done, result in FPU ST(0) or XMM0
-  __ pop(rdi);                               // get return address
-  __ mov(rsp, rsi);                          // set sp to sender sp
-  __ jmp(rdi);
-
-  return entry_point;
-}
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,299 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateInterpreterGenerator.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "prims/methodHandles.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-
-#define __ _masm->
-
-#ifdef _WIN64
-address AbstractInterpreterGenerator::generate_slow_signature_handler() {
-  address entry = __ pc();
-
-  // rbx: method
-  // r14: pointer to locals
-  // c_rarg3: first stack arg - wordSize
-  __ mov(c_rarg3, rsp);
-  // adjust rsp
-  __ subptr(rsp, 4 * wordSize);
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::slow_signature_handler),
-             rbx, r14, c_rarg3);
-
-  // rax: result handler
-
-  // Stack layout:
-  // rsp: 3 integer or float args (if static first is unused)
-  //      1 float/double identifiers
-  //        return address
-  //        stack args
-  //        garbage
-  //        expression stack bottom
-  //        bcp (NULL)
-  //        ...
-
-  // Do FP first so we can use c_rarg3 as temp
-  __ movl(c_rarg3, Address(rsp, 3 * wordSize)); // float/double identifiers
-
-  for ( int i= 0; i < Argument::n_int_register_parameters_c-1; i++ ) {
-    XMMRegister floatreg = as_XMMRegister(i+1);
-    Label isfloatordouble, isdouble, next;
-
-    __ testl(c_rarg3, 1 << (i*2));      // Float or Double?
-    __ jcc(Assembler::notZero, isfloatordouble);
-
-    // Do Int register here
-    switch ( i ) {
-      case 0:
-        __ movl(rscratch1, Address(rbx, Method::access_flags_offset()));
-        __ testl(rscratch1, JVM_ACC_STATIC);
-        __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
-        break;
-      case 1:
-        __ movptr(c_rarg2, Address(rsp, wordSize));
-        break;
-      case 2:
-        __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
-        break;
-      default:
-        break;
-    }
-
-    __ jmp (next);
-
-    __ bind(isfloatordouble);
-    __ testl(c_rarg3, 1 << ((i*2)+1));     // Double?
-    __ jcc(Assembler::notZero, isdouble);
-
-// Do Float Here
-    __ movflt(floatreg, Address(rsp, i * wordSize));
-    __ jmp(next);
-
-// Do Double here
-    __ bind(isdouble);
-    __ movdbl(floatreg, Address(rsp, i * wordSize));
-
-    __ bind(next);
-  }
-
-
-  // restore rsp
-  __ addptr(rsp, 4 * wordSize);
-
-  __ ret(0);
-
-  return entry;
-}
-#else
-address AbstractInterpreterGenerator::generate_slow_signature_handler() {
-  address entry = __ pc();
-
-  // rbx: method
-  // r14: pointer to locals
-  // c_rarg3: first stack arg - wordSize
-  __ mov(c_rarg3, rsp);
-  // adjust rsp
-  __ subptr(rsp, 14 * wordSize);
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::slow_signature_handler),
-             rbx, r14, c_rarg3);
-
-  // rax: result handler
-
-  // Stack layout:
-  // rsp: 5 integer args (if static first is unused)
-  //      1 float/double identifiers
-  //      8 double args
-  //        return address
-  //        stack args
-  //        garbage
-  //        expression stack bottom
-  //        bcp (NULL)
-  //        ...
-
-  // Do FP first so we can use c_rarg3 as temp
-  __ movl(c_rarg3, Address(rsp, 5 * wordSize)); // float/double identifiers
-
-  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
-    const XMMRegister r = as_XMMRegister(i);
-
-    Label d, done;
-
-    __ testl(c_rarg3, 1 << i);
-    __ jcc(Assembler::notZero, d);
-    __ movflt(r, Address(rsp, (6 + i) * wordSize));
-    __ jmp(done);
-    __ bind(d);
-    __ movdbl(r, Address(rsp, (6 + i) * wordSize));
-    __ bind(done);
-  }
-
-  // Now handle integrals.  Only do c_rarg1 if not static.
-  __ movl(c_rarg3, Address(rbx, Method::access_flags_offset()));
-  __ testl(c_rarg3, JVM_ACC_STATIC);
-  __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
-
-  __ movptr(c_rarg2, Address(rsp, wordSize));
-  __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
-  __ movptr(c_rarg4, Address(rsp, 3 * wordSize));
-  __ movptr(c_rarg5, Address(rsp, 4 * wordSize));
-
-  // restore rsp
-  __ addptr(rsp, 14 * wordSize);
-
-  __ ret(0);
-
-  return entry;
-}
-#endif
-
-
-//
-// Various method entries
-//
-
-address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
-
-  // rbx,: Method*
-  // rcx: scratrch
-  // r13: sender sp
-
-  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
-
-  address entry_point = __ pc();
-
-  // These don't need a safepoint check because they aren't virtually
-  // callable. We won't enter these intrinsics from compiled code.
-  // If in the future we added an intrinsic which was virtually callable
-  // we'd have to worry about how to safepoint so that this code is used.
-
-  // mathematical functions inlined by compiler
-  // (interpreter must provide identical implementation
-  // in order to avoid monotonicity bugs when switching
-  // from interpreter to compiler in the middle of some
-  // computation)
-  //
-  // stack: [ ret adr ] <-- rsp
-  //        [ lo(arg) ]
-  //        [ hi(arg) ]
-  //
-
-  // Note: For JDK 1.2 StrictMath doesn't exist and Math.sin/cos/sqrt are
-  //       native methods. Interpreter::method_kind(...) does a check for
-  //       native methods first before checking for intrinsic methods and
-  //       thus will never select this entry point. Make sure it is not
-  //       called accidentally since the SharedRuntime entry points will
-  //       not work for JDK 1.2.
-  //
-  // We no longer need to check for JDK 1.2 since it's EOL'ed.
-  // The following check existed in pre 1.6 implementation,
-  //    if (Universe::is_jdk12x_version()) {
-  //      __ should_not_reach_here();
-  //    }
-  // Universe::is_jdk12x_version() always returns false since
-  // the JDK version is not yet determined when this method is called.
-  // This method is called during interpreter_init() whereas
-  // JDK version is only determined when universe2_init() is called.
-
-  // Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are
-  //       java methods.  Interpreter::method_kind(...) will select
-  //       this entry point for the corresponding methods in JDK 1.3.
-  // get argument
-
-  if (kind == Interpreter::java_lang_math_sqrt) {
-    __ sqrtsd(xmm0, Address(rsp, wordSize));
-  } else if (kind == Interpreter::java_lang_math_exp) {
-    __ movdbl(xmm0, Address(rsp, wordSize));
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
-  } else if (kind == Interpreter::java_lang_math_log) {
-    __ movdbl(xmm0, Address(rsp, wordSize));
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
-  } else {
-    __ fld_d(Address(rsp, wordSize));
-    switch (kind) {
-      case Interpreter::java_lang_math_sin :
-          __ trigfunc('s');
-          break;
-      case Interpreter::java_lang_math_cos :
-          __ trigfunc('c');
-          break;
-      case Interpreter::java_lang_math_tan :
-          __ trigfunc('t');
-          break;
-      case Interpreter::java_lang_math_abs:
-          __ fabs();
-          break;
-      case Interpreter::java_lang_math_log10:
-          __ flog10();
-          break;
-      case Interpreter::java_lang_math_pow:
-          __ fld_d(Address(rsp, 3*wordSize)); // second argument (one
-                                              // empty stack slot)
-          __ pow_with_fallback(0);
-          break;
-      default                              :
-          ShouldNotReachHere();
-    }
-
-    // return double result in xmm0 for interpreter and compilers.
-    __ subptr(rsp, 2*wordSize);
-    // Round to 64bit precision
-    __ fstp_d(Address(rsp, 0));
-    __ movdbl(xmm0, Address(rsp, 0));
-    __ addptr(rsp, 2*wordSize);
-  }
-
-
-  __ pop(rax);
-  __ mov(rsp, r13);
-  __ jmp(rax);
-
-  return entry_point;
-}
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,17 @@
 
 #define __ _masm->
 
+// Size of interpreter code.  Increase if too small.  Interpreter will
+// fail with a guarantee ("not enough space for interpreter generation")
+// if too small.
+// Run with +PrintInterpreter to get the VM to print out the size.
+// Max size with JVMTI
+#ifdef AMD64
+int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;
+#else
+int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
+#endif // AMD64
+
 // Global Register Names
 static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
 static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
@@ -57,6 +68,7 @@
 const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
 
+
 //-----------------------------------------------------------------------------
 
 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
@@ -778,6 +790,30 @@
   return NULL;
 }
 
+// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
+// generate exception.  Windows might need this to map the shadow pages though.
+void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
+  // Quick & dirty stack overflow checking: bang the stack & handle trap.
+  // Note that we do the banging after the frame is set up, since the exception
+  // handling code expects to find a valid interpreter frame on the stack.
+  // Doing the banging earlier fails if the caller frame is not an interpreter
+  // frame.
+  // (Also, the exception throwing code expects to unlock any synchronized
+  // method receiver, so do the banging after locking the receiver.)
+
+  // Bang each page in the shadow zone. We can't assume it's been done for
+  // an interpreter frame with greater than a page of locals, so each page
+  // needs to be checked.  Only true for non-native.
+  if (UseStackBanging) {
+    const int page_size = os::vm_page_size();
+    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
+    const int start_page = native_call ? n_shadow_pages : 1;
+    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
+      __ bang_stack_with_offset(pages*page_size);
+    }
+  }
+}
+
 // Interpreter stub for calling a native method. (asm interpreter)
 // This sets up a somewhat different looking stack for calling the
 // native method than the typical interpreter frame setup.
@@ -1304,6 +1340,27 @@
   return entry_point;
 }
 
+// Abstract method entry
+// Attempt to execute abstract method. Throw exception
+address TemplateInterpreterGenerator::generate_abstract_entry(void) {
+
+  address entry_point = __ pc();
+
+  // abstract method entry
+
+  //  pop return address, reset last_sp to NULL
+  __ empty_expression_stack();
+  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
+  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
+
+  // throw exception
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+  // the call_VM checks for exception, so we should never return here.
+  __ should_not_reach_here();
+
+  return entry_point;
+}
+
 //
 // Generic interpreted method entry to (asm) interpreter
 //
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_32.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_32.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,12 +26,26 @@
 #include "asm/macroAssembler.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/templateInterpreterGenerator.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/sharedRuntime.hpp"
 
 #define __ _masm->
 
 
+address TemplateInterpreterGenerator::generate_slow_signature_handler() {
+  address entry = __ pc();
+  // rbx,: method
+  // rcx: temporary
+  // rdi: pointer to locals
+  // rsp: end of copied parameters area
+  __ mov(rcx, rsp);
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx);
+  __ ret(0);
+  return entry;
+}
+
 /**
  * Method entry for static native methods:
  *   int java.util.zip.CRC32.update(int crc, int b)
@@ -301,3 +315,100 @@
 
   return NULL;
 }
+
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+
+  // rbx,: Method*
+  // rcx: scratch
+  // rsi: sender sp
+
+  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
+
+  address entry_point = __ pc();
+
+  // These don't need a safepoint check because they aren't virtually
+  // callable. We won't enter these intrinsics from compiled code.
+  // If in the future we added an intrinsic which was virtually callable
+  // we'd have to worry about how to safepoint so that this code is used.
+
+  // mathematical functions inlined by compiler
+  // (interpreter must provide identical implementation
+  // in order to avoid monotonicity bugs when switching
+  // from interpreter to compiler in the middle of some
+  // computation)
+  //
+  // stack: [ ret adr ] <-- rsp
+  //        [ lo(arg) ]
+  //        [ hi(arg) ]
+  //
+
+  __ fld_d(Address(rsp, 1*wordSize));
+  switch (kind) {
+    case Interpreter::java_lang_math_sin :
+        __ trigfunc('s');
+        break;
+    case Interpreter::java_lang_math_cos :
+        __ trigfunc('c');
+        break;
+    case Interpreter::java_lang_math_tan :
+        __ trigfunc('t');
+        break;
+    case Interpreter::java_lang_math_sqrt:
+        __ fsqrt();
+        break;
+    case Interpreter::java_lang_math_abs:
+        __ fabs();
+        break;
+    case Interpreter::java_lang_math_log:
+        __ subptr(rsp, 2 * wordSize);
+        __ fstp_d(Address(rsp, 0));
+        if (VM_Version::supports_sse2()) {
+          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
+        }
+        else {
+          __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dlog)));
+        }
+        __ addptr(rsp, 2 * wordSize);
+        break;
+    case Interpreter::java_lang_math_log10:
+        __ flog10();
+        // Store to stack to convert 80bit precision back to 64bits
+        __ push_fTOS();
+        __ pop_fTOS();
+        break;
+    case Interpreter::java_lang_math_pow:
+      __ fld_d(Address(rsp, 3*wordSize)); // second argument
+      __ pow_with_fallback(0);
+      // Store to stack to convert 80bit precision back to 64bits
+      __ push_fTOS();
+      __ pop_fTOS();
+      break;
+    case Interpreter::java_lang_math_exp:
+      __ subptr(rsp, 2*wordSize);
+      __ fstp_d(Address(rsp, 0));
+      if (VM_Version::supports_sse2()) {
+        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
+      } else {
+        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dexp)));
+      }
+      __ addptr(rsp, 2*wordSize);
+    break;
+    default                              :
+        ShouldNotReachHere();
+  }
+
+  // return double result in xmm0 for interpreter and compilers.
+  if (UseSSE >= 2) {
+    __ subptr(rsp, 2*wordSize);
+    __ fstp_d(Address(rsp, 0));
+    __ movdbl(xmm0, Address(rsp, 0));
+    __ addptr(rsp, 2*wordSize);
+  }
+
+  // done, result in FPU ST(0) or XMM0
+  __ pop(rdi);                               // get return address
+  __ mov(rsp, rsi);                          // set sp to sender sp
+  __ jmp(rdi);
+
+  return entry_point;
+}
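Several of the x87 paths above store the 80-bit ST(0) result to a 64-bit stack slot and reload it; that round trip through memory is what discards the extra x87 precision so the interpreter's result matches the compiled code it must stay consistent with. A stand-alone sketch of that round-through-memory idea (illustrative only; the function name is made up and the extra precision is only observable on x87 builds):

    #include <cstdio>

    // Forcing a value through a 64-bit slot discards any extended precision,
    // mirroring the fstp_d/movdbl (or push_fTOS/pop_fTOS) pairs above.
    static double round_to_double(long double extended) {
      volatile double slot = (double) extended;  // "store to stack"
      return slot;                               // reload as a plain 64-bit double
    }

    int main() {
      long double x = 1.0L / 3.0L;               // may carry extra precision on x87
      printf("%.20f\n", round_to_double(x));
      return 0;
    }
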
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_64.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_64.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,155 @@
 #include "asm/macroAssembler.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/templateInterpreterGenerator.hpp"
 #include "runtime/arguments.hpp"
 
 #define __ _masm->
 
+#ifdef _WIN64
+address TemplateInterpreterGenerator::generate_slow_signature_handler() {
+  address entry = __ pc();
+
+  // rbx: method
+  // r14: pointer to locals
+  // c_rarg3: first stack arg - wordSize
+  __ mov(c_rarg3, rsp);
+  // adjust rsp
+  __ subptr(rsp, 4 * wordSize);
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::slow_signature_handler),
+             rbx, r14, c_rarg3);
+
+  // rax: result handler
+
+  // Stack layout:
+  // rsp: 3 integer or float args (if static first is unused)
+  //      1 float/double identifiers
+  //        return address
+  //        stack args
+  //        garbage
+  //        expression stack bottom
+  //        bcp (NULL)
+  //        ...
+
+  // Do FP first so we can use c_rarg3 as temp
+  __ movl(c_rarg3, Address(rsp, 3 * wordSize)); // float/double identifiers
+
+  for ( int i= 0; i < Argument::n_int_register_parameters_c-1; i++ ) {
+    XMMRegister floatreg = as_XMMRegister(i+1);
+    Label isfloatordouble, isdouble, next;
+
+    __ testl(c_rarg3, 1 << (i*2));      // Float or Double?
+    __ jcc(Assembler::notZero, isfloatordouble);
+
+    // Do Int register here
+    switch ( i ) {
+      case 0:
+        __ movl(rscratch1, Address(rbx, Method::access_flags_offset()));
+        __ testl(rscratch1, JVM_ACC_STATIC);
+        __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
+        break;
+      case 1:
+        __ movptr(c_rarg2, Address(rsp, wordSize));
+        break;
+      case 2:
+        __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
+        break;
+      default:
+        break;
+    }
+
+    __ jmp (next);
+
+    __ bind(isfloatordouble);
+    __ testl(c_rarg3, 1 << ((i*2)+1));     // Double?
+    __ jcc(Assembler::notZero, isdouble);
+
+    // Do Float here
+    __ movflt(floatreg, Address(rsp, i * wordSize));
+    __ jmp(next);
+
+    // Do Double here
+    __ bind(isdouble);
+    __ movdbl(floatreg, Address(rsp, i * wordSize));
+
+    __ bind(next);
+  }
+
+
+  // restore rsp
+  __ addptr(rsp, 4 * wordSize);
+
+  __ ret(0);
+
+  return entry;
+}
+#else
+address TemplateInterpreterGenerator::generate_slow_signature_handler() {
+  address entry = __ pc();
+
+  // rbx: method
+  // r14: pointer to locals
+  // c_rarg3: first stack arg - wordSize
+  __ mov(c_rarg3, rsp);
+  // adjust rsp
+  __ subptr(rsp, 14 * wordSize);
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::slow_signature_handler),
+             rbx, r14, c_rarg3);
+
+  // rax: result handler
+
+  // Stack layout:
+  // rsp: 5 integer args (if static first is unused)
+  //      1 float/double identifiers
+  //      8 double args
+  //        return address
+  //        stack args
+  //        garbage
+  //        expression stack bottom
+  //        bcp (NULL)
+  //        ...
+
+  // Do FP first so we can use c_rarg3 as temp
+  __ movl(c_rarg3, Address(rsp, 5 * wordSize)); // float/double identifiers
+
+  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
+    const XMMRegister r = as_XMMRegister(i);
+
+    Label d, done;
+
+    __ testl(c_rarg3, 1 << i);
+    __ jcc(Assembler::notZero, d);
+    __ movflt(r, Address(rsp, (6 + i) * wordSize));
+    __ jmp(done);
+    __ bind(d);
+    __ movdbl(r, Address(rsp, (6 + i) * wordSize));
+    __ bind(done);
+  }
+
+  // Now handle integrals.  Only do c_rarg1 if not static.
+  __ movl(c_rarg3, Address(rbx, Method::access_flags_offset()));
+  __ testl(c_rarg3, JVM_ACC_STATIC);
+  __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
+
+  __ movptr(c_rarg2, Address(rsp, wordSize));
+  __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
+  __ movptr(c_rarg4, Address(rsp, 3 * wordSize));
+  __ movptr(c_rarg5, Address(rsp, 4 * wordSize));
+
+  // restore rsp
+  __ addptr(rsp, 14 * wordSize);
+
+  __ ret(0);
+
+  return entry;
+}
+#endif  // _WIN64
+
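Both handlers above rely on a float/double identifier word filled in by InterpreterRuntime::slow_signature_handler to decide whether each XMM argument register receives a movflt or a movdbl (the Win64 variant uses two bits per argument, the other variant one). A stand-alone sketch of decoding such a mask as the non-Win64 path appears to read it; the mask value and bit layout here are assumptions for illustration only:

    #include <cstdio>

    int main() {
      // Hypothetical identifier word: bit i set means FP argument i is a double,
      // clear means it is a float (mirrors the testl(c_rarg3, 1 << i) above).
      unsigned fp_identifiers = 0x5;
      for (int i = 0; i < 8; i++) {
        if (fp_identifiers & (1u << i))
          printf("movdbl xmm%d <- 64-bit stack slot %d\n", i, 6 + i);
        else
          printf("movflt xmm%d <- 32-bit value in slot %d\n", i, 6 + i);
      }
      return 0;
    }
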
 /**
  * Method entry for static native methods:
  *   int java.util.zip.CRC32.update(int crc, int b)
@@ -193,3 +337,85 @@
 
   return NULL;
 }
+
+//
+// Various method entries
+//
+
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+
+  // rbx: Method*
+  // rcx: scratch
+  // r13: sender sp
+
+  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
+
+  address entry_point = __ pc();
+
+  // These don't need a safepoint check because they aren't virtually
+  // callable. We won't enter these intrinsics from compiled code.
+  // If in the future we added an intrinsic which was virtually callable
+  // we'd have to worry about how to safepoint so that this code is used.
+
+  // mathematical functions inlined by compiler
+  // (interpreter must provide identical implementation
+  // in order to avoid monotonicity bugs when switching
+  // from interpreter to compiler in the middle of some
+  // computation)
+  //
+  // stack: [ ret adr ] <-- rsp
+  //        [ lo(arg) ]
+  //        [ hi(arg) ]
+  //
+
+
+  if (kind == Interpreter::java_lang_math_sqrt) {
+    __ sqrtsd(xmm0, Address(rsp, wordSize));
+  } else if (kind == Interpreter::java_lang_math_exp) {
+    __ movdbl(xmm0, Address(rsp, wordSize));
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
+  } else if (kind == Interpreter::java_lang_math_log) {
+    __ movdbl(xmm0, Address(rsp, wordSize));
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
+  } else {
+    __ fld_d(Address(rsp, wordSize));
+    switch (kind) {
+      case Interpreter::java_lang_math_sin :
+          __ trigfunc('s');
+          break;
+      case Interpreter::java_lang_math_cos :
+          __ trigfunc('c');
+          break;
+      case Interpreter::java_lang_math_tan :
+          __ trigfunc('t');
+          break;
+      case Interpreter::java_lang_math_abs:
+          __ fabs();
+          break;
+      case Interpreter::java_lang_math_log10:
+          __ flog10();
+          break;
+      case Interpreter::java_lang_math_pow:
+          __ fld_d(Address(rsp, 3*wordSize)); // second argument (one
+                                              // empty stack slot)
+          __ pow_with_fallback(0);
+          break;
+      default                              :
+          ShouldNotReachHere();
+    }
+
+    // return double result in xmm0 for interpreter and compilers.
+    __ subptr(rsp, 2*wordSize);
+    // Round to 64bit precision
+    __ fstp_d(Address(rsp, 0));
+    __ movdbl(xmm0, Address(rsp, 0));
+    __ addptr(rsp, 2*wordSize);
+  }
+
+
+  __ pop(rax);
+  __ mov(rsp, r13);
+  __ jmp(rax);
+
+  return entry_point;
+}
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,209 +0,0 @@
-/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "ci/ciMethod.hpp"
-#include "interpreter/interpreter.hpp"
-#include "runtime/frame.inline.hpp"
-
-// Size of interpreter code.  Increase if too small.  Interpreter will
-// fail with a guarantee ("not enough space for interpreter generation");
-// if too small.
-// Run with +PrintInterpreter to get the VM to print out the size.
-// Max size with JVMTI
-#ifdef AMD64
-int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;
-#else
-int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
-#endif // AMD64
-
-// asm based interpreter deoptimization helpers
-int AbstractInterpreter::size_activation(int max_stack,
-                                         int temps,
-                                         int extra_args,
-                                         int monitors,
-                                         int callee_params,
-                                         int callee_locals,
-                                         bool is_top_frame) {
-  // Note: This calculation must exactly parallel the frame setup
-  // in TemplateInterpreterGenerator::generate_fixed_frame.
-
-  // fixed size of an interpreter frame:
-  int overhead = frame::sender_sp_offset -
-                 frame::interpreter_frame_initial_sp_offset;
-  // Our locals were accounted for by the caller (or last_frame_adjust
-  // on the transistion) Since the callee parameters already account
-  // for the callee's params we only need to account for the extra
-  // locals.
-  int size = overhead +
-         (callee_locals - callee_params)*Interpreter::stackElementWords +
-         monitors * frame::interpreter_frame_monitor_size() +
-         temps* Interpreter::stackElementWords + extra_args;
-
-  return size;
-}
-
-void AbstractInterpreter::layout_activation(Method* method,
-                                            int tempcount,
-                                            int popframe_extra_args,
-                                            int moncount,
-                                            int caller_actual_parameters,
-                                            int callee_param_count,
-                                            int callee_locals,
-                                            frame* caller,
-                                            frame* interpreter_frame,
-                                            bool is_top_frame,
-                                            bool is_bottom_frame) {
-  // The frame interpreter_frame is guaranteed to be the right size,
-  // as determined by a previous call to the size_activation() method.
-  // It is also guaranteed to be walkable even though it is in a
-  // skeletal state
-
-  int max_locals = method->max_locals() * Interpreter::stackElementWords;
-  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
-    Interpreter::stackElementWords;
-
-#ifdef ASSERT
-  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
-#endif
-
-  interpreter_frame->interpreter_frame_set_method(method);
-  // NOTE the difference in using sender_sp and
-  // interpreter_frame_sender_sp interpreter_frame_sender_sp is
-  // the original sp of the caller (the unextended_sp) and
-  // sender_sp is fp+8/16 (32bit/64bit) XXX
-  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
-
-#ifdef ASSERT
-  if (caller->is_interpreted_frame()) {
-    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
-  }
-#endif
-
-  interpreter_frame->interpreter_frame_set_locals(locals);
-  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
-  BasicObjectLock* monbot = montop - moncount;
-  interpreter_frame->interpreter_frame_set_monitor_end(monbot);
-
-  // Set last_sp
-  intptr_t*  esp = (intptr_t*) monbot -
-    tempcount*Interpreter::stackElementWords -
-    popframe_extra_args;
-  interpreter_frame->interpreter_frame_set_last_sp(esp);
-
-  // All frames but the initial (oldest) interpreter frame we fill in have
-  // a value for sender_sp that allows walking the stack but isn't
-  // truly correct. Correct the value here.
-  if (extra_locals != 0 &&
-      interpreter_frame->sender_sp() ==
-      interpreter_frame->interpreter_frame_sender_sp()) {
-    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
-                                                       extra_locals);
-  }
-  *interpreter_frame->interpreter_frame_cache_addr() =
-    method->constants()->cache();
-}
-
-#ifndef _LP64
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : // fall through
-    case T_LONG   : // fall through
-    case T_VOID   : i = 4; break;
-    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
-    case T_DOUBLE : i = 6; break;
-    case T_OBJECT : // fall through
-    case T_ARRAY  : i = 7; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
-  return i;
-}
-#else
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
-         "index out of bounds");
-  return i;
-}
-#endif // _LP64
-
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  switch (method_kind(m)) {
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     :
-      return false;
-    default:
-      return true;
-  }
-}
-
-// How much stack a method activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-  const int entry_size = frame::interpreter_frame_monitor_size();
-
-  // total overhead size: entry_size + (saved rbp thru expr stack
-  // bottom).  be sure to change this if you add/subtract anything
-  // to/from the overhead area
-  const int overhead_size =
-    -(frame::interpreter_frame_initial_sp_offset) + entry_size;
-
-#ifndef _LP64
-  const int stub_code = 4;  // see generate_call_stub
-#else
-  const int stub_code = frame::entry_frame_after_call_words;
-#endif
-
-  const int method_stack = (method->max_locals() + method->max_stack()) *
-                           Interpreter::stackElementWords;
-  return (overhead_size + method_stack + stub_code);
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/zero/vm/abstractInterpreter_zero.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/bytecodeInterpreter.hpp"
+#include "interpreter/cppInterpreter.hpp"
+#include "runtime/frame.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  return true;
+}
+
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
+         "index out of bounds");
+  return i;
+}
+
+// Deoptimization helpers
+
+int AbstractInterpreter::size_activation(int       max_stack,
+                                         int       tempcount,
+                                         int       extra_args,
+                                         int       moncount,
+                                         int       callee_param_count,
+                                         int       callee_locals,
+                                         bool      is_top_frame) {
+  int header_words        = InterpreterFrame::header_words;
+  int monitor_words       = moncount * frame::interpreter_frame_monitor_size();
+  int stack_words         = is_top_frame ? max_stack : tempcount;
+  int callee_extra_locals = callee_locals - callee_param_count;
+
+  return header_words + monitor_words + stack_words + callee_extra_locals;
+}
+
+void AbstractInterpreter::layout_activation(Method* method,
+                                            int       tempcount,
+                                            int       popframe_extra_args,
+                                            int       moncount,
+                                            int       caller_actual_parameters,
+                                            int       callee_param_count,
+                                            int       callee_locals,
+                                            frame*    caller,
+                                            frame*    interpreter_frame,
+                                            bool      is_top_frame,
+                                            bool      is_bottom_frame) {
+  assert(popframe_extra_args == 0, "what to do?");
+  assert(!is_top_frame || (!callee_locals && !callee_param_count),
+         "top frame should have no caller");
+
+  // This code must exactly match what InterpreterFrame::build
+  // does (the full InterpreterFrame::build, that is, not the
+  // one that creates empty frames for the deoptimizer).
+  //
+  // interpreter_frame will be filled in.  Its size is determined by
+  // a previous call to the size_activation() method.
+  //
+  // Note that tempcount is the current size of the expression
+  // stack.  For top most frames we will allocate a full sized
+  // expression stack and not the trimmed version that non-top
+  // frames have.
+
+  int monitor_words       = moncount * frame::interpreter_frame_monitor_size();
+  intptr_t *locals        = interpreter_frame->fp() + method->max_locals();
+  interpreterState istate = interpreter_frame->get_interpreterState();
+  intptr_t *monitor_base  = (intptr_t*) istate;
+  intptr_t *stack_base    = monitor_base - monitor_words;
+  intptr_t *stack         = stack_base - tempcount - 1;
+
+  BytecodeInterpreter::layout_interpreterState(istate,
+                                               caller,
+                                               NULL,
+                                               method,
+                                               locals,
+                                               stack,
+                                               stack_base,
+                                               monitor_base,
+                                               NULL,
+                                               is_top_frame);
+}
+
+// Helper for (runtime) stack overflow checks
+
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
+  return 0;
+}
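size_activation() above sums four contributions: the fixed frame header, one monitor-sized block per monitor, the expression stack (full max_stack for the top frame, only the live tempcount otherwise), and the callee's locals not already covered by its parameters. A stand-alone arithmetic sketch with made-up inputs (none of these numbers come from the VM):

    #include <cstdio>

    int main() {
      const int  header_words       = 10;  // assumed InterpreterFrame::header_words
      const int  monitor_size       = 2;   // assumed words per monitor
      const int  moncount           = 1;
      const int  max_stack          = 8;
      const int  tempcount          = 3;
      const bool is_top_frame       = true;
      const int  callee_locals      = 5;
      const int  callee_param_count = 2;

      int monitor_words       = moncount * monitor_size;
      int stack_words         = is_top_frame ? max_stack : tempcount;
      int callee_extra_locals = callee_locals - callee_param_count;

      printf("activation size = %d words\n",
             header_words + monitor_words + stack_words + callee_extra_locals);
      return 0;
    }
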
--- a/hotspot/src/cpu/zero/vm/bytecodeInterpreter_zero.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/zero/vm/bytecodeInterpreter_zero.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -25,7 +25,6 @@
 
 #include "precompiled.hpp"
 #include "asm/assembler.hpp"
-#include "interp_masm_zero.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/bytecodeInterpreter.inline.hpp"
 #include "interpreter/interpreter.hpp"
@@ -33,8 +32,6 @@
 #include "oops/methodData.hpp"
 #include "oops/method.hpp"
 #include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -68,4 +65,40 @@
   return NULL;
 }
 
+void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
+                                                  frame*    caller,
+                                                  frame*    current,
+                                                  Method* method,
+                                                  intptr_t* locals,
+                                                  intptr_t* stack,
+                                                  intptr_t* stack_base,
+                                                  intptr_t* monitor_base,
+                                                  intptr_t* frame_bottom,
+                                                  bool      is_top_frame) {
+  istate->set_locals(locals);
+  istate->set_method(method);
+  istate->set_self_link(istate);
+  istate->set_prev_link(NULL);
+  // thread will be set by a hacky repurposing of frame::patch_pc()
+  // bcp will be set by vframeArrayElement::unpack_on_stack()
+  istate->set_constants(method->constants()->cache());
+  istate->set_msg(BytecodeInterpreter::method_resume);
+  istate->set_bcp_advance(0);
+  istate->set_oop_temp(NULL);
+  istate->set_mdx(NULL);
+  if (caller->is_interpreted_frame()) {
+    interpreterState prev = caller->get_interpreterState();
+    prev->set_callee(method);
+    if (*prev->bcp() == Bytecodes::_invokeinterface)
+      prev->set_bcp_advance(5);
+    else
+      prev->set_bcp_advance(3);
+  }
+  istate->set_callee(NULL);
+  istate->set_monitor_base((BasicObjectLock *) monitor_base);
+  istate->set_stack_base(stack_base);
+  istate->set_stack(stack);
+  istate->set_stack_limit(stack_base - method->max_stack() - 1);
+}
+
 #endif // CC_INTERP
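The bcp_advance values set above match the encoded lengths of the calling bytecodes: invokeinterface occupies 5 bytes (opcode, two constant-pool index bytes, a count byte and a zero byte), while the other invokes handled here occupy 3 bytes, so the caller resumes just past its call site. A minimal stand-alone sketch of that choice (the flag is a hypothetical input):

    #include <cstdio>

    int main() {
      const bool caller_is_invokeinterface = true;  // hypothetical caller bytecode
      const int  bcp_advance = caller_is_invokeinterface ? 5 : 3;
      printf("advance caller bcp by %d bytes when the callee returns\n", bcp_advance);
      return 0;
    }
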
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/zero/vm/cppInterpreterGenerator_zero.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/cppInterpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "oops/method.hpp"
+#include "runtime/arguments.hpp"
+#include "interpreter/cppInterpreter.hpp"
+
+address CppInterpreterGenerator::generate_slow_signature_handler() {
+  _masm->advance(1);
+  return (address) InterpreterRuntime::slow_signature_handler;
+}
+
+address CppInterpreterGenerator::generate_math_entry(
+    AbstractInterpreter::MethodKind kind) {
+  if (!InlineIntrinsics)
+    return NULL;
+
+  Unimplemented();
+  return NULL;
+}
+
+address CppInterpreterGenerator::generate_abstract_entry() {
+  return generate_entry((address) ShouldNotCallThisEntry());
+}
+
+address CppInterpreterGenerator::generate_empty_entry() {
+  if (!UseFastEmptyMethods)
+    return NULL;
+
+  return generate_entry((address) CppInterpreter::empty_entry);
+}
+
+address CppInterpreterGenerator::generate_accessor_entry() {
+  if (!UseFastAccessorMethods)
+    return NULL;
+
+  return generate_entry((address) CppInterpreter::accessor_entry);
+}
+
+address CppInterpreterGenerator::generate_Reference_get_entry(void) {
+#if INCLUDE_ALL_GCS
+  if (UseG1GC) {
+    // We need to have a routine that generates code to:
+    //   * load the value in the referent field
+    //   * pass that value to the pre-barrier.
+    //
+    // In the case of G1 this will record the value of the
+    // referent in an SATB buffer if marking is active.
+    // This will cause concurrent marking to mark the referent
+    // field as live.
+    Unimplemented();
+  }
+#endif // INCLUDE_ALL_GCS
+
+  // If G1 is not enabled then attempt to go through the normal entry point
+  // Reference.get could be instrumented by jvmti
+  return NULL;
+}
+
+address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
+  return generate_entry((address) CppInterpreter::native_entry);
+}
+
+address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
+  return generate_entry((address) CppInterpreter::normal_entry);
+}
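The comment in generate_Reference_get_entry() above describes the G1 SATB pre-barrier a real entry would have to emit: while concurrent marking is active, the loaded referent is recorded in an SATB buffer so the collector still treats it as live. A stand-alone sketch of that idea only; every name below is a hypothetical stand-in, not a HotSpot API:

    #include <cstdio>
    #include <vector>

    static std::vector<const void*> satb_buffer;   // stands in for the per-thread SATB queue
    static bool marking_active = true;

    static const void* read_referent(const void* const* referent_field) {
      const void* value = *referent_field;         // load the value in the referent field
      if (marking_active && value != nullptr) {
        satb_buffer.push_back(value);              // "pre-barrier": keep it live for marking
      }
      return value;
    }

    int main() {
      int referent = 42;
      const void* field = &referent;
      read_referent(&field);
      printf("recorded %zu referent(s)\n", satb_buffer.size());
      return 0;
    }
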
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -747,92 +747,6 @@
   return (InterpreterFrame *) fp;
 }
 
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
-         "index out of bounds");
-  return i;
-}
-
-BasicType CppInterpreter::result_type_of(Method* method) {
-  BasicType t;
-  switch (method->result_index()) {
-    case 0 : t = T_BOOLEAN; break;
-    case 1 : t = T_CHAR;    break;
-    case 2 : t = T_BYTE;    break;
-    case 3 : t = T_SHORT;   break;
-    case 4 : t = T_INT;     break;
-    case 5 : t = T_LONG;    break;
-    case 6 : t = T_VOID;    break;
-    case 7 : t = T_FLOAT;   break;
-    case 8 : t = T_DOUBLE;  break;
-    case 9 : t = T_OBJECT;  break;
-    default: ShouldNotReachHere();
-  }
-  assert(AbstractInterpreter::BasicType_as_index(t) == method->result_index(),
-         "out of step with AbstractInterpreter::BasicType_as_index");
-  return t;
-}
-
-address CppInterpreterGenerator::generate_empty_entry() {
-  if (!UseFastEmptyMethods)
-    return NULL;
-
-  return generate_entry((address) CppInterpreter::empty_entry);
-}
-
-address CppInterpreterGenerator::generate_accessor_entry() {
-  if (!UseFastAccessorMethods)
-    return NULL;
-
-  return generate_entry((address) CppInterpreter::accessor_entry);
-}
-
-address CppInterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  if (UseG1GC) {
-    // We need to generate have a routine that generates code to:
-    //   * load the value in the referent field
-    //   * passes that value to the pre-barrier.
-    //
-    // In the case of G1 this will record the value of the
-    // referent in an SATB buffer if marking is active.
-    // This will cause concurrent marking to mark the referent
-    // field as live.
-    Unimplemented();
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the normal entry point
-  // Reference.get could be instrumented by jvmti
-  return NULL;
-}
-
-address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
-  return generate_entry((address) CppInterpreter::native_entry);
-}
-
-address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
-  return generate_entry((address) CppInterpreter::normal_entry);
-}
-
-
-// Deoptimization helpers
-
 InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
   ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
 
@@ -858,101 +772,24 @@
   return (InterpreterFrame *) fp;
 }
 
-int AbstractInterpreter::size_activation(int       max_stack,
-                                         int       tempcount,
-                                         int       extra_args,
-                                         int       moncount,
-                                         int       callee_param_count,
-                                         int       callee_locals,
-                                         bool      is_top_frame) {
-  int header_words        = InterpreterFrame::header_words;
-  int monitor_words       = moncount * frame::interpreter_frame_monitor_size();
-  int stack_words         = is_top_frame ? max_stack : tempcount;
-  int callee_extra_locals = callee_locals - callee_param_count;
-
-  return header_words + monitor_words + stack_words + callee_extra_locals;
-}
-
-void AbstractInterpreter::layout_activation(Method* method,
-                                            int       tempcount,
-                                            int       popframe_extra_args,
-                                            int       moncount,
-                                            int       caller_actual_parameters,
-                                            int       callee_param_count,
-                                            int       callee_locals,
-                                            frame*    caller,
-                                            frame*    interpreter_frame,
-                                            bool      is_top_frame,
-                                            bool      is_bottom_frame) {
-  assert(popframe_extra_args == 0, "what to do?");
-  assert(!is_top_frame || (!callee_locals && !callee_param_count),
-         "top frame should have no caller");
-
-  // This code must exactly match what InterpreterFrame::build
-  // does (the full InterpreterFrame::build, that is, not the
-  // one that creates empty frames for the deoptimizer).
-  //
-  // interpreter_frame will be filled in.  It's size is determined by
-  // a previous call to the size_activation() method,
-  //
-  // Note that tempcount is the current size of the expression
-  // stack.  For top most frames we will allocate a full sized
-  // expression stack and not the trimmed version that non-top
-  // frames have.
-
-  int monitor_words       = moncount * frame::interpreter_frame_monitor_size();
-  intptr_t *locals        = interpreter_frame->fp() + method->max_locals();
-  interpreterState istate = interpreter_frame->get_interpreterState();
-  intptr_t *monitor_base  = (intptr_t*) istate;
-  intptr_t *stack_base    = monitor_base - monitor_words;
-  intptr_t *stack         = stack_base - tempcount - 1;
-
-  BytecodeInterpreter::layout_interpreterState(istate,
-                                               caller,
-                                               NULL,
-                                               method,
-                                               locals,
-                                               stack,
-                                               stack_base,
-                                               monitor_base,
-                                               NULL,
-                                               is_top_frame);
-}
-
-void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
-                                                  frame*    caller,
-                                                  frame*    current,
-                                                  Method* method,
-                                                  intptr_t* locals,
-                                                  intptr_t* stack,
-                                                  intptr_t* stack_base,
-                                                  intptr_t* monitor_base,
-                                                  intptr_t* frame_bottom,
-                                                  bool      is_top_frame) {
-  istate->set_locals(locals);
-  istate->set_method(method);
-  istate->set_self_link(istate);
-  istate->set_prev_link(NULL);
-  // thread will be set by a hacky repurposing of frame::patch_pc()
-  // bcp will be set by vframeArrayElement::unpack_on_stack()
-  istate->set_constants(method->constants()->cache());
-  istate->set_msg(BytecodeInterpreter::method_resume);
-  istate->set_bcp_advance(0);
-  istate->set_oop_temp(NULL);
-  istate->set_mdx(NULL);
-  if (caller->is_interpreted_frame()) {
-    interpreterState prev = caller->get_interpreterState();
-    prev->set_callee(method);
-    if (*prev->bcp() == Bytecodes::_invokeinterface)
-      prev->set_bcp_advance(5);
-    else
-      prev->set_bcp_advance(3);
+BasicType CppInterpreter::result_type_of(Method* method) {
+  BasicType t;
+  switch (method->result_index()) {
+    case 0 : t = T_BOOLEAN; break;
+    case 1 : t = T_CHAR;    break;
+    case 2 : t = T_BYTE;    break;
+    case 3 : t = T_SHORT;   break;
+    case 4 : t = T_INT;     break;
+    case 5 : t = T_LONG;    break;
+    case 6 : t = T_VOID;    break;
+    case 7 : t = T_FLOAT;   break;
+    case 8 : t = T_DOUBLE;  break;
+    case 9 : t = T_OBJECT;  break;
+    default: ShouldNotReachHere();
   }
-  istate->set_callee(NULL);
-  istate->set_monitor_base((BasicObjectLock *) monitor_base);
-  istate->set_stack_base(stack_base);
-  istate->set_stack(stack);
-  istate->set_stack_limit(stack_base - method->max_stack() - 1);
+  assert(AbstractInterpreter::BasicType_as_index(t) == method->result_index(),
+         "out of step with AbstractInterpreter::BasicType_as_index");
+  return t;
 }
 
 address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
@@ -964,12 +801,6 @@
   return NULL;
 }
 
-// Helper for (runtime) stack overflow checks
-
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-  return 0;
-}
-
 // Helper for figuring out if frames are interpreter frames
 
 bool CppInterpreter::contains(address pc) {
--- a/hotspot/src/cpu/zero/vm/interp_masm_zero.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "interp_masm_zero.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/markOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiRedefineClassesTrace.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/basicLock.hpp"
-#include "runtime/biasedLocking.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/thread.inline.hpp"
-
-// This file is intentionally empty
--- a/hotspot/src/cpu/zero/vm/interpreter_zero.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/cppInterpreterGenerator.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "prims/methodHandles.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef CC_INTERP
-#include "interpreter/cppInterpreter.hpp"
-#endif
-
-address AbstractInterpreterGenerator::generate_slow_signature_handler() {
-  _masm->advance(1);
-  return (address) InterpreterRuntime::slow_signature_handler;
-}
-
-address CppInterpreterGenerator::generate_math_entry(
-    AbstractInterpreter::MethodKind kind) {
-  if (!InlineIntrinsics)
-    return NULL;
-
-  Unimplemented();
-  return NULL;
-}
-
-address CppInterpreterGenerator::generate_abstract_entry() {
-  return generate_entry((address) ShouldNotCallThisEntry());
-}
-
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  return true;
-}
--- a/hotspot/src/cpu/zero/vm/register_definitions_zero.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "asm/register.hpp"
-#include "interp_masm_zero.hpp"
-#include "register_zero.hpp"
-
-// This file is intentionally empty
--- a/hotspot/src/cpu/zero/vm/stack_zero.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/zero/vm/stack_zero.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -25,9 +25,16 @@
 
 #include "precompiled.hpp"
 #include "interpreter/interpreterRuntime.hpp"
+#include "runtime/thread.hpp"
 #include "stack_zero.hpp"
 #include "stack_zero.inline.hpp"
 
+// Defined out of line: inlining this in the header causes a circular inclusion with thread.hpp.
+ZeroStack::ZeroStack()
+  : _base(NULL), _top(NULL), _sp(NULL) {
+  _shadow_pages_size = JavaThread::stack_shadow_zone_size();
+}
+
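Moving the constructor out of line means stack_zero.hpp no longer has to include thread.hpp just to reach JavaThread::stack_shadow_zone_size(), which is what created the circular inclusion. A stand-alone sketch of the general pattern; the class and function names below are hypothetical:

    #include <cstdio>

    // In the real split the struct (with only the declaration "ZeroStackLike();")
    // would live in the header, and the definition would sit in the .cpp file
    // next to the heavier #include it needs.
    struct ZeroStackLike {
      ZeroStackLike();          // declared only; no heavy header required here
      int shadow_pages_size;
    };

    static int stack_shadow_zone_size() { return 20 * 4096; }  // stand-in value

    ZeroStackLike::ZeroStackLike() : shadow_pages_size(stack_shadow_zone_size()) {}

    int main() {
      printf("%d\n", ZeroStackLike().shadow_pages_size);
      return 0;
    }
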
 int ZeroStack::suggest_size(Thread *thread) const {
   assert(needs_setup(), "already set up");
   int abi_available = abi_stack_available(thread);
--- a/hotspot/src/cpu/zero/vm/stack_zero.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/zero/vm/stack_zero.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -38,10 +38,7 @@
   int _shadow_pages_size; // how much ABI stack must we keep free?
 
  public:
-  ZeroStack()
-    : _base(NULL), _top(NULL), _sp(NULL) {
-    _shadow_pages_size = JavaThread::stack_shadow_zone_size();
-  }
+  ZeroStack();
 
   bool needs_setup() const {
     return _base == NULL;
--- a/hotspot/src/cpu/zero/vm/stack_zero.inline.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/cpu/zero/vm/stack_zero.inline.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -49,11 +49,10 @@
 // value can be negative.
 inline int ZeroStack::abi_stack_available(Thread *thread) const {
   guarantee(Thread::current() == thread, "should run in the same thread");
-  assert(thread->stack_size() -
-         (thread->stack_base() - (address) &stack_used +
-          JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size()) ==
-         (address)&stack_used - thread->stack_overflow_limit(), "sanity");
-  return (address)&stack_used - stack_overflow_limit();
+  int stack_used = thread->stack_base() - (address) &stack_used
+    + (JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size());
+  int stack_free = thread->stack_size() - stack_used;
+  return stack_free;
 }
 
 #endif // CPU_ZERO_VM_STACK_ZERO_INLINE_HPP
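The rewritten abi_stack_available() above estimates the stack already used as the distance from the thread's stack base down to the address of a local variable, plus the guard and shadow zones that must stay free, and returns whatever is left of the declared stack size (possibly negative). A stand-alone sketch of that arithmetic; all sizes here are made-up numbers:

    #include <cstdio>
    #include <cstdint>

    int main() {
      char probe = 0;                                          // &probe ~ current stack depth
      uintptr_t stack_base = (uintptr_t) &probe + 64 * 1024;   // assumed stack base above us
      long stack_size      = 512 * 1024;                       // assumed total stack size
      long reserved_zones  = 16 * 1024 + 80 * 1024;            // assumed guard + shadow zones

      long stack_used = (long)(stack_base - (uintptr_t) &probe) + reserved_zones;
      long stack_free = stack_size - stack_used;
      printf("approx ABI stack available: %ld bytes\n", stack_free);
      return 0;
    }
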
--- a/hotspot/src/os/aix/vm/os_aix.inline.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/os/aix/vm/os_aix.inline.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -65,7 +65,7 @@
 }
 
 // Bang the shadow pages if they need to be touched to be mapped.
-inline void os::bang_stack_shadow_pages() {
+inline void os::map_stack_shadow_pages() {
 }
 
 inline void os::dll_unload(void *lib) {
--- a/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,7 @@
 
 
 // Bang the shadow pages if they need to be touched to be mapped.
-inline void os::bang_stack_shadow_pages() {
+inline void os::map_stack_shadow_pages() {
 }
 
 inline void os::dll_unload(void *lib) {
--- a/hotspot/src/os/linux/vm/os_linux.inline.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/os/linux/vm/os_linux.inline.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
 
 
 // Bang the shadow pages if they need to be touched to be mapped.
-inline void os::bang_stack_shadow_pages() {
+inline void os::map_stack_shadow_pages() {
 }
 
 inline void os::dll_unload(void *lib) {
--- a/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,7 @@
 
 
 // Bang the shadow pages if they need to be touched to be mapped.
-inline void os::bang_stack_shadow_pages() {
+inline void os::map_stack_shadow_pages() {
 }
 inline void os::dll_unload(void *lib) { ::dlclose(lib); }
 
--- a/hotspot/src/os/windows/vm/os_windows.inline.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/os/windows/vm/os_windows.inline.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,7 @@
 }
 
 // Bang the shadow pages if they need to be touched to be mapped.
-inline void os::bang_stack_shadow_pages() {
+inline void os::map_stack_shadow_pages() {
   // Write to each page of our new frame to force OS mapping.
   // If we decrement stack pointer more than one page
   // the OS may not map an intervening page into our space
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/bytecodeInterpreter.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateTable.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/forte.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/timer.hpp"
+
+# define __ _masm->
+
+//------------------------------------------------------------------------------------------------------------------------
+// Implementation of platform independent aspects of Interpreter
+
+void AbstractInterpreter::initialize() {
+  if (_code != NULL) return;
+
+  // make sure 'imported' classes are initialized
+  if (CountBytecodes || TraceBytecodes || StopInterpreterAt) BytecodeCounter::reset();
+  if (PrintBytecodeHistogram)                                BytecodeHistogram::reset();
+  if (PrintBytecodePairHistogram)                            BytecodePairHistogram::reset();
+
+  InvocationCounter::reinitialize(DelayCompilationDuringStartup);
+
+}
+
+void AbstractInterpreter::print() {
+  tty->cr();
+  tty->print_cr("----------------------------------------------------------------------");
+  tty->print_cr("Interpreter");
+  tty->cr();
+  tty->print_cr("code size        = %6dK bytes", (int)_code->used_space()/1024);
+  tty->print_cr("total space      = %6dK bytes", (int)_code->total_space()/1024);
+  tty->print_cr("wasted space     = %6dK bytes", (int)_code->available_space()/1024);
+  tty->cr();
+  tty->print_cr("# of codelets    = %6d"      , _code->number_of_stubs());
+  if (_code->number_of_stubs() != 0) {
+    tty->print_cr("avg codelet size = %6d bytes", _code->used_space() / _code->number_of_stubs());
+    tty->cr();
+  }
+  _code->print();
+  tty->print_cr("----------------------------------------------------------------------");
+  tty->cr();
+}
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Implementation of interpreter
+
+StubQueue* AbstractInterpreter::_code                                       = NULL;
+bool       AbstractInterpreter::_notice_safepoints                          = false;
+address    AbstractInterpreter::_rethrow_exception_entry                    = NULL;
+
+address    AbstractInterpreter::_native_entry_begin                         = NULL;
+address    AbstractInterpreter::_native_entry_end                           = NULL;
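+// _slow_signature_handler is installed by the interpreter generator
+// (see the "slow signature handler" codelet in generate_all()).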
+address    AbstractInterpreter::_slow_signature_handler;
+address    AbstractInterpreter::_entry_table            [AbstractInterpreter::number_of_method_entries];
+address    AbstractInterpreter::_native_abi_to_tosca    [AbstractInterpreter::number_of_result_handlers];
+
+//------------------------------------------------------------------------------------------------------------------------
+// Generation of complete interpreter
+
+AbstractInterpreterGenerator::AbstractInterpreterGenerator(StubQueue* _code) {
+  _masm                      = NULL;
+}
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Entry points
+
+AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m) {
+  // Abstract method?
+  if (m->is_abstract()) return abstract;
+
+  // Method handle primitive?
+  if (m->is_method_handle_intrinsic()) {
+    vmIntrinsics::ID id = m->intrinsic_id();
+    assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic");
+    MethodKind kind = (MethodKind)( method_handle_invoke_FIRST +
+                                    ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) );
+    assert(kind <= method_handle_invoke_LAST, "parallel enum ranges");
+    return kind;
+  }
+
+#ifndef CC_INTERP
+  if (UseCRC32Intrinsics && m->is_native()) {
+    // Use optimized stub code for CRC32 native methods.
+    switch (m->intrinsic_id()) {
+      case vmIntrinsics::_updateCRC32            : return java_util_zip_CRC32_update;
+      case vmIntrinsics::_updateBytesCRC32       : return java_util_zip_CRC32_updateBytes;
+      case vmIntrinsics::_updateByteBufferCRC32  : return java_util_zip_CRC32_updateByteBuffer;
+    }
+  }
+  if (UseCRC32CIntrinsics) {
+    // Use optimized stub code for CRC32C methods.
+    switch (m->intrinsic_id()) {
+      case vmIntrinsics::_updateBytesCRC32C             : return java_util_zip_CRC32C_updateBytes;
+      case vmIntrinsics::_updateDirectByteBufferCRC32C  : return java_util_zip_CRC32C_updateDirectByteBuffer;
+    }
+  }
+
+  switch (m->intrinsic_id()) {
+  case vmIntrinsics::_intBitsToFloat:      return java_lang_Float_intBitsToFloat;
+  case vmIntrinsics::_floatToRawIntBits:   return java_lang_Float_floatToRawIntBits;
+  case vmIntrinsics::_longBitsToDouble:    return java_lang_Double_longBitsToDouble;
+  case vmIntrinsics::_doubleToRawLongBits: return java_lang_Double_doubleToRawLongBits;
+  }
+
+#endif // CC_INTERP
+
+  // Native method?
+  // Note: This test must come _before_ the test for intrinsic
+  //       methods. See also comments below.
+  if (m->is_native()) {
+    assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out");
+    return m->is_synchronized() ? native_synchronized : native;
+  }
+
+  // Synchronized?
+  if (m->is_synchronized()) {
+    return zerolocals_synchronized;
+  }
+
+  if (RegisterFinalizersAtInit && m->code_size() == 1 &&
+      m->intrinsic_id() == vmIntrinsics::_Object_init) {
+    // We need to execute the special return bytecode to check for
+    // finalizer registration so create a normal frame.
+    return zerolocals;
+  }
+
+  // Empty method?
+  if (m->is_empty_method()) {
+    return empty;
+  }
+
+  // Special intrinsic method?
+  // Note: This test must come _after_ the test for native methods,
+  //       otherwise we will run into problems with JDK 1.2, see also
+  //       TemplateInterpreterGenerator::generate_method_entry()
+  //       for details.
+  switch (m->intrinsic_id()) {
+    case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
+    case vmIntrinsics::_dcos  : return java_lang_math_cos  ;
+    case vmIntrinsics::_dtan  : return java_lang_math_tan  ;
+    case vmIntrinsics::_dabs  : return java_lang_math_abs  ;
+    case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
+    case vmIntrinsics::_dlog  : return java_lang_math_log  ;
+    case vmIntrinsics::_dlog10: return java_lang_math_log10;
+    case vmIntrinsics::_dpow  : return java_lang_math_pow  ;
+    case vmIntrinsics::_dexp  : return java_lang_math_exp  ;
+
+    case vmIntrinsics::_Reference_get:
+                                return java_lang_ref_reference_get;
+  }
+
+  // Accessor method?
+  if (m->is_getter()) {
+    // TODO: We should have used ::is_accessor above, but fast accessors in Zero expect only getters.
+    // See CppInterpreter::accessor_entry in cppInterpreter_zero.cpp. This should be fixed in Zero,
+    // and then the call above updated to ::is_accessor.
+    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
+    return accessor;
+  }
+
+  // Note: for now: zero locals for all non-empty methods
+  return zerolocals;
+}
+
+
+void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) {
+  assert(kind >= method_handle_invoke_FIRST &&
+         kind <= method_handle_invoke_LAST, "late initialization only for MH entry points");
+  assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry");
+  _entry_table[kind] = entry;
+}
+
+
+// Return true if the interpreter can prove that the given bytecode has
+// not yet been executed (in Java semantics, not in actual operation).
+bool AbstractInterpreter::is_not_reached(const methodHandle& method, int bci) {
+  Bytecodes::Code code = method()->code_at(bci);
+
+  if (!Bytecodes::must_rewrite(code)) {
+    // might have been reached
+    return false;
+  }
+
+  // the bytecode might not be rewritten if the method is an accessor, etc.
+  address ientry = method->interpreter_entry();
+  if (ientry != entry_for_kind(AbstractInterpreter::zerolocals) &&
+      ientry != entry_for_kind(AbstractInterpreter::zerolocals_synchronized))
+    return false;  // interpreter does not run this method!
+
+  // otherwise, we can be sure this bytecode has never been executed
+  return true;
+}
+
+
+#ifndef PRODUCT
+void AbstractInterpreter::print_method_kind(MethodKind kind) {
+  switch (kind) {
+    case zerolocals             : tty->print("zerolocals"             ); break;
+    case zerolocals_synchronized: tty->print("zerolocals_synchronized"); break;
+    case native                 : tty->print("native"                 ); break;
+    case native_synchronized    : tty->print("native_synchronized"    ); break;
+    case empty                  : tty->print("empty"                  ); break;
+    case accessor               : tty->print("accessor"               ); break;
+    case abstract               : tty->print("abstract"               ); break;
+    case java_lang_math_sin     : tty->print("java_lang_math_sin"     ); break;
+    case java_lang_math_cos     : tty->print("java_lang_math_cos"     ); break;
+    case java_lang_math_tan     : tty->print("java_lang_math_tan"     ); break;
+    case java_lang_math_abs     : tty->print("java_lang_math_abs"     ); break;
+    case java_lang_math_sqrt    : tty->print("java_lang_math_sqrt"    ); break;
+    case java_lang_math_log     : tty->print("java_lang_math_log"     ); break;
+    case java_lang_math_log10   : tty->print("java_lang_math_log10"   ); break;
+    case java_util_zip_CRC32_update           : tty->print("java_util_zip_CRC32_update"); break;
+    case java_util_zip_CRC32_updateBytes      : tty->print("java_util_zip_CRC32_updateBytes"); break;
+    case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break;
+    case java_util_zip_CRC32C_updateBytes     : tty->print("java_util_zip_CRC32C_updateBytes"); break;
+    case java_util_zip_CRC32C_updateDirectByteBuffer: tty->print("java_util_zip_CRC32C_updateDirectByteBuffer"); break;
+    default:
+      if (kind >= method_handle_invoke_FIRST &&
+          kind <= method_handle_invoke_LAST) {
+        const char* kind_name = vmIntrinsics::name_at(method_handle_intrinsic(kind));
+        if (kind_name[0] == '_')  kind_name = &kind_name[1];  // '_invokeExact' => 'invokeExact'
+        tty->print("method_handle_%s", kind_name);
+        break;
+      }
+      ShouldNotReachHere();
+      break;
+  }
+}
+#endif // PRODUCT
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Deoptimization support
+
+/**
+ * If a deoptimization happens, this function returns the point of the next bytecode at which to continue execution.
+ */
+address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) {
+  assert(method->contains(bcp), "just checkin'");
+
+  // Get the original and rewritten bytecode.
+  Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
+  assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
+
+  const int bci = method->bci_from(bcp);
+
+  // compute continuation length
+  const int length = Bytecodes::length_at(method, bcp);
+
+  // compute result type
+  BasicType type = T_ILLEGAL;
+
+  switch (code) {
+    case Bytecodes::_invokevirtual  :
+    case Bytecodes::_invokespecial  :
+    case Bytecodes::_invokestatic   :
+    case Bytecodes::_invokeinterface: {
+      Thread *thread = Thread::current();
+      ResourceMark rm(thread);
+      methodHandle mh(thread, method);
+      type = Bytecode_invoke(mh, bci).result_type();
+      // since the cache entry might not be initialized:
+      // (NOT needed for the old calling convention)
+      if (!is_top_frame) {
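+        // Rewritten invoke bytecodes hold a constant pool cache index in native
+        // byte order, hence get_native_u2 rather than a Java-order read.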
+        int index = Bytes::get_native_u2(bcp+1);
+        method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
+      }
+      break;
+    }
+
+    case Bytecodes::_invokedynamic: {
+      Thread *thread = Thread::current();
+      ResourceMark rm(thread);
+      methodHandle mh(thread, method);
+      type = Bytecode_invoke(mh, bci).result_type();
+      // since the cache entry might not be initialized:
+      // (NOT needed for the old calling convention)
+      if (!is_top_frame) {
+        int index = Bytes::get_native_u4(bcp+1);
+        method->constants()->invokedynamic_cp_cache_entry_at(index)->set_parameter_size(callee_parameters);
+      }
+      break;
+    }
+
+    case Bytecodes::_ldc   :
+    case Bytecodes::_ldc_w : // fall through
+    case Bytecodes::_ldc2_w:
+      {
+        Thread *thread = Thread::current();
+        ResourceMark rm(thread);
+        methodHandle mh(thread, method);
+        type = Bytecode_loadconstant(mh, bci).result_type();
+        break;
+      }
+
+    default:
+      type = Bytecodes::result_type(code);
+      break;
+  }
+
+  // return entry point for computed continuation state & bytecode length
+  return
+    is_top_frame
+    ? Interpreter::deopt_entry (as_TosState(type), length)
+    : Interpreter::return_entry(as_TosState(type), length, code);
+}
+
+// If deoptimization happens, this function returns the point where the interpreter reexecutes
+// the bytecode.
+// Note: Bytecodes::_athrow is a special case in that it does not return
+//       Interpreter::deopt_entry(vtos, 0) like others
+address AbstractInterpreter::deopt_reexecute_entry(Method* method, address bcp) {
+  assert(method->contains(bcp), "just checkin'");
+  Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
+#if defined(COMPILER1) || INCLUDE_JVMCI
+  if (code == Bytecodes::_athrow) {
+    return Interpreter::rethrow_exception_entry();
+  }
+#endif /* COMPILER1 || INCLUDE_JVMCI */
+  return Interpreter::deopt_entry(vtos, 0);
+}
+
+// If deoptimization happens, the interpreter should reexecute these bytecodes.
+// This function mainly helps the compilers to set up the reexecute bit.
+bool AbstractInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
+  switch (code) {
+    case Bytecodes::_lookupswitch:
+    case Bytecodes::_tableswitch:
+    case Bytecodes::_fast_binaryswitch:
+    case Bytecodes::_fast_linearswitch:
+    // recompute conditional expression folded into _if<cond>
+    case Bytecodes::_lcmp      :
+    case Bytecodes::_fcmpl     :
+    case Bytecodes::_fcmpg     :
+    case Bytecodes::_dcmpl     :
+    case Bytecodes::_dcmpg     :
+    case Bytecodes::_ifnull    :
+    case Bytecodes::_ifnonnull :
+    case Bytecodes::_goto      :
+    case Bytecodes::_goto_w    :
+    case Bytecodes::_ifeq      :
+    case Bytecodes::_ifne      :
+    case Bytecodes::_iflt      :
+    case Bytecodes::_ifge      :
+    case Bytecodes::_ifgt      :
+    case Bytecodes::_ifle      :
+    case Bytecodes::_if_icmpeq :
+    case Bytecodes::_if_icmpne :
+    case Bytecodes::_if_icmplt :
+    case Bytecodes::_if_icmpge :
+    case Bytecodes::_if_icmpgt :
+    case Bytecodes::_if_icmple :
+    case Bytecodes::_if_acmpeq :
+    case Bytecodes::_if_acmpne :
+    // special cases
+    case Bytecodes::_getfield  :
+    case Bytecodes::_putfield  :
+    case Bytecodes::_getstatic :
+    case Bytecodes::_putstatic :
+    case Bytecodes::_aastore   :
+#ifdef COMPILER1
+    // special case of reexecution
+    case Bytecodes::_athrow    :
+#endif
+      return true;
+
+    default:
+      return false;
+  }
+}
+
+void AbstractInterpreter::initialize_method_handle_entries() {
+  // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
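+  // Until then they alias the abstract entry; set_entry_for_kind() asserts that this
+  // AbstractMethodError entry is the previous value before replacing it.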
+  for (int i = method_handle_invoke_FIRST; i <= method_handle_invoke_LAST; i++) {
+    MethodKind kind = (MethodKind) i;
+    _entry_table[kind] = _entry_table[Interpreter::abstract];
+  }
+}
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -284,6 +284,12 @@
     default:        ShouldNotReachHere();
     }
   }
+
+  static void initialize_method_handle_entries();
+
+  // PPC-only: Support abs and sqrt as in the compiler.
+  // For others we can use a normal (native) entry.
+  static bool math_entry_available(MethodKind kind);
 };
 
 //------------------------------------------------------------------------------------------------------------------------
@@ -294,16 +300,6 @@
  protected:
   InterpreterMacroAssembler* _masm;
 
-  // shared code sequences
-  // Converter for native abi result to tosca result
-  address generate_result_handler_for(BasicType type);
-  address generate_slow_signature_handler();
-
-  void bang_stack_shadow_pages(bool native_call);
-
-  void generate_all();
-  void initialize_method_handle_entries();
-
  public:
   AbstractInterpreterGenerator(StubQueue* _code);
 };
--- a/hotspot/src/share/vm/interpreter/cppInterpreter.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/share/vm/interpreter/cppInterpreter.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,12 +46,11 @@
     int code_size = InterpreterCodeSize;
     NOT_PRODUCT(code_size *= 4;)  // debug uses extra interpreter code space
     _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL,
-                          "Interpreter");
+                           "Interpreter");
     CppInterpreterGenerator g(_code);
     if (PrintInterpreter) print();
   }
 
-
   // Allow c++ interpreter to do one initialization now that switches are set, etc.
   BytecodeInterpreter start_msg(BytecodeInterpreter::initialize);
   if (JvmtiExport::can_post_interpreter_events())
@@ -73,114 +72,10 @@
 }
 
 
-CppInterpreterGenerator::CppInterpreterGenerator(StubQueue* _code): AbstractInterpreterGenerator(_code) {
-  generate_all();
-}
-
-static const BasicType types[Interpreter::number_of_result_handlers] = {
-  T_BOOLEAN,
-  T_CHAR   ,
-  T_BYTE   ,
-  T_SHORT  ,
-  T_INT    ,
-  T_LONG   ,
-  T_VOID   ,
-  T_FLOAT  ,
-  T_DOUBLE ,
-  T_OBJECT
-};
-
-void CppInterpreterGenerator::generate_all() {
-  AbstractInterpreterGenerator::generate_all();
-
-
-#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind)
-
-  { CodeletMark cm(_masm, "(kind = frame_manager)");
-    // all non-native method kinds
-    method_entry(zerolocals);
-    method_entry(zerolocals_synchronized);
-    method_entry(empty);
-    method_entry(accessor);
-    method_entry(abstract);
-    method_entry(java_lang_math_sin   );
-    method_entry(java_lang_math_cos   );
-    method_entry(java_lang_math_tan   );
-    method_entry(java_lang_math_abs   );
-    method_entry(java_lang_math_sqrt  );
-    method_entry(java_lang_math_log   );
-    method_entry(java_lang_math_log10 );
-    method_entry(java_lang_math_pow );
-    method_entry(java_lang_math_exp );
-    method_entry(java_lang_ref_reference_get);
-
-    initialize_method_handle_entries();
-
-    Interpreter::_native_entry_begin = Interpreter::code()->code_end();
-    method_entry(native);
-    method_entry(native_synchronized);
-    Interpreter::_native_entry_end = Interpreter::code()->code_end();
-  }
-
-
-#undef method_entry
-}
 
 InterpreterCodelet* CppInterpreter::codelet_containing(address pc) {
   // FIXME: I'm pretty sure _code is null and this is never called, which is why it's copied.
   return (InterpreterCodelet*)_code->stub_containing(pc);
 }
 
-// Generate method entries
-address CppInterpreterGenerator::generate_method_entry(
-                                        AbstractInterpreter::MethodKind kind) {
-  // determine code generation flags
-  bool native = false;
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-  case Interpreter::zerolocals             :                                          break;
-  case Interpreter::zerolocals_synchronized:                synchronized = true;      break;
-  case Interpreter::native                 : native = true;                           break;
-  case Interpreter::native_synchronized    : native = true; synchronized = true;      break;
-  case Interpreter::empty                  : entry_point = generate_empty_entry();    break;
-  case Interpreter::accessor               : entry_point = generate_accessor_entry(); break;
-  case Interpreter::abstract               : entry_point = generate_abstract_entry(); break;
-
-  case Interpreter::java_lang_math_sin     : // fall thru
-  case Interpreter::java_lang_math_cos     : // fall thru
-  case Interpreter::java_lang_math_tan     : // fall thru
-  case Interpreter::java_lang_math_abs     : // fall thru
-  case Interpreter::java_lang_math_log     : // fall thru
-  case Interpreter::java_lang_math_log10   : // fall thru
-  case Interpreter::java_lang_math_sqrt    : // fall thru
-  case Interpreter::java_lang_math_pow     : // fall thru
-  case Interpreter::java_lang_math_exp     : entry_point = generate_math_entry(kind);      break;
-  case Interpreter::java_lang_ref_reference_get
-                                           : entry_point = generate_Reference_get_entry(); break;
-  default:
-    fatal("unexpected method kind: %d", kind);
-    break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-
-  // We expect the normal and native entry points to be generated first so we can reuse them.
-  if (native) {
-    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::native_synchronized : Interpreter::native);
-    if (entry_point == NULL) {
-      entry_point = generate_native_entry(synchronized);
-    }
-  } else {
-    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::zerolocals_synchronized : Interpreter::zerolocals);
-    if (entry_point == NULL) {
-      entry_point = generate_normal_entry(synchronized);
-    }
-  }
-
-  return entry_point;
-}
 #endif // CC_INTERP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/bytecodeInterpreter.hpp"
+#include "interpreter/cppInterpreterGenerator.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+
+#ifdef CC_INTERP
+
+CppInterpreterGenerator::CppInterpreterGenerator(StubQueue* _code): AbstractInterpreterGenerator(_code) {
+  generate_all();
+}
+
+void CppInterpreterGenerator::generate_all() {
+  { CodeletMark cm(_masm, "slow signature handler");
+    AbstractInterpreter::_slow_signature_handler = generate_slow_signature_handler();
+  }
+
+#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind)
+
+  { CodeletMark cm(_masm, "(kind = frame_manager)");
+    // all non-native method kinds
+    method_entry(zerolocals);
+    method_entry(zerolocals_synchronized);
+    method_entry(empty);
+    method_entry(accessor);
+    method_entry(abstract);
+    method_entry(java_lang_math_sin   );
+    method_entry(java_lang_math_cos   );
+    method_entry(java_lang_math_tan   );
+    method_entry(java_lang_math_abs   );
+    method_entry(java_lang_math_sqrt  );
+    method_entry(java_lang_math_log   );
+    method_entry(java_lang_math_log10 );
+    method_entry(java_lang_math_pow );
+    method_entry(java_lang_math_exp );
+    method_entry(java_lang_ref_reference_get);
+
+    AbstractInterpreter::initialize_method_handle_entries();
+
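+    // all native method kinds (must be one contiguous block)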
+    Interpreter::_native_entry_begin = Interpreter::code()->code_end();
+    method_entry(native);
+    method_entry(native_synchronized);
+    Interpreter::_native_entry_end = Interpreter::code()->code_end();
+  }
+
+#undef method_entry
+}
+
+// Generate method entries
+address CppInterpreterGenerator::generate_method_entry(
+                                        AbstractInterpreter::MethodKind kind) {
+  // determine code generation flags
+  bool native = false;
+  bool synchronized = false;
+  address entry_point = NULL;
+
+  switch (kind) {
+  case Interpreter::zerolocals             :                                          break;
+  case Interpreter::zerolocals_synchronized:                synchronized = true;      break;
+  case Interpreter::native                 : native = true;                           break;
+  case Interpreter::native_synchronized    : native = true; synchronized = true;      break;
+  case Interpreter::empty                  : entry_point = generate_empty_entry();    break;
+  case Interpreter::accessor               : entry_point = generate_accessor_entry(); break;
+  case Interpreter::abstract               : entry_point = generate_abstract_entry(); break;
+
+  case Interpreter::java_lang_math_sin     : // fall thru
+  case Interpreter::java_lang_math_cos     : // fall thru
+  case Interpreter::java_lang_math_tan     : // fall thru
+  case Interpreter::java_lang_math_abs     : // fall thru
+  case Interpreter::java_lang_math_log     : // fall thru
+  case Interpreter::java_lang_math_log10   : // fall thru
+  case Interpreter::java_lang_math_sqrt    : // fall thru
+  case Interpreter::java_lang_math_pow     : // fall thru
+  case Interpreter::java_lang_math_exp     : entry_point = generate_math_entry(kind);      break;
+  case Interpreter::java_lang_ref_reference_get
+                                           : entry_point = generate_Reference_get_entry(); break;
+  default:
+    fatal("unexpected method kind: %d", kind);
+    break;
+  }
+
+  if (entry_point) {
+    return entry_point;
+  }
+
+  // We expect the normal and native entry points to be generated first so we can reuse them.
+  if (native) {
+    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::native_synchronized : Interpreter::native);
+    if (entry_point == NULL) {
+      entry_point = generate_native_entry(synchronized);
+    }
+  } else {
+    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::zerolocals_synchronized : Interpreter::zerolocals);
+    if (entry_point == NULL) {
+      entry_point = generate_normal_entry(synchronized);
+    }
+  }
+
+  return entry_point;
+}
+#endif // CC_INTERP
--- a/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,8 @@
  private:
   void generate_all();
 
+  address generate_slow_signature_handler();
+
   address generate_method_entry(AbstractInterpreter::MethodKind kind);
   address generate_normal_entry(bool synchronized);
   address generate_native_entry(bool synchronized);
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,40 +111,6 @@
   *_masm = NULL;
 }
 
-//------------------------------------------------------------------------------------------------------------------------
-// Implementation of platform independent aspects of Interpreter
-
-void AbstractInterpreter::initialize() {
-  if (_code != NULL) return;
-
-  // make sure 'imported' classes are initialized
-  if (CountBytecodes || TraceBytecodes || StopInterpreterAt) BytecodeCounter::reset();
-  if (PrintBytecodeHistogram)                                BytecodeHistogram::reset();
-  if (PrintBytecodePairHistogram)                            BytecodePairHistogram::reset();
-
-  InvocationCounter::reinitialize(DelayCompilationDuringStartup);
-
-}
-
-void AbstractInterpreter::print() {
-  tty->cr();
-  tty->print_cr("----------------------------------------------------------------------");
-  tty->print_cr("Interpreter");
-  tty->cr();
-  tty->print_cr("code size        = %6dK bytes", (int)_code->used_space()/1024);
-  tty->print_cr("total space      = %6dK bytes", (int)_code->total_space()/1024);
-  tty->print_cr("wasted space     = %6dK bytes", (int)_code->available_space()/1024);
-  tty->cr();
-  tty->print_cr("# of codelets    = %6d"      , _code->number_of_stubs());
-  if (_code->number_of_stubs() != 0) {
-    tty->print_cr("avg codelet size = %6d bytes", _code->used_space() / _code->number_of_stubs());
-    tty->cr();
-  }
-  _code->print();
-  tty->print_cr("----------------------------------------------------------------------");
-  tty->cr();
-}
-
 
 void interpreter_init() {
   Interpreter::initialize();
@@ -166,384 +132,3 @@
                                              AbstractInterpreter::code()->code_end());
   }
 }
-
-//------------------------------------------------------------------------------------------------------------------------
-// Implementation of interpreter
-
-StubQueue* AbstractInterpreter::_code                                       = NULL;
-bool       AbstractInterpreter::_notice_safepoints                          = false;
-address    AbstractInterpreter::_rethrow_exception_entry                    = NULL;
-
-address    AbstractInterpreter::_native_entry_begin                         = NULL;
-address    AbstractInterpreter::_native_entry_end                           = NULL;
-address    AbstractInterpreter::_slow_signature_handler;
-address    AbstractInterpreter::_entry_table            [AbstractInterpreter::number_of_method_entries];
-address    AbstractInterpreter::_native_abi_to_tosca    [AbstractInterpreter::number_of_result_handlers];
-
-//------------------------------------------------------------------------------------------------------------------------
-// Generation of complete interpreter
-
-AbstractInterpreterGenerator::AbstractInterpreterGenerator(StubQueue* _code) {
-  _masm                      = NULL;
-}
-
-
-static const BasicType types[Interpreter::number_of_result_handlers] = {
-  T_BOOLEAN,
-  T_CHAR   ,
-  T_BYTE   ,
-  T_SHORT  ,
-  T_INT    ,
-  T_LONG   ,
-  T_VOID   ,
-  T_FLOAT  ,
-  T_DOUBLE ,
-  T_OBJECT
-};
-
-void AbstractInterpreterGenerator::generate_all() {
-
-
-  { CodeletMark cm(_masm, "slow signature handler");
-    Interpreter::_slow_signature_handler = generate_slow_signature_handler();
-  }
-
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-// Entry points
-
-AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m) {
-  // Abstract method?
-  if (m->is_abstract()) return abstract;
-
-  // Method handle primitive?
-  if (m->is_method_handle_intrinsic()) {
-    vmIntrinsics::ID id = m->intrinsic_id();
-    assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic");
-    MethodKind kind = (MethodKind)( method_handle_invoke_FIRST +
-                                    ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) );
-    assert(kind <= method_handle_invoke_LAST, "parallel enum ranges");
-    return kind;
-  }
-
-#ifndef CC_INTERP
-  if (UseCRC32Intrinsics && m->is_native()) {
-    // Use optimized stub code for CRC32 native methods.
-    switch (m->intrinsic_id()) {
-      case vmIntrinsics::_updateCRC32            : return java_util_zip_CRC32_update;
-      case vmIntrinsics::_updateBytesCRC32       : return java_util_zip_CRC32_updateBytes;
-      case vmIntrinsics::_updateByteBufferCRC32  : return java_util_zip_CRC32_updateByteBuffer;
-    }
-  }
-  if (UseCRC32CIntrinsics) {
-    // Use optimized stub code for CRC32C methods.
-    switch (m->intrinsic_id()) {
-      case vmIntrinsics::_updateBytesCRC32C             : return java_util_zip_CRC32C_updateBytes;
-      case vmIntrinsics::_updateDirectByteBufferCRC32C  : return java_util_zip_CRC32C_updateDirectByteBuffer;
-    }
-  }
-
-  switch(m->intrinsic_id()) {
-  case vmIntrinsics::_intBitsToFloat:      return java_lang_Float_intBitsToFloat;
-  case vmIntrinsics::_floatToRawIntBits:   return java_lang_Float_floatToRawIntBits;
-  case vmIntrinsics::_longBitsToDouble:    return java_lang_Double_longBitsToDouble;
-  case vmIntrinsics::_doubleToRawLongBits: return java_lang_Double_doubleToRawLongBits;
-  }
-
-#endif // CC_INTERP
-
-  // Native method?
-  // Note: This test must come _before_ the test for intrinsic
-  //       methods. See also comments below.
-  if (m->is_native()) {
-    assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out");
-    return m->is_synchronized() ? native_synchronized : native;
-  }
-
-  // Synchronized?
-  if (m->is_synchronized()) {
-    return zerolocals_synchronized;
-  }
-
-  if (RegisterFinalizersAtInit && m->code_size() == 1 &&
-      m->intrinsic_id() == vmIntrinsics::_Object_init) {
-    // We need to execute the special return bytecode to check for
-    // finalizer registration so create a normal frame.
-    return zerolocals;
-  }
-
-  // Empty method?
-  if (m->is_empty_method()) {
-    return empty;
-  }
-
-  // Special intrinsic method?
-  // Note: This test must come _after_ the test for native methods,
-  //       otherwise we will run into problems with JDK 1.2, see also
-  //       TemplateInterpreterGenerator::generate_method_entry() for
-  //       for details.
-  switch (m->intrinsic_id()) {
-    case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
-    case vmIntrinsics::_dcos  : return java_lang_math_cos  ;
-    case vmIntrinsics::_dtan  : return java_lang_math_tan  ;
-    case vmIntrinsics::_dabs  : return java_lang_math_abs  ;
-    case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
-    case vmIntrinsics::_dlog  : return java_lang_math_log  ;
-    case vmIntrinsics::_dlog10: return java_lang_math_log10;
-    case vmIntrinsics::_dpow  : return java_lang_math_pow  ;
-    case vmIntrinsics::_dexp  : return java_lang_math_exp  ;
-
-    case vmIntrinsics::_Reference_get:
-                                return java_lang_ref_reference_get;
-  }
-
-  // Accessor method?
-  if (m->is_getter()) {
-    // TODO: We should have used ::is_accessor above, but fast accessors in Zero expect only getters.
-    // See CppInterpreter::accessor_entry in cppInterpreter_zero.cpp. This should be fixed in Zero,
-    // then the call above updated to ::is_accessor
-    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
-    return accessor;
-  }
-
-  // Note: for now: zero locals for all non-empty methods
-  return zerolocals;
-}
-
-
-void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) {
-  assert(kind >= method_handle_invoke_FIRST &&
-         kind <= method_handle_invoke_LAST, "late initialization only for MH entry points");
-  assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry");
-  _entry_table[kind] = entry;
-}
-
-
-// Return true if the interpreter can prove that the given bytecode has
-// not yet been executed (in Java semantics, not in actual operation).
-bool AbstractInterpreter::is_not_reached(const methodHandle& method, int bci) {
-  Bytecodes::Code code = method()->code_at(bci);
-
-  if (!Bytecodes::must_rewrite(code)) {
-    // might have been reached
-    return false;
-  }
-
-  // the bytecode might not be rewritten if the method is an accessor, etc.
-  address ientry = method->interpreter_entry();
-  if (ientry != entry_for_kind(AbstractInterpreter::zerolocals) &&
-      ientry != entry_for_kind(AbstractInterpreter::zerolocals_synchronized))
-    return false;  // interpreter does not run this method!
-
-  // otherwise, we can be sure this bytecode has never been executed
-  return true;
-}
-
-
-#ifndef PRODUCT
-void AbstractInterpreter::print_method_kind(MethodKind kind) {
-  switch (kind) {
-    case zerolocals             : tty->print("zerolocals"             ); break;
-    case zerolocals_synchronized: tty->print("zerolocals_synchronized"); break;
-    case native                 : tty->print("native"                 ); break;
-    case native_synchronized    : tty->print("native_synchronized"    ); break;
-    case empty                  : tty->print("empty"                  ); break;
-    case accessor               : tty->print("accessor"               ); break;
-    case abstract               : tty->print("abstract"               ); break;
-    case java_lang_math_sin     : tty->print("java_lang_math_sin"     ); break;
-    case java_lang_math_cos     : tty->print("java_lang_math_cos"     ); break;
-    case java_lang_math_tan     : tty->print("java_lang_math_tan"     ); break;
-    case java_lang_math_abs     : tty->print("java_lang_math_abs"     ); break;
-    case java_lang_math_sqrt    : tty->print("java_lang_math_sqrt"    ); break;
-    case java_lang_math_log     : tty->print("java_lang_math_log"     ); break;
-    case java_lang_math_log10   : tty->print("java_lang_math_log10"   ); break;
-    case java_util_zip_CRC32_update           : tty->print("java_util_zip_CRC32_update"); break;
-    case java_util_zip_CRC32_updateBytes      : tty->print("java_util_zip_CRC32_updateBytes"); break;
-    case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break;
-    case java_util_zip_CRC32C_updateBytes     : tty->print("java_util_zip_CRC32C_updateBytes"); break;
-    case java_util_zip_CRC32C_updateDirectByteBuffer: tty->print("java_util_zip_CRC32C_updateDirectByteByffer"); break;
-    default:
-      if (kind >= method_handle_invoke_FIRST &&
-          kind <= method_handle_invoke_LAST) {
-        const char* kind_name = vmIntrinsics::name_at(method_handle_intrinsic(kind));
-        if (kind_name[0] == '_')  kind_name = &kind_name[1];  // '_invokeExact' => 'invokeExact'
-        tty->print("method_handle_%s", kind_name);
-        break;
-      }
-      ShouldNotReachHere();
-      break;
-  }
-}
-#endif // PRODUCT
-
-
-//------------------------------------------------------------------------------------------------------------------------
-// Deoptimization support
-
-/**
- * If a deoptimization happens, this function returns the point of next bytecode to continue execution.
- */
-address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) {
-  assert(method->contains(bcp), "just checkin'");
-
-  // Get the original and rewritten bytecode.
-  Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
-  assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
-
-  const int bci = method->bci_from(bcp);
-
-  // compute continuation length
-  const int length = Bytecodes::length_at(method, bcp);
-
-  // compute result type
-  BasicType type = T_ILLEGAL;
-
-  switch (code) {
-    case Bytecodes::_invokevirtual  :
-    case Bytecodes::_invokespecial  :
-    case Bytecodes::_invokestatic   :
-    case Bytecodes::_invokeinterface: {
-      Thread *thread = Thread::current();
-      ResourceMark rm(thread);
-      methodHandle mh(thread, method);
-      type = Bytecode_invoke(mh, bci).result_type();
-      // since the cache entry might not be initialized:
-      // (NOT needed for the old calling convension)
-      if (!is_top_frame) {
-        int index = Bytes::get_native_u2(bcp+1);
-        method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
-      }
-      break;
-    }
-
-   case Bytecodes::_invokedynamic: {
-      Thread *thread = Thread::current();
-      ResourceMark rm(thread);
-      methodHandle mh(thread, method);
-      type = Bytecode_invoke(mh, bci).result_type();
-      // since the cache entry might not be initialized:
-      // (NOT needed for the old calling convension)
-      if (!is_top_frame) {
-        int index = Bytes::get_native_u4(bcp+1);
-        method->constants()->invokedynamic_cp_cache_entry_at(index)->set_parameter_size(callee_parameters);
-      }
-      break;
-    }
-
-    case Bytecodes::_ldc   :
-    case Bytecodes::_ldc_w : // fall through
-    case Bytecodes::_ldc2_w:
-      {
-        Thread *thread = Thread::current();
-        ResourceMark rm(thread);
-        methodHandle mh(thread, method);
-        type = Bytecode_loadconstant(mh, bci).result_type();
-        break;
-      }
-
-    default:
-      type = Bytecodes::result_type(code);
-      break;
-  }
-
-  // return entry point for computed continuation state & bytecode length
-  return
-    is_top_frame
-    ? Interpreter::deopt_entry (as_TosState(type), length)
-    : Interpreter::return_entry(as_TosState(type), length, code);
-}
-
-// If deoptimization happens, this function returns the point where the interpreter reexecutes
-// the bytecode.
-// Note: Bytecodes::_athrow is a special case in that it does not return
-//       Interpreter::deopt_entry(vtos, 0) like others
-address AbstractInterpreter::deopt_reexecute_entry(Method* method, address bcp) {
-  assert(method->contains(bcp), "just checkin'");
-  Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
-#if defined(COMPILER1) || INCLUDE_JVMCI
-  if(code == Bytecodes::_athrow ) {
-    return Interpreter::rethrow_exception_entry();
-  }
-#endif /* COMPILER1 || INCLUDE_JVMCI */
-  return Interpreter::deopt_entry(vtos, 0);
-}
-
-// If deoptimization happens, the interpreter should reexecute these bytecodes.
-// This function mainly helps the compilers to set up the reexecute bit.
-bool AbstractInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
-  switch (code) {
-    case Bytecodes::_lookupswitch:
-    case Bytecodes::_tableswitch:
-    case Bytecodes::_fast_binaryswitch:
-    case Bytecodes::_fast_linearswitch:
-    // recompute condtional expression folded into _if<cond>
-    case Bytecodes::_lcmp      :
-    case Bytecodes::_fcmpl     :
-    case Bytecodes::_fcmpg     :
-    case Bytecodes::_dcmpl     :
-    case Bytecodes::_dcmpg     :
-    case Bytecodes::_ifnull    :
-    case Bytecodes::_ifnonnull :
-    case Bytecodes::_goto      :
-    case Bytecodes::_goto_w    :
-    case Bytecodes::_ifeq      :
-    case Bytecodes::_ifne      :
-    case Bytecodes::_iflt      :
-    case Bytecodes::_ifge      :
-    case Bytecodes::_ifgt      :
-    case Bytecodes::_ifle      :
-    case Bytecodes::_if_icmpeq :
-    case Bytecodes::_if_icmpne :
-    case Bytecodes::_if_icmplt :
-    case Bytecodes::_if_icmpge :
-    case Bytecodes::_if_icmpgt :
-    case Bytecodes::_if_icmple :
-    case Bytecodes::_if_acmpeq :
-    case Bytecodes::_if_acmpne :
-    // special cases
-    case Bytecodes::_getfield  :
-    case Bytecodes::_putfield  :
-    case Bytecodes::_getstatic :
-    case Bytecodes::_putstatic :
-    case Bytecodes::_aastore   :
-#ifdef COMPILER1
-    //special case of reexecution
-    case Bytecodes::_athrow    :
-#endif
-      return true;
-
-    default:
-      return false;
-  }
-}
-
-void AbstractInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
-  // Quick & dirty stack overflow checking: bang the stack & handle trap.
-  // Note that we do the banging after the frame is setup, since the exception
-  // handling code expects to find a valid interpreter frame on the stack.
-  // Doing the banging earlier fails if the caller frame is not an interpreter
-  // frame.
-  // (Also, the exception throwing code expects to unlock any synchronized
-  // method receiever, so do the banging after locking the receiver.)
-
-  // Bang each page in the shadow zone. We can't assume it's been done for
-  // an interpreter frame with greater than a page of locals, so each page
-  // needs to be checked.  Only true for non-native.
-  if (UseStackBanging) {
-    const int page_size = os::vm_page_size();
-    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
-    const int start_page = native_call ? n_shadow_pages : 1;
-    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
-      __ bang_stack_with_offset(pages*page_size);
-    }
-  }
-}
-
-void AbstractInterpreterGenerator::initialize_method_handle_entries() {
-  // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
-  for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) {
-    Interpreter::MethodKind kind = (Interpreter::MethodKind) i;
-    Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
-  }
-}
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 #include "interpreter/templateInterpreter.hpp"
 #include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
+#include "memory/resourceArea.hpp"
 
 #ifndef CC_INTERP
 
@@ -219,376 +220,6 @@
 DispatchTable TemplateInterpreter::_safept_table;
 address    TemplateInterpreter::_wentry_point[DispatchTable::length];
 
-TemplateInterpreterGenerator::TemplateInterpreterGenerator(StubQueue* _code): AbstractInterpreterGenerator(_code) {
-  _unimplemented_bytecode    = NULL;
-  _illegal_bytecode_sequence = NULL;
-  generate_all();
-}
-
-static const BasicType types[Interpreter::number_of_result_handlers] = {
-  T_BOOLEAN,
-  T_CHAR   ,
-  T_BYTE   ,
-  T_SHORT  ,
-  T_INT    ,
-  T_LONG   ,
-  T_VOID   ,
-  T_FLOAT  ,
-  T_DOUBLE ,
-  T_OBJECT
-};
-
-void TemplateInterpreterGenerator::generate_all() {
-  // Loop, in case we need several variants of the interpreter entries
-  do {
-    if (!CodeCacheExtensions::skip_code_generation()) {
-      // bypass code generation when useless
-      AbstractInterpreterGenerator::generate_all();
-
-      { CodeletMark cm(_masm, "error exits");
-        _unimplemented_bytecode    = generate_error_exit("unimplemented bytecode");
-        _illegal_bytecode_sequence = generate_error_exit("illegal bytecode sequence - method not verified");
-      }
-
-#ifndef PRODUCT
-      if (TraceBytecodes) {
-        CodeletMark cm(_masm, "bytecode tracing support");
-        Interpreter::_trace_code =
-          EntryPoint(
-                     generate_trace_code(btos),
-                     generate_trace_code(ctos),
-                     generate_trace_code(stos),
-                     generate_trace_code(atos),
-                     generate_trace_code(itos),
-                     generate_trace_code(ltos),
-                     generate_trace_code(ftos),
-                     generate_trace_code(dtos),
-                     generate_trace_code(vtos)
-                     );
-      }
-#endif // !PRODUCT
-
-      { CodeletMark cm(_masm, "return entry points");
-        const int index_size = sizeof(u2);
-        for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
-          Interpreter::_return_entry[i] =
-            EntryPoint(
-                       generate_return_entry_for(itos, i, index_size),
-                       generate_return_entry_for(itos, i, index_size),
-                       generate_return_entry_for(itos, i, index_size),
-                       generate_return_entry_for(atos, i, index_size),
-                       generate_return_entry_for(itos, i, index_size),
-                       generate_return_entry_for(ltos, i, index_size),
-                       generate_return_entry_for(ftos, i, index_size),
-                       generate_return_entry_for(dtos, i, index_size),
-                       generate_return_entry_for(vtos, i, index_size)
-                       );
-        }
-      }
-
-      { CodeletMark cm(_masm, "invoke return entry points");
-        const TosState states[] = {itos, itos, itos, itos, ltos, ftos, dtos, atos, vtos};
-        const int invoke_length = Bytecodes::length_for(Bytecodes::_invokestatic);
-        const int invokeinterface_length = Bytecodes::length_for(Bytecodes::_invokeinterface);
-        const int invokedynamic_length = Bytecodes::length_for(Bytecodes::_invokedynamic);
-
-        for (int i = 0; i < Interpreter::number_of_return_addrs; i++) {
-          TosState state = states[i];
-          Interpreter::_invoke_return_entry[i] = generate_return_entry_for(state, invoke_length, sizeof(u2));
-          Interpreter::_invokeinterface_return_entry[i] = generate_return_entry_for(state, invokeinterface_length, sizeof(u2));
-          Interpreter::_invokedynamic_return_entry[i] = generate_return_entry_for(state, invokedynamic_length, sizeof(u4));
-        }
-      }
-
-      { CodeletMark cm(_masm, "earlyret entry points");
-        Interpreter::_earlyret_entry =
-          EntryPoint(
-                     generate_earlyret_entry_for(btos),
-                     generate_earlyret_entry_for(ctos),
-                     generate_earlyret_entry_for(stos),
-                     generate_earlyret_entry_for(atos),
-                     generate_earlyret_entry_for(itos),
-                     generate_earlyret_entry_for(ltos),
-                     generate_earlyret_entry_for(ftos),
-                     generate_earlyret_entry_for(dtos),
-                     generate_earlyret_entry_for(vtos)
-                     );
-      }
-
-      { CodeletMark cm(_masm, "deoptimization entry points");
-        for (int i = 0; i < Interpreter::number_of_deopt_entries; i++) {
-          Interpreter::_deopt_entry[i] =
-            EntryPoint(
-                       generate_deopt_entry_for(itos, i),
-                       generate_deopt_entry_for(itos, i),
-                       generate_deopt_entry_for(itos, i),
-                       generate_deopt_entry_for(atos, i),
-                       generate_deopt_entry_for(itos, i),
-                       generate_deopt_entry_for(ltos, i),
-                       generate_deopt_entry_for(ftos, i),
-                       generate_deopt_entry_for(dtos, i),
-                       generate_deopt_entry_for(vtos, i)
-                       );
-        }
-      }
-
-      { CodeletMark cm(_masm, "result handlers for native calls");
-        // The various result converter stublets.
-        int is_generated[Interpreter::number_of_result_handlers];
-        memset(is_generated, 0, sizeof(is_generated));
-
-        for (int i = 0; i < Interpreter::number_of_result_handlers; i++) {
-          BasicType type = types[i];
-          if (!is_generated[Interpreter::BasicType_as_index(type)]++) {
-            Interpreter::_native_abi_to_tosca[Interpreter::BasicType_as_index(type)] = generate_result_handler_for(type);
-          }
-        }
-      }
-
-      { CodeletMark cm(_masm, "continuation entry points");
-        Interpreter::_continuation_entry =
-          EntryPoint(
-                     generate_continuation_for(btos),
-                     generate_continuation_for(ctos),
-                     generate_continuation_for(stos),
-                     generate_continuation_for(atos),
-                     generate_continuation_for(itos),
-                     generate_continuation_for(ltos),
-                     generate_continuation_for(ftos),
-                     generate_continuation_for(dtos),
-                     generate_continuation_for(vtos)
-                     );
-      }
-
-      { CodeletMark cm(_masm, "safepoint entry points");
-        Interpreter::_safept_entry =
-          EntryPoint(
-                     generate_safept_entry_for(btos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(ctos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(stos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(atos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(itos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(ltos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(ftos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(dtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-                     generate_safept_entry_for(vtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint))
-                     );
-      }
-
-      { CodeletMark cm(_masm, "exception handling");
-        // (Note: this is not safepoint safe because thread may return to compiled code)
-        generate_throw_exception();
-      }
-
-      { CodeletMark cm(_masm, "throw exception entrypoints");
-        Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler("java/lang/ArrayIndexOutOfBoundsException");
-        Interpreter::_throw_ArrayStoreException_entry            = generate_klass_exception_handler("java/lang/ArrayStoreException"                 );
-        Interpreter::_throw_ArithmeticException_entry            = generate_exception_handler("java/lang/ArithmeticException"           , "/ by zero");
-        Interpreter::_throw_ClassCastException_entry             = generate_ClassCastException_handler();
-        Interpreter::_throw_NullPointerException_entry           = generate_exception_handler("java/lang/NullPointerException"          , NULL       );
-        Interpreter::_throw_StackOverflowError_entry             = generate_StackOverflowError_handler();
-      }
-
-
-
-#define method_entry(kind)                                              \
-      { CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
-        Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
-      }
-
-      // all non-native method kinds
-      method_entry(zerolocals)
-      method_entry(zerolocals_synchronized)
-      method_entry(empty)
-      method_entry(accessor)
-      method_entry(abstract)
-      method_entry(java_lang_math_sin  )
-      method_entry(java_lang_math_cos  )
-      method_entry(java_lang_math_tan  )
-      method_entry(java_lang_math_abs  )
-      method_entry(java_lang_math_sqrt )
-      method_entry(java_lang_math_log  )
-      method_entry(java_lang_math_log10)
-      method_entry(java_lang_math_exp  )
-      method_entry(java_lang_math_pow  )
-      method_entry(java_lang_ref_reference_get)
-
-      initialize_method_handle_entries();
-
-      // all native method kinds (must be one contiguous block)
-      Interpreter::_native_entry_begin = Interpreter::code()->code_end();
-      method_entry(native)
-      method_entry(native_synchronized)
-      Interpreter::_native_entry_end = Interpreter::code()->code_end();
-
-      if (UseCRC32Intrinsics) {
-        method_entry(java_util_zip_CRC32_update)
-        method_entry(java_util_zip_CRC32_updateBytes)
-        method_entry(java_util_zip_CRC32_updateByteBuffer)
-      }
-
-      if (UseCRC32CIntrinsics) {
-        method_entry(java_util_zip_CRC32C_updateBytes)
-        method_entry(java_util_zip_CRC32C_updateDirectByteBuffer)
-      }
-
-      method_entry(java_lang_Float_intBitsToFloat);
-      method_entry(java_lang_Float_floatToRawIntBits);
-      method_entry(java_lang_Double_longBitsToDouble);
-      method_entry(java_lang_Double_doubleToRawLongBits);
-
-#undef method_entry
-
-      // Bytecodes
-      set_entry_points_for_all_bytes();
-    }
-  } while (CodeCacheExtensions::needs_other_interpreter_variant());
-
-  // installation of code in other places in the runtime
-  // (ExcutableCodeManager calls not needed to copy the entries)
-  set_safepoints_for_all_bytes();
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-
-address TemplateInterpreterGenerator::generate_error_exit(const char* msg) {
-  address entry = __ pc();
-  __ stop(msg);
-  return entry;
-}
-
-
-//------------------------------------------------------------------------------------------------------------------------
-
-void TemplateInterpreterGenerator::set_entry_points_for_all_bytes() {
-  for (int i = 0; i < DispatchTable::length; i++) {
-    Bytecodes::Code code = (Bytecodes::Code)i;
-    if (Bytecodes::is_defined(code)) {
-      set_entry_points(code);
-    } else {
-      set_unimplemented(i);
-    }
-  }
-}
-
-
-void TemplateInterpreterGenerator::set_safepoints_for_all_bytes() {
-  for (int i = 0; i < DispatchTable::length; i++) {
-    Bytecodes::Code code = (Bytecodes::Code)i;
-    if (Bytecodes::is_defined(code)) Interpreter::_safept_table.set_entry(code, Interpreter::_safept_entry);
-  }
-}
-
-
-void TemplateInterpreterGenerator::set_unimplemented(int i) {
-  address e = _unimplemented_bytecode;
-  EntryPoint entry(e, e, e, e, e, e, e, e, e);
-  Interpreter::_normal_table.set_entry(i, entry);
-  Interpreter::_wentry_point[i] = _unimplemented_bytecode;
-}
-
-
-void TemplateInterpreterGenerator::set_entry_points(Bytecodes::Code code) {
-  if (CodeCacheExtensions::skip_template_interpreter_entries(code)) {
-    return;
-  }
-  CodeletMark cm(_masm, Bytecodes::name(code), code);
-  // initialize entry points
-  assert(_unimplemented_bytecode    != NULL, "should have been generated before");
-  assert(_illegal_bytecode_sequence != NULL, "should have been generated before");
-  address bep = _illegal_bytecode_sequence;
-  address cep = _illegal_bytecode_sequence;
-  address sep = _illegal_bytecode_sequence;
-  address aep = _illegal_bytecode_sequence;
-  address iep = _illegal_bytecode_sequence;
-  address lep = _illegal_bytecode_sequence;
-  address fep = _illegal_bytecode_sequence;
-  address dep = _illegal_bytecode_sequence;
-  address vep = _unimplemented_bytecode;
-  address wep = _unimplemented_bytecode;
-  // code for short & wide version of bytecode
-  if (Bytecodes::is_defined(code)) {
-    Template* t = TemplateTable::template_for(code);
-    assert(t->is_valid(), "just checking");
-    set_short_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);
-  }
-  if (Bytecodes::wide_is_defined(code)) {
-    Template* t = TemplateTable::template_for_wide(code);
-    assert(t->is_valid(), "just checking");
-    set_wide_entry_point(t, wep);
-  }
-  // set entry points
-  EntryPoint entry(bep, cep, sep, aep, iep, lep, fep, dep, vep);
-  Interpreter::_normal_table.set_entry(code, entry);
-  Interpreter::_wentry_point[code] = wep;
-  CodeCacheExtensions::completed_template_interpreter_entries(_masm, code);
-}
-
-
-void TemplateInterpreterGenerator::set_wide_entry_point(Template* t, address& wep) {
-  assert(t->is_valid(), "template must exist");
-  assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions");
-  wep = __ pc(); generate_and_dispatch(t);
-}
-
-
-void TemplateInterpreterGenerator::set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
-  assert(t->is_valid(), "template must exist");
-  switch (t->tos_in()) {
-    case btos:
-    case ctos:
-    case stos:
-      ShouldNotReachHere();  // btos/ctos/stos should use itos.
-      break;
-    case atos: vep = __ pc(); __ pop(atos); aep = __ pc(); generate_and_dispatch(t); break;
-    case itos: vep = __ pc(); __ pop(itos); iep = __ pc(); generate_and_dispatch(t); break;
-    case ltos: vep = __ pc(); __ pop(ltos); lep = __ pc(); generate_and_dispatch(t); break;
-    case ftos: vep = __ pc(); __ pop(ftos); fep = __ pc(); generate_and_dispatch(t); break;
-    case dtos: vep = __ pc(); __ pop(dtos); dep = __ pc(); generate_and_dispatch(t); break;
-    case vtos: set_vtos_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);     break;
-    default  : ShouldNotReachHere();                                                 break;
-  }
-}
-
-
-//------------------------------------------------------------------------------------------------------------------------
-
-void TemplateInterpreterGenerator::generate_and_dispatch(Template* t, TosState tos_out) {
-  if (PrintBytecodeHistogram)                                    histogram_bytecode(t);
-#ifndef PRODUCT
-  // debugging code
-  if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) count_bytecode();
-  if (PrintBytecodePairHistogram)                                histogram_bytecode_pair(t);
-  if (TraceBytecodes)                                            trace_bytecode(t);
-  if (StopInterpreterAt > 0)                                     stop_interpreter_at();
-  __ verify_FPU(1, t->tos_in());
-#endif // !PRODUCT
-  int step = 0;
-  if (!t->does_dispatch()) {
-    step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
-    if (tos_out == ilgl) tos_out = t->tos_out();
-    // compute bytecode size
-    assert(step > 0, "just checkin'");
-    // setup stuff for dispatching next bytecode
-    if (ProfileInterpreter && VerifyDataPointer
-        && MethodData::bytecode_has_profile(t->bytecode())) {
-      __ verify_method_data_pointer();
-    }
-    __ dispatch_prolog(tos_out, step);
-  }
-  // generate template
-  t->generate(_masm);
-  // advance
-  if (t->does_dispatch()) {
-#ifdef ASSERT
-    // make sure execution doesn't go beyond this point if code is broken
-    __ should_not_reach_here();
-#endif // ASSERT
-  } else {
-    // dispatch to next bytecode
-    __ dispatch_epilog(tos_out, step);
-  }
-}
 
 //------------------------------------------------------------------------------------------------------------------------
 // Entry points
@@ -724,85 +355,4 @@
   return (InterpreterCodelet*)_code->stub_containing(pc);
 }
 
-// Generate method entries
-address TemplateInterpreterGenerator::generate_method_entry(
-                                        AbstractInterpreter::MethodKind kind) {
-  // determine code generation flags
-  bool native = false;
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-  case Interpreter::zerolocals             :                                          break;
-  case Interpreter::zerolocals_synchronized:                synchronized = true;      break;
-  case Interpreter::native                 : native = true;                           break;
-  case Interpreter::native_synchronized    : native = true; synchronized = true;      break;
-  case Interpreter::empty                  : break;
-  case Interpreter::accessor               : break;
-  case Interpreter::abstract               : entry_point = generate_abstract_entry(); break;
-
-  case Interpreter::java_lang_math_sin     : // fall thru
-  case Interpreter::java_lang_math_cos     : // fall thru
-  case Interpreter::java_lang_math_tan     : // fall thru
-  case Interpreter::java_lang_math_abs     : // fall thru
-  case Interpreter::java_lang_math_log     : // fall thru
-  case Interpreter::java_lang_math_log10   : // fall thru
-  case Interpreter::java_lang_math_sqrt    : // fall thru
-  case Interpreter::java_lang_math_pow     : // fall thru
-  case Interpreter::java_lang_math_exp     : entry_point = generate_math_entry(kind);      break;
-  case Interpreter::java_lang_ref_reference_get
-                                           : entry_point = generate_Reference_get_entry(); break;
-  case Interpreter::java_util_zip_CRC32_update
-                                           : native = true; entry_point = generate_CRC32_update_entry();  break;
-  case Interpreter::java_util_zip_CRC32_updateBytes
-                                           : // fall thru
-  case Interpreter::java_util_zip_CRC32_updateByteBuffer
-                                           : native = true; entry_point = generate_CRC32_updateBytes_entry(kind); break;
-  case Interpreter::java_util_zip_CRC32C_updateBytes
-                                           : // fall thru
-  case Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer
-                                           : entry_point = generate_CRC32C_updateBytes_entry(kind); break;
-#ifdef IA32
-  // On x86_32 platforms, a special entry is generated for the following four methods.
-  // On other platforms the normal entry is used to enter these methods.
-  case Interpreter::java_lang_Float_intBitsToFloat
-                                           : native = true; entry_point = generate_Float_intBitsToFloat_entry(); break;
-  case Interpreter::java_lang_Float_floatToRawIntBits
-                                           : native = true; entry_point = generate_Float_floatToRawIntBits_entry(); break;
-  case Interpreter::java_lang_Double_longBitsToDouble
-                                           : native = true; entry_point = generate_Double_longBitsToDouble_entry(); break;
-  case Interpreter::java_lang_Double_doubleToRawLongBits
-                                           : native = true; entry_point = generate_Double_doubleToRawLongBits_entry(); break;
-#else
-  case Interpreter::java_lang_Float_intBitsToFloat:
-  case Interpreter::java_lang_Float_floatToRawIntBits:
-  case Interpreter::java_lang_Double_longBitsToDouble:
-  case Interpreter::java_lang_Double_doubleToRawLongBits:
-    native = true;
-    break;
-#endif // defined(TARGET_ARCH_x86) && !defined(_LP64)
-  default:
-    fatal("unexpected method kind: %d", kind);
-    break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-
-  // We expect the normal and native entry points to be generated first so we can reuse them.
-  if (native) {
-    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::native_synchronized : Interpreter::native);
-    if (entry_point == NULL) {
-      entry_point = generate_native_entry(synchronized);
-    }
-  } else {
-    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::zerolocals_synchronized : Interpreter::zerolocals);
-    if (entry_point == NULL) {
-      entry_point = generate_normal_entry(synchronized);
-    }
-  }
-
-  return entry_point;
-}
 #endif // !CC_INTERP
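
Every EntryPoint constructed in the generator code above bundles nine code addresses, one per incoming top-of-stack (tos) state, in the fixed order btos, ctos, stos, atos, itos, ltos, ftos, dtos, vtos. The EntryPoint class itself (declared elsewhere in the interpreter headers) is not touched by this change; the standalone sketch below only mirrors that shape to make the nine-argument constructors easier to read (the address typedef here is an assumed stand-in for HotSpot's own):

// Sketch only: mirrors the shape of EntryPoint, not HotSpot's actual class.
typedef unsigned char* address;            // assumed stand-in for HotSpot's address
struct EntryPointSketch {
  address _entry[9];                       // one slot per tos state, btos .. vtos
  EntryPointSketch(address bep, address cep, address sep, address aep,
                   address iep, address lep, address fep, address dep,
                   address vep) {
    address all[9] = { bep, cep, sep, aep, iep, lep, fep, dep, vep };
    for (int i = 0; i < 9; i++) _entry[i] = all[i];
  }
};
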
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -193,13 +193,6 @@
 
   // Size of interpreter code.  Max size with JVMTI
   static int InterpreterCodeSize;
-
-#ifdef PPC
- public:
-  // PPC-only: Support abs and sqrt like in compiler.
-  // For others we can use a normal (native) entry.
-  static bool math_entry_available(AbstractInterpreter::MethodKind kind);
-#endif
 };
 
 #endif // !CC_INTERP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeCacheExtensions.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreter.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
+#include "interpreter/templateTable.hpp"
+
+#ifndef CC_INTERP
+
+# define __ _masm->
+
+TemplateInterpreterGenerator::TemplateInterpreterGenerator(StubQueue* _code): AbstractInterpreterGenerator(_code) {
+  _unimplemented_bytecode    = NULL;
+  _illegal_bytecode_sequence = NULL;
+  generate_all();
+}
+
+static const BasicType types[Interpreter::number_of_result_handlers] = {
+  T_BOOLEAN,
+  T_CHAR   ,
+  T_BYTE   ,
+  T_SHORT  ,
+  T_INT    ,
+  T_LONG   ,
+  T_VOID   ,
+  T_FLOAT  ,
+  T_DOUBLE ,
+  T_OBJECT
+};
+
+void TemplateInterpreterGenerator::generate_all() {
+  // Loop, in case we need several variants of the interpreter entries
+  do {
+    if (!CodeCacheExtensions::skip_code_generation()) {
+      // bypass code generation when useless
+      { CodeletMark cm(_masm, "slow signature handler");
+        AbstractInterpreter::_slow_signature_handler = generate_slow_signature_handler();
+      }
+
+      { CodeletMark cm(_masm, "error exits");
+        _unimplemented_bytecode    = generate_error_exit("unimplemented bytecode");
+        _illegal_bytecode_sequence = generate_error_exit("illegal bytecode sequence - method not verified");
+      }
+
+#ifndef PRODUCT
+      if (TraceBytecodes) {
+        CodeletMark cm(_masm, "bytecode tracing support");
+        Interpreter::_trace_code =
+          EntryPoint(
+                     generate_trace_code(btos),
+                     generate_trace_code(ctos),
+                     generate_trace_code(stos),
+                     generate_trace_code(atos),
+                     generate_trace_code(itos),
+                     generate_trace_code(ltos),
+                     generate_trace_code(ftos),
+                     generate_trace_code(dtos),
+                     generate_trace_code(vtos)
+                     );
+      }
+#endif // !PRODUCT
+
+      { CodeletMark cm(_masm, "return entry points");
+        const int index_size = sizeof(u2);
+        for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
+          Interpreter::_return_entry[i] =
+            EntryPoint(
+                       generate_return_entry_for(itos, i, index_size),
+                       generate_return_entry_for(itos, i, index_size),
+                       generate_return_entry_for(itos, i, index_size),
+                       generate_return_entry_for(atos, i, index_size),
+                       generate_return_entry_for(itos, i, index_size),
+                       generate_return_entry_for(ltos, i, index_size),
+                       generate_return_entry_for(ftos, i, index_size),
+                       generate_return_entry_for(dtos, i, index_size),
+                       generate_return_entry_for(vtos, i, index_size)
+                       );
+        }
+      }
+
+      { CodeletMark cm(_masm, "invoke return entry points");
+        const TosState states[] = {itos, itos, itos, itos, ltos, ftos, dtos, atos, vtos};
+        const int invoke_length = Bytecodes::length_for(Bytecodes::_invokestatic);
+        const int invokeinterface_length = Bytecodes::length_for(Bytecodes::_invokeinterface);
+        const int invokedynamic_length = Bytecodes::length_for(Bytecodes::_invokedynamic);
+
+        for (int i = 0; i < Interpreter::number_of_return_addrs; i++) {
+          TosState state = states[i];
+          Interpreter::_invoke_return_entry[i] = generate_return_entry_for(state, invoke_length, sizeof(u2));
+          Interpreter::_invokeinterface_return_entry[i] = generate_return_entry_for(state, invokeinterface_length, sizeof(u2));
+          Interpreter::_invokedynamic_return_entry[i] = generate_return_entry_for(state, invokedynamic_length, sizeof(u4));
+        }
+      }
+
+      { CodeletMark cm(_masm, "earlyret entry points");
+        Interpreter::_earlyret_entry =
+          EntryPoint(
+                     generate_earlyret_entry_for(btos),
+                     generate_earlyret_entry_for(ctos),
+                     generate_earlyret_entry_for(stos),
+                     generate_earlyret_entry_for(atos),
+                     generate_earlyret_entry_for(itos),
+                     generate_earlyret_entry_for(ltos),
+                     generate_earlyret_entry_for(ftos),
+                     generate_earlyret_entry_for(dtos),
+                     generate_earlyret_entry_for(vtos)
+                     );
+      }
+
+      { CodeletMark cm(_masm, "deoptimization entry points");
+        for (int i = 0; i < Interpreter::number_of_deopt_entries; i++) {
+          Interpreter::_deopt_entry[i] =
+            EntryPoint(
+                       generate_deopt_entry_for(itos, i),
+                       generate_deopt_entry_for(itos, i),
+                       generate_deopt_entry_for(itos, i),
+                       generate_deopt_entry_for(atos, i),
+                       generate_deopt_entry_for(itos, i),
+                       generate_deopt_entry_for(ltos, i),
+                       generate_deopt_entry_for(ftos, i),
+                       generate_deopt_entry_for(dtos, i),
+                       generate_deopt_entry_for(vtos, i)
+                       );
+        }
+      }
+
+      { CodeletMark cm(_masm, "result handlers for native calls");
+        // The various result converter stublets.
+        int is_generated[Interpreter::number_of_result_handlers];
+        memset(is_generated, 0, sizeof(is_generated));
+
+        for (int i = 0; i < Interpreter::number_of_result_handlers; i++) {
+          BasicType type = types[i];
+          if (!is_generated[Interpreter::BasicType_as_index(type)]++) {
+            Interpreter::_native_abi_to_tosca[Interpreter::BasicType_as_index(type)] = generate_result_handler_for(type);
+          }
+        }
+      }
+
+      { CodeletMark cm(_masm, "continuation entry points");
+        Interpreter::_continuation_entry =
+          EntryPoint(
+                     generate_continuation_for(btos),
+                     generate_continuation_for(ctos),
+                     generate_continuation_for(stos),
+                     generate_continuation_for(atos),
+                     generate_continuation_for(itos),
+                     generate_continuation_for(ltos),
+                     generate_continuation_for(ftos),
+                     generate_continuation_for(dtos),
+                     generate_continuation_for(vtos)
+                     );
+      }
+
+      { CodeletMark cm(_masm, "safepoint entry points");
+        Interpreter::_safept_entry =
+          EntryPoint(
+                     generate_safept_entry_for(btos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                     generate_safept_entry_for(ctos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                     generate_safept_entry_for(stos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                     generate_safept_entry_for(atos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                     generate_safept_entry_for(itos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                     generate_safept_entry_for(ltos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                     generate_safept_entry_for(ftos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                     generate_safept_entry_for(dtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+                     generate_safept_entry_for(vtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint))
+                     );
+      }
+
+      { CodeletMark cm(_masm, "exception handling");
+        // (Note: this is not safepoint safe because the thread may return to compiled code)
+        generate_throw_exception();
+      }
+
+      { CodeletMark cm(_masm, "throw exception entrypoints");
+        Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler("java/lang/ArrayIndexOutOfBoundsException");
+        Interpreter::_throw_ArrayStoreException_entry            = generate_klass_exception_handler("java/lang/ArrayStoreException"                 );
+        Interpreter::_throw_ArithmeticException_entry            = generate_exception_handler("java/lang/ArithmeticException"           , "/ by zero");
+        Interpreter::_throw_ClassCastException_entry             = generate_ClassCastException_handler();
+        Interpreter::_throw_NullPointerException_entry           = generate_exception_handler("java/lang/NullPointerException"          , NULL       );
+        Interpreter::_throw_StackOverflowError_entry             = generate_StackOverflowError_handler();
+      }
+
+
+
+#define method_entry(kind)                                              \
+      { CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
+        Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
+      }
+
+      // all non-native method kinds
+      method_entry(zerolocals)
+      method_entry(zerolocals_synchronized)
+      method_entry(empty)
+      method_entry(accessor)
+      method_entry(abstract)
+      method_entry(java_lang_math_sin  )
+      method_entry(java_lang_math_cos  )
+      method_entry(java_lang_math_tan  )
+      method_entry(java_lang_math_abs  )
+      method_entry(java_lang_math_sqrt )
+      method_entry(java_lang_math_log  )
+      method_entry(java_lang_math_log10)
+      method_entry(java_lang_math_exp  )
+      method_entry(java_lang_math_pow  )
+      method_entry(java_lang_ref_reference_get)
+
+      AbstractInterpreter::initialize_method_handle_entries();
+
+      // all native method kinds (must be one contiguous block)
+      Interpreter::_native_entry_begin = Interpreter::code()->code_end();
+      method_entry(native)
+      method_entry(native_synchronized)
+      Interpreter::_native_entry_end = Interpreter::code()->code_end();
+
+      if (UseCRC32Intrinsics) {
+        method_entry(java_util_zip_CRC32_update)
+        method_entry(java_util_zip_CRC32_updateBytes)
+        method_entry(java_util_zip_CRC32_updateByteBuffer)
+      }
+
+      if (UseCRC32CIntrinsics) {
+        method_entry(java_util_zip_CRC32C_updateBytes)
+        method_entry(java_util_zip_CRC32C_updateDirectByteBuffer)
+      }
+
+      method_entry(java_lang_Float_intBitsToFloat);
+      method_entry(java_lang_Float_floatToRawIntBits);
+      method_entry(java_lang_Double_longBitsToDouble);
+      method_entry(java_lang_Double_doubleToRawLongBits);
+
+#undef method_entry
+
+      // Bytecodes
+      set_entry_points_for_all_bytes();
+    }
+  } while (CodeCacheExtensions::needs_other_interpreter_variant());
+
+  // installation of code in other places in the runtime
+  // (ExecutableCodeManager calls not needed to copy the entries)
+  set_safepoints_for_all_bytes();
+}
+
+//------------------------------------------------------------------------------------------------------------------------
+
+address TemplateInterpreterGenerator::generate_error_exit(const char* msg) {
+  address entry = __ pc();
+  __ stop(msg);
+  return entry;
+}
+
+
+//------------------------------------------------------------------------------------------------------------------------
+
+void TemplateInterpreterGenerator::set_entry_points_for_all_bytes() {
+  for (int i = 0; i < DispatchTable::length; i++) {
+    Bytecodes::Code code = (Bytecodes::Code)i;
+    if (Bytecodes::is_defined(code)) {
+      set_entry_points(code);
+    } else {
+      set_unimplemented(i);
+    }
+  }
+}
+
+
+void TemplateInterpreterGenerator::set_safepoints_for_all_bytes() {
+  for (int i = 0; i < DispatchTable::length; i++) {
+    Bytecodes::Code code = (Bytecodes::Code)i;
+    if (Bytecodes::is_defined(code)) Interpreter::_safept_table.set_entry(code, Interpreter::_safept_entry);
+  }
+}
+
+
+void TemplateInterpreterGenerator::set_unimplemented(int i) {
+  address e = _unimplemented_bytecode;
+  EntryPoint entry(e, e, e, e, e, e, e, e, e);
+  Interpreter::_normal_table.set_entry(i, entry);
+  Interpreter::_wentry_point[i] = _unimplemented_bytecode;
+}
+
+
+void TemplateInterpreterGenerator::set_entry_points(Bytecodes::Code code) {
+  if (CodeCacheExtensions::skip_template_interpreter_entries(code)) {
+    return;
+  }
+  CodeletMark cm(_masm, Bytecodes::name(code), code);
+  // initialize entry points
+  assert(_unimplemented_bytecode    != NULL, "should have been generated before");
+  assert(_illegal_bytecode_sequence != NULL, "should have been generated before");
+  address bep = _illegal_bytecode_sequence;
+  address cep = _illegal_bytecode_sequence;
+  address sep = _illegal_bytecode_sequence;
+  address aep = _illegal_bytecode_sequence;
+  address iep = _illegal_bytecode_sequence;
+  address lep = _illegal_bytecode_sequence;
+  address fep = _illegal_bytecode_sequence;
+  address dep = _illegal_bytecode_sequence;
+  address vep = _unimplemented_bytecode;
+  address wep = _unimplemented_bytecode;
+  // code for short & wide version of bytecode
+  if (Bytecodes::is_defined(code)) {
+    Template* t = TemplateTable::template_for(code);
+    assert(t->is_valid(), "just checking");
+    set_short_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);
+  }
+  if (Bytecodes::wide_is_defined(code)) {
+    Template* t = TemplateTable::template_for_wide(code);
+    assert(t->is_valid(), "just checking");
+    set_wide_entry_point(t, wep);
+  }
+  // set entry points
+  EntryPoint entry(bep, cep, sep, aep, iep, lep, fep, dep, vep);
+  Interpreter::_normal_table.set_entry(code, entry);
+  Interpreter::_wentry_point[code] = wep;
+  CodeCacheExtensions::completed_template_interpreter_entries(_masm, code);
+}
+
+
+void TemplateInterpreterGenerator::set_wide_entry_point(Template* t, address& wep) {
+  assert(t->is_valid(), "template must exist");
+  assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions");
+  wep = __ pc(); generate_and_dispatch(t);
+}
+
+
+void TemplateInterpreterGenerator::set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
+  assert(t->is_valid(), "template must exist");
+  switch (t->tos_in()) {
+    case btos:
+    case ctos:
+    case stos:
+      ShouldNotReachHere();  // btos/ctos/stos should use itos.
+      break;
+    case atos: vep = __ pc(); __ pop(atos); aep = __ pc(); generate_and_dispatch(t); break;
+    case itos: vep = __ pc(); __ pop(itos); iep = __ pc(); generate_and_dispatch(t); break;
+    case ltos: vep = __ pc(); __ pop(ltos); lep = __ pc(); generate_and_dispatch(t); break;
+    case ftos: vep = __ pc(); __ pop(ftos); fep = __ pc(); generate_and_dispatch(t); break;
+    case dtos: vep = __ pc(); __ pop(dtos); dep = __ pc(); generate_and_dispatch(t); break;
+    case vtos: set_vtos_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);     break;
+    default  : ShouldNotReachHere();                                                 break;
+  }
+}
+
+
+//------------------------------------------------------------------------------------------------------------------------
+
+void TemplateInterpreterGenerator::generate_and_dispatch(Template* t, TosState tos_out) {
+  if (PrintBytecodeHistogram)                                    histogram_bytecode(t);
+#ifndef PRODUCT
+  // debugging code
+  if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) count_bytecode();
+  if (PrintBytecodePairHistogram)                                histogram_bytecode_pair(t);
+  if (TraceBytecodes)                                            trace_bytecode(t);
+  if (StopInterpreterAt > 0)                                     stop_interpreter_at();
+  __ verify_FPU(1, t->tos_in());
+#endif // !PRODUCT
+  int step = 0;
+  if (!t->does_dispatch()) {
+    step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
+    if (tos_out == ilgl) tos_out = t->tos_out();
+    // compute bytecode size
+    assert(step > 0, "just checkin'");
+    // set up state for dispatching the next bytecode
+    if (ProfileInterpreter && VerifyDataPointer
+        && MethodData::bytecode_has_profile(t->bytecode())) {
+      __ verify_method_data_pointer();
+    }
+    __ dispatch_prolog(tos_out, step);
+  }
+  // generate template
+  t->generate(_masm);
+  // advance
+  if (t->does_dispatch()) {
+#ifdef ASSERT
+    // make sure execution doesn't go beyond this point if code is broken
+    __ should_not_reach_here();
+#endif // ASSERT
+  } else {
+    // dispatch to next bytecode
+    __ dispatch_epilog(tos_out, step);
+  }
+}
+
+// Generate method entries
+address TemplateInterpreterGenerator::generate_method_entry(
+                                        AbstractInterpreter::MethodKind kind) {
+  // determine code generation flags
+  bool native = false;
+  bool synchronized = false;
+  address entry_point = NULL;
+
+  switch (kind) {
+  case Interpreter::zerolocals             :                                          break;
+  case Interpreter::zerolocals_synchronized:                synchronized = true;      break;
+  case Interpreter::native                 : native = true;                           break;
+  case Interpreter::native_synchronized    : native = true; synchronized = true;      break;
+  case Interpreter::empty                  : break;
+  case Interpreter::accessor               : break;
+  case Interpreter::abstract               : entry_point = generate_abstract_entry(); break;
+
+  case Interpreter::java_lang_math_sin     : // fall thru
+  case Interpreter::java_lang_math_cos     : // fall thru
+  case Interpreter::java_lang_math_tan     : // fall thru
+  case Interpreter::java_lang_math_abs     : // fall thru
+  case Interpreter::java_lang_math_log     : // fall thru
+  case Interpreter::java_lang_math_log10   : // fall thru
+  case Interpreter::java_lang_math_sqrt    : // fall thru
+  case Interpreter::java_lang_math_pow     : // fall thru
+  case Interpreter::java_lang_math_exp     : entry_point = generate_math_entry(kind);      break;
+  case Interpreter::java_lang_ref_reference_get
+                                           : entry_point = generate_Reference_get_entry(); break;
+  case Interpreter::java_util_zip_CRC32_update
+                                           : native = true; entry_point = generate_CRC32_update_entry();  break;
+  case Interpreter::java_util_zip_CRC32_updateBytes
+                                           : // fall thru
+  case Interpreter::java_util_zip_CRC32_updateByteBuffer
+                                           : native = true; entry_point = generate_CRC32_updateBytes_entry(kind); break;
+  case Interpreter::java_util_zip_CRC32C_updateBytes
+                                           : // fall thru
+  case Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer
+                                           : entry_point = generate_CRC32C_updateBytes_entry(kind); break;
+#ifdef IA32
+  // On x86_32 platforms, a special entry is generated for the following four methods.
+  // On other platforms the normal entry is used to enter these methods.
+  case Interpreter::java_lang_Float_intBitsToFloat
+                                           : native = true; entry_point = generate_Float_intBitsToFloat_entry(); break;
+  case Interpreter::java_lang_Float_floatToRawIntBits
+                                           : native = true; entry_point = generate_Float_floatToRawIntBits_entry(); break;
+  case Interpreter::java_lang_Double_longBitsToDouble
+                                           : native = true; entry_point = generate_Double_longBitsToDouble_entry(); break;
+  case Interpreter::java_lang_Double_doubleToRawLongBits
+                                           : native = true; entry_point = generate_Double_doubleToRawLongBits_entry(); break;
+#else
+  case Interpreter::java_lang_Float_intBitsToFloat:
+  case Interpreter::java_lang_Float_floatToRawIntBits:
+  case Interpreter::java_lang_Double_longBitsToDouble:
+  case Interpreter::java_lang_Double_doubleToRawLongBits:
+    native = true;
+    break;
+#endif // IA32
+  default:
+    fatal("unexpected method kind: %d", kind);
+    break;
+  }
+
+  if (entry_point) {
+    return entry_point;
+  }
+
+  // We expect the normal and native entry points to be generated first so we can reuse them.
+  if (native) {
+    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::native_synchronized : Interpreter::native);
+    if (entry_point == NULL) {
+      entry_point = generate_native_entry(synchronized);
+    }
+  } else {
+    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::zerolocals_synchronized : Interpreter::zerolocals);
+    if (entry_point == NULL) {
+      entry_point = generate_normal_entry(synchronized);
+    }
+  }
+
+  return entry_point;
+}
+#endif // !CC_INTERP
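
To make the method_entry macro in generate_all() above concrete: each use expands into a CodeletMark-scoped block that fills one slot of Interpreter::_entry_table. Expanding method_entry(zerolocals) by hand gives roughly the following; this is only the mechanical expansion of the macro text above, not additional code introduced by the change:

{ CodeletMark cm(_masm, "method entry point (kind = zerolocals)");
  Interpreter::_entry_table[Interpreter::zerolocals] =
      generate_method_entry(Interpreter::zerolocals);
}
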
--- a/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,8 @@
 
   void lock_method();
 
+  void bang_stack_shadow_pages(bool native_call);
+
   // Instruction generation
   void generate_and_dispatch (Template* t, TosState tos_out = ilgl);
   void set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep);
@@ -113,7 +115,6 @@
 #endif // SPARC
 
 #ifdef AARCH64
-  void bang_stack_shadow_pages(bool native_call);
   void generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs);
 #endif // AARCH64
 
--- a/hotspot/src/share/vm/runtime/javaCalls.cpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/share/vm/runtime/javaCalls.cpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -385,7 +385,7 @@
     return;
   } else {
     // Touch pages checked if the OS needs them to be touched to be mapped.
-    os::bang_stack_shadow_pages();
+    os::map_stack_shadow_pages();
   }
 
 #if INCLUDE_JVMCI
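
The rename above matches what the call does at this point in the Java call path: the shadow pages are touched here only so the OS maps them before interpreted code runs, not to provoke a stack-overflow fault. A rough, self-contained illustration of that touch-to-map idea follows; the function name, signature, and page handling are assumptions for illustration, not HotSpot's implementation:

#include <cstddef>

// Write one byte per page so the OS backs each page with real memory now,
// instead of paying the first-touch cost deep inside a Java call.
static void map_pages_by_touching(char* low, char* high, size_t page_size) {
  for (char* p = low; p < high; p += page_size) {
    *reinterpret_cast<volatile char*>(p) = 0;   // forces the page to be mapped
  }
}
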
--- a/hotspot/src/share/vm/runtime/os.hpp	Tue Jan 12 16:01:54 2016 +0100
+++ b/hotspot/src/share/vm/runtime/os.hpp	Tue Jan 12 13:14:41 2016 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -262,7 +262,7 @@
   // pages for stack overflow checking.
   static bool uses_stack_guard_pages();
   static bool allocate_stack_guard_pages();
-  static void bang_stack_shadow_pages();
+  static void map_stack_shadow_pages();
   static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method);
 
   // OS interface to Virtual Memory