8074457: Remove the non-Zero CPP Interpreter
author coleenp
Tue, 22 Dec 2015 11:11:29 -0500
changeset 35214 d86005e0b4c2
parent 35211 3771329165d4
child 35215 f9536fc8548c
8074457: Remove the non-Zero CPP Interpreter
Summary: Remove cppInterpreter assembly files and reorganize InterpreterGenerator includes
Reviewed-by: goetz, bdelsart
hotspot/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.cpp
hotspot/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.hpp
hotspot/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.inline.hpp
hotspot/src/cpu/aarch64/vm/c2_globals_aarch64.hpp
hotspot/src/cpu/aarch64/vm/cppInterpreterGenerator_aarch64.hpp
hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp
hotspot/src/cpu/aarch64/vm/frame_aarch64.hpp
hotspot/src/cpu/aarch64/vm/frame_aarch64.inline.hpp
hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
hotspot/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp
hotspot/src/cpu/aarch64/vm/interpreter_aarch64.cpp
hotspot/src/cpu/aarch64/vm/interpreter_aarch64.hpp
hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp
hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.hpp
hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.hpp
hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp
hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp
hotspot/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp
hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
hotspot/src/cpu/ppc/vm/interpreter_ppc.hpp
hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp
hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.hpp
hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp
hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.hpp
hotspot/src/cpu/sparc/vm/bytecodeInterpreter_sparc.cpp
hotspot/src/cpu/sparc/vm/bytecodeInterpreter_sparc.hpp
hotspot/src/cpu/sparc/vm/bytecodeInterpreter_sparc.inline.hpp
hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp
hotspot/src/cpu/sparc/vm/cppInterpreterGenerator_sparc.hpp
hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp
hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.hpp
hotspot/src/cpu/sparc/vm/frame_sparc.cpp
hotspot/src/cpu/sparc/vm/frame_sparc.hpp
hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp
hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp
hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp
hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp
hotspot/src/cpu/sparc/vm/interpreter_sparc.hpp
hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp
hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp
hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp
hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp
hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.hpp
hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.hpp
hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp
hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.hpp
hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.inline.hpp
hotspot/src/cpu/x86/vm/c2_globals_x86.hpp
hotspot/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp
hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp
hotspot/src/cpu/x86/vm/cppInterpreter_x86.hpp
hotspot/src/cpu/x86/vm/frame_x86.cpp
hotspot/src/cpu/x86/vm/frame_x86.hpp
hotspot/src/cpu/x86/vm/frame_x86.inline.hpp
hotspot/src/cpu/x86/vm/interp_masm_x86.cpp
hotspot/src/cpu/x86/vm/interp_masm_x86.hpp
hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp
hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp
hotspot/src/cpu/x86/vm/interpreter_x86.hpp
hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp
hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp
hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp
hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp
hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.hpp
hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_32.cpp
hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_64.cpp
hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp
hotspot/src/cpu/x86/vm/templateInterpreter_x86.hpp
hotspot/src/cpu/x86/vm/templateTable_x86.cpp
hotspot/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp
hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
hotspot/src/cpu/zero/vm/frame_zero.inline.hpp
hotspot/src/cpu/zero/vm/interpreterGenerator_zero.hpp
hotspot/src/cpu/zero/vm/interpreter_zero.cpp
hotspot/src/cpu/zero/vm/interpreter_zero.hpp
hotspot/src/cpu/zero/vm/methodHandles_zero.cpp
hotspot/src/cpu/zero/vm/templateInterpreterGenerator_zero.hpp
hotspot/src/cpu/zero/vm/templateInterpreter_zero.cpp
hotspot/src/cpu/zero/vm/templateInterpreter_zero.hpp
hotspot/src/cpu/zero/vm/templateTable_zero.cpp
hotspot/src/cpu/zero/vm/templateTable_zero.hpp
hotspot/src/share/vm/interpreter/abstractInterpreter.hpp
hotspot/src/share/vm/interpreter/bytecodeHistogram.hpp
hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp
hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp
hotspot/src/share/vm/interpreter/cppInterpreter.cpp
hotspot/src/share/vm/interpreter/cppInterpreter.hpp
hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp
hotspot/src/share/vm/interpreter/interpreter.cpp
hotspot/src/share/vm/interpreter/interpreter.hpp
hotspot/src/share/vm/interpreter/interpreterGenerator.hpp
hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
hotspot/src/share/vm/interpreter/templateInterpreter.cpp
hotspot/src/share/vm/interpreter/templateInterpreter.hpp
hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp
hotspot/src/share/vm/interpreter/templateTable.hpp
hotspot/src/share/vm/prims/methodHandles.hpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/frame.inline.hpp
hotspot/src/share/vm/runtime/javaFrameAnchor.hpp
--- a/hotspot/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "interpreter/bytecodeInterpreter.hpp"
-#include "interpreter/bytecodeInterpreter.inline.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#include "interp_masm_aarch64.hpp"
-
-#ifdef CC_INTERP
-
-#endif // CC_INTERP (all)
--- a/hotspot/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_HPP
-#define CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_HPP
-
-// Platform specific for C++ based Interpreter
-
-private:
-
-    interpreterState _self_link;          /*  Previous interpreter state  */ /* sometimes points to self??? */
-    address   _result_handler;            /* temp for saving native result handler */
-    intptr_t* _sender_sp;                 /* sender's sp before stack (locals) extension */
-
-    address   _extra_junk1;               /* temp to save on recompiles */
-    address   _extra_junk2;               /* temp to save on recompiles */
-    address   _extra_junk3;               /* temp to save on recompiles */
-    // address dummy_for_native2;         /* a native frame result handler would be here... */
-    // address dummy_for_native1;         /* native result type stored here in a interpreter native frame */
-    address   _extra_junk4;               /* temp to save on recompiles */
-    address   _extra_junk5;               /* temp to save on recompiles */
-    address   _extra_junk6;               /* temp to save on recompiles */
-public:
-                                                         // we have an interpreter frame...
-inline intptr_t* sender_sp() {
-  return _sender_sp;
-}
-
-// The interpreter always has the frame anchor fully setup so we don't
-// have to do anything going to vm from the interpreter. On return
-// we do have to clear the flags in case they we're modified to
-// maintain the stack walking invariants.
-//
-#define SET_LAST_JAVA_FRAME()
-
-#define RESET_LAST_JAVA_FRAME()
-
-/*
- * Macros for accessing the stack.
- */
-#undef STACK_INT
-#undef STACK_FLOAT
-#undef STACK_ADDR
-#undef STACK_OBJECT
-#undef STACK_DOUBLE
-#undef STACK_LONG
-
-// JavaStack Implementation
-
-#define GET_STACK_SLOT(offset)    (*((intptr_t*) &topOfStack[-(offset)]))
-#define STACK_SLOT(offset)    ((address) &topOfStack[-(offset)])
-#define STACK_ADDR(offset)    (*((address *) &topOfStack[-(offset)]))
-#define STACK_INT(offset)     (*((jint*) &topOfStack[-(offset)]))
-#define STACK_FLOAT(offset)   (*((jfloat *) &topOfStack[-(offset)]))
-#define STACK_OBJECT(offset)  (*((oop *) &topOfStack [-(offset)]))
-#define STACK_DOUBLE(offset)  (((VMJavaVal64*) &topOfStack[-(offset)])->d)
-#define STACK_LONG(offset)    (((VMJavaVal64 *) &topOfStack[-(offset)])->l)
-
-#define SET_STACK_SLOT(value, offset)   (*(intptr_t*)&topOfStack[-(offset)] = *(intptr_t*)(value))
-#define SET_STACK_ADDR(value, offset)   (*((address *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_INT(value, offset)    (*((jint *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_FLOAT(value, offset)  (*((jfloat *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_OBJECT(value, offset) (*((oop *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_DOUBLE(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = (value))
-#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d =  \
-                                                 ((VMJavaVal64*)(addr))->d)
-#define SET_STACK_LONG(value, offset)   (((VMJavaVal64*)&topOfStack[-(offset)])->l = (value))
-#define SET_STACK_LONG_FROM_ADDR(addr, offset)   (((VMJavaVal64*)&topOfStack[-(offset)])->l =  \
-                                                 ((VMJavaVal64*)(addr))->l)
-// JavaLocals implementation
-
-#define LOCALS_SLOT(offset)    ((intptr_t*)&locals[-(offset)])
-#define LOCALS_ADDR(offset)    ((address)locals[-(offset)])
-#define LOCALS_INT(offset)     ((jint)(locals[-(offset)]))
-#define LOCALS_FLOAT(offset)   (*((jfloat*)&locals[-(offset)]))
-#define LOCALS_OBJECT(offset)  ((oop)locals[-(offset)])
-#define LOCALS_DOUBLE(offset)  (((VMJavaVal64*)&locals[-((offset) + 1)])->d)
-#define LOCALS_LONG(offset)    (((VMJavaVal64*)&locals[-((offset) + 1)])->l)
-#define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
-#define LOCALS_DOUBLE_AT(offset) (((address)&locals[-((offset) + 1)]))
-
-#define SET_LOCALS_SLOT(value, offset)    (*(intptr_t*)&locals[-(offset)] = *(intptr_t *)(value))
-#define SET_LOCALS_ADDR(value, offset)    (*((address *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_INT(value, offset)     (*((jint *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_FLOAT(value, offset)   (*((jfloat *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_OBJECT(value, offset)  (*((oop *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_DOUBLE(value, offset)  (((VMJavaVal64*)&locals[-((offset)+1)])->d = (value))
-#define SET_LOCALS_LONG(value, offset)    (((VMJavaVal64*)&locals[-((offset)+1)])->l = (value))
-#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = \
-                                                  ((VMJavaVal64*)(addr))->d)
-#define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \
-                                                ((VMJavaVal64*)(addr))->l)
-
-#endif // CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_HPP
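For readers unfamiliar with the JavaStack macros deleted above: they address expression-stack slots relative to the interpreter's topOfStack pointer. Below is a minimal standalone sketch of that addressing, assuming the usual C++-interpreter convention that the stack grows toward lower addresses and topOfStack names the first free slot; only STACK_INT/SET_STACK_INT are reproduced, and the slot array and values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

typedef int32_t jint;   // stand-in for the HotSpot typedef

// Same addressing scheme as the deleted STACK_INT / SET_STACK_INT macros.
#define STACK_INT(offset)            (*((jint*) &topOfStack[-(offset)]))
#define SET_STACK_INT(value, offset) (*((jint*) &topOfStack[-(offset)]) = (value))

int main() {
  intptr_t slots[8] = {0};
  // Assumption: topOfStack points at the first free slot, so offset -1 is the
  // value on top of the stack and -2 the one beneath it.
  intptr_t* topOfStack = &slots[3];

  SET_STACK_INT(2, -2);                               // operand beneath the top
  SET_STACK_INT(40, -1);                              // operand on top
  SET_STACK_INT(STACK_INT(-1) + STACK_INT(-2), -2);   // roughly what iadd does
  printf("%d\n", STACK_INT(-2));                      // prints 42
  return 0;
}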
--- a/hotspot/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.inline.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,286 +0,0 @@
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_INLINE_HPP
-#define CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_INLINE_HPP
-
-// Inline interpreter functions for IA32
-
-inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) { return op1 + op2; }
-inline jfloat BytecodeInterpreter::VMfloatSub(jfloat op1, jfloat op2) { return op1 - op2; }
-inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) { return op1 * op2; }
-inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) { return op1 / op2; }
-inline jfloat BytecodeInterpreter::VMfloatRem(jfloat op1, jfloat op2) { return fmod(op1, op2); }
-
-inline jfloat BytecodeInterpreter::VMfloatNeg(jfloat op) { return -op; }
-
-inline int32_t BytecodeInterpreter::VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) {
-  return ( op1 < op2 ? -1 :
-               op1 > op2 ? 1 :
-                   op1 == op2 ? 0 :
-                       (direction == -1 || direction == 1) ? direction : 0);
-
-}
-
-inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2], const uint32_t from[2]) {
-  // x86 can do unaligned copies but not 64bits at a time
-  to[0] = from[0]; to[1] = from[1];
-}
-
-// The long operations depend on compiler support for "long long" on x86
-
-inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) {
-  return op1 + op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) {
-  return op1 & op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) {
-  // QQQ what about check and throw...
-  return op1 / op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) {
-  return op1 * op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) {
-  return op1 | op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) {
-  return op1 - op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) {
-  return op1 ^ op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) {
-  return op1 % op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) {
-  // CVM did this 0x3f mask, is the really needed??? QQQ
-  return ((unsigned long long) op1) >> (op2 & 0x3F);
-}
-
-inline jlong BytecodeInterpreter::VMlongShr(jlong op1, jint op2) {
-  return op1 >> (op2 & 0x3F);
-}
-
-inline jlong BytecodeInterpreter::VMlongShl(jlong op1, jint op2) {
-  return op1 << (op2 & 0x3F);
-}
-
-inline jlong BytecodeInterpreter::VMlongNeg(jlong op) {
-  return -op;
-}
-
-inline jlong BytecodeInterpreter::VMlongNot(jlong op) {
-  return ~op;
-}
-
-inline int32_t BytecodeInterpreter::VMlongLtz(jlong op) {
-  return (op <= 0);
-}
-
-inline int32_t BytecodeInterpreter::VMlongGez(jlong op) {
-  return (op >= 0);
-}
-
-inline int32_t BytecodeInterpreter::VMlongEqz(jlong op) {
-  return (op == 0);
-}
-
-inline int32_t BytecodeInterpreter::VMlongEq(jlong op1, jlong op2) {
-  return (op1 == op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongNe(jlong op1, jlong op2) {
-  return (op1 != op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongGe(jlong op1, jlong op2) {
-  return (op1 >= op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongLe(jlong op1, jlong op2) {
-  return (op1 <= op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongLt(jlong op1, jlong op2) {
-  return (op1 < op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongGt(jlong op1, jlong op2) {
-  return (op1 > op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongCompare(jlong op1, jlong op2) {
-  return (VMlongLt(op1, op2) ? -1 : VMlongGt(op1, op2) ? 1 : 0);
-}
-
-// Long conversions
-
-inline jdouble BytecodeInterpreter::VMlong2Double(jlong val) {
-  return (jdouble) val;
-}
-
-inline jfloat BytecodeInterpreter::VMlong2Float(jlong val) {
-  return (jfloat) val;
-}
-
-inline jint BytecodeInterpreter::VMlong2Int(jlong val) {
-  return (jint) val;
-}
-
-// Double Arithmetic
-
-inline jdouble BytecodeInterpreter::VMdoubleAdd(jdouble op1, jdouble op2) {
-  return op1 + op2;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleDiv(jdouble op1, jdouble op2) {
-  // Divide by zero... QQQ
-  return op1 / op2;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleMul(jdouble op1, jdouble op2) {
-  return op1 * op2;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleNeg(jdouble op) {
-  return -op;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleRem(jdouble op1, jdouble op2) {
-  return fmod(op1, op2);
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleSub(jdouble op1, jdouble op2) {
-  return op1 - op2;
-}
-
-inline int32_t BytecodeInterpreter::VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction) {
-  return ( op1 < op2 ? -1 :
-               op1 > op2 ? 1 :
-                   op1 == op2 ? 0 :
-                       (direction == -1 || direction == 1) ? direction : 0);
-}
-
-// Double Conversions
-
-inline jfloat BytecodeInterpreter::VMdouble2Float(jdouble val) {
-  return (jfloat) val;
-}
-
-// Float Conversions
-
-inline jdouble BytecodeInterpreter::VMfloat2Double(jfloat op) {
-  return (jdouble) op;
-}
-
-// Integer Arithmetic
-
-inline jint BytecodeInterpreter::VMintAdd(jint op1, jint op2) {
-  return op1 + op2;
-}
-
-inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) {
-  return op1 & op2;
-}
-
-inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
-  /* it's possible we could catch this special case implicitly */
-  if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
-  else return op1 / op2;
-}
-
-inline jint BytecodeInterpreter::VMintMul(jint op1, jint op2) {
-  return op1 * op2;
-}
-
-inline jint BytecodeInterpreter::VMintNeg(jint op) {
-  return -op;
-}
-
-inline jint BytecodeInterpreter::VMintOr(jint op1, jint op2) {
-  return op1 | op2;
-}
-
-inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
-  /* it's possible we could catch this special case implicitly */
-  if ((juint)op1 == 0x80000000 && op2 == -1) return 0;
-  else return op1 % op2;
-}
-
-inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) {
-  return op1 << op2;
-}
-
-inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) {
-  return op1 >> (op2 & 0x1f);
-}
-
-inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
-  return op1 - op2;
-}
-
-inline jint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
-  return ((juint) op1) >> (op2 & 0x1f);
-}
-
-inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) {
-  return op1 ^ op2;
-}
-
-inline jdouble BytecodeInterpreter::VMint2Double(jint val) {
-  return (jdouble) val;
-}
-
-inline jfloat BytecodeInterpreter::VMint2Float(jint val) {
-  return (jfloat) val;
-}
-
-inline jlong BytecodeInterpreter::VMint2Long(jint val) {
-  return (jlong) val;
-}
-
-inline jchar BytecodeInterpreter::VMint2Char(jint val) {
-  return (jchar) val;
-}
-
-inline jshort BytecodeInterpreter::VMint2Short(jint val) {
-  return (jshort) val;
-}
-
-inline jbyte BytecodeInterpreter::VMint2Byte(jint val) {
-  return (jbyte) val;
-}
-
-#endif // CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_INLINE_HPP
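The deleted inline helpers mostly exist to pin down Java semantics that plain C++ operators do not guarantee, e.g. the Integer.MIN_VALUE / -1 case in VMintDiv and the five-bit shift mask in VMintUshr. A small standalone sketch of just those two helpers follows; the typedefs and the main driver are stand-ins added for illustration.

#include <stdint.h>
#include <stdio.h>

typedef int32_t  jint;    // stand-ins for the HotSpot typedefs
typedef uint32_t juint;

// Same guard as the deleted VMintDiv: Java requires Integer.MIN_VALUE / -1 to
// yield Integer.MIN_VALUE, while the native division would overflow, so the
// interpreter special-cases it before dividing.
static jint VMintDiv(jint op1, jint op2) {
  if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
  else return op1 / op2;
}

// Same masking as the deleted VMintUshr: Java uses only the low five bits of
// the shift distance for 32-bit operands.
static jint VMintUshr(jint op1, jint op2) {
  return ((juint)op1) >> (op2 & 0x1f);
}

int main() {
  printf("%d\n", VMintDiv(INT32_MIN, -1));   // -2147483648, no overflow trap
  printf("%d\n", VMintUshr(-1, 33));         // 33 & 0x1f == 1  ->  2147483647
  return 0;
}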
--- a/hotspot/src/cpu/aarch64/vm/c2_globals_aarch64.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/c2_globals_aarch64.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -40,11 +40,7 @@
 define_pd_global(bool, PreferInterpreterNativeStubs, false);
 define_pd_global(bool, ProfileTraps,                 true);
 define_pd_global(bool, UseOnStackReplacement,        true);
-#ifdef CC_INTERP
-define_pd_global(bool, ProfileInterpreter,           false);
-#else
 define_pd_global(bool, ProfileInterpreter,           true);
-#endif // CC_INTERP
 define_pd_global(bool, TieredCompilation,            trueInTiered);
 define_pd_global(intx, CompileThreshold,             10000);
 define_pd_global(intx, BackEdgeThreshold,            100000);
--- a/hotspot/src/cpu/aarch64/vm/cppInterpreterGenerator_aarch64.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_AARCH64_VM_CPPINTERPRETERGENERATOR_AARCH64_HPP
-#define CPU_AARCH64_VM_CPPINTERPRETERGENERATOR_AARCH64_HPP
-
- protected:
-
-  void generate_more_monitors();
-  void generate_deopt_handling();
-  void lock_method(void);
-
-#endif // CPU_AARCH64_VM_CPPINTERPRETERGENERATOR_AARCH64_HPP
--- a/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -314,27 +314,6 @@
 }
 
 // sender_sp
-#ifdef CC_INTERP
-intptr_t* frame::interpreter_frame_sender_sp() const {
-  assert(is_interpreted_frame(), "interpreted frame expected");
-  // QQQ why does this specialize method exist if frame::sender_sp() does same thing?
-  // seems odd and if we always know interpreted vs. non then sender_sp() is really
-  // doing too much work.
-  return get_interpreterState()->sender_sp();
-}
-
-// monitor elements
-
-BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
-  return get_interpreterState()->monitor_base();
-}
-
-BasicObjectLock* frame::interpreter_frame_monitor_end() const {
-  return (BasicObjectLock*) get_interpreterState()->stack_base();
-}
-
-#else // CC_INTERP
-
 intptr_t* frame::interpreter_frame_sender_sp() const {
   assert(is_interpreted_frame(), "interpreted frame expected");
   return (intptr_t*) at(interpreter_frame_sender_sp_offset);
@@ -368,7 +347,6 @@
 void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
     *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
 }
-#endif // CC_INTERP
 
 frame frame::sender_for_entry_frame(RegisterMap* map) const {
   assert(map != NULL, "map must be set");
@@ -528,9 +506,6 @@
 }
 
 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
-// QQQ
-#ifdef CC_INTERP
-#else
   assert(is_interpreted_frame(), "Not an interpreted frame");
   // These are reasonable sanity checks
   if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
@@ -584,17 +559,10 @@
   if (locals > thread->stack_base() || locals < (address) fp()) return false;
 
   // We'd have to be pretty unlucky to be mislead at this point
-
-#endif // CC_INTERP
   return true;
 }
 
 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
-#ifdef CC_INTERP
-  // Needed for JVMTI. The result should always be in the
-  // interpreterState object
-  interpreterState istate = get_interpreterState();
-#endif // CC_INTERP
   assert(is_interpreted_frame(), "interpreted frame expected");
   Method* method = interpreter_frame_method();
   BasicType type = method->result_type();
@@ -620,11 +588,7 @@
     case T_ARRAY   : {
       oop obj;
       if (method->is_native()) {
-#ifdef CC_INTERP
-        obj = istate->_oop_temp;
-#else
         obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
-#endif // CC_INTERP
       } else {
         oop* obj_p = (oop*)tos_addr;
         obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
--- a/hotspot/src/cpu/aarch64/vm/frame_aarch64.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/frame_aarch64.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -63,44 +63,6 @@
 //                               <- sender sp
 // ------------------------------ Asm interpreter ----------------------------------------
 
-// ------------------------------ C++ interpreter ----------------------------------------
-//
-// Layout of C++ interpreter frame: (While executing in BytecodeInterpreter::run)
-//
-//                             <- SP (current esp/rsp)
-//    [local variables         ] BytecodeInterpreter::run local variables
-//    ...                        BytecodeInterpreter::run local variables
-//    [local variables         ] BytecodeInterpreter::run local variables
-//    [old frame pointer       ]   fp [ BytecodeInterpreter::run's ebp/rbp ]
-//    [return pc               ]  (return to frame manager)
-//    [interpreter_state*      ]  (arg to BytecodeInterpreter::run)   --------------
-//    [expression stack        ] <- last_Java_sp                           |
-//    [...                     ] * <- interpreter_state.stack              |
-//    [expression stack        ] * <- interpreter_state.stack_base         |
-//    [monitors                ]   \                                       |
-//     ...                          | monitor block size                   |
-//    [monitors                ]   / <- interpreter_state.monitor_base     |
-//    [struct interpretState   ] <-----------------------------------------|
-//    [return pc               ] (return to callee of frame manager [1]
-//    [locals and parameters   ]
-//                               <- sender sp
-
-// [1] When the c++ interpreter calls a new method it returns to the frame
-//     manager which allocates a new frame on the stack. In that case there
-//     is no real callee of this newly allocated frame. The frame manager is
-//     aware of the  additional frame(s) and will pop them as nested calls
-//     complete. Howevers tTo make it look good in the debugger the frame
-//     manager actually installs a dummy pc pointing to RecursiveInterpreterActivation
-//     with a fake interpreter_state* parameter to make it easy to debug
-//     nested calls.
-
-// Note that contrary to the layout for the assembly interpreter the
-// expression stack allocated for the C++ interpreter is full sized.
-// However this is not as bad as it seems as the interpreter frame_manager
-// will truncate the unused space on succesive method calls.
-//
-// ------------------------------ C++ interpreter ----------------------------------------
-
  public:
   enum {
     pc_return_offset                                 =  0,
@@ -109,8 +71,6 @@
     return_addr_offset                               =  1,
     sender_sp_offset                                 =  2,
 
-#ifndef CC_INTERP
-
     // Interpreter frames
     interpreter_frame_oop_temp_offset                =  3, // for native calls only
 
@@ -127,8 +87,6 @@
     interpreter_frame_monitor_block_top_offset       = interpreter_frame_initial_sp_offset,
     interpreter_frame_monitor_block_bottom_offset    = interpreter_frame_initial_sp_offset,
 
-#endif // CC_INTERP
-
     // Entry frames
     // n.b. these values are determined by the layout defined in
     // stubGenerator for the Java call stub
@@ -193,13 +151,7 @@
   // helper to update a map with callee-saved RBP
   static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);
 
-#ifndef CC_INTERP
   // deoptimization support
   void interpreter_frame_set_last_sp(intptr_t* sp);
-#endif // CC_INTERP
-
-#ifdef CC_INTERP
-  inline interpreterState get_interpreterState() const;
-#endif // CC_INTERP
 
 #endif // CPU_AARCH64_VM_FRAME_AARCH64_HPP
--- a/hotspot/src/cpu/aarch64/vm/frame_aarch64.inline.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/frame_aarch64.inline.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -157,59 +157,6 @@
 inline address* frame::sender_pc_addr()      const { return (address*) addr_at( return_addr_offset); }
 inline address  frame::sender_pc()           const { return *sender_pc_addr(); }
 
-#ifdef CC_INTERP
-
-inline interpreterState frame::get_interpreterState() const {
-  return ((interpreterState)addr_at( -((int)sizeof(BytecodeInterpreter))/wordSize ));
-}
-
-inline intptr_t*    frame::sender_sp()        const {
-  // Hmm this seems awfully expensive QQQ, is this really called with interpreted frames?
-  if (is_interpreted_frame()) {
-    assert(false, "should never happen");
-    return get_interpreterState()->sender_sp();
-  } else {
-    return            addr_at(sender_sp_offset);
-  }
-}
-
-inline intptr_t** frame::interpreter_frame_locals_addr() const {
-  assert(is_interpreted_frame(), "must be interpreted");
-  return &(get_interpreterState()->_locals);
-}
-
-inline intptr_t* frame::interpreter_frame_bcx_addr() const {
-  assert(is_interpreted_frame(), "must be interpreted");
-  return (intptr_t*) &(get_interpreterState()->_bcp);
-}
-
-
-// Constant pool cache
-
-inline constantPoolCacheOop* frame::interpreter_frame_cache_addr() const {
-  assert(is_interpreted_frame(), "must be interpreted");
-  return &(get_interpreterState()->_constants);
-}
-
-// Method
-
-inline methodOop* frame::interpreter_frame_method_addr() const {
-  assert(is_interpreted_frame(), "must be interpreted");
-  return &(get_interpreterState()->_method);
-}
-
-inline intptr_t* frame::interpreter_frame_mdx_addr() const {
-  assert(is_interpreted_frame(), "must be interpreted");
-  return (intptr_t*) &(get_interpreterState()->_mdx);
-}
-
-// top of expression stack
-inline intptr_t* frame::interpreter_frame_tos_address() const {
-  assert(is_interpreted_frame(), "wrong frame type");
-  return get_interpreterState()->_stack + 1;
-}
-
-#else /* asm interpreter */
 inline intptr_t*    frame::sender_sp()        const { return            addr_at(   sender_sp_offset); }
 
 inline intptr_t** frame::interpreter_frame_locals_addr() const {
@@ -259,8 +206,6 @@
   return (oop *)(fp() + interpreter_frame_oop_temp_offset);
 }
 
-#endif /* CC_INTERP */
-
 inline int frame::pd_oop_map_offset_adjustment() const {
   return 0;
 }
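For context on the deleted get_interpreterState() accessor: the C++ interpreter kept its BytecodeInterpreter state record directly below the frame pointer, so the accessor only had to step back sizeof(BytecodeInterpreter) in words from fp. A toy sketch of that address arithmetic follows; the struct members and frame layout are made-up stand-ins, not the real HotSpot definitions.

#include <stdint.h>
#include <stdio.h>

struct BytecodeInterpreter {   // stand-in; the real struct is much larger
  intptr_t* _locals;
  intptr_t* _stack;
  void*     _method;
};
typedef BytecodeInterpreter* interpreterState;

static const int wordSize = (int) sizeof(intptr_t);

struct frame {
  intptr_t* _fp;
  intptr_t* addr_at(int index) const { return &_fp[index]; }
  // Mirrors the deleted accessor: the state record sits immediately below fp.
  interpreterState get_interpreterState() const {
    return (interpreterState) addr_at(-((int) sizeof(BytecodeInterpreter)) / wordSize);
  }
};

int main() {
  intptr_t stack_slots[16] = {0};
  frame f;
  f._fp = &stack_slots[8];                  // pretend frame pointer
  interpreterState istate = f.get_interpreterState();
  printf("fp=%p istate=%p\n", (void*) f._fp, (void*) istate);
  return 0;
}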
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -47,8 +47,6 @@
   b(entry);
 }
 
-#ifndef CC_INTERP
-
 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
   if (JvmtiExport::can_pop_frame()) {
     Label L;
@@ -595,8 +593,6 @@
   andr(sp, esp, -16);
 }
 
-#endif // C_INTERP
-
 // Lock object
 //
 // Args:
@@ -758,8 +754,6 @@
   }
 }
 
-#ifndef CC_INTERP
-
 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                          Label& zero_continue) {
   assert(ProfileInterpreter, "must be profiling interpreter");
@@ -1345,7 +1339,6 @@
 }
 
 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
-#endif // !CC_INTERP
 
 
 void InterpreterMacroAssembler::notify_method_entry() {
@@ -1392,24 +1385,23 @@
     // is changed then the interpreter_frame_result implementation will
     // need to be updated too.
 
-    // For c++ interpreter the result is always stored at a known location in the frame
-    // template interpreter will leave it on the top of the stack.
-    NOT_CC_INTERP(push(state);)
+    // template interpreter will leave the result on the top of the stack.
+    push(state);
     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
     cbz(r3, L);
     call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
     bind(L);
-    NOT_CC_INTERP(pop(state));
+    pop(state);
   }
 
   {
     SkipIfEqual skip(this, &DTraceMethodProbes, false);
-    NOT_CC_INTERP(push(state));
+    push(state);
     get_method(c_rarg1);
     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                  rthread, c_rarg1);
-    NOT_CC_INTERP(pop(state));
+    pop(state);
   }
 }
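For context on the NOT_CC_INTERP() wrappers dropped above: the macro expands its argument only when CC_INTERP is undefined, so once the C++ interpreter configuration is gone the wrapped calls can be written directly, exactly as the hunk does. A minimal sketch of that conditional-expansion pattern (not necessarily the literal HotSpot definitions):

#include <stdio.h>

#ifdef CC_INTERP
#define CC_INTERP_ONLY(code) code
#define NOT_CC_INTERP(code)
#else
#define CC_INTERP_ONLY(code)
#define NOT_CC_INTERP(code) code
#endif

int main() {
  NOT_CC_INTERP(printf("template interpreter path\n");)   // expands to the call
  CC_INTERP_ONLY(printf("C++ interpreter path\n");)       // expands to nothing
  return 0;
}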
 
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -35,7 +35,6 @@
 
 
 class InterpreterMacroAssembler: public MacroAssembler {
-#ifndef CC_INTERP
  protected:
 
  protected:
@@ -59,7 +58,6 @@
 
   // base routine for all dispatches
   void dispatch_base(TosState state, address* table, bool verifyoop = true);
-#endif // CC_INTERP
 
  public:
   InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
@@ -68,15 +66,6 @@
 
   void jump_to_entry(address entry);
 
-#ifdef CC_INTERP
-  void save_bcp()                                          { /*  not needed in c++ interpreter and harmless */ }
-  void restore_bcp()                                       { /*  not needed in c++ interpreter and harmless */ }
-
-  // Helpers for runtime call arguments/results
-  void get_method(Register reg);
-
-#else
-
   // Interpreter-specific registers
   void save_bcp() {
     str(rbcp, Address(rfp, frame::interpreter_frame_bcp_offset * wordSize));
@@ -202,7 +191,6 @@
                          bool throw_monitor_exception = true,
                          bool install_monitor_exception = true,
                          bool notify_jvmdi = true);
-#endif // CC_INTERP
 
   // FIXME: Give us a valid frame at a null check.
   virtual void null_check(Register reg, int offset = -1) {
@@ -220,8 +208,6 @@
   void lock_object  (Register lock_reg);
   void unlock_object(Register lock_reg);
 
-#ifndef CC_INTERP
-
   // Interpreter profiling operations
   void set_method_data_pointer_for_bcp();
   void test_method_data_pointer(Register mdp, Label& zero_continue);
@@ -280,8 +266,6 @@
   // only if +VerifyFPU  && (state == ftos || state == dtos)
   void verify_FPU(int stack_depth, TosState state = ftos);
 
-#endif // !CC_INTERP
-
   typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
 
   // support for jvmti/dtrace
--- a/hotspot/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_AARCH64_VM_INTERPRETERGENERATOR_AARCH64_HPP
-#define CPU_AARCH64_VM_INTERPRETERGENERATOR_AARCH64_HPP
-
-
-// Generation of Interpreter
-//
-  friend class AbstractInterpreterGenerator;
-
-protected:
-
-  void bang_stack_shadow_pages(bool native_call);
-
-private:
-
-  address generate_normal_entry(bool synchronized);
-  address generate_native_entry(bool synchronized);
-  address generate_abstract_entry(void);
-  address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_accessor_entry(void) { return NULL; }
-  address generate_empty_entry(void) { return NULL; }
-  void generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs);
-  address generate_Reference_get_entry();
-  address generate_CRC32_update_entry();
-  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
-  address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
-  void generate_stack_overflow_check(void);
-
-  void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
-  void generate_counter_overflow(Label* do_continue);
-
-#endif // CPU_AARCH64_VM_INTERPRETERGENERATOR_AARCH64_HPP
--- a/hotspot/src/cpu/aarch64/vm/interpreter_aarch64.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/interpreter_aarch64.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -27,9 +27,9 @@
 #include "asm/macroAssembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
@@ -123,7 +123,7 @@
 // Various method entries
 //
 
-address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
   // rmethod: Method*
   // r13: sender sp
   // esp: args
@@ -202,7 +202,7 @@
   // static jdouble dexp(jdouble x);
   // static jdouble dpow(jdouble x, jdouble y);
 
-void InterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
+void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
   address fn;
   switch (kind) {
   case Interpreter::java_lang_math_sin :
@@ -237,7 +237,7 @@
 
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
-address InterpreterGenerator::generate_abstract_entry(void) {
+address TemplateInterpreterGenerator::generate_abstract_entry(void) {
   // rmethod: Method*
   // r13: sender SP
 
--- a/hotspot/src/cpu/aarch64/vm/interpreter_aarch64.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_AARCH64_VM_INTERPRETER_AARCH64_HPP
-#define CPU_AARCH64_VM_INTERPRETER_AARCH64_HPP
-
- public:
-
-  // Offset from rsp (which points to the last stack element)
-  static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
-
-  // Stack index relative to tos (which points at value)
-  static int expr_index_at(int i)        { return stackElementWords * i; }
-
-  // Already negated by c++ interpreter
-  static int local_index_at(int i) {
-    assert(i <= 0, "local direction already negated");
-    return stackElementWords * i;
-  }
-
-#endif // CPU_AARCH64_VM_INTERPRETER_AARCH64_HPP
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -47,20 +47,13 @@
   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
   // may customize this version by overriding it for its purposes (e.g., to save/restore
   // additional registers when doing a VM call).
-#ifdef CC_INTERP
-  // c++ interpreter never wants to use interp_masm version of call_VM
-  #define VIRTUAL
-#else
-  #define VIRTUAL virtual
-#endif
-
-  VIRTUAL void call_VM_leaf_base(
+  virtual void call_VM_leaf_base(
     address entry_point,               // the entry point
     int     number_of_arguments,        // the number of arguments to pop after the call
     Label *retaddr = NULL
   );
 
-  VIRTUAL void call_VM_leaf_base(
+  virtual void call_VM_leaf_base(
     address entry_point,               // the entry point
     int     number_of_arguments,        // the number of arguments to pop after the call
     Label &retaddr) {
@@ -75,7 +68,7 @@
   // returns the register which contains the thread upon return. If a thread register has been
   // specified, the return value will correspond to that register. If no last_java_sp is specified
   // (noreg) than rsp will be used instead.
-  VIRTUAL void call_VM_base(           // returns the register containing the thread upon return
+  virtual void call_VM_base(           // returns the register containing the thread upon return
     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
     Register java_thread,              // the thread if computed before     ; use noreg otherwise
     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
@@ -1004,8 +997,6 @@
         Register table0, Register table1, Register table2, Register table3,
         Register tmp, Register tmp2, Register tmp3);
 
-#undef VIRTUAL
-
   // Stack push and pop individual 64 bit registers
   void push(Register src);
   void pop(Register dst);
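The VIRTUAL macro removed above let a CC_INTERP build strip the virtual specifier so the C++ interpreter would never dispatch into InterpreterMacroAssembler's overrides of the call_VM helpers; with only the template interpreter left, the methods can simply be declared virtual, as the patch does. A toy sketch of that idiom, with stand-in class bodies rather than the real assembler interfaces:

#include <stdio.h>

#ifdef CC_INTERP
#define VIRTUAL
#else
#define VIRTUAL virtual
#endif

struct MacroAssembler {
  VIRTUAL void call_VM_leaf_base() { printf("MacroAssembler version\n"); }
};

struct InterpreterMacroAssembler : MacroAssembler {
  void call_VM_leaf_base() { printf("interp_masm version\n"); }
};

int main() {
  InterpreterMacroAssembler masm;
  MacroAssembler* base = &masm;
  base->call_VM_leaf_base();   // virtual dispatch only when VIRTUAL == virtual
  return 0;
}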
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -27,9 +27,9 @@
 #include "asm/macroAssembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
 #include "interpreter/bytecodeTracer.hpp"
 #include "oops/arrayOop.hpp"
@@ -59,8 +59,6 @@
 
 #define __ _masm->
 
-#ifndef CC_INTERP
-
 //-----------------------------------------------------------------------------
 
 extern "C" void entry(CodeBuffer*);
@@ -304,7 +302,7 @@
 //
 // rmethod: method
 //
-void InterpreterGenerator::generate_counter_incr(
+void TemplateInterpreterGenerator::generate_counter_incr(
         Label* overflow,
         Label* profile_method,
         Label* profile_method_continue) {
@@ -382,7 +380,7 @@
   }
 }
 
-void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
+void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
 
   // Asm interpreter on entry
   // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
@@ -401,7 +399,7 @@
                               InterpreterRuntime::frequency_counter_overflow),
              c_rarg1);
 
-  __ b(*do_continue);
+  __ b(do_continue);
 }
 
 // See if we've got enough room on the stack for locals plus overhead.
@@ -418,7 +416,7 @@
 //
 // Kills:
 //      r0
-void InterpreterGenerator::generate_stack_overflow_check(void) {
+void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
 
   // monitor entry size: see picture of stack set
   // (generate_method_entry) and frame_amd64.hpp
@@ -634,7 +632,7 @@
 //
 
 // Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
+address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
   // Code: _aload_0, _getfield, _areturn
   // parameter size = 1
@@ -712,7 +710,7 @@
  * Method entry for static native methods:
  *   int java.util.zip.CRC32.update(int crc, int b)
  */
-address InterpreterGenerator::generate_CRC32_update_entry() {
+address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
   if (UseCRC32Intrinsics) {
     address entry = __ pc();
 
@@ -766,7 +764,7 @@
  *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
  *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
  */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
   if (UseCRC32Intrinsics) {
     address entry = __ pc();
 
@@ -821,7 +819,12 @@
   return NULL;
 }
 
-void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
+// Not supported
+address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  return NULL;
+}
+
+void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // Bang each page in the shadow zone. We can't assume it's been done for
   // an interpreter frame with greater than a page of locals, so each page
   // needs to be checked.  Only true for non-native.
@@ -840,7 +843,7 @@
 // Interpreter stub for calling a native method. (asm interpreter)
 // This sets up a somewhat different looking stack for calling the
 // native method than the typical interpreter frame setup.
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
+address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // determine code generation flags
   bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
 
@@ -1269,7 +1272,7 @@
   if (inc_counter) {
     // Handle overflow of counter and compile method
     __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
+    generate_counter_overflow(continue_after_compile);
   }
 
   return entry_point;
@@ -1278,7 +1281,7 @@
 //
 // Generic interpreted method entry to (asm) interpreter
 //
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
   // determine code generation flags
   bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
 
@@ -1440,7 +1443,7 @@
     }
     // Handle overflow of counter and compile method
     __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
+    generate_counter_overflow(continue_after_compile);
   }
 
   return entry_point;
@@ -1726,17 +1729,6 @@
 }
 
 //-----------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
-  : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-//-----------------------------------------------------------------------------
 
 // Non-product code
 #ifndef PRODUCT
@@ -1923,4 +1915,3 @@
 
 #endif // BUILTIN_SIM
 #endif // !PRODUCT
-#endif // ! CC_INTERP
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_AARCH64_VM_TEMPLATEINTERPRETERGENERATOR_AARCH64_HPP
-#define CPU_AARCH64_VM_TEMPLATEINTERPRETERGENERATOR_AARCH64_HPP
-
- protected:
-
-void generate_fixed_frame(bool native_call);
-
- // address generate_asm_interpreter_entry(bool synchronized);
-
-#endif // CPU_AARCH64_VM_TEMPLATEINTERPRETERGENERATOR_AARCH64_HPP
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -31,6 +31,12 @@
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
+// Size of interpreter code.  Increase if too small.  Interpreter will
+// fail with a guarantee ("not enough space for interpreter generation");
+// if too small.
+// Run with +PrintInterpreter to get the VM to print out the size.
+// Max size with JVMTI
+int TemplateInterpreter::InterpreterCodeSize = 200 * 1024;
 
 int AbstractInterpreter::BasicType_as_index(BasicType type) {
   int i = 0;
@@ -97,7 +103,7 @@
                                          int callee_locals,
                                          bool is_top_frame) {
   // Note: This calculation must exactly parallel the frame setup
-  // in InterpreterGenerator::generate_method_entry.
+  // in TemplateInterpreterGenerator::generate_method_entry.
 
   // fixed size of an interpreter frame:
   int overhead = frame::sender_sp_offset -
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_AARCH64_VM_TEMPLATEINTERPRETER_AARCH64_HPP
-#define CPU_AARCH64_VM_TEMPLATEINTERPRETER_AARCH64_HPP
-
-
-  protected:
-
-  // Size of interpreter code.  Increase if too small.  Interpreter will
-  // fail with a guarantee ("not enough space for interpreter generation");
-  // if too small.
-  // Run with +PrintInterpreter to get the VM to print out the size.
-  // Max size with JVMTI
-  const static int InterpreterCodeSize = 200 * 1024;
-
-#endif // CPU_AARCH64_VM_TEMPLATEINTERPRETER_AARCH64_HPP
--- a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -39,8 +39,6 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
 
-#ifndef CC_INTERP
-
 #define __ _masm->
 
 // Platform-dependent initialization
@@ -3795,4 +3793,3 @@
   __ load_unsigned_byte(r1, at_bcp(3));
   __ lea(esp, Address(esp, r1, Address::uxtw(3)));
 }
-#endif // !CC_INTERP
--- a/hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -27,7 +27,7 @@
 #define CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
 
 #ifdef CC_INTERP
-#error "CC_INTERP no more supported. Removed in change 8145117."
+#error "CC_INTERP is no longer supported. Removed in change 8145117."
 #endif
 
 // Size of PPC Instructions
--- a/hotspot/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2015 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
-#define CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
-
- friend class AbstractInterpreterGenerator;
-
- private:
-
-  address generate_abstract_entry(void);
-  address generate_accessor_entry(void) { return NULL; }
-  address generate_empty_entry(void) { return NULL; }
-  address generate_Reference_get_entry(void);
-
-  address generate_CRC32_update_entry();
-  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
-  address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
-
-#endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -27,9 +27,9 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
@@ -416,7 +416,7 @@
 
 // Abstract method entry.
 //
-address InterpreterGenerator::generate_abstract_entry(void) {
+address TemplateInterpreterGenerator::generate_abstract_entry(void) {
   address entry = __ pc();
 
   //
@@ -474,7 +474,7 @@
 //    It contains a GC barrier which puts the reference into the satb buffer
 //    to indicate that someone holds a strong reference to the object the
 //    weak ref points to!
-address InterpreterGenerator::generate_Reference_get_entry(void) {
+address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
   // Code: _aload_0, _getfield, _areturn
   // parameter size = 1
   //
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2015 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_PPC_VM_INTERPRETER_PPC_HPP
-#define CPU_PPC_VM_INTERPRETER_PPC_HPP
-
- public:
-
-  // Stack index relative to tos (which points at value).
-  static int expr_index_at(int i) {
-    return stackElementWords * i;
-  }
-
-  // Already negated by c++ interpreter.
-  static int local_index_at(int i) {
-    assert(i <= 0, "local direction already negated");
-    return stackElementWords * i;
-  }
-
-  // The offset in bytes to access a expression stack slot
-  // relative to the esp pointer.
-  static int expr_offset_in_bytes(int slot) {
-    return stackElementSize * slot + wordSize;
-  }
-
-#endif // CPU_PPC_VM_INTERPRETER_PPC_HPP
--- a/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -27,9 +27,9 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
@@ -1245,7 +1245,7 @@
  * Method entry for static native methods:
  *   int java.util.zip.CRC32.update(int crc, int b)
  */
-address InterpreterGenerator::generate_CRC32_update_entry() {
+address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
   if (UseCRC32Intrinsics) {
     address start = __ pc();  // Remember stub start address (is rtn value).
     Label slow_path;
@@ -1305,7 +1305,7 @@
  *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
  *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
  */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
   if (UseCRC32Intrinsics) {
     address start = __ pc();  // Remember stub start address (is rtn value).
     Label slow_path;
@@ -1391,6 +1391,11 @@
   return NULL;
 }
 
+// Not supported
+address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  return NULL;
+}
+
 // =============================================================================
 // Exceptions
 
@@ -1643,16 +1648,6 @@
 }
 
 //-----------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
-  : TemplateInterpreterGenerator(code) {
-  generate_all(); // Down here so it can be "virtual".
-}
-
-//-----------------------------------------------------------------------------
 
 // Non-product code
 #ifndef PRODUCT
--- a/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, 2014 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP
-#define CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP
-
- protected:
-  address generate_normal_entry(bool synchronized);
-  address generate_native_entry(bool synchronized);
-  address generate_math_entry(AbstractInterpreter::MethodKind kind);
-
-  void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
-  void unlock_method(bool check_exceptions = true);
-
-  void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
-  void generate_counter_overflow(Label& continue_entry);
-
-  void generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals);
-  void generate_stack_overflow_check(Register Rframe_size, Register Rscratch1);
-
-#endif // CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -31,6 +31,12 @@
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
+// Size of interpreter code.  Increase if too small.  Interpreter will
+// fail with a guarantee ("not enough space for interpreter generation")
+// if too small.
+// Run with +PrintInterpreter to get the VM to print out the size.
+// Max size with JVMTI
+int TemplateInterpreter::InterpreterCodeSize = 230*K;
 
 int AbstractInterpreter::BasicType_as_index(BasicType type) {
   int i = 0;
@@ -79,7 +85,7 @@
                                          int callee_locals,
                                          bool is_top_frame) {
   // Note: This calculation must exactly parallel the frame setup
-  // in InterpreterGenerator::generate_fixed_frame.
+  // in TemplateInterpreterGenerator::generate_fixed_frame.
   assert(Interpreter::stackElementWords == 1, "sanity");
   const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
   const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, 2015 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
-#define CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
-
- protected:
-
-  // Size of interpreter code. Increase if too small.  Interpreter will
-  // fail with a guarantee ("not enough space for interpreter generation");
-  // if too small.
-  // Run with +PrintInterpreter to get the VM to print out the size.
-  // Max size with JVMTI
-  const static int InterpreterCodeSize = 230*K;
-
- public:
-  // Support abs and sqrt like in compiler.
-  // For others we can use a normal (native) entry.
-  static bool math_entry_available(AbstractInterpreter::MethodKind kind);
-#endif // CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
-
-
--- a/hotspot/src/cpu/sparc/vm/bytecodeInterpreter_sparc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "interp_masm_sparc.hpp"
-#include "interpreter/bytecodeInterpreter.hpp"
-#include "interpreter/bytecodeInterpreter.inline.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-
-// KILL THIS FILE
--- a/hotspot/src/cpu/sparc/vm/bytecodeInterpreter_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_SPARC_VM_BYTECODEINTERPRETER_SPARC_HPP
-#define CPU_SPARC_VM_BYTECODEINTERPRETER_SPARC_HPP
-
-// Platform specific for C++ based Interpreter
-#define LOTS_OF_REGS    /* Lets interpreter use plenty of registers */
-
-private:
-
-    // save the bottom of the stack after frame manager setup. For ease of restoration after return
-    // from recursive interpreter call
-    intptr_t*  _frame_bottom;             /* saved bottom of frame manager frame */
-    intptr_t* _last_Java_pc;              /* pc to return to in frame manager */
-    interpreterState _self_link;          /*  Previous interpreter state  */ /* sometimes points to self??? */
-    double    _native_fresult;            /* save result of native calls that might return floats */
-    intptr_t  _native_lresult;            /* save result of native calls that might return handle/longs */
-public:
-
-    static void pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp);
-
-
-#define SET_LAST_JAVA_FRAME()
-
-#define RESET_LAST_JAVA_FRAME() THREAD->frame_anchor()->set_flags(0);
-
-/*
- * Macros for accessing the stack.
- */
-#undef STACK_INT
-#undef STACK_FLOAT
-#undef STACK_ADDR
-#undef STACK_OBJECT
-#undef STACK_DOUBLE
-#undef STACK_LONG
-// JavaStack Implementation
-
-
-#define GET_STACK_SLOT(offset)    (*((intptr_t*) &topOfStack[-(offset)]))
-#define STACK_SLOT(offset)    ((address) &topOfStack[-(offset)])
-#define STACK_ADDR(offset)    (*((address *) &topOfStack[-(offset)]))
-#define STACK_INT(offset)     (*((jint*) &topOfStack[-(offset)]))
-#define STACK_FLOAT(offset)   (*((jfloat *) &topOfStack[-(offset)]))
-#define STACK_OBJECT(offset)  (*((oop *) &topOfStack [-(offset)]))
-#define STACK_DOUBLE(offset)  (((VMJavaVal64*) &topOfStack[-(offset)])->d)
-#define STACK_LONG(offset)    (((VMJavaVal64 *) &topOfStack[-(offset)])->l)
-
-#define SET_STACK_SLOT(value, offset)   (*(intptr_t*)&topOfStack[-(offset)] = *(intptr_t*)(value))
-#define SET_STACK_ADDR(value, offset)   (*((address *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_INT(value, offset)    (*((jint *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_FLOAT(value, offset)  (*((jfloat *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_OBJECT(value, offset) (*((oop *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_DOUBLE(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = (value))
-#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d =  \
-                                                 ((VMJavaVal64*)(addr))->d)
-#define SET_STACK_LONG(value, offset)   (((VMJavaVal64*)&topOfStack[-(offset)])->l = (value))
-#define SET_STACK_LONG_FROM_ADDR(addr, offset)   (((VMJavaVal64*)&topOfStack[-(offset)])->l =  \
-                                                 ((VMJavaVal64*)(addr))->l)
-
-#define LOCALS_SLOT(offset)    ((intptr_t*)&locals[-(offset)])
-#define LOCALS_ADDR(offset)    ((address)locals[-(offset)])
-#define LOCALS_INT(offset)     (*((jint*)&locals[-(offset)]))
-#define LOCALS_FLOAT(offset)   (*((jfloat*)&locals[-(offset)]))
-#define LOCALS_OBJECT(offset)  (cast_to_oop(locals[-(offset)]))
-#define LOCALS_DOUBLE(offset)  (((VMJavaVal64*)&locals[-((offset) + 1)])->d)
-#define LOCALS_LONG(offset)    (((VMJavaVal64*)&locals[-((offset) + 1)])->l)
-#define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
-#define LOCALS_DOUBLE_AT(offset) (((address)&locals[-((offset) + 1)]))
-
-#define SET_LOCALS_SLOT(value, offset)    (*(intptr_t*)&locals[-(offset)] = *(intptr_t *)(value))
-#define SET_LOCALS_ADDR(value, offset)    (*((address *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_INT(value, offset)     (*((jint *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_FLOAT(value, offset)   (*((jfloat *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_OBJECT(value, offset)  (*((oop *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_DOUBLE(value, offset)  (((VMJavaVal64*)&locals[-((offset)+1)])->d = (value))
-#define SET_LOCALS_LONG(value, offset)    (((VMJavaVal64*)&locals[-((offset)+1)])->l = (value))
-#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = \
-                                                  ((VMJavaVal64*)(addr))->d)
-#define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \
-                                                ((VMJavaVal64*)(addr))->l)
-
-#endif // CPU_SPARC_VM_BYTECODEINTERPRETER_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/bytecodeInterpreter_sparc.inline.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,338 +0,0 @@
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_SPARC_VM_BYTECODEINTERPRETER_SPARC_INLINE_HPP
-#define CPU_SPARC_VM_BYTECODEINTERPRETER_SPARC_INLINE_HPP
-
-// Inline interpreter functions for sparc
-
-inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) { return op1 + op2; }
-inline jfloat BytecodeInterpreter::VMfloatSub(jfloat op1, jfloat op2) { return op1 - op2; }
-inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) { return op1 * op2; }
-inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) { return op1 / op2; }
-inline jfloat BytecodeInterpreter::VMfloatRem(jfloat op1, jfloat op2) { return fmod(op1, op2); }
-
-inline jfloat BytecodeInterpreter::VMfloatNeg(jfloat op) { return -op; }
-
-inline int32_t BytecodeInterpreter::VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) {
-  return ( op1 < op2 ? -1 :
-               op1 > op2 ? 1 :
-                   op1 == op2 ? 0 :
-                       (direction == -1 || direction == 1) ? direction : 0);
-
-}
-
-inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2], const uint32_t from[2]) {
-  // x86 can do unaligned copies but not 64bits at a time
-  to[0] = from[0]; to[1] = from[1];
-}
-
-// The long operations depend on compiler support for "long long" on x86
-
-inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) {
-  return op1 + op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) {
-  return op1 & op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) {
-  // QQQ what about check and throw...
-  return op1 / op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) {
-  return op1 * op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) {
-  return op1 | op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) {
-  return op1 - op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) {
-  return op1 ^ op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) {
-  return op1 % op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) {
-  // CVM did this 0x3f mask, is the really needed??? QQQ
-  return ((unsigned long long) op1) >> (op2 & 0x3F);
-}
-
-inline jlong BytecodeInterpreter::VMlongShr(jlong op1, jint op2) {
-  return op1 >> (op2 & 0x3F);
-}
-
-inline jlong BytecodeInterpreter::VMlongShl(jlong op1, jint op2) {
-  return op1 << (op2 & 0x3F);
-}
-
-inline jlong BytecodeInterpreter::VMlongNeg(jlong op) {
-  return -op;
-}
-
-inline jlong BytecodeInterpreter::VMlongNot(jlong op) {
-  return ~op;
-}
-
-inline int32_t BytecodeInterpreter::VMlongLtz(jlong op) {
-  return (op <= 0);
-}
-
-inline int32_t BytecodeInterpreter::VMlongGez(jlong op) {
-  return (op >= 0);
-}
-
-inline int32_t BytecodeInterpreter::VMlongEqz(jlong op) {
-  return (op == 0);
-}
-
-inline int32_t BytecodeInterpreter::VMlongEq(jlong op1, jlong op2) {
-  return (op1 == op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongNe(jlong op1, jlong op2) {
-  return (op1 != op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongGe(jlong op1, jlong op2) {
-  return (op1 >= op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongLe(jlong op1, jlong op2) {
-  return (op1 <= op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongLt(jlong op1, jlong op2) {
-  return (op1 < op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongGt(jlong op1, jlong op2) {
-  return (op1 > op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongCompare(jlong op1, jlong op2) {
-  return (VMlongLt(op1, op2) ? -1 : VMlongGt(op1, op2) ? 1 : 0);
-}
-
-// Long conversions
-
-inline jdouble BytecodeInterpreter::VMlong2Double(jlong val) {
-  return (jdouble) val;
-}
-
-inline jfloat BytecodeInterpreter::VMlong2Float(jlong val) {
-  return (jfloat) val;
-}
-
-inline jint BytecodeInterpreter::VMlong2Int(jlong val) {
-  return (jint) val;
-}
-
-// Double Arithmetic
-
-inline jdouble BytecodeInterpreter::VMdoubleAdd(jdouble op1, jdouble op2) {
-  return op1 + op2;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleDiv(jdouble op1, jdouble op2) {
-  // Divide by zero... QQQ
-  return op1 / op2;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleMul(jdouble op1, jdouble op2) {
-  return op1 * op2;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleNeg(jdouble op) {
-  return -op;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleRem(jdouble op1, jdouble op2) {
-  return fmod(op1, op2);
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleSub(jdouble op1, jdouble op2) {
-  return op1 - op2;
-}
-
-inline int32_t BytecodeInterpreter::VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction) {
-  return ( op1 < op2 ? -1 :
-               op1 > op2 ? 1 :
-                   op1 == op2 ? 0 :
-                       (direction == -1 || direction == 1) ? direction : 0);
-}
-
-// Double Conversions
-
-inline jfloat BytecodeInterpreter::VMdouble2Float(jdouble val) {
-  return (jfloat) val;
-}
-
-// Float Conversions
-
-inline jdouble BytecodeInterpreter::VMfloat2Double(jfloat op) {
-  return (jdouble) op;
-}
-
-// Integer Arithmetic
-
-inline jint BytecodeInterpreter::VMintAdd(jint op1, jint op2) {
-  return op1 + op2;
-}
-
-inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) {
-  return op1 & op2;
-}
-
-inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
-  /* it's possible we could catch this special case implicitly */
-  if (op1 == 0x80000000 && op2 == -1) return op1;
-  else return op1 / op2;
-}
-
-inline jint BytecodeInterpreter::VMintMul(jint op1, jint op2) {
-  return op1 * op2;
-}
-
-inline jint BytecodeInterpreter::VMintNeg(jint op) {
-  return -op;
-}
-
-inline jint BytecodeInterpreter::VMintOr(jint op1, jint op2) {
-  return op1 | op2;
-}
-
-inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
-  /* it's possible we could catch this special case implicitly */
-  if (op1 == 0x80000000 && op2 == -1) return 0;
-  else return op1 % op2;
-}
-
-inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) {
-  return op1 << (op2 & 0x1f);
-}
-
-inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) {
-  return op1 >> (op2 & 0x1f);
-}
-
-inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
-  return op1 - op2;
-}
-
-inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
-  return ((juint) op1) >> (op2 & 0x1f);
-}
-
-inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) {
-  return op1 ^ op2;
-}
-
-inline jdouble BytecodeInterpreter::VMint2Double(jint val) {
-  return (jdouble) val;
-}
-
-inline jfloat BytecodeInterpreter::VMint2Float(jint val) {
-  return (jfloat) val;
-}
-
-inline jlong BytecodeInterpreter::VMint2Long(jint val) {
-  return (jlong) val;
-}
-
-inline jchar BytecodeInterpreter::VMint2Char(jint val) {
-  return (jchar) val;
-}
-
-inline jshort BytecodeInterpreter::VMint2Short(jint val) {
-  return (jshort) val;
-}
-
-inline jbyte BytecodeInterpreter::VMint2Byte(jint val) {
-  return (jbyte) val;
-}
-
-// The implementations are platform dependent. We have to worry about alignment
-// issues on some machines which can change on the same platform depending on
-// whether it is an LP64 machine also.
-
-// We know that on LP32 mode that longs/doubles are the only thing that gives
-// us alignment headaches. We also know that the worst we have is 32bit alignment
-// so thing are not really too bad.
-// (Also sparcworks compiler does the right thing for free if we don't use -arch..
-// switches. Only gcc gives us a hard time. In LP64 mode I think we have no issue
-// with alignment.
-
-#ifdef _GNU_SOURCE
-  #define ALIGN_CONVERTER        /* Needs alignment converter */
-#else
-  #undef ALIGN_CONVERTER        /* No alignment converter */
-#endif /* _GNU_SOURCE */
-
-#ifdef ALIGN_CONVERTER
-class u8_converter {
-
-  private:
-
-  public:
-  static jdouble get_jdouble(address p) {
-    VMJavaVal64 tmp;
-    tmp.v[0] = ((uint32_t*)p)[0];
-    tmp.v[1] = ((uint32_t*)p)[1];
-    return tmp.d;
-  }
-
-  static void put_jdouble(address p, jdouble d) {
-    VMJavaVal64 tmp;
-    tmp.d = d;
-    ((uint32_t*)p)[0] = tmp.v[0];
-    ((uint32_t*)p)[1] = tmp.v[1];
-  }
-
-  static jlong get_jlong(address p) {
-    VMJavaVal64 tmp;
-    tmp.v[0] = ((uint32_t*)p)[0];
-    tmp.v[1] = ((uint32_t*)p)[1];
-    return tmp.l;
-  }
-
-  static void put_jlong(address p, jlong l) {
-    VMJavaVal64 tmp;
-    tmp.l = l;
-    ((uint32_t*)p)[0] = tmp.v[0];
-    ((uint32_t*)p)[1] = tmp.v[1];
-  }
-};
-#endif /* ALIGN_CONVERTER */
-
-#endif // CPU_SPARC_VM_BYTECODEINTERPRETER_SPARC_INLINE_HPP
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -37,11 +37,7 @@
 define_pd_global(bool, PreferInterpreterNativeStubs, false);
 define_pd_global(bool, ProfileTraps,                 true);
 define_pd_global(bool, UseOnStackReplacement,        true);
-#ifdef CC_INTERP
-define_pd_global(bool, ProfileInterpreter,           false);
-#else
 define_pd_global(bool, ProfileInterpreter,           true);
-#endif // CC_INTERP
 define_pd_global(bool, TieredCompilation,            trueInTiered);
 define_pd_global(intx, CompileThreshold,             10000);
 
--- a/hotspot/src/cpu/sparc/vm/cppInterpreterGenerator_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_SPARC_VM_CPPINTERPRETERGENERATOR_SPARC_HPP
-#define CPU_SPARC_VM_CPPINTERPRETERGENERATOR_SPARC_HPP
-
-  static address frame_manager_return;
-  static address frame_manager_sync_return;
-
-
-  void generate_more_monitors();
-  void generate_deopt_handling();
-  void lock_method(void);
-  void adjust_callers_stack(Register args);
-  void generate_compute_interpreter_state(const Register state,
-                                          const Register prev_state,
-                                          bool native);
-
-#endif // CPU_SPARC_VM_CPPINTERPRETERGENERATOR_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2201 +0,0 @@
-/*
- * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/cppInterpreter.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-#ifdef SHARK
-#include "shark/shark_globals.hpp"
-#endif
-
-#ifdef CC_INTERP
-
-// Routine exists to make tracebacks look decent in debugger
-// while "shadow" interpreter frames are on stack. It is also
-// used to distinguish interpreter frames.
-
-extern "C" void RecursiveInterpreterActivation(interpreterState istate) {
-  ShouldNotReachHere();
-}
-
-bool CppInterpreter::contains(address pc) {
-  return ( _code->contains(pc) ||
-         ( pc == (CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset)));
-}
-
-#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
-#define __ _masm->
-
-Label frame_manager_entry; // c++ interpreter entry point this holds that entry point label.
-
-static address unctrap_frame_manager_entry  = NULL;
-
-static address interpreter_return_address  = NULL;
-static address deopt_frame_manager_return_atos  = NULL;
-static address deopt_frame_manager_return_btos  = NULL;
-static address deopt_frame_manager_return_itos  = NULL;
-static address deopt_frame_manager_return_ltos  = NULL;
-static address deopt_frame_manager_return_ftos  = NULL;
-static address deopt_frame_manager_return_dtos  = NULL;
-static address deopt_frame_manager_return_vtos  = NULL;
-
-const Register prevState = G1_scratch;
-
-void InterpreterGenerator::save_native_result(void) {
-  // result potentially in O0/O1: save it across calls
-  __ stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
-#ifdef _LP64
-  __ stx(O0, STATE(_native_lresult));
-#else
-  __ std(O0, STATE(_native_lresult));
-#endif
-}
-
-void InterpreterGenerator::restore_native_result(void) {
-
-  // Restore any method result value
-  __ ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
-#ifdef _LP64
-  __ ldx(STATE(_native_lresult), O0);
-#else
-  __ ldd(STATE(_native_lresult), O0);
-#endif
-}
-
-// A result handler converts/unboxes a native call result into
-// a java interpreter/compiler result. The current frame is an
-// interpreter frame. The activation frame unwind code must be
-// consistent with that of TemplateTable::_return(...). In the
-// case of native methods, the caller's SP was not modified.
-address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
-  address entry = __ pc();
-  Register Itos_i  = Otos_i ->after_save();
-  Register Itos_l  = Otos_l ->after_save();
-  Register Itos_l1 = Otos_l1->after_save();
-  Register Itos_l2 = Otos_l2->after_save();
-  switch (type) {
-    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
-    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
-    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
-    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
-    case T_LONG   :
-#ifndef _LP64
-                    __ mov(O1, Itos_l2);  // move other half of long
-#endif              // ifdef or no ifdef, fall through to the T_INT case
-    case T_INT    : __ mov(O0, Itos_i);                         break;
-    case T_VOID   : /* nothing to do */                         break;
-    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
-    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
-    case T_OBJECT :
-      __ ld_ptr(STATE(_oop_temp), Itos_i);
-      __ verify_oop(Itos_i);
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ ret();                           // return from interpreter activation
-  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
-  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
-  return entry;
-}
-
-// tosca based result to c++ interpreter stack based result.
-// Result goes to address in L1_scratch
-
-address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
-  // A result is in the native abi result register from a native method call.
-  // We need to return this result to the interpreter by pushing the result on the interpreter's
-  // stack. This is relatively simple the destination is in L1_scratch
-  // i.e. L1_scratch is the first free element on the stack. If we "push" a return value we must
-  // adjust L1_scratch
-  address entry = __ pc();
-  switch (type) {
-    case T_BOOLEAN:
-      // !0 => true; 0 => false
-      __ subcc(G0, O0, G0);
-      __ addc(G0, 0, O0);
-      __ st(O0, L1_scratch, 0);
-      __ sub(L1_scratch, wordSize, L1_scratch);
-      break;
-
-    // cannot use and3, 0xFFFF too big as immediate value!
-    case T_CHAR   :
-      __ sll(O0, 16, O0);
-      __ srl(O0, 16, O0);
-      __ st(O0, L1_scratch, 0);
-      __ sub(L1_scratch, wordSize, L1_scratch);
-      break;
-
-    case T_BYTE   :
-      __ sll(O0, 24, O0);
-      __ sra(O0, 24, O0);
-      __ st(O0, L1_scratch, 0);
-      __ sub(L1_scratch, wordSize, L1_scratch);
-      break;
-
-    case T_SHORT  :
-      __ sll(O0, 16, O0);
-      __ sra(O0, 16, O0);
-      __ st(O0, L1_scratch, 0);
-      __ sub(L1_scratch, wordSize, L1_scratch);
-      break;
-    case T_LONG   :
-#ifndef _LP64
-#if defined(COMPILER2)
-  // All return values are where we want them, except for Longs.  C2 returns
-  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
-  // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
-  // build even if we are returning from interpreted we just do a little
-  // stupid shuffing.
-  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
-  // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
-  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
-      __ stx(G1, L1_scratch, -wordSize);
-#else
-      // native result is in O0, O1
-      __ st(O1, L1_scratch, 0);                      // Low order
-      __ st(O0, L1_scratch, -wordSize);              // High order
-#endif /* COMPILER2 */
-#else
-      __ stx(O0, L1_scratch, -wordSize);
-#endif
-      __ sub(L1_scratch, 2*wordSize, L1_scratch);
-      break;
-
-    case T_INT    :
-      __ st(O0, L1_scratch, 0);
-      __ sub(L1_scratch, wordSize, L1_scratch);
-      break;
-
-    case T_VOID   : /* nothing to do */
-      break;
-
-    case T_FLOAT  :
-      __ stf(FloatRegisterImpl::S, F0, L1_scratch, 0);
-      __ sub(L1_scratch, wordSize, L1_scratch);
-      break;
-
-    case T_DOUBLE :
-      // Every stack slot is aligned on 64 bit, However is this
-      // the correct stack slot on 64bit?? QQQ
-      __ stf(FloatRegisterImpl::D, F0, L1_scratch, -wordSize);
-      __ sub(L1_scratch, 2*wordSize, L1_scratch);
-      break;
-    case T_OBJECT :
-      __ verify_oop(O0);
-      __ st_ptr(O0, L1_scratch, 0);
-      __ sub(L1_scratch, wordSize, L1_scratch);
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ retl();                          // return from interpreter activation
-  __ delayed()->nop();                // schedule this better
-  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
-  return entry;
-}
-
-address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
-  // A result is in the java expression stack of the interpreted method that has just
-  // returned. Place this result on the java expression stack of the caller.
-  //
-  // The current interpreter activation in Lstate is for the method just returning its
-  // result. So we know that the result of this method is on the top of the current
-  // execution stack (which is pre-pushed) and will be return to the top of the caller
-  // stack. The top of the callers stack is the bottom of the locals of the current
-  // activation.
-  // Because of the way activation are managed by the frame manager the value of esp is
-  // below both the stack top of the current activation and naturally the stack top
-  // of the calling activation. This enable this routine to leave the return address
-  // to the frame manager on the stack and do a vanilla return.
-  //
-  // On entry: O0 - points to source (callee stack top)
-  //           O1 - points to destination (caller stack top [i.e. free location])
-  // destroys O2, O3
-  //
-
-  address entry = __ pc();
-  switch (type) {
-    case T_VOID:  break;
-      break;
-    case T_FLOAT  :
-    case T_BOOLEAN:
-    case T_CHAR   :
-    case T_BYTE   :
-    case T_SHORT  :
-    case T_INT    :
-      // 1 word result
-      __ ld(O0, 0, O2);
-      __ st(O2, O1, 0);
-      __ sub(O1, wordSize, O1);
-      break;
-    case T_DOUBLE  :
-    case T_LONG    :
-      // return top two words on current expression stack to caller's expression stack
-      // The caller's expression stack is adjacent to the current frame manager's intepretState
-      // except we allocated one extra word for this intepretState so we won't overwrite it
-      // when we return a two word result.
-#ifdef _LP64
-      __ ld_ptr(O0, 0, O2);
-      __ st_ptr(O2, O1, -wordSize);
-#else
-      __ ld(O0, 0, O2);
-      __ ld(O0, wordSize, O3);
-      __ st(O3, O1, 0);
-      __ st(O2, O1, -wordSize);
-#endif
-      __ sub(O1, 2*wordSize, O1);
-      break;
-    case T_OBJECT :
-      __ ld_ptr(O0, 0, O2);
-      __ verify_oop(O2);                                               // verify it
-      __ st_ptr(O2, O1, 0);
-      __ sub(O1, wordSize, O1);
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ retl();
-  __ delayed()->nop(); // QQ schedule this better
-  return entry;
-}
-
-address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
-  // A result is in the java expression stack of the interpreted method that has just
-  // returned. Place this result in the native abi that the caller expects.
-  // We are in a new frame registers we set must be in caller (i.e. callstub) frame.
-  //
-  // Similar to generate_stack_to_stack_converter above. Called at a similar time from the
-  // frame manager execept in this situation the caller is native code (c1/c2/call_stub)
-  // and so rather than return result onto caller's java expression stack we return the
-  // result in the expected location based on the native abi.
-  // On entry: O0 - source (stack top)
-  // On exit result in expected output register
-  // QQQ schedule this better
-
-  address entry = __ pc();
-  switch (type) {
-    case T_VOID:  break;
-      break;
-    case T_FLOAT  :
-      __ ldf(FloatRegisterImpl::S, O0, 0, F0);
-      break;
-    case T_BOOLEAN:
-    case T_CHAR   :
-    case T_BYTE   :
-    case T_SHORT  :
-    case T_INT    :
-      // 1 word result
-      __ ld(O0, 0, O0->after_save());
-      break;
-    case T_DOUBLE  :
-      __ ldf(FloatRegisterImpl::D, O0, 0, F0);
-      break;
-    case T_LONG    :
-      // return top two words on current expression stack to caller's expression stack
-      // The caller's expression stack is adjacent to the current frame manager's interpretState
-      // except we allocated one extra word for this intepretState so we won't overwrite it
-      // when we return a two word result.
-#ifdef _LP64
-      __ ld_ptr(O0, 0, O0->after_save());
-#else
-      __ ld(O0, wordSize, O1->after_save());
-      __ ld(O0, 0, O0->after_save());
-#endif
-#if defined(COMPILER2) && !defined(_LP64)
-      // C2 expects long results in G1 we can't tell if we're returning to interpreted
-      // or compiled so just be safe use G1 and O0/O1
-
-      // Shift bits into high (msb) of G1
-      __ sllx(Otos_l1->after_save(), 32, G1);
-      // Zero extend low bits
-      __ srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
-      __ or3 (Otos_l2->after_save(), G1, G1);
-#endif /* COMPILER2 */
-      break;
-    case T_OBJECT :
-      __ ld_ptr(O0, 0, O0->after_save());
-      __ verify_oop(O0->after_save());                                               // verify it
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ retl();
-  __ delayed()->nop();
-  return entry;
-}
-
-address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
-  // make it look good in the debugger
-  return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset;
-}
-
-address CppInterpreter::deopt_entry(TosState state, int length) {
-  address ret = NULL;
-  if (length != 0) {
-    switch (state) {
-      case atos: ret = deopt_frame_manager_return_atos; break;
-      case btos: ret = deopt_frame_manager_return_btos; break;
-      case ctos:
-      case stos:
-      case itos: ret = deopt_frame_manager_return_itos; break;
-      case ltos: ret = deopt_frame_manager_return_ltos; break;
-      case ftos: ret = deopt_frame_manager_return_ftos; break;
-      case dtos: ret = deopt_frame_manager_return_dtos; break;
-      case vtos: ret = deopt_frame_manager_return_vtos; break;
-    }
-  } else {
-    ret = unctrap_frame_manager_entry;  // re-execute the bytecode ( e.g. uncommon trap)
-  }
-  assert(ret != NULL, "Not initialized");
-  return ret;
-}
-
-//
-// Helpers for commoning out cases in the various type of method entries.
-//
-
-// increment invocation count & check for overflow
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test
-//
-// Lmethod: method
-// ??: invocation counter
-//
-void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-  Label done;
-  const Register Rcounters = G3_scratch;
-
-  __ ld_ptr(STATE(_method), G5_method);
-  __ get_method_counters(G5_method, Rcounters, done);
-
-  // Update standard invocation counters
-  __ increment_invocation_counter(Rcounters, O0, G4_scratch);
-  if (ProfileInterpreter) {
-    Address interpreter_invocation_counter(Rcounters,
-            in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
-    __ ld(interpreter_invocation_counter, G4_scratch);
-    __ inc(G4_scratch);
-    __ st(G4_scratch, interpreter_invocation_counter);
-  }
-
-  AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
-  __ load_contents(invocation_limit, G3_scratch);
-  __ cmp(O0, G3_scratch);
-  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
-  __ delayed()->nop();
-  __ bind(done);
-}
-
-address InterpreterGenerator::generate_empty_entry(void) {
-
-  // A method that does nothing but return...
-
-  address entry = __ pc();
-  Label slow_path;
-
-  // do nothing for empty methods (do not even increment invocation counter)
-  if ( UseFastEmptyMethods) {
-    // If we need a safepoint check, generate full interpreter entry.
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-    __ br(Assembler::notEqual, false, Assembler::pn, frame_manager_entry);
-    __ delayed()->nop();
-
-    // Code: _return
-    __ retl();
-    __ delayed()->mov(O5_savedSP, SP);
-    return entry;
-  }
-  return NULL;
-}
-
-address InterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  if (UseG1GC) {
-    // We need to generate have a routine that generates code to:
-    //   * load the value in the referent field
-    //   * passes that value to the pre-barrier.
-    //
-    // In the case of G1 this will record the value of the
-    // referent in an SATB buffer if marking is active.
-    // This will cause concurrent marking to mark the referent
-    // field as live.
-    Unimplemented();
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the accessor entry point
-  // Reference.get is an accessor
-  return NULL;
-}
-
-//
-// Interpreter stub for calling a native method. (C++ interpreter)
-// This sets up a somewhat different looking stack for calling the native method
-// than the typical interpreter frame setup.
-//
-
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
-  address entry = __ pc();
-
-  // the following temporary registers are used during frame creation
-  const Register Gtmp1 = G3_scratch ;
-  const Register Gtmp2 = G1_scratch;
-  const Register RconstMethod = Gtmp1;
-  const Address constMethod(G5_method, in_bytes(Method::const_offset()));
-  const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
-
-  bool inc_counter  = UseCompiler || CountCompiledCalls;
-
-  // make sure registers are different!
-  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
-
-  const Address access_flags      (G5_method, in_bytes(Method::access_flags_offset()));
-
-  Label Lentry;
-  __ bind(Lentry);
-
-  const Register Glocals_size = G3;
-  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
-
-  // make sure method is native & not abstract
-  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
-#ifdef ASSERT
-  __ ld(access_flags, Gtmp1);
-  {
-    Label L;
-    __ btst(JVM_ACC_NATIVE, Gtmp1);
-    __ br(Assembler::notZero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("tried to execute non-native method as native");
-    __ bind(L);
-  }
-  { Label L;
-    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
-    __ br(Assembler::zero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("tried to execute abstract method as non-abstract");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  __ ld_ptr(constMethod, RconstMethod);
-  __ lduh(size_of_parameters, Gtmp1);
-  __ sll(Gtmp1, LogBytesPerWord, Gtmp2);       // parameter size in bytes
-  __ add(Gargs, Gtmp2, Gargs);                 // points to first local + BytesPerWord
-  // NEW
-  __ add(Gargs, -wordSize, Gargs);             // points to first local[0]
-  // generate the code to allocate the interpreter stack frame
-  // NEW FRAME ALLOCATED HERE
-  // save callers original sp
-  // __ mov(SP, I5_savedSP->after_restore());
-
-  generate_compute_interpreter_state(Lstate, G0, true);
-
-  // At this point Lstate points to new interpreter state
-  //
-
-  const Address do_not_unlock_if_synchronized(G2_thread,
-      in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of synchronized methods which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
-  // runtime, exception handling i.e. unlock_if_synchronized_method will
-  // check this thread local flag.
-  // This flag has two effects, one is to force an unwind in the topmost
-  // interpreter frame and not perform an unlock while doing so.
-
-  __ movbool(true, G3_scratch);
-  __ stbool(G3_scratch, do_not_unlock_if_synchronized);
-
-
-  // increment invocation counter and check for overflow
-  //
-  // Note: checking for negative value instead of overflow
-  //       so we have a 'sticky' overflow test (may be of
-  //       importance as soon as we have true MT/MP)
-  Label invocation_counter_overflow;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-  }
-  Label Lcontinue;
-  __ bind(Lcontinue);
-
-  bang_stack_shadow_pages(true);
-  // reset the _do_not_unlock_if_synchronized flag
-  __ stbool(G0, do_not_unlock_if_synchronized);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check, so method is not locked
-  // if counter overflows.
-
-  if (synchronized) {
-    lock_method();
-    // Don't see how G2_thread is preserved here...
-    // __ verify_thread(); QQQ destroys L0,L1 can't use
-  } else {
-#ifdef ASSERT
-    { Label ok;
-      __ ld_ptr(STATE(_method), G5_method);
-      __ ld(access_flags, O0);
-      __ btst(JVM_ACC_SYNCHRONIZED, O0);
-      __ br( Assembler::zero, false, Assembler::pt, ok);
-      __ delayed()->nop();
-      __ stop("method needs synchronization");
-      __ bind(ok);
-    }
-#endif // ASSERT
-  }
-
-  // start execution
-
-//   __ verify_thread(); kills L1,L2 can't  use at the moment
-
-  // jvmti/jvmpi support
-  __ notify_method_entry();
-
-  // native call
-
-  // (note that O0 is never an oop--at most it is a handle)
-  // It is important not to smash any handles created by this call,
-  // until any oop handle in O0 is dereferenced.
-
-  // (note that the space for outgoing params is preallocated)
-
-  // get signature handler
-
-  Label pending_exception_present;
-
-  { Label L;
-    __ ld_ptr(STATE(_method), G5_method);
-    __ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
-    __ tst(G3_scratch);
-    __ brx(Assembler::notZero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), G5_method, false);
-    __ ld_ptr(STATE(_method), G5_method);
-
-    Address exception_addr(G2_thread, in_bytes(Thread::pending_exception_offset()));
-    __ ld_ptr(exception_addr, G3_scratch);
-    __ br_notnull_short(G3_scratch, Assembler::pn, pending_exception_present);
-    __ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
-    __ bind(L);
-  }
-
-  // Push a new frame so that the args will really be stored in
-  // Copy a few locals across so the new frame has the variables
-  // we need but these values will be dead at the jni call and
-  // therefore not gc volatile like the values in the current
-  // frame (Lstate in particular)
-
-  // Flush the state pointer to the register save area
-  // Which is the only register we need for a stack walk.
-  __ st_ptr(Lstate, SP, (Lstate->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
-
-  __ mov(Lstate, O1);         // Need to pass the state pointer across the frame
-
-  // Calculate current frame size
-  __ sub(SP, FP, O3);         // Calculate negative of current frame size
-  __ save(SP, O3, SP);        // Allocate an identical sized frame
-
-  __ mov(I1, Lstate);          // In the "natural" register.
-
-  // Note I7 has leftover trash. Slow signature handler will fill it in
-  // should we get there. Normal jni call will set reasonable last_Java_pc
-  // below (and fix I7 so the stack trace doesn't have a meaningless frame
-  // in it).
-
-
-  // call signature handler
-  __ ld_ptr(STATE(_method), Lmethod);
-  __ ld_ptr(STATE(_locals), Llocals);
-
-  __ callr(G3_scratch, 0);
-  __ delayed()->nop();
-  __ ld_ptr(STATE(_thread), G2_thread);        // restore thread (shouldn't be needed)
-
-  { Label not_static;
-
-    __ ld_ptr(STATE(_method), G5_method);
-    __ ld(access_flags, O0);
-    __ btst(JVM_ACC_STATIC, O0);
-    __ br( Assembler::zero, false, Assembler::pt, not_static);
-    __ delayed()->
-      // get native function entry point(O0 is a good temp until the very end)
-       ld_ptr(Address(G5_method, in_bytes(Method::native_function_offset())), O0);
-    // for static methods insert the mirror argument
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-
-    __ ld_ptr(Address(G5_method, in_bytes(Method:: const_offset())), O1);
-    __ ld_ptr(Address(O1, in_bytes(ConstMethod::constants_offset())), O1);
-    __ ld_ptr(Address(O1, ConstantPool::pool_holder_offset_in_bytes()), O1);
-    __ ld_ptr(O1, mirror_offset, O1);
-    // where the mirror handle body is allocated:
-#ifdef ASSERT
-    if (!PrintSignatureHandlers)  // do not dirty the output with this
-    { Label L;
-      __ tst(O1);
-      __ brx(Assembler::notZero, false, Assembler::pt, L);
-      __ delayed()->nop();
-      __ stop("mirror is missing");
-      __ bind(L);
-    }
-#endif // ASSERT
-    __ st_ptr(O1, STATE(_oop_temp));
-    __ add(STATE(_oop_temp), O1);            // this is really an LEA not an add
-    __ bind(not_static);
-  }
-
-  // At this point, arguments have been copied off of stack into
-  // their JNI positions, which are O1..O5 and SP[68..].
-  // Oops are boxed in-place on the stack, with handles copied to arguments.
-  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.
-
-#ifdef ASSERT
-  { Label L;
-    __ tst(O0);
-    __ brx(Assembler::notZero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("native entry point is missing");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  //
-  // setup the java frame anchor
-  //
-  // The scavenge function only needs to know that the PC of this frame is
-  // in the interpreter method entry code, it doesn't need to know the exact
-  // PC and hence we can use O7 which points to the return address from the
-  // previous call in the code stream (signature handler function)
-  //
-  // The other trick is we set last_Java_sp to FP instead of the usual SP because
-  // we have pushed the extra frame in order to protect the volatile register(s)
-  // in that frame when we return from the jni call
-  //
-
-
-  __ set_last_Java_frame(FP, O7);
-  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
-                   // not meaningless information that'll confuse me.
-
-  // flush the windows now. We don't care about the current (protection) frame
-  // only the outer frames
-
-  __ flushw();
-
-  // mark windows as flushed
-  Address flags(G2_thread,
-                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
-  __ set(JavaFrameAnchor::flushed, G3_scratch);
-  __ st(G3_scratch, flags);
-
-  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
-
-  Address thread_state(G2_thread, in_bytes(JavaThread::thread_state_offset()));
-#ifdef ASSERT
-  { Label L;
-    __ ld(thread_state, G3_scratch);
-    __ cmp(G3_scratch, _thread_in_Java);
-    __ br(Assembler::equal, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("Wrong thread state in native stub");
-    __ bind(L);
-  }
-#endif // ASSERT
-  __ set(_thread_in_native, G3_scratch);
-  __ st(G3_scratch, thread_state);
-
-  // Call the jni method, using the delay slot to set the JNIEnv* argument.
-  __ callr(O0, 0);
-  __ delayed()->
-     add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
-  __ ld_ptr(STATE(_thread), G2_thread);  // restore thread
-
-  // must we block?
-
-  // Block, if necessary, before resuming in _thread_in_Java state.
-  // In order for GC to work, don't clear the last_Java_sp until after blocking.
-  { Label no_block;
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-
-    // Switch thread to "native transition" state before reading the synchronization state.
-    // This additional state is necessary because reading and testing the synchronization
-    // state is not atomic w.r.t. GC, as this scenario demonstrates:
-    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
-    //     VM thread changes sync state to synchronizing and suspends threads for GC.
-    //     Thread A is resumed to finish this native method, but doesn't block here since it
-    //     didn't see any synchronization in progress, and escapes (see the sketch after this block).
-    __ set(_thread_in_native_trans, G3_scratch);
-    __ st(G3_scratch, thread_state);
-    if(os::is_MP()) {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
-    }
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-
-
-    Label L;
-    Address suspend_state(G2_thread, in_bytes(JavaThread::suspend_flags_offset()));
-    __ br(Assembler::notEqual, false, Assembler::pn, L);
-    __ delayed()->
-      ld(suspend_state, G3_scratch);
-    __ cmp(G3_scratch, 0);
-    __ br(Assembler::equal, false, Assembler::pt, no_block);
-    __ delayed()->nop();
-    __ bind(L);
-
-    // Block.  Save any potential method result value before the operation and
-    // use a leaf call to leave the last_Java_frame setup undisturbed.
-    save_native_result();
-    __ call_VM_leaf(noreg,
-                    CAST_FROM_FN_PTR(address, JavaThread::check_safepoint_and_suspend_for_native_trans),
-                    G2_thread);
-    __ ld_ptr(STATE(_thread), G2_thread);  // restore thread
-    // Restore any method result value
-    restore_native_result();
-    __ bind(no_block);
-  }
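
A minimal standalone sketch of the transition ordering generated above, using hypothetical plain-C++ names (ThreadState, safepoint_state, block_for_safepoint) in place of the VM's thread-state machinery; it only illustrates the race the _thread_in_native_trans comment describes.

#include <atomic>

enum ThreadState { thread_in_Java, thread_in_native, thread_in_native_trans };
enum SyncState   { not_synchronized, synchronizing };

std::atomic<SyncState> safepoint_state{not_synchronized};   // VM-wide safepoint flag (hypothetical)

void return_from_native_sketch(std::atomic<ThreadState>& thread_state,
                               std::atomic<int>& suspend_flags,
                               void (*block_for_safepoint)()) {
  // 1. Publish the transitional state *before* reading the safepoint state, so the
  //    VM thread cannot keep treating this thread as safely "in native" while the
  //    thread is deciding whether to block (the real code also writes the
  //    serialization page here on MP systems).
  thread_state.store(thread_in_native_trans);

  // 2. Only now is it safe to test: block if a safepoint is in progress or the
  //    thread has been asked to suspend.
  if (safepoint_state.load() != not_synchronized || suspend_flags.load() != 0) {
    block_for_safepoint();          // leaf call; last_Java_frame stays set
  }

  // 3. The thread may re-enter Java only after the check above has completed.
  thread_state.store(thread_in_Java);
}
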
-
-  // Clear the frame anchor now
-
-  __ reset_last_Java_frame();
-
-  // Move the result handler address
-  __ mov(Lscratch, G3_scratch);
-  // return possible result to the outer frame
-#ifndef __LP64
-  __ mov(O0, I0);
-  __ restore(O1, G0, O1);
-#else
-  __ restore(O0, G0, O0);
-#endif /* __LP64 */
-
-  // Move result handler to expected register
-  __ mov(G3_scratch, Lscratch);
-
-
-  // thread state is thread_in_native_trans. Any safepoint blocking has
-  // happened in the trampoline; we are ready to switch to thread_in_Java.
-
-  __ set(_thread_in_Java, G3_scratch);
-  __ st(G3_scratch, thread_state);
-
-  // If we have an oop result, store it where it will be safe for any further gc
-  // until we return, now that we've released the handle it might be protected by.
-
-  {
-    Label no_oop, store_result;
-
-    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
-    __ cmp(G3_scratch, Lscratch);
-    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
-    __ delayed()->nop();
-    __ addcc(G0, O0, O0);
-    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
-    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
-    __ mov(G0, O0);
-
-    __ bind(store_result);
-    // Store it where gc will look for it and result handler expects it.
-    __ st_ptr(O0, STATE(_oop_temp));
-
-    __ bind(no_oop);
-
-  }
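
A minimal sketch of the unboxing performed above, assuming (as the generated code does) that a JNI handle is simply a pointer to a GC-visible slot holding the oop; the typedefs are hypothetical stand-ins.

typedef void*  oop_sketch;              // hypothetical stand-in for an oop
typedef oop_sketch* jni_handle_sketch;  // a JNI handle: pointer to a GC-visible oop slot

// NULL handles stay NULL; non-NULL handles are dereferenced to the real oop, and the
// result is parked in a GC-visible slot (STATE(_oop_temp) above) until the result
// handler picks it up.
oop_sketch unbox_native_oop_result(jni_handle_sketch handle, oop_sketch* oop_temp) {
  oop_sketch result = (handle == nullptr) ? nullptr : *handle;
  *oop_temp = result;
  return result;
}
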
-
-  // reset handle block
-  __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch);
-  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
-
-
-  // handle exceptions (exception handling will handle unlocking!)
-  { Label L;
-    Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));
-
-    __ ld_ptr(exception_addr, Gtemp);
-    __ tst(Gtemp);
-    __ brx(Assembler::equal, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ bind(pending_exception_present);
-    // With the c++ interpreter we just leave it pending; the caller will do the correct thing. However...
-    // Like x86 we ignore the result of the native call and leave the method locked. It
-    // seems wrong to leave things locked.
-
-    __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
-    __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
-
-    __ bind(L);
-  }
-
-  // jvmdi/jvmpi support (preserves thread register)
-  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
-
-  if (synchronized) {
-    // save and restore any potential method result value around the unlocking operation
-    save_native_result();
-
-    const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
-    // Get the initial monitor we allocated
-    __ sub(Lstate, entry_size, O1);                        // initial monitor
-    __ unlock_object(O1);
-    restore_native_result();
-  }
-
-#if defined(COMPILER2) && !defined(_LP64)
-
-  // C2 expects long results in G1; we can't tell if we're returning to interpreted
-  // or compiled code, so just be safe.
-
-  __ sllx(O0, 32, G1);          // Shift bits into high G1
-  __ srl (O1, 0, O1);           // Zero extend O1
-  __ or3 (O1, G1, G1);          // OR 64 bits into G1
-
-#endif /* COMPILER2 && !_LP64 */
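The same 64-bit merge expressed as plain integer arithmetic, a sketch only; o0_high holds the high word and o1_low the low word, matching the register usage above.

#include <cstdint>

// Mirrors the sllx / srl / or3 sequence: G1 = (O0 << 32) | zero_extend(O1).
uint64_t merge_long_for_c2(uint32_t o0_high, uint32_t o1_low) {
  return (static_cast<uint64_t>(o0_high) << 32) | o1_low;
}
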
-
-#ifdef ASSERT
-  {
-    Label ok;
-    __ cmp(I5_savedSP, FP);
-    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
-    __ delayed()->nop();
-    __ stop("bad I5_savedSP value");
-    __ should_not_reach_here();
-    __ bind(ok);
-  }
-#endif
-  // Calls result handler which POPS FRAME
-  if (TraceJumps) {
-    // Move target to register that is recordable
-    __ mov(Lscratch, G3_scratch);
-    __ JMP(G3_scratch, 0);
-  } else {
-    __ jmp(Lscratch, 0);
-  }
-  __ delayed()->nop();
-
-  if (inc_counter) {
-    // handle invocation counter overflow
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(Lcontinue);
-  }
-
-
-  return entry;
-}
-
-void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
-                                                              const Register prev_state,
-                                                              bool native) {
-
-  // On entry
-  // G5_method - caller's method
-  // Gargs - points to initial parameters (i.e. locals[0])
-  // G2_thread - valid? (C1 only??)
-  // "prev_state" - contains any previous frame manager state which we must save a link
-  //
-  // On return
-  // "state" is a pointer to the newly allocated  state object. We must allocate and initialize
-  // a new interpretState object and the method expression stack.
-
-  assert_different_registers(state, prev_state);
-  assert_different_registers(prev_state, G3_scratch);
-  const Register Gtmp = G3_scratch;
-  const Address constMethod       (G5_method, in_bytes(Method::const_offset()));
-  const Address access_flags      (G5_method, in_bytes(Method::access_flags_offset()));
-
-  // slop factor is two extra slots on the expression stack so that
-  // we always have room to store a result when returning from a call without parameters
-  // that returns a result.
-
-  const int slop_factor = 2*wordSize;
-
-  const int fixed_size = ((sizeof(BytecodeInterpreter) + slop_factor) >> LogBytesPerWord) + // what is the slop factor?
-                         Method::extra_stack_entries() + // extra stack for jsr 292
-                         frame::memory_parameter_word_sp_offset +  // register save area + param window
-                         (native ?  frame::interpreter_frame_extra_outgoing_argument_words : 0); // JNI, class
-
-  // XXX G5_method valid
-
-  // Now compute new frame size
-
-  if (native) {
-    const Register RconstMethod = Gtmp;
-    const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
-    __ ld_ptr(constMethod, RconstMethod);
-    __ lduh( size_of_parameters, Gtmp );
-    __ calc_mem_param_words(Gtmp, Gtmp);     // space for native call parameters passed on the stack in words
-  } else {
-    // Full size expression stack
-    __ ld_ptr(constMethod, Gtmp);
-    __ lduh(Gtmp, in_bytes(ConstMethod::max_stack_offset()), Gtmp);
-  }
-  __ add(Gtmp, fixed_size, Gtmp);           // plus the fixed portion
-
-  __ neg(Gtmp);                               // negative space for stack/parameters in words
-  __ and3(Gtmp, -WordsPerLong, Gtmp);        // make multiple of 2 (SP must be 2-word aligned)
-  __ sll(Gtmp, LogBytesPerWord, Gtmp);       // negative space for frame in bytes
-
-  // Need to do stack size check here before we fault on large frames
-
-  Label stack_ok;
-
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
-                                                                              (StackRedPages+StackYellowPages);
-
-
-  __ ld_ptr(G2_thread, in_bytes(Thread::stack_base_offset()), O0);
-  __ ld_ptr(G2_thread, in_bytes(Thread::stack_size_offset()), O1);
-  // compute stack bottom
-  __ sub(O0, O1, O0);
-
-  // Avoid touching the guard pages
-  // Also a fudge for the frame size of BytecodeInterpreter::run.
-  // It varies from 1k->4k depending on build type.
-  const int fudge = 6 * K;
-
-  __ set(fudge + (max_pages * os::vm_page_size()), O1);
-
-  __ add(O0, O1, O0);
-  __ sub(O0, Gtmp, O0);
-  __ cmp(SP, O0);
-  __ brx(Assembler::greaterUnsigned, false, Assembler::pt, stack_ok);
-  __ delayed()->nop();
-
-  // throw exception; the return address becomes the throwing pc
-
-  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
-  __ stop("never reached");
-
-  __ bind(stack_ok);
-
-  __ save(SP, Gtmp, SP);                      // setup new frame and register window
-
-  // New window I7 call_stub or previous activation
-  // O6 - register save area, BytecodeInterpreter just below it, args/locals just above that
-  //
-  __ sub(FP, sizeof(BytecodeInterpreter), state);        // Point to new Interpreter state
-  __ add(state, STACK_BIAS, state );         // Account for 64bit bias
-
-#define XXX_STATE(field_name) state, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
-
-  // Initialize a new Interpreter state
-  // orig_sp - caller's original sp
-  // G2_thread - thread
-  // Gargs - &locals[0] (unbiased?)
-  // G5_method - method
-  // SP (biased) - accounts for full size java stack, BytecodeInterpreter object, register save area, and register parameter save window
-
-
-  __ set(0xdead0004, O1);
-
-
-  __ st_ptr(Gargs, XXX_STATE(_locals));
-  __ st_ptr(G0, XXX_STATE(_oop_temp));
-
-  __ st_ptr(state, XXX_STATE(_self_link));                // point to self
-  __ st_ptr(prev_state->after_save(), XXX_STATE(_prev_link)); // Chain interpreter states
-  __ st_ptr(G2_thread, XXX_STATE(_thread));               // Store javathread
-
-  if (native) {
-    __ st_ptr(G0, XXX_STATE(_bcp));
-  } else {
-    __ ld_ptr(G5_method, in_bytes(Method::const_offset()), O2); // get ConstMethod*
-    __ add(O2, in_bytes(ConstMethod::codes_offset()), O2);        // get bcp
-    __ st_ptr(O2, XXX_STATE(_bcp));
-  }
-
-  __ st_ptr(G0, XXX_STATE(_mdx));
-  __ st_ptr(G5_method, XXX_STATE(_method));
-
-  __ set((int) BytecodeInterpreter::method_entry, O1);
-  __ st(O1, XXX_STATE(_msg));
-
-  __ ld_ptr(constMethod, O3);
-  __ ld_ptr(O3, in_bytes(ConstMethod::constants_offset()), O3);
-  __ ld_ptr(O3, ConstantPool::cache_offset_in_bytes(), O2);
-  __ st_ptr(O2, XXX_STATE(_constants));
-
-  __ st_ptr(G0, XXX_STATE(_result._to_call._callee));
-
-  // Monitor base is just start of BytecodeInterpreter object;
-  __ mov(state, O2);
-  __ st_ptr(O2, XXX_STATE(_monitor_base));
-
-  // Do we need a monitor for a synchronized method?
-  {
-    __ ld(access_flags, O1);
-    Label done;
-    Label got_obj;
-    __ btst(JVM_ACC_SYNCHRONIZED, O1);
-    __ br( Assembler::zero, false, Assembler::pt, done);
-
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ delayed()->btst(JVM_ACC_STATIC, O1);
-    __ ld_ptr(XXX_STATE(_locals), O1);
-    __ br( Assembler::zero, true, Assembler::pt, got_obj);
-    __ delayed()->ld_ptr(O1, 0, O1);                  // get receiver for not-static case
-    __ ld_ptr(constMethod, O1);
-    __ ld_ptr( O1, in_bytes(ConstMethod::constants_offset()), O1);
-    __ ld_ptr( O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
-    // lock the mirror, not the Klass*
-    __ ld_ptr( O1, mirror_offset, O1);
-
-    __ bind(got_obj);
-
-  #ifdef ASSERT
-    __ tst(O1);
-    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
-  #endif // ASSERT
-
-    const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
-    __ sub(SP, entry_size, SP);                         // account for initial monitor
-    __ sub(O2, entry_size, O2);                        // initial monitor
-    __ st_ptr(O1, O2, BasicObjectLock::obj_offset_in_bytes()); // and allocate it for interpreter use
-    __ bind(done);
-  }
-
-  // Remember initial frame bottom
-
-  __ st_ptr(SP, XXX_STATE(_frame_bottom));
-
-  __ st_ptr(O2, XXX_STATE(_stack_base));
-
-  __ sub(O2, wordSize, O2);                    // prepush
-  __ st_ptr(O2, XXX_STATE(_stack));                // PREPUSH
-
-  // Full size expression stack
-  __ ld_ptr(constMethod, O3);
-  __ lduh(O3, in_bytes(ConstMethod::max_stack_offset()), O3);
-  __ inc(O3, Method::extra_stack_entries());
-  __ sll(O3, LogBytesPerWord, O3);
-  __ sub(O2, O3, O3);
-//  __ sub(O3, wordSize, O3);                    // so prepush doesn't look out of bounds
-  __ st_ptr(O3, XXX_STATE(_stack_limit));
-
-  if (!native) {
-    //
-    // Code to initialize locals
-    //
-    Register init_value = noreg;    // will be G0 if we must clear locals
-    // Now zero locals
-    if (true /* zerolocals */ || ClearInterpreterLocals) {
-      // explicitly initialize locals
-      init_value = G0;
-    } else {
-    #ifdef ASSERT
-      // initialize locals to a garbage pattern for better debugging
-      init_value = O3;
-      __ set( 0x0F0F0F0F, init_value );
-    #endif // ASSERT
-    }
-    if (init_value != noreg) {
-      Label clear_loop;
-      const Register RconstMethod = O1;
-      const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
-      const Address size_of_locals    (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));
-
-      // NOTE: If you change the frame layout, this code will need to
-      // be updated!
-      __ ld_ptr( constMethod, RconstMethod );
-      __ lduh( size_of_locals, O2 );
-      __ lduh( size_of_parameters, O1 );
-      __ sll( O2, LogBytesPerWord, O2);
-      __ sll( O1, LogBytesPerWord, O1 );
-      __ ld_ptr(XXX_STATE(_locals), L2_scratch);
-      __ sub( L2_scratch, O2, O2 );
-      __ sub( L2_scratch, O1, O1 );
-
-      __ bind( clear_loop );
-      __ inc( O2, wordSize );
-
-      __ cmp( O2, O1 );
-      __ br( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
-      __ delayed()->st_ptr( init_value, O2, 0 );
-    }
-  }
-}
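
A standalone sketch of the frame-size arithmetic above using plain integers; the constants are hypothetical placeholders rather than the real platform values, and the point is the negation and rounding to an even word count expected by the SPARC save instruction.

// Hypothetical placeholder constants; the real values come from the VM and the ABI.
const int wordSizeSketch     = 8;
const int WordsPerLongSketch = 2;
const int stateObjectWords   = 64;   // stands in for (sizeof(BytecodeInterpreter) + slop)/wordSize
const int fixedExtraWords    = 23;   // register save area, param window, jsr292 slack, ...

// Returns the (negative) byte adjustment handed to save(SP, Gtmp, SP) above.
int frame_adjustment_bytes(int stack_or_param_words) {
  int words = stack_or_param_words + stateObjectWords + fixedExtraWords;
  words = -words;                        // negative space for stack/parameters in words
  words &= -WordsPerLongSketch;          // round to a multiple of 2 (SP must be 2-word aligned)
  return words * wordSizeSketch;         // negative frame size in bytes
}
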
-// Find preallocated  monitor and lock method (C++ interpreter)
-//
-void CppInterpreterGenerator::lock_method() {
-// Lock the current method.
-// Destroys registers L2_scratch, L3_scratch, O0
-//
-// Find everything relative to Lstate
-
-#ifdef ASSERT
-  __ ld_ptr(STATE(_method), L2_scratch);
-  __ ld(L2_scratch, in_bytes(Method::access_flags_offset()), O0);
-
- { Label ok;
-   __ btst(JVM_ACC_SYNCHRONIZED, O0);
-   __ br( Assembler::notZero, false, Assembler::pt, ok);
-   __ delayed()->nop();
-   __ stop("method doesn't need synchronization");
-   __ bind(ok);
-  }
-#endif // ASSERT
-
-  // monitor is already allocated at stack base
-  // and the lockee is already present
-  __ ld_ptr(STATE(_stack_base), L2_scratch);
-  __ ld_ptr(L2_scratch, BasicObjectLock::obj_offset_in_bytes(), O0);   // get object
-  __ lock_object(L2_scratch, O0);
-
-}
-
-// Generate code for resuming a deopted method
-void CppInterpreterGenerator::generate_deopt_handling() {
-
-  Label return_from_deopt_common;
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-  deopt_frame_manager_return_atos  = __ pc();
-
-  // O0/O1 live
-  __ ba(return_from_deopt_common);
-  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_OBJECT), L3_scratch);    // Result stub address array index
-
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-  deopt_frame_manager_return_btos  = __ pc();
-
-  // O0/O1 live
-  __ ba(return_from_deopt_common);
-  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_BOOLEAN), L3_scratch);    // Result stub address array index
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-  deopt_frame_manager_return_itos  = __ pc();
-
-  // O0/O1 live
-  __ ba(return_from_deopt_common);
-  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_INT), L3_scratch);    // Result stub address array index
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-
-  deopt_frame_manager_return_ltos  = __ pc();
-#if !defined(_LP64) && defined(COMPILER2)
-  // All return values are where we want them, except for Longs.  C2 returns
-  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
-  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
-  // build even if we are returning from interpreted code, we just do a little
-  // stupid shuffling.
-  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
-  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
-  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
-
-  __ srl (G1, 0,O1);
-  __ srlx(G1,32,O0);
-#endif /* !_LP64 && COMPILER2 */
-  // O0/O1 live
-  __ ba(return_from_deopt_common);
-  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_LONG), L3_scratch);    // Result stub address array index
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-
-  deopt_frame_manager_return_ftos  = __ pc();
-  // O0/O1 live
-  __ ba(return_from_deopt_common);
-  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_FLOAT), L3_scratch);    // Result stub address array index
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-  deopt_frame_manager_return_dtos  = __ pc();
-
-  // O0/O1 live
-  __ ba(return_from_deopt_common);
-  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_DOUBLE), L3_scratch);    // Result stub address array index
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-  deopt_frame_manager_return_vtos  = __ pc();
-
-  // O0/O1 live
-  __ set(AbstractInterpreter::BasicType_as_index(T_VOID), L3_scratch);
-
-  // Deopt return common
-  // an index is present that lets us move any possible result being
-  // returned to the interpreter's stack
-  //
-  __ bind(return_from_deopt_common);
-
-  // The result, if any, is in the native abi result registers (O0..O1/F0..F1). The java
-  // expression stack is in the state that the calling convention left it.
-  // Copy the result from the native abi and place it on the java expression stack.
-
-  // Current interpreter state is present in Lstate
-
-  // Get current pre-pushed top of interpreter stack
-  // Any result (if any) is in native abi
-  // result type index is in L3_scratch
-
-  __ ld_ptr(STATE(_stack), L1_scratch);                                          // get top of java expr stack
-
-  __ set((intptr_t)CppInterpreter::_tosca_to_stack, L4_scratch);
-  __ sll(L3_scratch, LogBytesPerWord, L3_scratch);
-  __ ld_ptr(L4_scratch, L3_scratch, Lscratch);                                       // get typed result converter address
-  __ jmpl(Lscratch, G0, O7);                                         // and convert it
-  __ delayed()->nop();
-
-  // L1_scratch points to top of stack (prepushed)
-  __ st_ptr(L1_scratch, STATE(_stack));
-}
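
A sketch of the converter-table dispatch used above: an array of per-type converters indexed by the BasicType index placed in L3_scratch, each copying a native-ABI result onto the prepushed expression stack. All names are hypothetical stand-ins and only one table slot is populated.

#include <cstdint>

typedef intptr_t* (*tosca_to_stack_fn)(intptr_t native_result, intptr_t* stack_top);

static intptr_t* push_word_result(intptr_t native_result, intptr_t* stack_top) {
  *stack_top = native_result;       // the prepushed slot is free
  return stack_top - 1;             // expression stack grows toward lower addresses
}

// Indexed by the result-type index; only one slot filled for the sketch.
static tosca_to_stack_fn tosca_to_stack_sketch[16] = { push_word_result };

intptr_t* convert_deopt_result(int type_index, intptr_t result, intptr_t* top) {
  return tosca_to_stack_sketch[type_index](result, top);   // the jmpl through Lscratch above
}
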
-
-// Generate the code to handle a more_monitors message from the c++ interpreter
-void CppInterpreterGenerator::generate_more_monitors() {
-
-  Label entry, loop;
-  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
-  // 1. compute new pointers                                // esp: old expression stack top
-  __ delayed()->ld_ptr(STATE(_stack_base), L4_scratch);            // current expression stack bottom
-  __ sub(L4_scratch, entry_size, L4_scratch);
-  __ st_ptr(L4_scratch, STATE(_stack_base));
-
-  __ sub(SP, entry_size, SP);                  // Grow stack
-  __ st_ptr(SP, STATE(_frame_bottom));
-
-  __ ld_ptr(STATE(_stack_limit), L2_scratch);
-  __ sub(L2_scratch, entry_size, L2_scratch);
-  __ st_ptr(L2_scratch, STATE(_stack_limit));
-
-  __ ld_ptr(STATE(_stack), L1_scratch);                // Get current stack top
-  __ sub(L1_scratch, entry_size, L1_scratch);
-  __ st_ptr(L1_scratch, STATE(_stack));
-  __ ba(entry);
-  __ delayed()->add(L1_scratch, wordSize, L1_scratch);        // first real entry (undo prepush)
-
-  // 2. move expression stack
-
-  __ bind(loop);
-  __ st_ptr(L3_scratch, Address(L1_scratch, 0));
-  __ add(L1_scratch, wordSize, L1_scratch);
-  __ bind(entry);
-  __ cmp(L1_scratch, L4_scratch);
-  __ br(Assembler::notEqual, false, Assembler::pt, loop);
-  __ delayed()->ld_ptr(L1_scratch, entry_size, L3_scratch);
-
-  // now zero the slot so we can find it.
-  __ st_ptr(G0, L4_scratch, BasicObjectLock::obj_offset_in_bytes());
-
-}
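
A plain-memory sketch of the shuffle above: slide the live expression-stack words down by one monitor entry and clear the object slot of the freshly opened monitor; entry_words is a hypothetical stand-in for frame::interpreter_frame_monitor_size().

#include <cstdint>

// stack_top..stack_base-1 hold the live expression-stack words; returns the new top.
intptr_t* open_monitor_slot(intptr_t* stack_top, intptr_t* stack_base, int entry_words) {
  for (intptr_t* p = stack_top; p < stack_base; p++) {
    p[-entry_words] = *p;                 // copy each word to its new, lower slot
  }
  stack_base[-entry_words] = 0;           // obj slot of the new monitor, zeroed so it can be found
  return stack_top - entry_words;
}
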
-
-// Initial entry to C++ interpreter from the call_stub.
-// This entry point is called the frame manager since it handles the generation
-// of interpreter activation frames via requests directly from the vm (via call_stub)
-// and via requests from the interpreter. The requests from the call_stub happen
-// directly thru the entry point. Requests from the interpreter happen via returning
-// from the interpreter and examining the message the interpreter has returned to
-// the frame manager. The frame manager can take the following requests:
-
-// NO_REQUEST - error, should never happen.
-// MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and
-//                 allocate a new monitor.
-// CALL_METHOD - setup a new activation to call a new method. Very similar to what
-//               happens during entry via the call stub.
-// RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub.
-//
-// Arguments:
-//
-// ebx: Method*
-// ecx: receiver - unused (retrieved from stack as needed)
-// esi: previous frame manager state (NULL from the call_stub/c1/c2)
-//
-//
-// Stack layout at entry
-//
-// [ return address     ] <--- esp
-// [ parameter n        ]
-//   ...
-// [ parameter 1        ]
-// [ expression stack   ]
-//
-//
-// We are free to blow any registers we like because the call_stub which brought us here
-// initially has preserved the callee save registers already.
-//
-//
-
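A condensed sketch of the request loop this entry point implements: call BytecodeInterpreter::run and dispatch on the message left in the state object. The enum values and helpers below are hypothetical stand-ins for the generated branches that follow.

enum FrameManagerMsg {
  no_request, more_monitors, call_method, return_from_method, throwing_exception, do_osr
};

struct InterpreterStateSketch { FrameManagerMsg msg; };

void frame_manager_sketch(InterpreterStateSketch* state,
                          FrameManagerMsg (*run_interpreter)(InterpreterStateSketch*),
                          void (*handle)(FrameManagerMsg, InterpreterStateSketch*)) {
  for (;;) {
    state->msg = run_interpreter(state);    // BytecodeInterpreter::run / runWithChecks
    switch (state->msg) {
      case more_monitors:      handle(more_monitors, state);      break;  // grow monitor area, resume
      case call_method:        handle(call_method, state);        break;  // build a new activation, recurse
      case return_from_method: handle(return_from_method, state); return; // unwind this activation
      case throwing_exception: handle(throwing_exception, state); return;
      case do_osr:             handle(do_osr, state);             return;
      default:                                                    return; // bad_msg
    }
  }
}
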
-static address interpreter_frame_manager = NULL;
-
-#ifdef ASSERT
-  #define VALIDATE_STATE(scratch, marker)                         \
-  {                                                               \
-    Label skip;                                                   \
-    __ ld_ptr(STATE(_self_link), scratch);                        \
-    __ cmp(Lstate, scratch);                                      \
-    __ brx(Assembler::equal, false, Assembler::pt, skip);         \
-    __ delayed()->nop();                                          \
-    __ breakpoint_trap();                                         \
-    __ emit_int32(marker);                                         \
-    __ bind(skip);                                                \
-  }
-#else
-  #define VALIDATE_STATE(scratch, marker)
-#endif /* ASSERT */
-
-void CppInterpreterGenerator::adjust_callers_stack(Register args) {
-//
-// Adjust caller's stack so that all the locals can be contiguous with
-// the parameters.
-// Worries about stack overflow make this a pain.
-//
-// Destroys args, G3_scratch, and O2
-// In/Out O5_savedSP (sender's original SP)
-//
-//  assert_different_registers(state, prev_state);
-  const Register Gtmp = G3_scratch;
-  const Register RconstMethod = G3_scratch;
-  const Register tmp = O2;
-  const Address constMethod(G5_method, in_bytes(Method::const_offset()));
-  const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
-  const Address size_of_locals    (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));
-
-  __ ld_ptr(constMethod, RconstMethod);
-  __ lduh(size_of_parameters, tmp);
-  __ sll(tmp, LogBytesPerWord, Gargs);       // parameter size in bytes
-  __ add(args, Gargs, Gargs);                // points to first local + BytesPerWord
-  // NEW
-  __ add(Gargs, -wordSize, Gargs);             // points to first local[0]
-  // determine extra space for non-argument locals & adjust caller's SP
-  // Gtmp1: parameter size in words
-  __ lduh(size_of_locals, Gtmp);
-  __ compute_extra_locals_size_in_bytes(tmp, Gtmp, Gtmp);
-
-#if 1
-  // c2i adapters place the final interpreter argument in the register save area for O0/I0;
-  // the call_stub will place the final interpreter argument at
-  // frame::memory_parameter_word_sp_offset. This is mostly not noticeable for either the asm
-  // or the c++ interpreter. However, with the c++ interpreter, when we do a recursive call
-  // and try to make it look good in the debugger we will store the argument to
-  // RecursiveInterpreterActivation in the register argument save area. Without allocating
-  // extra space for the compiler this will overwrite locals in the local array of the
-  // interpreter.
-  // QQQ still needed with frameless adapters???
-
-  const int c2i_adjust_words = frame::memory_parameter_word_sp_offset - frame::callee_register_argument_save_area_sp_offset;
-
-  __ add(Gtmp, c2i_adjust_words*wordSize, Gtmp);
-#endif // 1
-
-
-  __ sub(SP, Gtmp, SP);                      // grow the caller's frame by the additional space we need.
-}
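
The adjustment above as plain arithmetic, a sketch in bytes; the real helper (compute_extra_locals_size_in_bytes) also applies alignment rounding, which is omitted here, and the c2i fudge is folded in as extra_save_area_bytes.

#include <cstdint>

// Locals beyond the parameters must be contiguous with (just below) the parameters
// on the caller's stack, so the caller's SP is dropped by that amount plus any
// extra save-area slack.
intptr_t adjust_callers_sp(intptr_t caller_sp,
                           int size_of_locals_words,
                           int size_of_parameters_words,
                           int extra_save_area_bytes,
                           int word_size) {
  int extra_local_bytes = (size_of_locals_words - size_of_parameters_words) * word_size;
  return caller_sp - (extra_local_bytes + extra_save_area_bytes);
}
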
-
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
-
-  // G5_method: Method*
-  // G2_thread: thread (unused)
-  // Gargs:   bottom of args (sender_sp)
-  // O5: sender's sp
-
-  // A single frame manager is plenty as we don't specialize for synchronized. We could and
-  // the code is pretty much ready. Would need to change the test below and for good measure
-  // modify generate_interpreter_state to only do the (pre) sync stuff for synchronized
-  // routines. Not clear this is worth it yet.
-
-  if (interpreter_frame_manager) {
-    return interpreter_frame_manager;
-  }
-
-  __ bind(frame_manager_entry);
-
-  // the following temporary registers are used during frame creation
-  const Register Gtmp1 = G3_scratch;
-  // const Register Lmirror = L1;     // native mirror (native calls only)
-
-  const Address constMethod       (G5_method, in_bytes(Method::const_offset()));
-  const Address access_flags      (G5_method, in_bytes(Method::access_flags_offset()));
-
-  address entry_point = __ pc();
-  __ mov(G0, prevState);                                                 // no current activation
-
-
-  Label re_dispatch;
-
-  __ bind(re_dispatch);
-
-  // The interpreter needs to have the locals completely contiguous. In order to do that
-  // we must adjust the caller's stack pointer for any locals beyond just the
-  // parameters.
-  adjust_callers_stack(Gargs);
-
-  // O5_savedSP still contains sender's sp
-
-  // NEW FRAME
-
-  generate_compute_interpreter_state(Lstate, prevState, false);
-
-  // At this point a new interpreter frame and state object are created and initialized
-  // Lstate has the pointer to the new activation
-  // Any stack banging or limit check should already be done.
-
-  Label call_interpreter;
-
-  __ bind(call_interpreter);
-
-
-#if 1
-  __ set(0xdead002, Lmirror);
-  __ set(0xdead002, L2_scratch);
-  __ set(0xdead003, L3_scratch);
-  __ set(0xdead004, L4_scratch);
-  __ set(0xdead005, Lscratch);
-  __ set(0xdead006, Lscratch2);
-  __ set(0xdead007, L7_scratch);
-
-  __ set(0xdeaf002, O2);
-  __ set(0xdeaf003, O3);
-  __ set(0xdeaf004, O4);
-  __ set(0xdeaf005, O5);
-#endif
-
-  // Call interpreter (stack bang complete); enter here if the message is
-  // set and we know the stack size is valid
-
-  Label call_interpreter_2;
-
-  __ bind(call_interpreter_2);
-
-#ifdef ASSERT
-  {
-    Label skip;
-    __ ld_ptr(STATE(_frame_bottom), G3_scratch);
-    __ cmp(G3_scratch, SP);
-    __ brx(Assembler::equal, false, Assembler::pt, skip);
-    __ delayed()->nop();
-    __ stop("SP not restored to frame bottom");
-    __ bind(skip);
-  }
-#endif
-
-  VALIDATE_STATE(G3_scratch, 4);
-  __ set_last_Java_frame(SP, noreg);
-  __ mov(Lstate, O0);                 // (arg) pointer to current state
-
-  __ call(CAST_FROM_FN_PTR(address,
-                           JvmtiExport::can_post_interpreter_events() ?
-                                                                  BytecodeInterpreter::runWithChecks
-                                                                : BytecodeInterpreter::run),
-         relocInfo::runtime_call_type);
-
-  __ delayed()->nop();
-
-  __ ld_ptr(STATE(_thread), G2_thread);
-  __ reset_last_Java_frame();
-
-  // examine msg from interpreter to determine next action
-  __ ld_ptr(STATE(_thread), G2_thread);                                  // restore G2_thread
-
-  __ ld(STATE(_msg), L1_scratch);                                       // Get new message
-
-  Label call_method;
-  Label return_from_interpreted_method;
-  Label throw_exception;
-  Label do_OSR;
-  Label bad_msg;
-  Label resume_interpreter;
-
-  __ cmp(L1_scratch, (int)BytecodeInterpreter::call_method);
-  __ br(Assembler::equal, false, Assembler::pt, call_method);
-  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::return_from_method);
-  __ br(Assembler::equal, false, Assembler::pt, return_from_interpreted_method);
-  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::throwing_exception);
-  __ br(Assembler::equal, false, Assembler::pt, throw_exception);
-  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::do_osr);
-  __ br(Assembler::equal, false, Assembler::pt, do_OSR);
-  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::more_monitors);
-  __ br(Assembler::notEqual, false, Assembler::pt, bad_msg);
-
-  // Allocate more monitor space, shuffle expression stack....
-
-  generate_more_monitors();
-
-  // new monitor slot allocated, resume the interpreter.
-
-  __ set((int)BytecodeInterpreter::got_monitors, L1_scratch);
-  VALIDATE_STATE(G3_scratch, 5);
-  __ ba(call_interpreter);
-  __ delayed()->st(L1_scratch, STATE(_msg));
-
-  // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode)
-  unctrap_frame_manager_entry  = __ pc();
-
-  // QQQ what message do we send
-
-  __ ba(call_interpreter);
-  __ delayed()->ld_ptr(STATE(_frame_bottom), SP);                  // restore to full stack frame
-
-  //=============================================================================
-  // Returning from a compiled method into a deopted method. The bytecode at the
-  // bcp has completed. The result of the bytecode is in the native abi (the tosca
-  // for the template based interpreter). Any stack space that was used by the
-  // bytecode that has completed has been removed (e.g. parameters for an invoke)
-  // so all that we have to do is place any pending result on the expression stack
-  // and resume execution on the next bytecode.
-
-  generate_deopt_handling();
-
-  // ready to resume the interpreter
-
-  __ set((int)BytecodeInterpreter::deopt_resume, L1_scratch);
-  __ ba(call_interpreter);
-  __ delayed()->st(L1_scratch, STATE(_msg));
-
-  // The current frame has caught an exception that we need to dispatch to the
-  // handler. We can get here because a native interpreter frame caught
-  // an exception, in which case there is no handler and we must rethrow.
-  // If it is a vanilla interpreted frame then we simply drop into the
-  // interpreter and let it do the lookup.
-
-  Interpreter::_rethrow_exception_entry = __ pc();
-
-  Label return_with_exception;
-  Label unwind_and_forward;
-
-  // O0: exception
-  // O7: throwing pc
-
-  // We want the exception in the thread no matter what we ultimately decide about frame type.
-
-  Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));
-  __ verify_thread();
-  __ st_ptr(O0, exception_addr);
-
-  // get the Method*
-  __ ld_ptr(STATE(_method), G5_method);
-
-  // is the current frame vanilla or native?
-
-  __ ld(access_flags, Gtmp1);
-  __ btst(JVM_ACC_NATIVE, Gtmp1);
-  __ br(Assembler::zero, false, Assembler::pt, return_with_exception);  // vanilla interpreted frame handle directly
-  __ delayed()->nop();
-
-  // We drop thru to unwind a native interpreted frame with a pending exception.
-  // We jump here for the initial interpreter frame with an exception pending.
-  // We unwind the current activation and forward it to our caller.
-
-  __ bind(unwind_and_forward);
-
-  // Unwind frame and jump to forward exception. unwinding will place throwing pc in O7
-  // as expected by forward_exception.
-
-  __ restore(FP, G0, SP);                  // unwind interpreter state frame
-  __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
-  __ delayed()->mov(I5_savedSP->after_restore(), SP);
-
-  // Return point from a call which returns a result in the native abi
-  // (c1/c2/jni-native). This result must be processed onto the java
-  // expression stack.
-  //
-  // A pending exception may be present, in which case there is no result present.
-
-  address return_from_native_method = __ pc();
-
-  VALIDATE_STATE(G3_scratch, 6);
-
-  // The result, if any, is in the native abi result registers (O0..O1/F0..F1). The java
-  // expression stack is in the state that the calling convention left it.
-  // Copy the result from the native abi and place it on the java expression stack.
-
-  // Current interpreter state is present in Lstate
-
-  // Exception pending?
-
-  __ ld_ptr(STATE(_frame_bottom), SP);                             // restore to full stack frame
-  __ ld_ptr(exception_addr, Lscratch);                                         // get any pending exception
-  __ tst(Lscratch);                                                            // exception pending?
-  __ brx(Assembler::notZero, false, Assembler::pt, return_with_exception);
-  __ delayed()->nop();
-
-  // Process the native abi result to java expression stack
-
-  __ ld_ptr(STATE(_result._to_call._callee), L4_scratch);                        // called method
-  __ ld_ptr(STATE(_stack), L1_scratch);                                          // get top of java expr stack
-  // get parameter size
-  __ ld_ptr(L4_scratch, in_bytes(Method::const_offset()), L2_scratch);
-  __ lduh(L2_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), L2_scratch);
-  __ sll(L2_scratch, LogBytesPerWord, L2_scratch     );                           // parameter size in bytes
-  __ add(L1_scratch, L2_scratch, L1_scratch);                                      // stack destination for result
-  __ ld(L4_scratch, in_bytes(Method::result_index_offset()), L3_scratch); // called method result type index
-
-  // tosca is really just native abi
-  __ set((intptr_t)CppInterpreter::_tosca_to_stack, L4_scratch);
-  __ sll(L3_scratch, LogBytesPerWord, L3_scratch);
-  __ ld_ptr(L4_scratch, L3_scratch, Lscratch);                                       // get typed result converter address
-  __ jmpl(Lscratch, G0, O7);                                                   // and convert it
-  __ delayed()->nop();
-
-  // L1_scratch points to top of stack (prepushed)
-
-  __ ba(resume_interpreter);
-  __ delayed()->mov(L1_scratch, O1);
-
-  // An exception is being caught on return to a vanilla interpreter frame.
-  // Empty the stack and resume interpreter
-
-  __ bind(return_with_exception);
-
-  __ ld_ptr(STATE(_frame_bottom), SP);                             // restore to full stack frame
-  __ ld_ptr(STATE(_stack_base), O1);                               // empty java expression stack
-  __ ba(resume_interpreter);
-  __ delayed()->sub(O1, wordSize, O1);                             // account for prepush
-
-  // Return from interpreted method we return result appropriate to the caller (i.e. "recursive"
-  // interpreter call, or native) and unwind this interpreter activation.
-  // All monitors should be unlocked.
-
-  __ bind(return_from_interpreted_method);
-
-  VALIDATE_STATE(G3_scratch, 7);
-
-  Label return_to_initial_caller;
-
-  // Interpreted result is on the top of the completed activation expression stack.
-  // We must return it to the top of the caller's stack if the caller was interpreted;
-  // otherwise we convert to a native abi result and return to call_stub/c1/c2.
-  // The caller's expression stack was truncated by the call; however, the current activation
-  // has enough stuff on the stack that we have usable space there no matter what. The
-  // other thing that makes it easy is that the top of the caller's stack is stored in STATE(_locals)
-  // for the current activation.
-
-  __ ld_ptr(STATE(_prev_link), L1_scratch);
-  __ ld_ptr(STATE(_method), L2_scratch);                               // get method just executed
-  __ ld(L2_scratch, in_bytes(Method::result_index_offset()), L2_scratch);
-  __ tst(L1_scratch);
-  __ brx(Assembler::zero, false, Assembler::pt, return_to_initial_caller);
-  __ delayed()->sll(L2_scratch, LogBytesPerWord, L2_scratch);
-
-  // Copy result to callers java stack
-
-  __ set((intptr_t)CppInterpreter::_stack_to_stack, L4_scratch);
-  __ ld_ptr(L4_scratch, L2_scratch, Lscratch);                          // get typed result converter address
-  __ ld_ptr(STATE(_stack), O0);                                       // current top (prepushed)
-  __ ld_ptr(STATE(_locals), O1);                                      // stack destination
-
-  // O0 - will be source, O1 - will be destination (preserved)
-  __ jmpl(Lscratch, G0, O7);                                          // and convert it
-  __ delayed()->add(O0, wordSize, O0);                                // get source (top of current expr stack)
-
-  // O1 == &locals[0]
-
-  // Result is now on caller's stack. Just unwind current activation and resume
-
-  Label unwind_recursive_activation;
-
-
-  __ bind(unwind_recursive_activation);
-
-  // O1 == &locals[0] (really callers stacktop) for activation now returning
-  // returning to interpreter method from "recursive" interpreter call
-  // result converter left O1 pointing to top of the( prepushed) java stack for method we are returning
-  // to. Now all we must do is unwind the state from the completed call
-
-  // Must restore stack
-  VALIDATE_STATE(G3_scratch, 8);
-
-  // Return to interpreter method after a method call (interpreted/native/c1/c2) has completed.
-  // Result if any is already on the caller's stack. All we must do now is remove the now dead
-  // frame and tell interpreter to resume.
-
-
-  __ mov(O1, I1);                                                     // pass back new stack top across activation
-  // POP FRAME HERE ==================================
-  __ restore(FP, G0, SP);                                             // unwind interpreter state frame
-  __ ld_ptr(STATE(_frame_bottom), SP);                                // restore to full stack frame
-
-
-  // Resume the interpreter. The current frame contains the current interpreter
-  // state object.
-  //
-  // O1 == new java stack pointer
-
-  __ bind(resume_interpreter);
-  VALIDATE_STATE(G3_scratch, 10);
-
-  // A frame we have already used before, so no need to bang the stack; use the call_interpreter_2 entry
-
-  __ set((int)BytecodeInterpreter::method_resume, L1_scratch);
-  __ st(L1_scratch, STATE(_msg));
-  __ ba(call_interpreter_2);
-  __ delayed()->st_ptr(O1, STATE(_stack));
-
-  // interpreter returning to native code (call_stub/c1/c2)
-  // convert result and unwind initial activation
-  // L2_scratch - scaled result type index
-
-  __ bind(return_to_initial_caller);
-
-  __ set((intptr_t)CppInterpreter::_stack_to_native_abi, L4_scratch);
-  __ ld_ptr(L4_scratch, L2_scratch, Lscratch);                           // get typed result converter address
-  __ ld_ptr(STATE(_stack), O0);                                        // current top (prepushed)
-  __ jmpl(Lscratch, G0, O7);                                           // and convert it
-  __ delayed()->add(O0, wordSize, O0);                                 // get source (top of current expr stack)
-
-  Label unwind_initial_activation;
-  __ bind(unwind_initial_activation);
-
-  // RETURN TO CALL_STUB/C1/C2 code (result, if any, in I0..I1/F0..F1).
-  // We can return here with an exception that wasn't handled by interpreted code;
-  // how does c1/c2 see it on return?
-
-  // compute resulting sp before/after args popped depending upon calling convention
-  // __ ld_ptr(STATE(_saved_sp), Gtmp1);
-  //
-  // POP FRAME HERE ==================================
-  __ restore(FP, G0, SP);
-  __ retl();
-  __ delayed()->mov(I5_savedSP->after_restore(), SP);
-
-  // OSR request, unwind the current frame and transfer to the OSR entry
-  // and enter OSR nmethod
-
-  __ bind(do_OSR);
-  Label remove_initial_frame;
-  __ ld_ptr(STATE(_prev_link), L1_scratch);
-  __ ld_ptr(STATE(_result._osr._osr_buf), G1_scratch);
-
-  // We are going to pop this frame. Is there another interpreter frame underneath
-  // it or is it callstub/compiled?
-
-  __ tst(L1_scratch);
-  __ brx(Assembler::zero, false, Assembler::pt, remove_initial_frame);
-  __ delayed()->ld_ptr(STATE(_result._osr._osr_entry), G3_scratch);
-
-  // The frame underneath is an interpreter frame; simply unwind
-  // POP FRAME HERE ==================================
-  __ restore(FP, G0, SP);                                             // unwind interpreter state frame
-  __ mov(I5_savedSP->after_restore(), SP);
-
-  // Since we are now calling native, we need to change our "return address" from the
-  // dummy RecursiveInterpreterActivation to a return from native.
-
-  __ set((intptr_t)return_from_native_method - 8, O7);
-
-  __ jmpl(G3_scratch, G0, G0);
-  __ delayed()->mov(G1_scratch, O0);
-
-  __ bind(remove_initial_frame);
-
-  // POP FRAME HERE ==================================
-  __ restore(FP, G0, SP);
-  __ mov(I5_savedSP->after_restore(), SP);
-  __ jmpl(G3_scratch, G0, G0);
-  __ delayed()->mov(G1_scratch, O0);
-
-  // Call a new method. All we do is (temporarily) trim the expression stack,
-  // push a return address to bring us back here, and leap to the new entry.
-  // At this point we have a topmost frame that was allocated by the frame manager
-  // which contains the current method interpreted state. We trim this frame
-  // of excess java expression stack entries and then recurse.
-
-  __ bind(call_method);
-
-  // stack points to the next free location and not the top element on the expression stack;
-  // the method expects sp to be pointing to the topmost element
-
-  __ ld_ptr(STATE(_thread), G2_thread);
-  __ ld_ptr(STATE(_result._to_call._callee), G5_method);
-
-
-  // SP already takes into account the 2 extra words we use for slop
-  // when we call a "static long no_params()" method. So if
-  // we trim back sp by the amount of unused java expression stack,
-  // the 2 extra words we need will automagically be there.
-  // We also have to worry about keeping SP aligned (a plain-arithmetic sketch of this trimming follows this routine).
-
-  __ ld_ptr(STATE(_stack), Gargs);
-  __ ld_ptr(STATE(_stack_limit), L1_scratch);
-
-  // compute the unused java stack size
-  __ sub(Gargs, L1_scratch, L2_scratch);                       // compute unused space
-
-  // Round down the unused space so that the stack is always 16-byte aligned
-  // by making the unused space a multiple of the size of two longs.
-
-  __ and3(L2_scratch, -2*BytesPerLong, L2_scratch);
-
-  // Now trim the stack
-  __ add(SP, L2_scratch, SP);
-
-
-  // Now point to the final argument (account for prepush)
-  __ add(Gargs, wordSize, Gargs);
-#ifdef ASSERT
-  // Make sure we have space for the window
-  __ sub(Gargs, SP, L1_scratch);
-  __ cmp(L1_scratch, 16*wordSize);
-  {
-    Label skip;
-    __ brx(Assembler::greaterEqual, false, Assembler::pt, skip);
-    __ delayed()->nop();
-    __ stop("killed stack");
-    __ bind(skip);
-  }
-#endif // ASSERT
-
-  // Create a new frame where we can store values that make it look like the interpreter
-  // really recursed.
-
-  // prepare to recurse or call specialized entry
-
-  // First link the registers we need
-
-  // make the pc look good in debugger
-  __ set(CAST_FROM_FN_PTR(intptr_t, RecursiveInterpreterActivation), O7);
-  // argument too
-  __ mov(Lstate, I0);
-
-  // Record our sending SP
-  __ mov(SP, O5_savedSP);
-
-  __ ld_ptr(STATE(_result._to_call._callee_entry_point), L2_scratch);
-  __ set((intptr_t) entry_point, L1_scratch);
-  __ cmp(L1_scratch, L2_scratch);
-  __ brx(Assembler::equal, false, Assembler::pt, re_dispatch);
-  __ delayed()->mov(Lstate, prevState);                                // link activations
-
-  // The method uses a specialized entry; push a return address so we look like call stub setup.
-  // This path will handle the fact that the result is returned in registers and not
-  // on the java stack.
-
-  __ set((intptr_t)return_from_native_method - 8, O7);
-  __ jmpl(L2_scratch, G0, G0);                               // Do specialized entry
-  __ delayed()->nop();
-
-  //
-  // Bad Message from interpreter
-  //
-  __ bind(bad_msg);
-  __ stop("Bad message from interpreter");
-
-  // Interpreted method "returned" with an exception pass it on...
-  // Pass result, unwind activation and continue/return to interpreter/call_stub
-  // We handle result (if any) differently based on return to interpreter or call_stub
-
-  __ bind(throw_exception);
-  __ ld_ptr(STATE(_prev_link), L1_scratch);
-  __ tst(L1_scratch);
-  __ brx(Assembler::zero, false, Assembler::pt, unwind_and_forward);
-  __ delayed()->nop();
-
-  __ ld_ptr(STATE(_locals), O1); // get result of popping callee's args
-  __ ba(unwind_recursive_activation);
-  __ delayed()->nop();
-
-  interpreter_frame_manager = entry_point;
-  return entry_point;
-}
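
A plain-arithmetic sketch of the expression-stack trimming done at call_method above: the unused part of the java expression stack is rounded down to a multiple of two longs so SP stays 16-byte aligned, then handed back before recursing. The constant is a hypothetical placeholder for BytesPerLong.

#include <cstdint>

const int bytesPerLongSketch = 8;

// stack (the prepushed top) and stack_limit bound the unused expression stack.
// Returns the trimmed SP; the frame shrinks by a 16-byte-aligned amount.
intptr_t trim_sp_for_call(intptr_t sp, intptr_t stack, intptr_t stack_limit) {
  intptr_t unused = stack - stack_limit;          // unused expression-stack bytes
  unused &= -(2 * bytesPerLongSketch);            // keep SP 16-byte aligned
  return sp + unused;                             // give the unused space back
}
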
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
- : CppInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-
-static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
-
-  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
-  // expression stack, the callee will have callee_extra_locals (so we can account for
-  // frame extension) and monitor_size for monitors. Basically we need to calculate
-  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
-  //
-  //
-  // The big complicating thing here is that we must ensure that the stack stays properly
-  // aligned. This would be even uglier if the monitor size wasn't a multiple of what the stack
-  // needs to be aligned to. We are given that the sp (fp) is already aligned by
-  // the caller so we must ensure that it is properly aligned for our callee.
-  //
-  // The c++ interpreter always makes sure that we have enough extra space on the
-  // stack at all times to deal with the "static long no_params()" method issue. This
-  // is "slop_factor" here.
-  const int slop_factor = 2;
-
-  const int fixed_size = sizeof(BytecodeInterpreter)/wordSize +           // interpreter state object
-                         frame::memory_parameter_word_sp_offset;   // register save area + param window
-  return (round_to(max_stack +
-                   slop_factor +
-                   fixed_size +
-                   monitor_size +
-                   (callee_extra_locals * Interpreter::stackElementWords), WordsPerLong));
-
-}
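
The same sizing formula as straight-line C++, a sketch with hypothetical placeholder constants for the pieces the VM supplies.

// Hypothetical placeholders for the values the VM provides.
const int wordsPerLongSketch      = 2;
const int stateWordsSketch        = 64;  // sizeof(BytecodeInterpreter)/wordSize
const int saveAreaWordsSketch     = 23;  // frame::memory_parameter_word_sp_offset
const int stackElementWordsSketch = 1;   // Interpreter::stackElementWords

static int round_to_sketch(int value, int modulus) {
  return (value + modulus - 1) & -modulus;   // round up to a multiple of modulus
}

int size_activation_words_sketch(int callee_extra_locals, int max_stack, int monitor_words) {
  const int slop = 2;  // room for a result from a no-arg call
  return round_to_sketch(max_stack + slop + stateWordsSketch + saveAreaWordsSketch +
                         monitor_words + callee_extra_locals * stackElementWordsSketch,
                         wordsPerLongSketch);
}
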
-
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-
-  // See call_stub code
-  int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
-                                 WordsPerLong);    // 7 + register save area
-
-  // Save space for one monitor to get into the interpreted method in case
-  // the method is synchronized
-  int monitor_size    = method->is_synchronized() ?
-                                1*frame::interpreter_frame_monitor_size() : 0;
-  return size_activation_helper(method->max_locals(), method->max_stack(),
-                                monitor_size) + call_stub_size;
-}
-
-void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
-                                           frame* caller,
-                                           frame* current,
-                                           Method* method,
-                                           intptr_t* locals,
-                                           intptr_t* stack,
-                                           intptr_t* stack_base,
-                                           intptr_t* monitor_base,
-                                           intptr_t* frame_bottom,
-                                           bool is_top_frame
-                                           )
-{
-  // What about any vtable?
-  //
-  to_fill->_thread = JavaThread::current();
-  // This gets filled in later but make it something recognizable for now
-  to_fill->_bcp = method->code_base();
-  to_fill->_locals = locals;
-  to_fill->_constants = method->constants()->cache();
-  to_fill->_method = method;
-  to_fill->_mdx = NULL;
-  to_fill->_stack = stack;
-  if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) {
-    to_fill->_msg = deopt_resume2;
-  } else {
-    to_fill->_msg = method_resume;
-  }
-  to_fill->_result._to_call._bcp_advance = 0;
-  to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
-  to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
-  to_fill->_prev_link = NULL;
-
-  // Fill in the registers for the frame
-
-  // Need to install _sender_sp. Actually not too hard in C++!
-  // When the skeletal frames are laid out we fill in a value
-  // for _sender_sp. That value is only correct for the oldest
-  // skeletal frame constructed (because there is only a single
-  // entry for "caller_adjustment"). While the skeletal frames
-  // exist that is good enough. We correct that calculation
-  // here and get all the frames correct.
-
-  // to_fill->_sender_sp = locals - (method->size_of_parameters() - 1);
-
-  *current->register_addr(Lstate) = (intptr_t) to_fill;
-  // skeletal already places a useful value here and this doesn't account
-  // for alignment so don't bother.
-  // *current->register_addr(I5_savedSP) =     (intptr_t) locals - (method->size_of_parameters() - 1);
-
-  if (caller->is_interpreted_frame()) {
-    interpreterState prev  = caller->get_interpreterState();
-    to_fill->_prev_link = prev;
-    // Make the prev callee look proper
-    prev->_result._to_call._callee = method;
-    if (*prev->_bcp == Bytecodes::_invokeinterface) {
-      prev->_result._to_call._bcp_advance = 5;
-    } else {
-      prev->_result._to_call._bcp_advance = 3;
-    }
-  }
-  to_fill->_oop_temp = NULL;
-  to_fill->_stack_base = stack_base;
-  // Need +1 here because stack_base points to the word just above the first expr stack entry
-  // and stack_limit is supposed to point to the word just below the last expr stack entry.
-  // See generate_compute_interpreter_state.
-  to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
-  to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
-
-  // sparc specific
-  to_fill->_frame_bottom = frame_bottom;
-  to_fill->_self_link = to_fill;
-#ifdef ASSERT
-  to_fill->_native_fresult = 123456.789;
-  to_fill->_native_lresult = CONST64(0xdeadcafedeafcafe);
-#endif
-}
-
-void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp) {
-  istate->_last_Java_pc = (intptr_t*) last_Java_pc;
-}
-
-static int frame_size_helper(int max_stack,
-                             int moncount,
-                             int callee_param_size,
-                             int callee_locals_size,
-                             bool is_top_frame,
-                             int& monitor_size,
-                             int& full_frame_words) {
-  int extra_locals_size = callee_locals_size - callee_param_size;
-  monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
-  full_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
-  int short_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
-  int frame_words = is_top_frame ? full_frame_words : short_frame_words;
-
-  return frame_words;
-}
-
-int AbstractInterpreter::size_activation(int max_stack,
-                                         int tempcount,
-                                         int extra_args,
-                                         int moncount,
-                                         int callee_param_size,
-                                         int callee_locals_size,
-                                         bool is_top_frame) {
-  assert(extra_args == 0, "NEED TO FIX");
-  // NOTE: return size is in words not bytes
-  // Calculate the amount our frame will be adjust by the callee. For top frame
-  // this is zero.
-
-  // NOTE: ia64 seems to do this wrong (or at least backwards) in that it
-  // calculates the extra locals based on itself. Not what the callee does
-  // to it. So it ignores last_frame_adjust value. Seems suspicious as far
-  // as getting sender_sp correct.
-
-  int unused_monitor_size = 0;
-  int unused_full_frame_words = 0;
-  return frame_size_helper(max_stack, moncount, callee_param_size, callee_locals_size, is_top_frame,
-                           unused_monitor_size, unused_full_frame_words);
-}
-void AbstractInterpreter::layout_activation(Method* method,
-                                            int tempcount, // Number of slots on java expression stack in use
-                                            int popframe_extra_args,
-                                            int moncount,  // Number of active monitors
-                                            int caller_actual_parameters,
-                                            int callee_param_size,
-                                            int callee_locals_size,
-                                            frame* caller,
-                                            frame* interpreter_frame,
-                                            bool is_top_frame,
-                                            bool is_bottom_frame) {
-  assert(popframe_extra_args == 0, "NEED TO FIX");
-  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
-  // does as far as allocating an interpreter frame.
-  // Set up the method, locals, and monitors.
-  // The frame interpreter_frame is guaranteed to be the right size,
-  // as determined by a previous call to the size_activation() method.
-  // It is also guaranteed to be walkable even though it is in a skeletal state
-  // NOTE: tempcount is the current size of the java expression stack. For top most
-  //       frames we will allocate a full sized expression stack and not the curback
-  //       version that non-top frames have.
-
-  int monitor_size = 0;
-  int full_frame_words = 0;
-  int frame_words = frame_size_helper(method->max_stack(), moncount, callee_param_size, callee_locals_size,
-                                      is_top_frame, monitor_size, full_frame_words);
-
-  /*
-    We must now fill in all the pieces of the frame. This means both
-    the interpreterState and the registers.
-  */
-
-  // MUCHO HACK
-
-  intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words);
-  // 'interpreter_frame->sp()' is unbiased while 'frame_bottom' must be a biased value in 64bit mode.
-  assert(((intptr_t)frame_bottom & 0xf) == 0, "SP biased in layout_activation");
-  frame_bottom = (intptr_t*)((intptr_t)frame_bottom - STACK_BIAS);
-
-  /* Now fillin the interpreterState object */
-
-  interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() -  sizeof(BytecodeInterpreter));
-
-
-  intptr_t* locals;
-
-  // Calculate the postion of locals[0]. This is painful because of
-  // stack alignment (same as ia64). The problem is that we can
-  // not compute the location of locals from fp(). fp() will account
-  // for the extra locals but it also accounts for aligning the stack
-  // and we can't determine if the locals[0] was misaligned but max_locals
-  // was enough to have the
-  // calculate postion of locals. fp already accounts for extra locals.
-  // +2 for the static long no_params() issue.
-
-  if (caller->is_interpreted_frame()) {
-    // locals must agree with the caller because it will be used to set the
-    // caller's tos when we return.
-    interpreterState prev  = caller->get_interpreterState();
-    // stack() is prepushed.
-    locals = prev->stack() + method->size_of_parameters();
-  } else {
-    // Lay out locals block in the caller adjacent to the register window save area.
-    //
-    // Compiled frames do not allocate a varargs area which is why this if
-    // statement is needed.
-    //
-    intptr_t* fp = interpreter_frame->fp();
-    int local_words = method->max_locals() * Interpreter::stackElementWords;
-
-    if (caller->is_compiled_frame()) {
-      locals = fp + frame::register_save_words + local_words - 1;
-    } else {
-      locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
-    }
-
-  }
-  // END MUCHO HACK
-
-  intptr_t* monitor_base = (intptr_t*) cur_state;
-  intptr_t* stack_base =  monitor_base - monitor_size;
-  /* +1 because stack is always prepushed */
-  intptr_t* stack = stack_base - (tempcount + 1);
-
-
-  BytecodeInterpreter::layout_interpreterState(cur_state,
-                                               caller,
-                                               interpreter_frame,
-                                               method,
-                                               locals,
-                                               stack,
-                                               stack_base,
-                                               monitor_base,
-                                               frame_bottom,
-                                               is_top_frame);
-
-  BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
-}
-
-#endif // CC_INTERP
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_SPARC_VM_CPPINTERPRETER_SPARC_HPP
-#define CPU_SPARC_VM_CPPINTERPRETER_SPARC_HPP
-
-  // Size of interpreter code.  Increase if too small.  Interpreter will
-  // fail with a guarantee ("not enough space for interpreter generation");
-  // if too small.
-  // Run with +PrintInterpreter to get the VM to print out the size.
-  // Max size with JVMTI
-
-  // QQQ this is proably way too large for c++ interpreter
-
-#ifdef _LP64
-  // The sethi() instruction generates lots more instructions when shell
-  // stack limit is unlimited, so that's why this is much bigger.
-  const static int InterpreterCodeSize = 210 * K;
-#else
-  const static int InterpreterCodeSize = 180 * K;
-#endif
-
-#endif // CPU_SPARC_VM_CPPINTERPRETER_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -441,12 +441,10 @@
   return fp();
 }
 
-#ifndef CC_INTERP
 void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
   assert(is_interpreted_frame(), "interpreted frame expected");
   Unimplemented();
 }
-#endif // CC_INTERP
 
 frame frame::sender_for_entry_frame(RegisterMap *map) const {
   assert(map != NULL, "map must be set");
@@ -600,9 +598,6 @@
 }
 
 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
-#ifdef CC_INTERP
-  // Is there anything to do?
-#else
   assert(is_interpreted_frame(), "Not an interpreted frame");
   // These are reasonable sanity checks
   if (fp() == 0 || (intptr_t(fp()) & (2*wordSize-1)) != 0) {
@@ -654,7 +649,6 @@
   if (locals > thread->stack_base() || locals < (address) fp()) return false;
 
   // We'd have to be pretty unlucky to be mislead at this point
-#endif /* CC_INTERP */
   return true;
 }
 
@@ -712,14 +706,8 @@
     // Prior to notifying the runtime of the method_exit the possible result
     // value is saved to l_scratch and d_scratch.
 
-#ifdef CC_INTERP
-    interpreterState istate = get_interpreterState();
-    intptr_t* l_scratch = (intptr_t*) &istate->_native_lresult;
-    intptr_t* d_scratch = (intptr_t*) &istate->_native_fresult;
-#else /* CC_INTERP */
     intptr_t* l_scratch = fp() + interpreter_frame_l_scratch_fp_offset;
     intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
-#endif /* CC_INTERP */
 
     address l_addr = (address)l_scratch;
 #ifdef _LP64
@@ -731,13 +719,9 @@
     switch (type) {
       case T_OBJECT:
       case T_ARRAY: {
-#ifdef CC_INTERP
-        *oop_result = istate->_oop_temp;
-#else
         oop obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
         assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
         *oop_result = obj;
-#endif // CC_INTERP
         break;
       }
 
@@ -797,7 +781,6 @@
   }
 
   if (is_interpreted_frame()) {
-#ifndef CC_INTERP
     DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
     DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
     DESCRIBE_FP_OFFSET(interpreter_frame_padding);
@@ -808,7 +791,6 @@
     if ((esp >= sp()) && (esp < fp())) {
       values.describe(-1, esp, "*Lesp");
     }
-#endif
   }
 
   if (!is_compiled_frame()) {
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,11 +90,6 @@
 // G5_method is set to method to call, G5_inline_cache_klass may be set,
 // parameters are put in O registers, and also extra parameters
 // must be cleverly copied from the top of stack to the outgoing param area in the frame,
-// ------------------------------ C++ interpreter ----------------------------------------
-// Layout of C++ interpreter frame:
-//
-
-
 
 // All frames:
 
@@ -211,7 +206,6 @@
 
  public:
   // Asm interpreter
-#ifndef CC_INTERP
   enum interpreter_frame_vm_locals {
        // 2 words, also used to save float regs across  calls to C
        interpreter_frame_d_scratch_fp_offset          = -2,
@@ -228,18 +222,6 @@
 
        interpreter_frame_extra_outgoing_argument_words = 2
   };
-#else
-  enum interpreter_frame_vm_locals {
-       // 2 words, also used to save float regs across  calls to C
-       interpreter_state_ptr_offset                   = 0,  // Is in L0 (Lstate) in save area
-       interpreter_frame_mirror_offset                = 1,  // Is in L1 (Lmirror) in save area (for native calls only)
-
-       // interpreter frame set-up needs to save 2 extra words in outgoing param area
-       // for class and jnienv arguments for native stubs (see nativeStubGen_sparc.cpp_
-
-       interpreter_frame_extra_outgoing_argument_words = 2
-  };
-#endif /* CC_INTERP */
 
   enum compiler_frame_fixed_locals {
        compiler_frame_vm_locals_fp_offset          = -2
@@ -248,8 +230,6 @@
  private:
   ConstantPoolCache** interpreter_frame_cpoolcache_addr() const;
 
-#ifndef CC_INTERP
-
   // where Lmonitors is saved:
   inline BasicObjectLock** interpreter_frame_monitors_addr() const;
   inline intptr_t** interpreter_frame_esp_addr() const;
@@ -262,14 +242,6 @@
  private:
   BasicObjectLock* interpreter_frame_monitors() const;
   void interpreter_frame_set_monitors(BasicObjectLock* monitors);
-#else
- public:
-  inline interpreterState get_interpreterState() const {
-    return ((interpreterState)sp_at(interpreter_state_ptr_offset));
-  }
-
-#endif /* CC_INTERP */
-
  public:
 
 #endif // CPU_SPARC_VM_FRAME_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -91,75 +91,6 @@
   return _sp_adjustment_by_callee * VMRegImpl::slots_per_word;
 }
 
-#ifdef CC_INTERP
-inline intptr_t** frame::interpreter_frame_locals_addr() const {
-  interpreterState istate = get_interpreterState();
-  return (intptr_t**) &istate->_locals;
-}
-
-inline intptr_t* frame::interpreter_frame_bcp_addr() const {
-  interpreterState istate = get_interpreterState();
-  return (intptr_t*) &istate->_bcp;
-}
-
-inline intptr_t* frame::interpreter_frame_mdp_addr() const {
-  interpreterState istate = get_interpreterState();
-  return (intptr_t*) &istate->_mdx;
-}
-
-inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }
-
-// bottom(base) of the expression stack (highest address)
-inline intptr_t* frame::interpreter_frame_expression_stack() const {
-  return (intptr_t*)interpreter_frame_monitor_end() - 1;
-}
-
-// top of expression stack (lowest address)
-inline intptr_t* frame::interpreter_frame_tos_address() const {
-  interpreterState istate = get_interpreterState();
-  return istate->_stack + 1; // Is this off by one? QQQ
-}
-
-// monitor elements
-
-// in keeping with Intel side: end is lower in memory than begin;
-// and beginning element is oldest element
-// Also begin is one past last monitor.
-
-inline BasicObjectLock* frame::interpreter_frame_monitor_begin()       const  {
-  return get_interpreterState()->monitor_base();
-}
-
-inline BasicObjectLock* frame::interpreter_frame_monitor_end()         const  {
-  return (BasicObjectLock*) get_interpreterState()->stack_base();
-}
-
-
-inline int frame::interpreter_frame_monitor_size() {
-  return round_to(BasicObjectLock::size(), WordsPerLong);
-}
-
-inline Method** frame::interpreter_frame_method_addr() const {
-  interpreterState istate = get_interpreterState();
-  return &istate->_method;
-}
-
-
-// Constant pool cache
-
-// where LcpoolCache is saved:
-inline ConstantPoolCache** frame::interpreter_frame_cpoolcache_addr() const {
-  interpreterState istate = get_interpreterState();
-  return &istate->_constants; // should really use accessor
-  }
-
-inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
-  interpreterState istate = get_interpreterState();
-  return &istate->_constants;
-}
-
-#else // !CC_INTERP
-
 inline intptr_t** frame::interpreter_frame_locals_addr() const {
   return (intptr_t**) sp_addr_at( Llocals->sp_offset_in_saved_window());
 }
@@ -246,7 +177,6 @@
 inline oop* frame::interpreter_frame_temp_oop_addr() const {
   return (oop *)(fp() + interpreter_frame_oop_temp_offset);
 }
-#endif // CC_INTERP
 
 
 inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -39,7 +39,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.inline.hpp"
 
-#ifndef CC_INTERP
 #ifndef FAST_DISPATCH
 #define FAST_DISPATCH 1
 #endif
@@ -52,13 +51,6 @@
 const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
 const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
 
-#else // CC_INTERP
-#ifndef STATE
-#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
-#endif // STATE
-
-#endif // CC_INTERP
-
 void InterpreterMacroAssembler::jump_to_entry(address entry) {
   assert(entry, "Entry must have been generated by now");
   AddressLiteral al(entry);
@@ -82,8 +74,6 @@
   sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
 }
 
-#ifndef CC_INTERP
-
 // Dispatch code executed in the prolog of a bytecode which does not do it's
 // own dispatch. The dispatch address is computed and placed in IdispatchAddress
 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
@@ -265,10 +255,6 @@
   mov(arg_2, O1);
   MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
 }
-#endif /* CC_INTERP */
-
-
-#ifndef CC_INTERP
 
 void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
   assert_not_delayed();
@@ -1189,8 +1175,6 @@
 #endif /* COMPILER2 */
 
 }
-#endif /* CC_INTERP */
-
 
 // Lock object
 //
@@ -1323,8 +1307,6 @@
   }
 }
 
-#ifndef CC_INTERP
-
 // Get the method data pointer from the Method* and set the
 // specified register to its value.
 
@@ -2366,8 +2348,6 @@
   add( Lesp,      wordSize,                                    Rdest );
 }
 
-#endif /* CC_INTERP */
-
 void InterpreterMacroAssembler::get_method_counters(Register method,
                                                     Register Rcounters,
                                                     Label& skip) {
@@ -2443,7 +2423,6 @@
   // Note that this macro must leave backedge_count + invocation_count in Rtmp!
 }
 
-#ifndef CC_INTERP
 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
                                                              Register method_counters,
                                                              Register branch_bcp,
@@ -2581,7 +2560,6 @@
   br(cond, false, Assembler::pn, *where);
   delayed()->st(scratch1, counter_addr);
 }
-#endif /* CC_INTERP */
 
 // Inline assembly for:
 //
@@ -2597,8 +2575,6 @@
 
 void InterpreterMacroAssembler::notify_method_entry() {
 
-  // C++ interpreter only uses this for native methods.
-
   // Whenever JVMTI puts a thread in interp_only_mode, method
   // entry/exit events are sent for that thread to track stack
   // depth.  If it is possible to enter interp_only_mode we add
@@ -2647,7 +2623,6 @@
 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
                                                    TosState state,
                                                    NotifyMethodExitMode mode) {
-  // C++ interpreter only uses this for native methods.
 
   // Whenever JVMTI puts a thread in interp_only_mode, method
   // entry/exit events are sent for that thread to track stack
@@ -2687,15 +2662,6 @@
 }
 
 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
-#ifdef CC_INTERP
-  // result potentially in O0/O1: save it across calls
-  stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
-#ifdef _LP64
-  stx(O0, STATE(_native_lresult));
-#else
-  std(O0, STATE(_native_lresult));
-#endif
-#else // CC_INTERP
   if (is_native_call) {
     stf(FloatRegisterImpl::D, F0, d_tmp);
 #ifdef _LP64
@@ -2706,18 +2672,9 @@
   } else {
     push(state);
   }
-#endif // CC_INTERP
 }
 
 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
-#ifdef CC_INTERP
-  ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
-#ifdef _LP64
-  ldx(STATE(_native_lresult), O0);
-#else
-  ldd(STATE(_native_lresult), O0);
-#endif
-#else // CC_INTERP
   if (is_native_call) {
     ldf(FloatRegisterImpl::D, d_tmp, F0);
 #ifdef _LP64
@@ -2728,5 +2685,4 @@
   } else {
     pop(state);
   }
-#endif // CC_INTERP
 }
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -54,7 +54,6 @@
 
 class InterpreterMacroAssembler: public MacroAssembler {
  protected:
-#ifndef CC_INTERP
   // Interpreter specific version of call_VM_base
     virtual void call_VM_leaf_base(
     Register java_thread,
@@ -76,7 +75,6 @@
 
   // base routine for all dispatches
   void dispatch_base(TosState state, address* table);
-#endif /* CC_INTERP */
 
  public:
   InterpreterMacroAssembler(CodeBuffer* c)
@@ -84,12 +82,10 @@
 
   void jump_to_entry(address entry);
 
-#ifndef CC_INTERP
   virtual void load_earlyret_value(TosState state);
 
   static const Address l_tmp ;
   static const Address d_tmp ;
-#endif /* CC_INTERP */
 
   // helper routine for frame allocation/deallocation
   // compute the delta by which the caller's SP has to
@@ -97,8 +93,6 @@
   // locals
   void compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta);
 
-#ifndef CC_INTERP
-
   // dispatch routines
   void dispatch_prolog(TosState state, int step = 0);
   void dispatch_epilog(TosState state, int step = 0);
@@ -118,7 +112,6 @@
 
  protected:
   void dispatch_Lbyte_code(TosState state, address* table, int bcp_incr = 0, bool verify = true);
-#endif /* CC_INTERP */
 
  public:
   // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
@@ -130,7 +123,6 @@
                      Register arg_2,
                      bool check_exception = true);
 
-#ifndef CC_INTERP
   void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
 
   // Generate a subtype check: branch to ok_is_subtype if sub_klass is
@@ -265,19 +257,15 @@
   Address top_most_monitor();
   void compute_stack_base( Register Rdest );
 
-#endif /* CC_INTERP */
   void get_method_counters(Register method, Register Rcounters, Label& skip);
   void increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 );
   void increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 );
-#ifndef CC_INTERP
   void test_backedge_count_for_osr(Register backedge_count, Register method_counters, Register branch_bcp, Register Rtmp );
 
-#endif /* CC_INTERP */
   // Object locking
   void lock_object  (Register lock_reg, Register obj_reg);
   void unlock_object(Register lock_reg);
 
-#ifndef CC_INTERP
   // Interpreter profiling operations
   void set_method_data_pointer();
   void set_method_data_pointer_for_bcp();
@@ -341,7 +329,6 @@
   void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
   void verify_FPU(int stack_depth, TosState state = ftos); // only if +VerifyFPU  && (state == ftos || state == dtos)
 
-#endif /* CC_INTERP */
   // support for JVMTI/Dtrace
   typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
   void notify_method_entry();
--- a/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP
-#define CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP
-
- friend class AbstractInterpreterGenerator;
-
- private:
-
-  address generate_normal_entry(bool synchronized);
-  address generate_native_entry(bool synchronized);
-  address generate_abstract_entry(void);
-  // there are no math intrinsics on sparc
-  address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
-  address generate_accessor_entry(void) { return NULL; }
-  address generate_empty_entry(void) { return NULL; }
-  address generate_Reference_get_entry(void);
-  void save_native_result(void);
-  void restore_native_result(void);
-
-  void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
-  void generate_counter_overflow(Label& Lcontinue);
-
-  address generate_CRC32_update_entry();
-  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
-
-  // Not supported
-  address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
-#endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -26,9 +26,9 @@
 #include "asm/macroAssembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
@@ -53,7 +53,7 @@
 
 // Generation of Interpreter
 //
-// The InterpreterGenerator generates the interpreter into Interpreter::_code.
+// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.
 
 
 #define __ _masm->
@@ -194,7 +194,7 @@
 }
 #endif
 
-void InterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
+void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
 
   // Generate code to initiate compilation on the counter overflow.
 
@@ -219,7 +219,7 @@
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
 //
-address InterpreterGenerator::generate_abstract_entry(void) {
+address TemplateInterpreterGenerator::generate_abstract_entry(void) {
   address entry = __ pc();
   // abstract method entry
   // throw exception
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_SPARC_VM_INTERPRETER_SPARC_HPP
-#define CPU_SPARC_VM_INTERPRETER_SPARC_HPP
-
- public:
-
-  static int expr_offset_in_bytes(int i) { return stackElementSize * i + wordSize; }
-
-  // Stack index relative to tos (which points at value)
-  static int expr_index_at(int i)        { return stackElementWords * i; }
-
-  // Already negated by c++ interpreter
-  static int local_index_at(int i) {
-    assert(i <= 0, "local direction already negated");
-    return stackElementWords * i;
-  }
-
-#endif // CPU_SPARC_VM_INTERPRETER_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -401,9 +401,6 @@
 void MacroAssembler::verify_thread() {
   if (VerifyThread) {
     // NOTE: this chops off the heads of the 64-bit O registers.
-#ifdef CC_INTERP
-    save_frame(0);
-#else
     // make sure G2_thread contains the right value
     save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
     mov(G1, L1);                // avoid clobbering G1
@@ -411,7 +408,6 @@
     mov(G3, L3);                // avoid clobbering G3
     mov(G4, L4);                // avoid clobbering G4
     mov(G5_method, L5);         // avoid clobbering G5_method
-#endif /* CC_INTERP */
 #if defined(COMPILER2) && !defined(_LP64)
     // Save & restore possible 64-bit Long arguments in G-regs
     srlx(G1,32,L0);
@@ -530,11 +526,7 @@
 
 #ifdef ASSERT
   // check that it WAS previously set
-#ifdef CC_INTERP
-    save_frame(0);
-#else
     save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
-#endif /* CC_INTERP */
     ld_ptr(sp_addr, L0);
     tst(L0);
     breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
@@ -754,11 +746,7 @@
 
 # ifdef ASSERT
     // Check that we are not overwriting any other oop.
-#ifdef CC_INTERP
-    save_frame(0);
-#else
     save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
-#endif /* CC_INTERP */
     ld_ptr(vm_result_addr, L0);
     tst(L0);
     restore();
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -136,25 +136,6 @@
 
 // Interpreter frames
 
-#ifdef CC_INTERP
-REGISTER_DECLARATION(Register, Lstate           , L0); // interpreter state object pointer
-REGISTER_DECLARATION(Register, L1_scratch       , L1); // scratch
-REGISTER_DECLARATION(Register, Lmirror          , L1); // mirror (for native methods only)
-REGISTER_DECLARATION(Register, L2_scratch       , L2);
-REGISTER_DECLARATION(Register, L3_scratch       , L3);
-REGISTER_DECLARATION(Register, L4_scratch       , L4);
-REGISTER_DECLARATION(Register, Lscratch         , L5); // C1 uses
-REGISTER_DECLARATION(Register, Lscratch2        , L6); // C1 uses
-REGISTER_DECLARATION(Register, L7_scratch       , L7); // constant pool cache
-REGISTER_DECLARATION(Register, O5_savedSP       , O5);
-REGISTER_DECLARATION(Register, I5_savedSP       , I5); // Saved SP before bumping for locals.  This is simply
-                                                       // a copy SP, so in 64-bit it's a biased value.  The bias
-                                                       // is added and removed as needed in the frame code.
-// Interface to signature handler
-REGISTER_DECLARATION(Register, Llocals          , L7); // pointer to locals for signature handler
-REGISTER_DECLARATION(Register, Lmethod          , L6); // Method* when calling signature handler
-
-#else
 REGISTER_DECLARATION(Register, Lesp             , L0); // expression stack pointer
 REGISTER_DECLARATION(Register, Lbcp             , L1); // pointer to next bytecode
 REGISTER_DECLARATION(Register, Lmethod          , L2);
@@ -178,7 +159,6 @@
 REGISTER_DECLARATION(Register, IdispatchTables  , I4); // Base address of the bytecode dispatch tables
 REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
 REGISTER_DECLARATION(Register, ImethodDataPtr   , I2); // Pointer to the current method data
-#endif /* CC_INTERP */
 
 // NOTE: Lscratch2 and LcpoolCache point to the same registers in
 //       the interpreter code. If Lscratch2 needs to be used for some
@@ -233,19 +213,6 @@
 #define Gframe_size         AS_REGISTER(Register, Gframe_size)
 #define Gtemp               AS_REGISTER(Register, Gtemp)
 
-#ifdef CC_INTERP
-#define Lstate              AS_REGISTER(Register, Lstate)
-#define Lesp                AS_REGISTER(Register, Lesp)
-#define L1_scratch          AS_REGISTER(Register, L1_scratch)
-#define Lmirror             AS_REGISTER(Register, Lmirror)
-#define L2_scratch          AS_REGISTER(Register, L2_scratch)
-#define L3_scratch          AS_REGISTER(Register, L3_scratch)
-#define L4_scratch          AS_REGISTER(Register, L4_scratch)
-#define Lscratch            AS_REGISTER(Register, Lscratch)
-#define Lscratch2           AS_REGISTER(Register, Lscratch2)
-#define L7_scratch          AS_REGISTER(Register, L7_scratch)
-#define Ostate              AS_REGISTER(Register, Ostate)
-#else
 #define Lesp                AS_REGISTER(Register, Lesp)
 #define Lbcp                AS_REGISTER(Register, Lbcp)
 #define Lmethod             AS_REGISTER(Register, Lmethod)
@@ -255,7 +222,6 @@
 #define Lscratch            AS_REGISTER(Register, Lscratch)
 #define Lscratch2           AS_REGISTER(Register, Lscratch2)
 #define LcpoolCache         AS_REGISTER(Register, LcpoolCache)
-#endif /* ! CC_INTERP */
 
 #define Lentry_args         AS_REGISTER(Register, Lentry_args)
 #define I5_savedSP          AS_REGISTER(Register, I5_savedSP)
@@ -610,13 +576,7 @@
   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
   // may customize this version by overriding it for its purposes (e.g., to save/restore
   // additional registers when doing a VM call).
-#ifdef CC_INTERP
-  #define VIRTUAL
-#else
-  #define VIRTUAL virtual
-#endif
-
-  VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);
+  virtual void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);
 
   //
   // It is imperative that all calls into the VM are handled via the call_VM macros.
@@ -1483,7 +1443,6 @@
   void fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp);
   void fold_8bit_crc32(Register crc, Register table, Register tmp);
 
-#undef VIRTUAL
 };
 
 /**
--- a/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -152,18 +152,6 @@
 REGISTER_DEFINITION(Register, G3_method_handle);
 REGISTER_DEFINITION(Register, L7_mh_SP_save);
 
-#ifdef CC_INTERP
-REGISTER_DEFINITION(Register, Lstate);
-REGISTER_DEFINITION(Register, L1_scratch);
-REGISTER_DEFINITION(Register, Lmirror);
-REGISTER_DEFINITION(Register, L2_scratch);
-REGISTER_DEFINITION(Register, L3_scratch);
-REGISTER_DEFINITION(Register, L4_scratch);
-REGISTER_DEFINITION(Register, Lscratch);
-REGISTER_DEFINITION(Register, Lscratch2);
-REGISTER_DEFINITION(Register, L7_scratch);
-REGISTER_DEFINITION(Register, I5_savedSP);
-#else // CC_INTERP
 REGISTER_DEFINITION(Register, Lesp);
 REGISTER_DEFINITION(Register, Lbcp);
 REGISTER_DEFINITION(Register, Lmonitors);
@@ -177,7 +165,6 @@
 REGISTER_DEFINITION(Register, IdispatchAddress);
 REGISTER_DEFINITION(Register, ImethodDataPtr);
 REGISTER_DEFINITION(Register, IdispatchTables);
-#endif // CC_INTERP
 REGISTER_DEFINITION(Register, Lmethod);
 REGISTER_DEFINITION(Register, Llocals);
 REGISTER_DEFINITION(Register, Oexception);
--- a/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -26,9 +26,9 @@
 #include "asm/macroAssembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
@@ -47,7 +47,6 @@
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
-#ifndef CC_INTERP
 #ifndef FAST_DISPATCH
 #define FAST_DISPATCH 1
 #endif
@@ -56,7 +55,7 @@
 
 // Generation of Interpreter
 //
-// The InterpreterGenerator generates the interpreter into Interpreter::_code.
+// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.
 
 
 #define __ _masm->
@@ -65,7 +64,7 @@
 //----------------------------------------------------------------------------------------------------
 
 
-void InterpreterGenerator::save_native_result(void) {
+void TemplateInterpreterGenerator::save_native_result(void) {
   // result potentially in O0/O1: save it across calls
   const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
 
@@ -81,7 +80,7 @@
 #endif
 }
 
-void InterpreterGenerator::restore_native_result(void) {
+void TemplateInterpreterGenerator::restore_native_result(void) {
   const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
   const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
 
@@ -293,7 +292,7 @@
 // Lmethod: method
 // ??: invocation counter
 //
-void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
+void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
   // Note: In tiered we increment either counters in MethodCounters* or in
   // MDO depending if we're profiling or not.
   const Register G3_method_counters = G3_scratch;
@@ -724,7 +723,7 @@
 }
 
 // Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
+address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
   // Code: _aload_0, _getfield, _areturn
   // parameter size = 1
@@ -807,7 +806,7 @@
  * Method entry for static native methods:
  *   int java.util.zip.CRC32.update(int crc, int b)
  */
-address InterpreterGenerator::generate_CRC32_update_entry() {
+address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
 
   if (UseCRC32Intrinsics) {
     address entry = __ pc();
@@ -851,7 +850,7 @@
  *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
  *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
  */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
 
   if (UseCRC32Intrinsics) {
     address entry = __ pc();
@@ -903,13 +902,22 @@
   return NULL;
 }
 
+// Not supported
+address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  return NULL;
+}
+
+// Not supported
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+  return NULL;
+}
 //
 // Interpreter stub for calling a native method. (asm interpreter)
 // This sets up a somewhat different looking stack for calling the native method
 // than the typical interpreter frame setup.
 //
 
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
+address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   address entry = __ pc();
 
   // the following temporary registers are used during frame creation
@@ -1336,7 +1344,7 @@
 
 
 // Generic method entry to (asm) interpreter
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
   address entry = __ pc();
 
   bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
@@ -1743,14 +1751,6 @@
 
 // --------------------------------------------------------------------------------
 
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
- : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-// --------------------------------------------------------------------------------
-
 // Non-product code
 #ifndef PRODUCT
 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
@@ -1829,4 +1829,3 @@
   __ breakpoint_trap(Assembler::equal, Assembler::icc);
 }
 #endif // not PRODUCT
-#endif // !CC_INTERP
--- a/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_SPARC_VM_TEMPLATEINTERPRETERGENERATOR_SPARC_HPP
-#define CPU_SPARC_VM_TEMPLATEINTERPRETERGENERATOR_SPARC_HPP
-
-  protected:
-
-  void generate_fixed_frame(bool native_call); // template interpreter only
-  void generate_stack_overflow_check(Register Rframe_size, Register Rscratch,
-                                     Register Rscratch2);
-
-#endif // CPU_SPARC_VM_TEMPLATEINTERPRETERGENERATOR_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "oops/constMethod.hpp"
 #include "oops/method.hpp"
 #include "runtime/arguments.hpp"
@@ -32,6 +31,18 @@
 #include "runtime/synchronizer.hpp"
 #include "utilities/macros.hpp"
 
+// Size of interpreter code.  Increase if too small.  Interpreter will
+// fail with a guarantee ("not enough space for interpreter generation")
+// if too small.
+// Run with +PrintInterpreter to get the VM to print out the size.
+// Max size with JVMTI
+#ifdef _LP64
+  // The sethi() instruction generates many more instructions when the shell
+  // stack limit is unlimited, which is why this is much bigger.
+int TemplateInterpreter::InterpreterCodeSize = 260 * K;
+#else
+int TemplateInterpreter::InterpreterCodeSize = 230 * K;
+#endif
 
 int AbstractInterpreter::BasicType_as_index(BasicType type) {
   int i = 0;
@@ -107,7 +118,7 @@
                                          int callee_locals,
                                          bool is_top_frame) {
   // Note: This calculation must exactly parallel the frame setup
-  // in InterpreterGenerator::generate_fixed_frame.
+  // in TemplateInterpreterGenerator::generate_fixed_frame.
 
   int monitor_size           = monitors * frame::interpreter_frame_monitor_size();
 
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_SPARC_VM_TEMPLATEINTERPRETER_SPARC_HPP
-#define CPU_SPARC_VM_TEMPLATEINTERPRETER_SPARC_HPP
-
-
-  protected:
-
-  // Size of interpreter code.  Increase if too small.  Interpreter will
-  // fail with a guarantee ("not enough space for interpreter generation");
-  // if too small.
-  // Run with +PrintInterpreter to get the VM to print out the size.
-  // Max size with JVMTI
-
-#ifdef _LP64
-  // The sethi() instruction generates lots more instructions when shell
-  // stack limit is unlimited, so that's why this is much bigger.
-  const static int InterpreterCodeSize = 260 * K;
-#else
-  const static int InterpreterCodeSize = 230 * K;
-#endif
-
-#endif // CPU_SPARC_VM_TEMPLATEINTERPRETER_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -37,7 +37,6 @@
 #include "runtime/synchronizer.hpp"
 #include "utilities/macros.hpp"
 
-#ifndef CC_INTERP
 #define __ _masm->
 
 // Misc helpers
@@ -3777,4 +3776,3 @@
      call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
   __ add(  Lesp,     Lscratch,        Lesp); // pop all dimensions off the stack
 }
-#endif /* !CC_INTERP */
--- a/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "interpreter/bytecodeInterpreter.hpp"
-#include "interpreter/bytecodeInterpreter.inline.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#ifdef TARGET_ARCH_x86
-# include "interp_masm_x86.hpp"
-#endif
-
-#ifdef CC_INTERP
-
-#endif // CC_INTERP (all)
--- a/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP
-#define CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP
-
-// Platform specific for C++ based Interpreter
-
-private:
-
-    interpreterState _self_link;          /*  Previous interpreter state  */ /* sometimes points to self??? */
-    address   _result_handler;            /* temp for saving native result handler */
-    intptr_t* _sender_sp;                 /* sender's sp before stack (locals) extension */
-
-    address   _extra_junk1;               /* temp to save on recompiles */
-    address   _extra_junk2;               /* temp to save on recompiles */
-    address   _extra_junk3;               /* temp to save on recompiles */
-    // address dummy_for_native2;         /* a native frame result handler would be here... */
-    // address dummy_for_native1;         /* native result type stored here in a interpreter native frame */
-    address   _extra_junk4;               /* temp to save on recompiles */
-    address   _extra_junk5;               /* temp to save on recompiles */
-    address   _extra_junk6;               /* temp to save on recompiles */
-public:
-                                                         // we have an interpreter frame...
-inline intptr_t* sender_sp() {
-  return _sender_sp;
-}
-
-// The interpreter always has the frame anchor fully setup so we don't
-// have to do anything going to vm from the interpreter. On return
-// we do have to clear the flags in case they we're modified to
-// maintain the stack walking invariants.
-//
-#define SET_LAST_JAVA_FRAME()
-
-#define RESET_LAST_JAVA_FRAME()
-
-/*
- * Macros for accessing the stack.
- */
-#undef STACK_INT
-#undef STACK_FLOAT
-#undef STACK_ADDR
-#undef STACK_OBJECT
-#undef STACK_DOUBLE
-#undef STACK_LONG
-
-// JavaStack Implementation
-
-#define GET_STACK_SLOT(offset)    (*((intptr_t*) &topOfStack[-(offset)]))
-#define STACK_SLOT(offset)    ((address) &topOfStack[-(offset)])
-#define STACK_ADDR(offset)    (*((address *) &topOfStack[-(offset)]))
-#define STACK_INT(offset)     (*((jint*) &topOfStack[-(offset)]))
-#define STACK_FLOAT(offset)   (*((jfloat *) &topOfStack[-(offset)]))
-#define STACK_OBJECT(offset)  (*((oop *) &topOfStack [-(offset)]))
-#define STACK_DOUBLE(offset)  (((VMJavaVal64*) &topOfStack[-(offset)])->d)
-#define STACK_LONG(offset)    (((VMJavaVal64 *) &topOfStack[-(offset)])->l)
-
-#define SET_STACK_SLOT(value, offset)   (*(intptr_t*)&topOfStack[-(offset)] = *(intptr_t*)(value))
-#define SET_STACK_ADDR(value, offset)   (*((address *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_INT(value, offset)    (*((jint *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_FLOAT(value, offset)  (*((jfloat *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_OBJECT(value, offset) (*((oop *)&topOfStack[-(offset)]) = (value))
-#define SET_STACK_DOUBLE(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = (value))
-#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d =  \
-                                                 ((VMJavaVal64*)(addr))->d)
-#define SET_STACK_LONG(value, offset)   (((VMJavaVal64*)&topOfStack[-(offset)])->l = (value))
-#define SET_STACK_LONG_FROM_ADDR(addr, offset)   (((VMJavaVal64*)&topOfStack[-(offset)])->l =  \
-                                                 ((VMJavaVal64*)(addr))->l)
-// JavaLocals implementation
-
-#define LOCALS_SLOT(offset)    ((intptr_t*)&locals[-(offset)])
-#define LOCALS_ADDR(offset)    ((address)locals[-(offset)])
-#define LOCALS_INT(offset)     ((jint)(locals[-(offset)]))
-#define LOCALS_FLOAT(offset)   (*((jfloat*)&locals[-(offset)]))
-#define LOCALS_OBJECT(offset)  (cast_to_oop(locals[-(offset)]))
-#define LOCALS_DOUBLE(offset)  (((VMJavaVal64*)&locals[-((offset) + 1)])->d)
-#define LOCALS_LONG(offset)    (((VMJavaVal64*)&locals[-((offset) + 1)])->l)
-#define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
-#define LOCALS_DOUBLE_AT(offset) (((address)&locals[-((offset) + 1)]))
-
-#define SET_LOCALS_SLOT(value, offset)    (*(intptr_t*)&locals[-(offset)] = *(intptr_t *)(value))
-#define SET_LOCALS_ADDR(value, offset)    (*((address *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_INT(value, offset)     (*((jint *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_FLOAT(value, offset)   (*((jfloat *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_OBJECT(value, offset)  (*((oop *)&locals[-(offset)]) = (value))
-#define SET_LOCALS_DOUBLE(value, offset)  (((VMJavaVal64*)&locals[-((offset)+1)])->d = (value))
-#define SET_LOCALS_LONG(value, offset)    (((VMJavaVal64*)&locals[-((offset)+1)])->l = (value))
-#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = \
-                                                  ((VMJavaVal64*)(addr))->d)
-#define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \
-                                                ((VMJavaVal64*)(addr))->l)
-
-#endif // CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP
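A minimal standalone sketch of the pointer arithmetic behind the JavaStack/JavaLocals macros above: the expression stack is addressed relative to topOfStack with a negated offset, locals are indexed downward from locals[0], and two-word values (long/double) in locals use -(offset + 1) so the 64-bit value sits at the lower-addressed pair of slots. The frame array and the positions chosen for topOfStack and locals below are invented purely for illustration, and int32_t stands in for jint.

#include <stdint.h>
#include <stdio.h>

int main() {
  intptr_t frame[8] = {0};
  intptr_t* topOfStack = &frame[4];   // hypothetical expression stack pointer
  intptr_t* locals     = &frame[7];   // hypothetical locals base, locals[0]

  // SET_STACK_INT(42, -1) expands to *((jint*)&topOfStack[-(-1)]) = 42, i.e. topOfStack[1]
  *(int32_t*)&topOfStack[-(-1)] = 42;
  printf("STACK_INT(-1)    = %d\n", *(int32_t*)&topOfStack[-(-1)]);

  // SET_LOCALS_DOUBLE(3.14, 0) writes the 64-bit value at &locals[-(0 + 1)]
  *(double*)&locals[-(0 + 1)] = 3.14;
  printf("LOCALS_DOUBLE(0) = %f\n", *(double*)&locals[-(0 + 1)]);
  return 0;
}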
--- a/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.inline.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,285 +0,0 @@
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP
-#define CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP
-
-// Inline interpreter functions for IA32
-
-inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) { return op1 + op2; }
-inline jfloat BytecodeInterpreter::VMfloatSub(jfloat op1, jfloat op2) { return op1 - op2; }
-inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) { return op1 * op2; }
-inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) { return op1 / op2; }
-inline jfloat BytecodeInterpreter::VMfloatRem(jfloat op1, jfloat op2) { return fmod(op1, op2); }
-
-inline jfloat BytecodeInterpreter::VMfloatNeg(jfloat op) { return -op; }
-
-inline int32_t BytecodeInterpreter::VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) {
-  return ( op1 < op2 ? -1 :
-               op1 > op2 ? 1 :
-                   op1 == op2 ? 0 :
-                       (direction == -1 || direction == 1) ? direction : 0);
-
-}
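When either operand is NaN all three comparisons in VMfloatCompare are false, so the function falls through and returns direction; a direction of -1 reproduces the fcmpl bytecode's NaN result and +1 reproduces fcmpg, while ordered operands ignore direction entirely. A small self-contained check, where floatCompare is a local copy of the removed function rather than anything linked from HotSpot:

#include <cassert>
#include <cmath>
#include <cstdint>

// Local copy of the removed comparator, for illustration only.
static int32_t floatCompare(float op1, float op2, int32_t direction) {
  return (op1 < op2 ? -1 :
          op1 > op2 ?  1 :
          op1 == op2 ? 0 :
          (direction == -1 || direction == 1) ? direction : 0);
}

int main() {
  float nan = std::nanf("");
  assert(floatCompare(nan, 1.0f, -1) == -1);   // fcmpl-style: NaN compares as -1
  assert(floatCompare(nan, 1.0f,  1) ==  1);   // fcmpg-style: NaN compares as +1
  assert(floatCompare(2.0f, 1.0f, -1) ==  1);  // ordered operands ignore direction
  return 0;
}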
-
-inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2], const uint32_t from[2]) {
-  // x86 can do unaligned copies but not 64bits at a time
-  to[0] = from[0]; to[1] = from[1];
-}
-
-// The long operations depend on compiler support for "long long" on x86
-
-inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) {
-  return op1 + op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) {
-  return op1 & op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) {
-  // QQQ what about check and throw...
-  return op1 / op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) {
-  return op1 * op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) {
-  return op1 | op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) {
-  return op1 - op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) {
-  return op1 ^ op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) {
-  return op1 % op2;
-}
-
-inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) {
-  // CVM did this 0x3f mask, is this really needed??? QQQ
-  return ((unsigned long long) op1) >> (op2 & 0x3F);
-}
-
-inline jlong BytecodeInterpreter::VMlongShr(jlong op1, jint op2) {
-  return op1 >> (op2 & 0x3F);
-}
-
-inline jlong BytecodeInterpreter::VMlongShl(jlong op1, jint op2) {
-  return op1 << (op2 & 0x3F);
-}
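On the QQQ question in VMlongUshr above: the 0x3F mask does matter. The JVM's long shift bytecodes use only the low six bits of the shift count (VMintShr and VMintUshr later in this file mask with 0x1f for the analogous int rule), and an unmasked C++ shift by 64 or more would be undefined behavior rather than the wrap-around Java requires. A tiny standalone check:

#include <cassert>
#include <cstdint>

int main() {
  int64_t one = 1;
  // Java semantics: 1L << 65 equals 1L << 1, because only (65 & 0x3F) == 1 is used.
  assert((one << (65 & 0x3F)) == 2);
  // Writing `one << 65` directly would be undefined behavior in C++, hence the mask.
  return 0;
}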
-
-inline jlong BytecodeInterpreter::VMlongNeg(jlong op) {
-  return -op;
-}
-
-inline jlong BytecodeInterpreter::VMlongNot(jlong op) {
-  return ~op;
-}
-
-inline int32_t BytecodeInterpreter::VMlongLtz(jlong op) {
-  return (op <= 0);
-}
-
-inline int32_t BytecodeInterpreter::VMlongGez(jlong op) {
-  return (op >= 0);
-}
-
-inline int32_t BytecodeInterpreter::VMlongEqz(jlong op) {
-  return (op == 0);
-}
-
-inline int32_t BytecodeInterpreter::VMlongEq(jlong op1, jlong op2) {
-  return (op1 == op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongNe(jlong op1, jlong op2) {
-  return (op1 != op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongGe(jlong op1, jlong op2) {
-  return (op1 >= op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongLe(jlong op1, jlong op2) {
-  return (op1 <= op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongLt(jlong op1, jlong op2) {
-  return (op1 < op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongGt(jlong op1, jlong op2) {
-  return (op1 > op2);
-}
-
-inline int32_t BytecodeInterpreter::VMlongCompare(jlong op1, jlong op2) {
-  return (VMlongLt(op1, op2) ? -1 : VMlongGt(op1, op2) ? 1 : 0);
-}
-
-// Long conversions
-
-inline jdouble BytecodeInterpreter::VMlong2Double(jlong val) {
-  return (jdouble) val;
-}
-
-inline jfloat BytecodeInterpreter::VMlong2Float(jlong val) {
-  return (jfloat) val;
-}
-
-inline jint BytecodeInterpreter::VMlong2Int(jlong val) {
-  return (jint) val;
-}
-
-// Double Arithmetic
-
-inline jdouble BytecodeInterpreter::VMdoubleAdd(jdouble op1, jdouble op2) {
-  return op1 + op2;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleDiv(jdouble op1, jdouble op2) {
-  // Divide by zero... QQQ
-  return op1 / op2;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleMul(jdouble op1, jdouble op2) {
-  return op1 * op2;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleNeg(jdouble op) {
-  return -op;
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleRem(jdouble op1, jdouble op2) {
-  return fmod(op1, op2);
-}
-
-inline jdouble BytecodeInterpreter::VMdoubleSub(jdouble op1, jdouble op2) {
-  return op1 - op2;
-}
-
-inline int32_t BytecodeInterpreter::VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction) {
-  return ( op1 < op2 ? -1 :
-               op1 > op2 ? 1 :
-                   op1 == op2 ? 0 :
-                       (direction == -1 || direction == 1) ? direction : 0);
-}
-
-// Double Conversions
-
-inline jfloat BytecodeInterpreter::VMdouble2Float(jdouble val) {
-  return (jfloat) val;
-}
-
-// Float Conversions
-
-inline jdouble BytecodeInterpreter::VMfloat2Double(jfloat op) {
-  return (jdouble) op;
-}
-
-// Integer Arithmetic
-
-inline jint BytecodeInterpreter::VMintAdd(jint op1, jint op2) {
-  return op1 + op2;
-}
-
-inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) {
-  return op1 & op2;
-}
-
-inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
-  /* it's possible we could catch this special case implicitly */
-  if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
-  else return op1 / op2;
-}
-
-inline jint BytecodeInterpreter::VMintMul(jint op1, jint op2) {
-  return op1 * op2;
-}
-
-inline jint BytecodeInterpreter::VMintNeg(jint op) {
-  return -op;
-}
-
-inline jint BytecodeInterpreter::VMintOr(jint op1, jint op2) {
-  return op1 | op2;
-}
-
-inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
-  /* it's possible we could catch this special case implicitly */
-  if ((juint)op1 == 0x80000000 && op2 == -1) return 0;
-  else return op1 % op2;
-}
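The guarded special case in VMintDiv and VMintRem above exists because Java defines Integer.MIN_VALUE / -1 to wrap back to Integer.MIN_VALUE with a remainder of 0, while the corresponding native idiv overflows (and traps on x86). A minimal standalone sketch, where javaIntDiv is a hypothetical helper mirroring the removed code rather than a HotSpot function:

#include <cassert>
#include <cstdint>

// Mirrors the removed VMintDiv: wrap instead of trapping on INT_MIN / -1.
static int32_t javaIntDiv(int32_t op1, int32_t op2) {
  if ((uint32_t)op1 == 0x80000000u && op2 == -1) return op1;
  return op1 / op2;
}

int main() {
  assert(javaIntDiv(INT32_MIN, -1) == INT32_MIN);  // wraps, does not trap
  assert(javaIntDiv(7, -2) == -3);                 // ordinary truncating division
  return 0;
}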
-
-inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) {
-  return op1 << op2;
-}
-
-inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) {
-  return op1 >> (op2 & 0x1f);
-}
-
-inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
-  return op1 - op2;
-}
-
-inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
-  return ((juint) op1) >> (op2 & 0x1f);
-}
-
-inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) {
-  return op1 ^ op2;
-}
-
-inline jdouble BytecodeInterpreter::VMint2Double(jint val) {
-  return (jdouble) val;
-}
-
-inline jfloat BytecodeInterpreter::VMint2Float(jint val) {
-  return (jfloat) val;
-}
-
-inline jlong BytecodeInterpreter::VMint2Long(jint val) {
-  return (jlong) val;
-}
-
-inline jchar BytecodeInterpreter::VMint2Char(jint val) {
-  return (jchar) val;
-}
-
-inline jshort BytecodeInterpreter::VMint2Short(jint val) {
-  return (jshort) val;
-}
-
-inline jbyte BytecodeInterpreter::VMint2Byte(jint val) {
-  return (jbyte) val;
-}
-
-#endif // CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP
--- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -38,11 +38,7 @@
 define_pd_global(bool, PreferInterpreterNativeStubs, false);
 define_pd_global(bool, ProfileTraps,                 true);
 define_pd_global(bool, UseOnStackReplacement,        true);
-#ifdef CC_INTERP
-define_pd_global(bool, ProfileInterpreter,           false);
-#else
 define_pd_global(bool, ProfileInterpreter,           true);
-#endif // CC_INTERP
 define_pd_global(bool, TieredCompilation,            trueInTiered);
 define_pd_global(intx, CompileThreshold,             10000);
 
--- a/hotspot/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP
-#define CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP
-
- protected:
-
-  void generate_more_monitors();
-  void generate_deopt_handling();
-  void lock_method(void);
-  address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only
-  void generate_compute_interpreter_state(const Register state,
-                                          const Register prev_state,
-                                          const Register sender_sp,
-                                          bool native); // C++ interpreter only
-
-#endif // CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2314 +0,0 @@
-/*
- * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/cppInterpreter.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-#ifdef SHARK
-#include "shark/shark_globals.hpp"
-#endif
-
-#ifdef CC_INTERP
-
-// Routine exists to make tracebacks look decent in debugger
-// while we are recursed in the frame manager/c++ interpreter.
-// We could use an address in the frame manager but having
-// frames look natural in the debugger is a plus.
-extern "C" void RecursiveInterpreterActivation(interpreterState istate )
-{
-  //
-  ShouldNotReachHere();
-}
-
-
-#define __ _masm->
-#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))
-
-// default registers for state and sender_sp
-// state and sender_sp are the same on 32bit because we have no choice.
-// state could be rsi on 64bit but it is an arg reg and not callee save
-// so r13 is a better choice.
-
-const Register state = NOT_LP64(rsi) LP64_ONLY(r13);
-const Register sender_sp_on_entry = NOT_LP64(rsi) LP64_ONLY(r13);
-
-// NEEDED for JVMTI?
-// address AbstractInterpreter::_remove_activation_preserving_args_entry;
-
-static address unctrap_frame_manager_entry  = NULL;
-
-static address deopt_frame_manager_return_atos  = NULL;
-static address deopt_frame_manager_return_btos  = NULL;
-static address deopt_frame_manager_return_itos  = NULL;
-static address deopt_frame_manager_return_ltos  = NULL;
-static address deopt_frame_manager_return_ftos  = NULL;
-static address deopt_frame_manager_return_dtos  = NULL;
-static address deopt_frame_manager_return_vtos  = NULL;
-
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_VOID   : i = 5; break;
-    case T_FLOAT  : i = 8; break;
-    case T_LONG   : i = 9; break;
-    case T_DOUBLE : i = 6; break;
-    case T_OBJECT : // fall through
-    case T_ARRAY  : i = 7; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
-  return i;
-}
-
-// Is this pc anywhere within code owned by the interpreter?
-// This only works for pc that might possibly be exposed to frame
-// walkers. It clearly misses all of the actual c++ interpreter
-// implementation
-bool CppInterpreter::contains(address pc)            {
-    return (_code->contains(pc) ||
-            pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
-}
-
-
-address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
-  address entry = __ pc();
-  switch (type) {
-    case T_BOOLEAN: __ c2bool(rax);            break;
-    case T_CHAR   : __ andl(rax, 0xFFFF);      break;
-    case T_BYTE   : __ sign_extend_byte (rax); break;
-    case T_SHORT  : __ sign_extend_short(rax); break;
-    case T_VOID   : // fall thru
-    case T_LONG   : // fall thru
-    case T_INT    : /* nothing to do */        break;
-
-    case T_DOUBLE :
-    case T_FLOAT  :
-      {
-        const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
-        __ pop(t);                            // remove return address first
-        // Must return a result for interpreter or compiler. In SSE
-        // mode, results are returned in xmm0 and the FPU stack must
-        // be empty.
-        if (type == T_FLOAT && UseSSE >= 1) {
-#ifndef _LP64
-          // Load ST0
-          __ fld_d(Address(rsp, 0));
-          // Store as float and empty fpu stack
-          __ fstp_s(Address(rsp, 0));
-#endif // !_LP64
-          // and reload
-          __ movflt(xmm0, Address(rsp, 0));
-        } else if (type == T_DOUBLE && UseSSE >= 2 ) {
-          __ movdbl(xmm0, Address(rsp, 0));
-        } else {
-          // restore ST0
-          __ fld_d(Address(rsp, 0));
-        }
-        // and pop the temp
-        __ addptr(rsp, 2 * wordSize);
-        __ push(t);                            // restore return address
-      }
-      break;
-    case T_OBJECT :
-      // retrieve result from frame
-      __ movptr(rax, STATE(_oop_temp));
-      // and verify it
-      __ verify_oop(rax);
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ ret(0);                                   // return from result handler
-  return entry;
-}
-
-// tosca based result to c++ interpreter stack based result.
-// Result goes to top of native stack.
-
-#undef EXTEND  // SHOULD NOT BE NEEDED
-address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
-  // A result is in the tosca (abi result) from either a native method call or compiled
-  // code. Place this result on the java expression stack so C++ interpreter can use it.
-  address entry = __ pc();
-
-  const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
-  __ pop(t);                            // remove return address first
-  switch (type) {
-    case T_VOID:
-       break;
-    case T_BOOLEAN:
-#ifdef EXTEND
-      __ c2bool(rax);
-#endif
-      __ push(rax);
-      break;
-    case T_CHAR   :
-#ifdef EXTEND
-      __ andl(rax, 0xFFFF);
-#endif
-      __ push(rax);
-      break;
-    case T_BYTE   :
-#ifdef EXTEND
-      __ sign_extend_byte (rax);
-#endif
-      __ push(rax);
-      break;
-    case T_SHORT  :
-#ifdef EXTEND
-      __ sign_extend_short(rax);
-#endif
-      __ push(rax);
-      break;
-    case T_LONG    :
-      __ push(rdx);                             // pushes useless junk on 64bit
-      __ push(rax);
-      break;
-    case T_INT    :
-      __ push(rax);
-      break;
-    case T_FLOAT  :
-      // Result is in ST(0)/xmm0
-      __ subptr(rsp, wordSize);
-      if ( UseSSE < 1) {
-        __ fstp_s(Address(rsp, 0));
-      } else {
-        __ movflt(Address(rsp, 0), xmm0);
-      }
-      break;
-    case T_DOUBLE  :
-      __ subptr(rsp, 2*wordSize);
-      if ( UseSSE < 2 ) {
-        __ fstp_d(Address(rsp, 0));
-      } else {
-        __ movdbl(Address(rsp, 0), xmm0);
-      }
-      break;
-    case T_OBJECT :
-      __ verify_oop(rax);                      // verify it
-      __ push(rax);
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ jmp(t);                                   // return from result handler
-  return entry;
-}
-
-address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
-  // A result is in the java expression stack of the interpreted method that has just
-  // returned. Place this result on the java expression stack of the caller.
-  //
-  // The current interpreter activation in rsi/r13 is for the method just returning its
-  // result. So we know that the result of this method is on the top of the current
-  // execution stack (which is pre-pushed) and will be returned to the top of the caller's
-  // stack. The top of the caller's stack is the bottom of the locals of the current
-  // activation.
-  // Because of the way activations are managed by the frame manager the value of rsp is
-  // below both the stack top of the current activation and naturally the stack top
-  // of the calling activation. This enables this routine to leave the return address
-  // to the frame manager on the stack and do a vanilla return.
-  //
-  // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
-  // On Return: rsi/r13 - unchanged
-  //            rax - new stack top for caller activation (i.e. activation in _prev_link)
-  //
-  // Can destroy rdx, rcx.
-  //
-
-  address entry = __ pc();
-  const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
-  switch (type) {
-    case T_VOID:
-      __ movptr(rax, STATE(_locals));                                   // pop parameters get new stack value
-      __ addptr(rax, wordSize);                                         // account for prepush before we return
-      break;
-    case T_FLOAT  :
-    case T_BOOLEAN:
-    case T_CHAR   :
-    case T_BYTE   :
-    case T_SHORT  :
-    case T_INT    :
-      // 1 word result
-      __ movptr(rdx, STATE(_stack));
-      __ movptr(rax, STATE(_locals));                                   // address for result
-      __ movl(rdx, Address(rdx, wordSize));                             // get result
-      __ movptr(Address(rax, 0), rdx);                                  // and store it
-      break;
-    case T_LONG    :
-    case T_DOUBLE  :
-      // return top two words on current expression stack to caller's expression stack
-      // The caller's expression stack is adjacent to the current frame manager's interpreterState
-      // except we allocated one extra word for this interpreterState so we won't overwrite it
-      // when we return a two word result.
-
-      __ movptr(rax, STATE(_locals));                                   // address for result
-      __ movptr(rcx, STATE(_stack));
-      __ subptr(rax, wordSize);                                         // need additional word besides locals[0]
-      __ movptr(rdx, Address(rcx, 2*wordSize));                         // get result word (junk in 64bit)
-      __ movptr(Address(rax, wordSize), rdx);                           // and store it
-      __ movptr(rdx, Address(rcx, wordSize));                           // get result word
-      __ movptr(Address(rax, 0), rdx);                                  // and store it
-      break;
-    case T_OBJECT :
-      __ movptr(rdx, STATE(_stack));
-      __ movptr(rax, STATE(_locals));                                   // address for result
-      __ movptr(rdx, Address(rdx, wordSize));                           // get result
-      __ verify_oop(rdx);                                               // verify it
-      __ movptr(Address(rax, 0), rdx);                                  // and store it
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ ret(0);
-  return entry;
-}
-
-address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
-  // A result is in the java expression stack of the interpreted method that has just
-  // returned. Place this result in the native abi that the caller expects.
-  //
-  // Similar to generate_stack_to_stack_converter above. Called at a similar time from the
-  // frame manager except in this situation the caller is native code (c1/c2/call_stub)
-  // and so rather than return result onto caller's java expression stack we return the
-  // result in the expected location based on the native abi.
-  // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
-  // On Return: rsi/r13 - unchanged
-  // Other registers changed [rax/rdx/ST(0) as needed for the result returned]
-
-  address entry = __ pc();
-  switch (type) {
-    case T_VOID:
-       break;
-    case T_BOOLEAN:
-    case T_CHAR   :
-    case T_BYTE   :
-    case T_SHORT  :
-    case T_INT    :
-      __ movptr(rdx, STATE(_stack));                                    // get top of stack
-      __ movl(rax, Address(rdx, wordSize));                             // get result word 1
-      break;
-    case T_LONG    :
-      __ movptr(rdx, STATE(_stack));                                    // get top of stack
-      __ movptr(rax, Address(rdx, wordSize));                           // get result low word
-      NOT_LP64(__ movl(rdx, Address(rdx, 2*wordSize));)                 // get result high word
-      break;
-    case T_FLOAT  :
-      __ movptr(rdx, STATE(_stack));                                    // get top of stack
-      if ( UseSSE >= 1) {
-        __ movflt(xmm0, Address(rdx, wordSize));
-      } else {
-        __ fld_s(Address(rdx, wordSize));                               // push float result
-      }
-      break;
-    case T_DOUBLE  :
-      __ movptr(rdx, STATE(_stack));                                    // get top of stack
-      if ( UseSSE > 1) {
-        __ movdbl(xmm0, Address(rdx, wordSize));
-      } else {
-        __ fld_d(Address(rdx, wordSize));                               // push double result
-      }
-      break;
-    case T_OBJECT :
-      __ movptr(rdx, STATE(_stack));                                    // get top of stack
-      __ movptr(rax, Address(rdx, wordSize));                           // get result word 1
-      __ verify_oop(rax);                                               // verify it
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ ret(0);
-  return entry;
-}
-
-address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
-  // make it look good in the debugger
-  return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation);
-}
-
-address CppInterpreter::deopt_entry(TosState state, int length) {
-  address ret = NULL;
-  if (length != 0) {
-    switch (state) {
-      case atos: ret = deopt_frame_manager_return_atos; break;
-      case btos: ret = deopt_frame_manager_return_btos; break;
-      case ctos:
-      case stos:
-      case itos: ret = deopt_frame_manager_return_itos; break;
-      case ltos: ret = deopt_frame_manager_return_ltos; break;
-      case ftos: ret = deopt_frame_manager_return_ftos; break;
-      case dtos: ret = deopt_frame_manager_return_dtos; break;
-      case vtos: ret = deopt_frame_manager_return_vtos; break;
-    }
-  } else {
-    ret = unctrap_frame_manager_entry;  // re-execute the bytecode ( e.g. uncommon trap)
-  }
-  assert(ret != NULL, "Not initialized");
-  return ret;
-}
-
-// C++ Interpreter
-void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
-                                                                 const Register locals,
-                                                                 const Register sender_sp,
-                                                                 bool native) {
-
-  // On entry the "locals" argument points to locals[0] (or where it would be in case no locals in
-  // a static method). "state" contains any previous frame manager state which we must save a link
-  // to in the newly generated state object. On return "state" is a pointer to the newly allocated
-  // state object. We must allocate and initialize a new interpreterState object and the method
-  // expression stack. Because the returned result (if any) of the method will be placed on the caller's
-  // expression stack and this will overlap with locals[0] (and locals[1] if double/long) we must
-  // be sure to leave space on the caller's stack so that this result will not overwrite values when
-  // locals[0] and locals[1] do not exist (and in fact are return address and saved rbp). So when
-  // we are non-native we in essence ensure that locals[0-1] exist. We play an extra trick in
-  // non-product builds and initialize this last local with the previous interpreterState as
-  // this makes things look real nice in the debugger.
-
-  // State on entry
-  // Assumes locals == &locals[0]
-  // Assumes state == any previous frame manager state (assuming call path from c++ interpreter)
-  // Assumes rax = return address
-  // rcx == senders_sp
-  // rbx == method
-  // Modifies rcx, rdx, rax
-  // Returns:
-  // state == address of new interpreterState
-  // rsp == bottom of method's expression stack.
-
-  const Address const_offset      (rbx, Method::const_offset());
-
-
-  // On entry sp is the sender's sp. This includes the space for the arguments
-  // that the sender pushed. If the sender pushed no args (a static) and the
-  // caller returns a long then we need two words on the sender's stack which
-  // are not present (although when we return and restore the full size stack the
-  // space will be present). If we didn't allocate two words here then when
-  // we "push" the result onto the caller's stack we would overwrite the return
-  // address and the saved rbp. Not good. So simply allocate 2 words now
-  // just to be safe. This is the "static long no_params() method" issue.
-  // See Lo.java for a testcase.
-  // We don't need this for native calls because they return result in
-  // register and the stack is expanded in the caller before we store
-  // the results on the stack.
-
-  if (!native) {
-#ifdef PRODUCT
-    __ subptr(rsp, 2*wordSize);
-#else /* PRODUCT */
-    __ push((int32_t)NULL_WORD);
-    __ push(state);                         // make it look like a real argument
-#endif /* PRODUCT */
-  }
-
-  // Now that we are assured of space for the stack result, set up typical linkage
-
-  __ push(rax);
-  __ enter();
-
-  __ mov(rax, state);                                  // save current state
-
-  __ lea(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter)));
-  __ mov(state, rsp);
-
-  // rsi/r13 == state/locals rax == prevstate
-
-  // initialize the "shadow" frame so that use since C++ interpreter not directly
-  // recursive. Simpler to recurse but we can't trim expression stack as we call
-  // new methods.
-  __ movptr(STATE(_locals), locals);                    // state->_locals = locals()
-  __ movptr(STATE(_self_link), state);                  // point to self
-  __ movptr(STATE(_prev_link), rax);                    // state->_link = state on entry (NULL or previous state)
-  __ movptr(STATE(_sender_sp), sender_sp);              // state->_sender_sp = sender_sp
-#ifdef _LP64
-  __ movptr(STATE(_thread), r15_thread);                // state->_thread = current thread
-#else
-  __ get_thread(rax);                                   // get vm's javathread*
-  __ movptr(STATE(_thread), rax);                       // state->_thread = current thread
-#endif // _LP64
-  __ movptr(rdx, Address(rbx, Method::const_offset())); // get constantMethodOop
-  __ lea(rdx, Address(rdx, ConstMethod::codes_offset())); // get code base
-  if (native) {
-    __ movptr(STATE(_bcp), (int32_t)NULL_WORD);         // state->_bcp = NULL
-  } else {
-    __ movptr(STATE(_bcp), rdx);                        // state->_bcp = codes()
-  }
-  __ xorptr(rdx, rdx);
-  __ movptr(STATE(_oop_temp), rdx);                     // state->_oop_temp = NULL (only really needed for native)
-  __ movptr(STATE(_mdx), rdx);                          // state->_mdx = NULL
-  __ movptr(rdx, Address(rbx, Method::const_offset()));
-  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
-  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
-  __ movptr(STATE(_constants), rdx);                    // state->_constants = constants()
-
-  __ movptr(STATE(_method), rbx);                       // state->_method = method()
-  __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry);   // state->_msg = initial method entry
-  __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee = NULL
-
-
-  __ movptr(STATE(_monitor_base), rsp);                 // set monitor block bottom (grows down) this would point to entry [0]
-                                                        // entries run from -1..x where &monitor[x] ==
-
-  {
-    // Must not attempt to lock method until we enter interpreter as gc won't be able to find the
-    // initial frame. However we allocate a free monitor so we don't have to shuffle the expression stack
-    // immediately.
-
-    // synchronize method
-    const Address access_flags      (rbx, Method::access_flags_offset());
-    const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
-    Label not_synced;
-
-    __ movl(rax, access_flags);
-    __ testl(rax, JVM_ACC_SYNCHRONIZED);
-    __ jcc(Assembler::zero, not_synced);
-
-    // Allocate initial monitor and pre initialize it
-    // get synchronization object
-
-    Label done;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ movl(rax, access_flags);
-    __ testl(rax, JVM_ACC_STATIC);
-    __ movptr(rax, Address(locals, 0));                   // get receiver (assume this is frequent case)
-    __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, Method::const_offset()));
-    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
-    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(rax, Address(rax, mirror_offset));
-    __ bind(done);
-    // add space for monitor & lock
-    __ subptr(rsp, entry_size);                                           // add space for a monitor entry
-    __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
-    __ bind(not_synced);
-  }
-
-  __ movptr(STATE(_stack_base), rsp);                                     // set expression stack base ( == &monitors[-count])
-  if (native) {
-    __ movptr(STATE(_stack), rsp);                                        // set current expression stack tos
-    __ movptr(STATE(_stack_limit), rsp);
-  } else {
-    __ subptr(rsp, wordSize);                                             // pre-push stack
-    __ movptr(STATE(_stack), rsp);                                        // set current expression stack tos
-
-    // compute full expression stack limit
-
-    __ movptr(rdx, Address(rbx, Method::const_offset()));
-    __ load_unsigned_short(rdx, Address(rdx, ConstMethod::max_stack_offset())); // get size of expression stack in words
-    __ negptr(rdx);                                                       // so we can subtract in next step
-    // Allocate expression stack
-    __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -Method::extra_stack_words()));
-    __ movptr(STATE(_stack_limit), rsp);
-  }
-
-#ifdef _LP64
-  // Make sure stack is properly aligned and sized for the abi
-  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
-#endif // _LP64
-
-
-
-}
-
-// Helpers for commoning out cases in the various type of method entries.
-//
-
-// increment invocation count & check for overflow
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test
-//
-// rbx,: method
-// rcx: invocation counter
-//
-void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-  Label done;
-  const Address invocation_counter(rax,
-                MethodCounters::invocation_counter_offset() +
-                InvocationCounter::counter_offset());
-  const Address backedge_counter  (rax,
-                MethodCounters::backedge_counter_offset() +
-                InvocationCounter::counter_offset());
-
-  __ get_method_counters(rbx, rax, done);
-
-  if (ProfileInterpreter) {
-    __ incrementl(Address(rax,
-            MethodCounters::interpreter_invocation_counter_offset()));
-  }
-  // Update standard invocation counters
-  __ movl(rcx, invocation_counter);
-  __ increment(rcx, InvocationCounter::count_increment);
-  __ movl(invocation_counter, rcx);             // save invocation count
-
-  __ movl(rax, backedge_counter);               // load backedge counter
-  __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
-
-  __ addl(rcx, rax);                            // add both counters
-
-  // profile_method is non-null only for interpreted method so
-  // profile_method != NULL == !native_call
-  // BytecodeInterpreter only calls for native so code is elided.
-
-  __ cmp32(rcx,
-           ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
-  __ jcc(Assembler::aboveEqual, *overflow);
-  __ bind(done);
-}
-
-void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
-
-  // C++ interpreter on entry
-  // rsi/r13 - new interpreter state pointer
-  // rbp - interpreter frame pointer
-  // rbx - method
-
-  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
-  // rbx, - method
-  // rcx - rcvr (assuming there is one)
-  // top of stack return address of interpreter caller
-  // rsp - sender_sp
-
-  // C++ interpreter only
-  // rsi/r13 - previous interpreter state pointer
-
-  // InterpreterRuntime::frequency_counter_overflow takes one argument
-  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
-  // The call returns the address of the verified entry point for the method or NULL
-  // if the compilation did not complete (either went background or bailed out).
-  __ movptr(rax, (int32_t)false);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
-
-  // for c++ interpreter can rsi really be munged?
-  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));                               // restore state
-  __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method)));            // restore method
-  __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals)));            // get locals pointer
-
-  __ jmp(*do_continue, relocInfo::none);
-
-}
-
-void InterpreterGenerator::generate_stack_overflow_check(void) {
-  // see if we've got enough room on the stack for locals plus overhead.
-  // the expression stack grows down incrementally, so the normal guard
-  // page mechanism will work for that.
-  //
-  // Registers live on entry:
-  //
-  // Asm interpreter
-  // rdx: number of additional locals this frame needs (what we must check)
-  // rbx,: Method*
-
-  // C++ Interpreter
-  // rsi/r13: previous interpreter frame state object
-  // rdi: &locals[0]
-  // rcx: # of locals
-  // rdx: number of additional locals this frame needs (what we must check)
-  // rbx: Method*
-
-  // destroyed on exit
-  // rax,
-
-  // NOTE:  the additional locals are also always pushed (wasn't obvious in
-  // generate_method_entry) so the guard should work for them too.
-  //
-
-  const int entry_size    = frame::interpreter_frame_monitor_size() * wordSize;
-
-  // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
-  // be sure to change this if you add/subtract anything to/from the overhead area
-  const int overhead_size = (int)sizeof(BytecodeInterpreter);
-
-  const int page_size = os::vm_page_size();
-
-  Label after_frame_check;
-
-  // compute rsp as if this were going to be the last frame on
-  // the stack before the red zone
-
-  Label after_frame_check_pop;
-
-  // save rsi == caller's bytecode ptr (c++ previous interp. state)
-  // QQQ problem here?? rsi overload????
-  __ push(state);
-
-  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rsi);
-
-  NOT_LP64(__ get_thread(thread));
-
-  const Address stack_base(thread, Thread::stack_base_offset());
-  const Address stack_size(thread, Thread::stack_size_offset());
-
-  // locals + overhead, in bytes
-  // Always give one monitor to allow us to start interp if sync method.
-  // Any additional monitors need a check when moving the expression stack
-  const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
-  __ movptr(rax, Address(rbx, Method::const_offset()));
-  __ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words
-  __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor+Method::extra_stack_words()));
-  __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
-
-#ifdef ASSERT
-  Label stack_base_okay, stack_size_okay;
-  // verify that thread stack base is non-zero
-  __ cmpptr(stack_base, (int32_t)0);
-  __ jcc(Assembler::notEqual, stack_base_okay);
-  __ stop("stack base is zero");
-  __ bind(stack_base_okay);
-  // verify that thread stack size is non-zero
-  __ cmpptr(stack_size, (int32_t)0);
-  __ jcc(Assembler::notEqual, stack_size_okay);
-  __ stop("stack size is zero");
-  __ bind(stack_size_okay);
-#endif
-
-  // Add stack base to locals and subtract stack size
-  __ addptr(rax, stack_base);
-  __ subptr(rax, stack_size);
-
-  // We should have a magic number here for the size of the c++ interpreter frame.
-  // We can't actually tell this ahead of time. The debug version size is around 3k,
-  // product is 1k and fastdebug is 4k.
-  const int slop = 6 * K;
-
-  // Use the maximum number of pages we might bang.
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
-                                                                              (StackRedPages+StackYellowPages);
-  // Only need this if we are stack banging which is temporary while
-  // we're debugging.
-  __ addptr(rax, slop + 2*max_pages * page_size);
-
-  // check against the current stack bottom
-  __ cmpptr(rsp, rax);
-  __ jcc(Assembler::above, after_frame_check_pop);
-
-  __ pop(state);  //  get c++ prev state.
-
-     // throw exception return address becomes throwing pc
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
-
-  // all done with frame size check
-  __ bind(after_frame_check_pop);
-  __ pop(state);
-
-  __ bind(after_frame_check);
-}
-
-// Find preallocated  monitor and lock method (C++ interpreter)
-// rbx - Method*
-//
-void CppInterpreterGenerator::lock_method() {
-  // assumes state == rsi/r13 == pointer to current interpreterState
-  // minimally destroys rax, rdx|c_rarg1, rdi
-  //
-  // synchronize method
-  const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
-  const Address access_flags      (rbx, Method::access_flags_offset());
-
-  const Register monitor  = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
-
-  // find initial monitor i.e. monitors[-1]
-  __ movptr(monitor, STATE(_monitor_base));                                   // get monitor bottom limit
-  __ subptr(monitor, entry_size);                                             // point to initial monitor
-
-#ifdef ASSERT
-  { Label L;
-    __ movl(rax, access_flags);
-    __ testl(rax, JVM_ACC_SYNCHRONIZED);
-    __ jcc(Assembler::notZero, L);
-    __ stop("method doesn't need synchronization");
-    __ bind(L);
-  }
-#endif // ASSERT
-  // get synchronization object
-  { Label done;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ movl(rax, access_flags);
-    __ movptr(rdi, STATE(_locals));                                     // prepare to get receiver (assume common case)
-    __ testl(rax, JVM_ACC_STATIC);
-    __ movptr(rax, Address(rdi, 0));                                    // get receiver (assume this is frequent case)
-    __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, Method::const_offset()));
-    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
-    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(rax, Address(rax, mirror_offset));
-    __ bind(done);
-  }
-#ifdef ASSERT
-  { Label L;
-    __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));   // correct object?
-    __ jcc(Assembler::equal, L);
-    __ stop("wrong synchronization lobject");
-    __ bind(L);
-  }
-#endif // ASSERT
-  // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi!
-  __ lock_object(monitor);
-}
-
-address InterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  if (UseG1GC) {
-    // We need to have a routine that generates code to:
-    //   * load the value in the referent field
-    //   * passes that value to the pre-barrier.
-    //
-    // In the case of G1 this will record the value of the
-    // referent in an SATB buffer if marking is active.
-    // This will cause concurrent marking to mark the referent
-    // field as live.
-    Unimplemented();
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the accessor entry point
-  // Reference.get is an accessor
-  return NULL;
-}
-
-//
-// C++ Interpreter stub for calling a native method.
-// This sets up a somewhat different looking stack for calling the native method
-// than the typical interpreter frame setup but still has the pointer to
-// an interpreter state.
-//
-
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls;
-
-  // rbx: Method*
-  // rcx: receiver (unused)
-  // rsi/r13: previous interpreter state (if called from C++ interpreter) must preserve
-  //      in any case. If called via c1/c2/call_stub rsi/r13 is junk (to use) but harmless
-  //      to save/restore.
-  address entry_point = __ pc();
-
-  const Address access_flags      (rbx, Method::access_flags_offset());
-
-  // rsi/r13 == state/locals rdi == prevstate
-  const Register locals = rdi;
-
-  // get parameter size (always needed)
-  {
-    const Address constMethod       (rbx, Method::const_offset());
-    const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
-    __ movptr(rcx, constMethod);
-    __ load_unsigned_short(rcx, size_of_parameters);
-  }
-
-  // rbx: Method*
-  // rcx: size of parameters
-  __ pop(rax);                                       // get return address
-  // for natives the size of locals is zero
-
-  // compute beginning of parameters /locals
-
-  __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
-
-  // initialize fixed part of activation frame
-
-  // Assumes rax = return address
-
-  // allocate and initialize new interpreterState and method expression stack
-  // IN(locals) ->  locals
-  // IN(state) -> previous frame manager state (NULL from stub/c1/c2)
-  // destroys rax, rcx, rdx
-  // OUT (state) -> new interpreterState
-  // OUT(rsp) -> bottom of methods expression stack
-
-  // save sender_sp
-  __ mov(rcx, sender_sp_on_entry);
-  // start with NULL previous state
-  __ movptr(state, (int32_t)NULL_WORD);
-  generate_compute_interpreter_state(state, locals, rcx, true);
-
-#ifdef ASSERT
-  { Label L;
-    __ movptr(rax, STATE(_stack_base));
-#ifdef _LP64
-    // duplicate the alignment rsp got after setting stack_base
-    __ subptr(rax, frame::arg_reg_save_area_bytes); // windows
-    __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
-#endif // _LP64
-    __ cmpptr(rax, rsp);
-    __ jcc(Assembler::equal, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax);
-  NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of synchronized methods which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation will
-  // check this flag.
-
-  const Address do_not_unlock_if_synchronized(unlock_thread,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ movbool(do_not_unlock_if_synchronized, true);
-
-  // make sure method is native & not abstract
-#ifdef ASSERT
-  __ movl(rax, access_flags);
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_NATIVE);
-    __ jcc(Assembler::notZero, L);
-    __ stop("tried to execute non-native method as native");
-    __ bind(L);
-  }
-  { Label L;
-    __ testl(rax, JVM_ACC_ABSTRACT);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-  }
-
-  Label continue_after_compile;
-
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(true);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  NOT_LP64(__ movl(rax, STATE(_thread));)                       // get thread
-  __ movbool(do_not_unlock_if_synchronized, false);
-
-
-  // check for synchronized native methods
-  //
-  // Note: This must happen *after* invocation counter check, since
-  //       when overflow happens, the method should not be locked.
-  if (synchronized) {
-    // potentially kills rax, rcx, rdx, rdi
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-      { Label L;
-        __ movl(rax, access_flags);
-        __ testl(rax, JVM_ACC_SYNCHRONIZED);
-        __ jcc(Assembler::zero, L);
-        __ stop("method needs synchronization");
-        __ bind(L);
-      }
-#endif
-  }
-
-  // start execution
-
-  // jvmti support
-  __ notify_method_entry();
-
-  // work registers
-  const Register method = rbx;
-  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
-  const Register t      = InterpreterRuntime::SignatureHandlerGenerator::temp();    // rcx|rscratch1
-
- // allocate space for parameters
-  __ movptr(method, STATE(_method));
-  __ verify_method_ptr(method);
-  {
-    const Address constMethod       (method, Method::const_offset());
-    const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
-    __ movptr(t, constMethod);
-    __ load_unsigned_short(t, size_of_parameters);
-  }
-  __ shll(t, 2);
-#ifdef _LP64
-  __ subptr(rsp, t);
-  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
-#else
-  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
-  __ subptr(rsp, t);
-  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
-#endif // _LP64
-
-  // get signature handler
-    Label pending_exception_present;
-
-  { Label L;
-    __ movptr(t, Address(method, Method::signature_handler_offset()));
-    __ testptr(t, t);
-    __ jcc(Assembler::notZero, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false);
-    __ movptr(method, STATE(_method));
-    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
-    __ jcc(Assembler::notEqual, pending_exception_present);
-    __ verify_method_ptr(method);
-    __ movptr(t, Address(method, Method::signature_handler_offset()));
-    __ bind(L);
-  }
-#ifdef ASSERT
-  {
-    Label L;
-    __ push(t);
-    __ get_thread(t);                                   // get vm's javathread*
-    __ cmpptr(t, STATE(_thread));
-    __ jcc(Assembler::equal, L);
-    __ int3();
-    __ bind(L);
-    __ pop(t);
-  }
-#endif //
-
-  const Register from_ptr = InterpreterRuntime::SignatureHandlerGenerator::from();
-  // call signature handler
-  assert(InterpreterRuntime::SignatureHandlerGenerator::to  () == rsp, "adjust this code");
-
-  // The generated handlers do not touch RBX (the method oop).
-  // However, large signatures cannot be cached and are generated
-  // each time here.  The slow-path generator will blow RBX
-  // sometime, so we must reload it after the call.
-  __ movptr(from_ptr, STATE(_locals));  // get the from pointer
-  __ call(t);
-  __ movptr(method, STATE(_method));
-  __ verify_method_ptr(method);
-
-  // result handler is in rax
-  // set result handler
-  __ movptr(STATE(_result_handler), rax);
-
-
-  // get native function entry point
-  { Label L;
-    __ movptr(rax, Address(method, Method::native_function_offset()));
-    __ testptr(rax, rax);
-    __ jcc(Assembler::notZero, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
-    __ movptr(method, STATE(_method));
-    __ verify_method_ptr(method);
-    __ movptr(rax, Address(method, Method::native_function_offset()));
-    __ bind(L);
-  }
-
-  // pass mirror handle if static call
-  { Label L;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ movl(t, Address(method, Method::access_flags_offset()));
-    __ testl(t, JVM_ACC_STATIC);
-    __ jcc(Assembler::zero, L);
-    // get mirror
-    __ movptr(t, Address(method, Method:: const_offset()));
-    __ movptr(t, Address(t, ConstMethod::constants_offset()));
-    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(t, Address(t, mirror_offset));
-    // copy mirror into activation object
-    __ movptr(STATE(_oop_temp), t);
-    // pass handle to mirror
-#ifdef _LP64
-    __ lea(c_rarg1, STATE(_oop_temp));
-#else
-    __ lea(t, STATE(_oop_temp));
-    __ movptr(Address(rsp, wordSize), t);
-#endif // _LP64
-    __ bind(L);
-  }
-#ifdef ASSERT
-  {
-    Label L;
-    __ push(t);
-    __ get_thread(t);                                   // get vm's javathread*
-    __ cmpptr(t, STATE(_thread));
-    __ jcc(Assembler::equal, L);
-    __ int3();
-    __ bind(L);
-    __ pop(t);
-  }
-#endif //
-
-  // pass JNIEnv
-#ifdef _LP64
-  __ lea(c_rarg0, Address(thread, JavaThread::jni_environment_offset()));
-#else
-  __ movptr(thread, STATE(_thread));          // get thread
-  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
-
-  __ movptr(Address(rsp, 0), t);
-#endif // _LP64
-
-#ifdef ASSERT
-  {
-    Label L;
-    __ push(t);
-    __ get_thread(t);                                   // get vm's javathread*
-    __ cmpptr(t, STATE(_thread));
-    __ jcc(Assembler::equal, L);
-    __ int3();
-    __ bind(L);
-    __ pop(t);
-  }
-#endif //
-
-#ifdef ASSERT
-  { Label L;
-    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
-    __ cmpl(t, _thread_in_Java);
-    __ jcc(Assembler::equal, L);
-    __ stop("Wrong thread state in native stub");
-    __ bind(L);
-  }
-#endif
-
-  // Change state to native (we save the return address in the thread, since it might not
-  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
-  // points into the right code segment. It does not have to be the correct return pc.
-
-  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
-
-  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
-
-  __ call(rax);
-
-  // result potentially in rdx:rax or ST0
-  __ movptr(method, STATE(_method));
-  NOT_LP64(__ movptr(thread, STATE(_thread));)                  // get thread
-
-  // The potential result is in ST(0) & rdx:rax
-  // With C++ interpreter we leave any possible result in ST(0) until we are in result handler and then
-  // we do the appropriate stuff for returning the result. rdx:rax must always be saved because just about
-  // anything we do here will destroy it, st(0) is only saved if we re-enter the vm where it would
-  // be destroyed.
-  // It is safe to do these pushes because state is _thread_in_native and return address will be found
-  // via _last_native_pc and not via _last_java_sp
-
-    // Must save the value of ST(0)/xmm0 since it could be destroyed before we get to result handler
-    { Label Lpush, Lskip;
-      ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
-      ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
-      __ cmpptr(STATE(_result_handler), float_handler.addr());
-      __ jcc(Assembler::equal, Lpush);
-      __ cmpptr(STATE(_result_handler), double_handler.addr());
-      __ jcc(Assembler::notEqual, Lskip);
-      __ bind(Lpush);
-      __ subptr(rsp, 2*wordSize);
-      if ( UseSSE < 2 ) {
-        __ fstp_d(Address(rsp, 0));
-      } else {
-        __ movdbl(Address(rsp, 0), xmm0);
-      }
-      __ bind(Lskip);
-    }
-
-  // save rax:rdx for potential use by result handler.
-  __ push(rax);
-#ifndef _LP64
-  __ push(rdx);
-#endif // _LP64
-
-  // Verify or restore cpu control state after JNI call
-  __ restore_cpu_control_state_after_jni();
-
-  // change thread state
-  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
-  if(os::is_MP()) {
-    // Write serialization page so VM thread can do a pseudo remote membar.
-    // We use the current thread pointer to calculate a thread specific
-    // offset to write to within the page. This minimizes bus traffic
-    // due to cache line collision.
-    __ serialize_memory(thread, rcx);
-  }
-
-  // check for safepoint operation in progress and/or pending suspend requests
-  { Label Continue;
-
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-
-    // Threads running native code are expected to self-suspend
-    // when leaving the _thread_in_native state. We need to check for
-    // pending suspend requests here.
-    Label L;
-    __ jcc(Assembler::notEqual, L);
-    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
-    __ jcc(Assembler::equal, Continue);
-    __ bind(L);
-
-    // Don't use call_VM as it will see a possible pending exception and forward it
-    // and never return here preventing us from clearing _last_native_pc down below.
-    // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
-    // preserved and correspond to the bcp/locals pointers.
-    //
-
-    ((MacroAssembler*)_masm)->call_VM_leaf(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
-                          thread);
-    __ increment(rsp, wordSize);
-
-    __ movptr(method, STATE(_method));
-    __ verify_method_ptr(method);
-    __ movptr(thread, STATE(_thread));                       // get thread
-
-    __ bind(Continue);
-  }
-
-  // change thread state
-  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
-
-  __ reset_last_Java_frame(thread, true, true);
-
-  // reset handle block
-  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
-  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
-
-  // If result was an oop then unbox and save it in the frame
-  { Label L;
-    Label no_oop, store_result;
-      ExternalAddress oop_handler(AbstractInterpreter::result_handler(T_OBJECT));
-    __ cmpptr(STATE(_result_handler), oop_handler.addr());
-    __ jcc(Assembler::notEqual, no_oop);
-#ifndef _LP64
-    __ pop(rdx);
-#endif // _LP64
-    __ pop(rax);
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, store_result);
-    // unbox
-    __ movptr(rax, Address(rax, 0));
-    __ bind(store_result);
-    __ movptr(STATE(_oop_temp), rax);
-    // keep stack depth as expected by pushing oop which will eventually be discarded
-    __ push(rax);
-#ifndef _LP64
-    __ push(rdx);
-#endif // _LP64
-    __ bind(no_oop);
-  }
-
-  {
-     Label no_reguard;
-     __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
-     __ jcc(Assembler::notEqual, no_reguard);
-
-     __ pusha();
-     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
-     __ popa();
-
-     __ bind(no_reguard);
-   }
-
-
-  // QQQ Seems like for native methods we simply return and the caller will see the pending
-  // exception and do the right thing. Certainly the interpreter will; don't know about
-  // compiled methods.
-  // Seems that the answer to the above is no, this is wrong. The old code would see the exception
-  // and forward it before doing the unlocking and notifying jvmti that the method has exited.
-  // This seems wrong; need to investigate the spec.
-
-  // handle exceptions (exception handling will handle unlocking!)
-  { Label L;
-    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
-    __ jcc(Assembler::zero, L);
-    __ bind(pending_exception_present);
-
-    // There are potential results on the stack (rax/rdx, ST(0)); we ignore these and simply
-    // return and let the caller deal with the exception. This skips the unlocking here, which
-    // seems wrong but seems to be what the asm interpreter did. Can't find this in the spec.
-    // Note: must preserve method in rbx
-    //
-
-    // remove activation
-
-    __ movptr(t, STATE(_sender_sp));
-    __ leave();                                  // remove frame anchor
-    __ pop(rdi);                                 // get return address
-    __ movptr(state, STATE(_prev_link));         // get previous state for return
-    __ mov(rsp, t);                              // set sp to sender sp
-    __ push(rdi);                                // push throwing pc
-    // This skips unlocking!! This seems to be what the asm interpreter does but seems
-    // very wrong. Not clear if this violates the spec.
-    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-    __ bind(L);
-  }
-
-  // do unlocking if necessary
-  { Label L;
-    __ movl(t, Address(method, Method::access_flags_offset()));
-    __ testl(t, JVM_ACC_SYNCHRONIZED);
-    __ jcc(Assembler::zero, L);
-    // the code below should be shared with interpreter macro assembler implementation
-    { Label unlock;
-    const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
-      // BasicObjectLock will be first in list, since this is a synchronized method. However, need
-      // to check that the object has not been unlocked by an explicit monitorexit bytecode.
-      __ movptr(monitor, STATE(_monitor_base));
-      __ subptr(monitor, frame::interpreter_frame_monitor_size() * wordSize);  // address of initial monitor
-
-      __ movptr(t, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));
-      __ testptr(t, t);
-      __ jcc(Assembler::notZero, unlock);
-
-      // Entry already unlocked, need to throw exception
-      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
-      __ should_not_reach_here();
-
-      __ bind(unlock);
-      __ unlock_object(monitor);
-      // unlock can blow rbx so restore it for path that needs it below
-      __ movptr(method, STATE(_method));
-    }
-    __ bind(L);
-  }
-
-  // jvmti support
-  // Note: This must happen _after_ handling/throwing any exceptions since
-  //       the exception handler code notifies the runtime of method exits
-  //       too. If this happens before, method entry/exit notifications are
-  //       not properly paired (was bug - gri 11/22/99).
-  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
-
-  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
-#ifndef _LP64
-  __ pop(rdx);
-#endif // _LP64
-  __ pop(rax);
-  __ movptr(t, STATE(_result_handler));       // get result handler
-  __ call(t);                                 // call result handler to convert to tosca form
-
-  // remove activation
-
-  __ movptr(t, STATE(_sender_sp));
-
-  __ leave();                                  // remove frame anchor
-  __ pop(rdi);                                 // get return address
-  __ movptr(state, STATE(_prev_link));         // get previous state for return (if c++ interpreter was caller)
-  __ mov(rsp, t);                              // set sp to sender sp
-  __ jmp(rdi);
-
-  // invocation counter overflow
-  if (inc_counter) {
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
-
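For reference, the sequence of thread-state transitions the native entry above performs around the call (thread_in_Java -> thread_in_native -> thread_in_native_trans -> thread_in_Java, with a safepoint/suspend check on the way back) can be modeled roughly like this. This is a simplified sketch under assumed names; the Thread type and the blocking helper are illustrative stand-ins, not the HotSpot definitions.

    #include <atomic>
    #include <cassert>

    enum ThreadState { thread_in_Java, thread_in_native, thread_in_native_trans };

    struct Thread {                               // hypothetical stand-in for JavaThread
      std::atomic<ThreadState> state{thread_in_Java};
      std::atomic<bool>        suspend_requested{false};
    };

    std::atomic<bool> safepoint_in_progress{false};

    // Stand-in for JavaThread::check_special_condition_for_native_trans(): in the VM
    // this blocks until the safepoint or suspend request has been processed.
    static void block_for_safepoint_or_suspend(Thread&) {}

    static long call_native(Thread& t, long (*fn)()) {
      assert(t.state.load() == thread_in_Java);
      t.state = thread_in_native;          // VM may safepoint without stopping this thread
      long result = fn();                  // the actual native call
      t.state = thread_in_native_trans;    // must re-check before running Java code again
      if (safepoint_in_progress.load() || t.suspend_requested.load())
        block_for_safepoint_or_suspend(t);
      t.state = thread_in_Java;
      return result;
    }

    static long fake_native() { return 42; }
    int main() { Thread t; return call_native(t, &fake_native) == 42 ? 0 : 1; }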
-// Generate entries that will put a result type index into rcx
-void CppInterpreterGenerator::generate_deopt_handling() {
-
-  Label return_from_deopt_common;
-
-  // Generate entries that will put a result type index into rcx
-  // deopt needs to jump to here to enter the interpreter (return a result)
-  deopt_frame_manager_return_atos  = __ pc();
-
-  // rax is live here
-  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_OBJECT));    // Result stub address array index
-  __ jmp(return_from_deopt_common);
-
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-  deopt_frame_manager_return_btos  = __ pc();
-
-  // rax is live here
-  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_BOOLEAN));    // Result stub address array index
-  __ jmp(return_from_deopt_common);
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-  deopt_frame_manager_return_itos  = __ pc();
-
-  // rax is live here
-  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_INT));    // Result stub address array index
-  __ jmp(return_from_deopt_common);
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-
-  deopt_frame_manager_return_ltos  = __ pc();
-  // rax,rdx are live here
-  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_LONG));    // Result stub address array index
-  __ jmp(return_from_deopt_common);
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-
-  deopt_frame_manager_return_ftos  = __ pc();
-  // st(0) is live here
-  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT));    // Result stub address array index
-  __ jmp(return_from_deopt_common);
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-  deopt_frame_manager_return_dtos  = __ pc();
-
-  // st(0) is live here
-  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE));    // Result stub address array index
-  __ jmp(return_from_deopt_common);
-
-  // deopt needs to jump to here to enter the interpreter (return a result)
-  deopt_frame_manager_return_vtos  = __ pc();
-
-  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_VOID));
-
-  // Deopt return common
-  // an index is present in rcx that lets us move any possible result being
-  // return to the interpreter's stack
-  //
-  // Because we have a full sized interpreter frame on the youngest
-  // activation the stack is pushed too deep to share the tosca to
-  // stack converters directly. We shrink the stack to the desired
-  // amount and then push result and then re-extend the stack.
-  // We could have the code in size_activation layout a short
-  // frame for the top activation but that would look different
-  // than say sparc (which needs a full size activation because
-  // the windows are in the way). Really it could be short? QQQ
-  //
-  __ bind(return_from_deopt_common);
-
-  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
-
-  // setup rsp so we can push the "result" as needed.
-  __ movptr(rsp, STATE(_stack));                                   // trim stack (is prepushed)
-  __ addptr(rsp, wordSize);                                        // undo prepush
-
-  ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
-  // Address index(noreg, rcx, Address::times_ptr);
-  __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
-  // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
-  __ call(rcx);                                                   // call result converter
-
-  __ movl(STATE(_msg), (int)BytecodeInterpreter::deopt_resume);
-  __ lea(rsp, Address(rsp, -wordSize));                            // prepush stack (result if any already present)
-  __ movptr(STATE(_stack), rsp);                                   // inform interpreter of new stack depth (parameters removed,
-                                                                   // result if any on stack already)
-  __ movptr(rsp, STATE(_stack_limit));                             // restore expression stack to full depth
-}
-
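The deopt return points above all funnel into the same tail: load a BasicType index into rcx and call through the CppInterpreter::_tosca_to_stack table to push the native-ABI result onto the expression stack. A rough, hypothetical model of that table dispatch (the function names and index enum below are invented for illustration):

    #include <cstdio>

    enum BasicTypeIndex { IDX_BOOLEAN, IDX_INT, IDX_LONG, IDX_FLOAT,
                          IDX_DOUBLE, IDX_OBJECT, IDX_VOID, NUM_IDX };

    typedef void (*converter_fn)(void);   // stand-in for the generated converter stubs

    static void push_int(void)    { std::puts("push 1 stack slot from rax"); }
    static void push_long(void)   { std::puts("push 2 stack slots from rdx:rax"); }
    static void push_float(void)  { std::puts("push 1 stack slot from ST(0)/xmm0"); }
    static void push_double(void) { std::puts("push 2 stack slots from ST(0)/xmm0"); }
    static void push_obj(void)    { std::puts("push 1 stack slot (oop) from rax"); }
    static void push_none(void)   { std::puts("push nothing"); }

    // Analogue of the _tosca_to_stack array, indexed by the value loaded into rcx.
    static converter_fn tosca_to_stack[NUM_IDX] = {
      push_int, push_int, push_long, push_float, push_double, push_obj, push_none
    };

    int main() {
      int rcx = IDX_DOUBLE;           // set by one of the deopt_frame_manager_return_* stubs
      tosca_to_stack[rcx]();          // corresponds to __ call(rcx) after the ArrayAddress load
      return 0;
    }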
-// Generate the code to handle a more_monitors message from the c++ interpreter
-void CppInterpreterGenerator::generate_more_monitors() {
-
-
-  Label entry, loop;
-  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
-  // 1. compute new pointers                     // rsp: old expression stack top
-  __ movptr(rdx, STATE(_stack_base));            // rdx: old expression stack bottom
-  __ subptr(rsp, entry_size);                    // move expression stack top limit
-  __ subptr(STATE(_stack), entry_size);          // update interpreter stack top
-  __ subptr(STATE(_stack_limit), entry_size);    // inform interpreter
-  __ subptr(rdx, entry_size);                    // move expression stack bottom
-  __ movptr(STATE(_stack_base), rdx);            // inform interpreter
-  __ movptr(rcx, STATE(_stack));                 // set start value for copy loop
-  __ jmp(entry);
-  // 2. move expression stack contents
-  __ bind(loop);
-  __ movptr(rbx, Address(rcx, entry_size));      // load expression stack word from old location
-  __ movptr(Address(rcx, 0), rbx);               // and store it at new location
-  __ addptr(rcx, wordSize);                      // advance to next word
-  __ bind(entry);
-  __ cmpptr(rcx, rdx);                           // check if bottom reached
-  __ jcc(Assembler::notEqual, loop);             // if not at bottom then copy next word
-  // now zero the slot so we can find it.
-  __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
-  __ movl(STATE(_msg), (int)BytecodeInterpreter::got_monitors);
-}
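The monitor allocation above is just a downward shuffle of the live expression stack by one BasicObjectLock, leaving a zeroed obj slot where the interpreter can find the new monitor. A small self-contained sketch of the same shuffle on a plain array (the slot counts and word values are made up):

    #include <cstdio>

    int main() {
      // Memory model: higher index == higher address. The expression stack grows downward.
      unsigned long mem[16] = {0};
      const int entry_size = 2;           // one BasicObjectLock, in words (illustrative)

      int stack_base = 12;                // old expression-stack bottom
      int stack_top  = 9;                 // prepushed top-of-stack slot
      mem[10] = 0x11; mem[11] = 0x22;     // two live expression-stack words

      // 1. compute new pointers: top, limit and base all move down by entry_size
      stack_top -= entry_size;
      int new_base = stack_base - entry_size;

      // 2. move expression-stack contents down, word by word, from top toward base
      for (int i = stack_top + 1; i < new_base; i++)
        mem[i] = mem[i + entry_size];

      // 3. zero the obj slot of the freed monitor so the interpreter can find it
      mem[new_base] = 0;

      std::printf("moved words: %lx %lx, new monitor obj slot: %lx\n",
                  mem[8], mem[9], mem[new_base]);
      return 0;
    }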
-
-
-// Initial entry to C++ interpreter from the call_stub.
-// This entry point is called the frame manager since it handles the generation
-// of interpreter activation frames via requests directly from the vm (via call_stub)
-// and via requests from the interpreter. The requests from the call_stub happen
-// directly thru the entry point. Requests from the interpreter happen via returning
-// from the interpreter and examining the message the interpreter has returned to
-// the frame manager. The frame manager can take the following requests:
-
-// NO_REQUEST - error, should never happen.
-// MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and
-//                 allocate a new monitor.
-// CALL_METHOD - setup a new activation to call a new method. Very similar to what
-//               happens during entry via the call stub.
-// RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub.
-//
-// Arguments:
-//
-// rbx: Method*
-// rcx: receiver - unused (retrieved from stack as needed)
-// rsi/r13: previous frame manager state (NULL from the call_stub/c1/c2)
-//
-//
-// Stack layout at entry
-//
-// [ return address     ] <--- rsp
-// [ parameter n        ]
-//   ...
-// [ parameter 1        ]
-// [ expression stack   ]
-//
-//
-// We are free to blow any registers we like because the call_stub which brought us here
-// initially has preserved the callee save registers already.
-//
-//
-
-static address interpreter_frame_manager = NULL;
-
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
-
-  // rbx: Method*
-  // rsi/r13: sender sp
-
-  // Because we redispatch "recursive" interpreter entries thru this same entry point
-  // the "input" register usage is a little strange and not what you expect coming
-  // from the call_stub. From the call stub rsi/rdi (current/previous) interpreter
-  // state are NULL but on "recursive" dispatches they are what you'd expect.
-  // rsi: current interpreter state (C++ interpreter) must preserve (null from call_stub/c1/c2)
-
-
-  // A single frame manager is plenty as we don't specialize for synchronized. We could and
-  // the code is pretty much ready. Would need to change the test below and for good measure
-  // modify generate_interpreter_state to only do the (pre) sync stuff for synchronized
-  // routines. Not clear this is worth it yet.
-
-  if (interpreter_frame_manager) return interpreter_frame_manager;
-
-  address entry_point = __ pc();
-
-  Label dispatch_entry_2;
-  __ movptr(rcx, sender_sp_on_entry);
-  __ movptr(state, (int32_t)NULL_WORD);                              // no current activation
-
-  __ jmp(dispatch_entry_2);
-
-  const Register locals  = rdi;
-
-  Label re_dispatch;
-
-  __ bind(re_dispatch);
-
-  // save sender sp (doesn't include return address)
-  __ lea(rcx, Address(rsp, wordSize));
-
-  __ bind(dispatch_entry_2);
-
-  // save sender sp
-  __ push(rcx);
-
-  const Address constMethod       (rbx, Method::const_offset());
-  const Address access_flags      (rbx, Method::access_flags_offset());
-  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
-  const Address size_of_locals    (rdx, ConstMethod::size_of_locals_offset());
-
-  // const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
-  // const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset        * wordSize);
-  // const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
-
-  // get parameter size (always needed)
-  __ movptr(rdx, constMethod);
-  __ load_unsigned_short(rcx, size_of_parameters);
-
-  // rbx: Method*
-  // rcx: size of parameters
-  __ load_unsigned_short(rdx, size_of_locals);                     // get size of locals in words
-
-  __ subptr(rdx, rcx);                                             // rdx = no. of additional locals
-
-  // see if we've got enough room on the stack for locals plus overhead.
-  generate_stack_overflow_check();                                 // C++
-
-  // c++ interpreter does not use stack banging or any implicit exceptions;
-  // leave for now to verify that check is proper.
-  bang_stack_shadow_pages(false);
-
-
-
-  // compute beginning of parameters (rdi)
-  __ lea(locals, Address(rsp, rcx, Address::times_ptr, wordSize));
-
-  // save sender's sp
-  // __ movl(rcx, rsp);
-
-  // get sender's sp
-  __ pop(rcx);
-
-  // get return address
-  __ pop(rax);
-
-  // rdx - # of additional locals
-  // allocate space for locals
-  // explicitly initialize locals
-  {
-    Label exit, loop;
-    __ testl(rdx, rdx);                               // (32bit ok)
-    __ jcc(Assembler::lessEqual, exit);               // do nothing if rdx <= 0
-    __ bind(loop);
-    __ push((int32_t)NULL_WORD);                      // initialize local variables
-    __ decrement(rdx);                                // until everything initialized
-    __ jcc(Assembler::greater, loop);
-    __ bind(exit);
-  }
-
-
-  // Assumes rax = return address
-
-  // allocate and initialize new interpreterState and method expression stack
-  // IN(locals) ->  locals
-  // IN(state) -> any current interpreter activation
-  // destroys rax, rcx, rdx, rdi
-  // OUT (state) -> new interpreterState
-  // OUT(rsp) -> bottom of methods expression stack
-
-  generate_compute_interpreter_state(state, locals, rcx, false);
-
-  // Call interpreter
-
-  Label call_interpreter;
-  __ bind(call_interpreter);
-
-  // c++ interpreter does not use stack banging or any implicit exceptions;
-  // leave for now to verify that check is proper.
-  bang_stack_shadow_pages(false);
-
-
-  // Call interpreter enter here if message is
-  // set and we know stack size is valid
-
-  Label call_interpreter_2;
-
-  __ bind(call_interpreter_2);
-
-  {
-    const Register thread  = NOT_LP64(rcx) LP64_ONLY(r15_thread);
-
-#ifdef _LP64
-    __ mov(c_rarg0, state);
-#else
-    __ push(state);                                                 // push arg to interpreter
-    __ movptr(thread, STATE(_thread));
-#endif // _LP64
-
-    // We can setup the frame anchor with everything we want at this point
-    // as we are thread_in_Java and no safepoints can occur until we go to
-    // vm mode. We do have to clear flags on return from vm but that is it
-    //
-    __ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rbp);
-    __ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rsp);
-
-    // Call the interpreter
-
-    RuntimeAddress normal(CAST_FROM_FN_PTR(address, BytecodeInterpreter::run));
-    RuntimeAddress checking(CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks));
-
-    __ call(JvmtiExport::can_post_interpreter_events() ? checking : normal);
-    NOT_LP64(__ pop(rax);)                                          // discard parameter to run
-    //
-    // state is preserved since it is callee saved
-    //
-
-    // reset_last_Java_frame
-
-    NOT_LP64(__ movl(thread, STATE(_thread));)
-    __ reset_last_Java_frame(thread, true, true);
-  }
-
-  // examine msg from interpreter to determine next action
-
-  __ movl(rdx, STATE(_msg));                                       // Get new message
-
-  Label call_method;
-  Label return_from_interpreted_method;
-  Label throw_exception;
-  Label bad_msg;
-  Label do_OSR;
-
-  __ cmpl(rdx, (int32_t)BytecodeInterpreter::call_method);
-  __ jcc(Assembler::equal, call_method);
-  __ cmpl(rdx, (int32_t)BytecodeInterpreter::return_from_method);
-  __ jcc(Assembler::equal, return_from_interpreted_method);
-  __ cmpl(rdx, (int32_t)BytecodeInterpreter::do_osr);
-  __ jcc(Assembler::equal, do_OSR);
-  __ cmpl(rdx, (int32_t)BytecodeInterpreter::throwing_exception);
-  __ jcc(Assembler::equal, throw_exception);
-  __ cmpl(rdx, (int32_t)BytecodeInterpreter::more_monitors);
-  __ jcc(Assembler::notEqual, bad_msg);
-
-  // Allocate more monitor space, shuffle expression stack....
-
-  generate_more_monitors();
-
-  __ jmp(call_interpreter);
-
-  // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode)
-  unctrap_frame_manager_entry  = __ pc();
-  //
-  // Load the registers we need.
-  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
-  __ movptr(rsp, STATE(_stack_limit));                             // restore expression stack to full depth
-  __ jmp(call_interpreter_2);
-
-
-
-  //=============================================================================
-  // Returning from a compiled method into a deopted method. The bytecode at the
-  // bcp has completed. The result of the bytecode is in the native abi (the tosca
-  // for the template based interpreter). Any stack space that was used by the
-  // bytecode that has completed has been removed (e.g. parameters for an invoke)
-  // so all that we have to do is place any pending result on the expression stack
-  // and resume execution on the next bytecode.
-
-
-  generate_deopt_handling();
-  __ jmp(call_interpreter);
-
-
-  // The current frame has caught an exception we need to dispatch to the
-  // handler. We can get here because a native interpreter frame caught
-  // an exception, in which case there is no handler and we must rethrow.
-  // If it is a vanilla interpreted frame then we simply drop into the
-  // interpreter and let it do the lookup.
-
-  Interpreter::_rethrow_exception_entry = __ pc();
-  // rax: exception
-  // rdx: return address/pc that threw exception
-
-  Label return_with_exception;
-  Label unwind_and_forward;
-
-  // restore state pointer.
-  __ lea(state, Address(rbp,  -(int)sizeof(BytecodeInterpreter)));
-
-  __ movptr(rbx, STATE(_method));                       // get method
-#ifdef _LP64
-  __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
-#else
-  __ movl(rcx, STATE(_thread));                       // get thread
-
-  // Store exception where the interpreter will expect it
-  __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
-#endif // _LP64
-
-  // is current frame vanilla or native?
-
-  __ movl(rdx, access_flags);
-  __ testl(rdx, JVM_ACC_NATIVE);
-  __ jcc(Assembler::zero, return_with_exception);     // vanilla interpreted frame, handle directly
-
-  // We drop thru to unwind a native interpreted frame with a pending exception.
-  // We jump here for the initial interpreter frame with exception pending.
-  // We unwind the current activation and forward it to our caller.
-
-  __ bind(unwind_and_forward);
-
-  // unwind rbp, return stack to unextended value and re-push return address
-
-  __ movptr(rcx, STATE(_sender_sp));
-  __ leave();
-  __ pop(rdx);
-  __ mov(rsp, rcx);
-  __ push(rdx);
-  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-
-  // Return point from a call which returns a result in the native abi
-  // (c1/c2/jni-native). This result must be processed onto the java
-  // expression stack.
-  //
-  // A pending exception may be present in which case there is no result present
-
-  Label resume_interpreter;
-  Label do_float;
-  Label do_double;
-  Label done_conv;
-
-  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
-  if (UseSSE < 2) {
-    __ lea(state, Address(rbp,  -(int)sizeof(BytecodeInterpreter)));
-    __ movptr(rbx, STATE(_result._to_call._callee));                   // get method just executed
-    __ movl(rcx, Address(rbx, Method::result_index_offset()));
-    __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT));    // Result stub address array index
-    __ jcc(Assembler::equal, do_float);
-    __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE));    // Result stub address array index
-    __ jcc(Assembler::equal, do_double);
-#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
-    __ empty_FPU_stack();
-#endif // COMPILER2
-    __ jmp(done_conv);
-
-    __ bind(do_float);
-#ifdef COMPILER2
-    for (int i = 1; i < 8; i++) {
-      __ ffree(i);
-    }
-#endif // COMPILER2
-    __ jmp(done_conv);
-    __ bind(do_double);
-#ifdef COMPILER2
-    for (int i = 1; i < 8; i++) {
-      __ ffree(i);
-    }
-#endif // COMPILER2
-    __ jmp(done_conv);
-  } else {
-    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
-    __ jmp(done_conv);
-  }
-
-  // Return point to interpreter from compiled/native method
-  InternalAddress return_from_native_method(__ pc());
-
-  __ bind(done_conv);
-
-
-  // Result if any is in tosca. The java expression stack is in the state that the
-  // calling convention left it (i.e. params may or may not be present)
-  // Copy the result from tosca and place it on java expression stack.
-
-  // Restore rsi/r13 as compiled code may not preserve it
-
-  __ lea(state, Address(rbp,  -(int)sizeof(BytecodeInterpreter)));
-
-  // restore stack to what we had when we left (in case i2c extended it)
-
-  __ movptr(rsp, STATE(_stack));
-  __ lea(rsp, Address(rsp, wordSize));
-
-  // If there is a pending exception then we don't really have a result to process
-
-#ifdef _LP64
-  __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
-#else
-  __ movptr(rcx, STATE(_thread));                       // get thread
-  __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
-#endif // _LP64
-  __ jcc(Assembler::notZero, return_with_exception);
-
-  // get method just executed
-  __ movptr(rbx, STATE(_result._to_call._callee));
-
-  // callee left args on top of expression stack, remove them
-  __ movptr(rcx, constMethod);
-  __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
-
-  __ lea(rsp, Address(rsp, rcx, Address::times_ptr));
-
-  __ movl(rcx, Address(rbx, Method::result_index_offset()));
-  ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
-  // Address index(noreg, rax, Address::times_ptr);
-  __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
-  // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
-  __ call(rcx);                                               // call result converter
-  __ jmp(resume_interpreter);
-
-  // An exception is being caught on return to a vanilla interpreter frame.
-  // Empty the stack and resume interpreter
-
-  __ bind(return_with_exception);
-
-  // Exception present, empty stack
-  __ movptr(rsp, STATE(_stack_base));
-  __ jmp(resume_interpreter);
-
-  // Return from an interpreted method: we return a result appropriate to the caller (i.e. "recursive"
-  // interpreter call, or native) and unwind this interpreter activation.
-  // All monitors should be unlocked.
-
-  __ bind(return_from_interpreted_method);
-
-  Label return_to_initial_caller;
-
-  __ movptr(rbx, STATE(_method));                                   // get method just executed
-  __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD);                 // returning from "recursive" interpreter call?
-  __ movl(rax, Address(rbx, Method::result_index_offset())); // get result type index
-  __ jcc(Assembler::equal, return_to_initial_caller);               // back to native code (call_stub/c1/c2)
-
-  // Copy result to callers java stack
-  ExternalAddress stack_to_stack((address)CppInterpreter::_stack_to_stack);
-  // Address index(noreg, rax, Address::times_ptr);
-
-  __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_ptr)));
-  // __ movl(rax, Address(noreg, rax, Address::times_ptr, int(AbstractInterpreter::_stack_to_stack)));
-  __ call(rax);                                                     // call result converter
-
-  Label unwind_recursive_activation;
-  __ bind(unwind_recursive_activation);
-
-  // returning to an interpreter method from a "recursive" interpreter call; the
-  // result converter left rax pointing to the top of the java stack for the method we are returning
-  // to. Now all we must do is unwind the state from the completed call
-
-  __ movptr(state, STATE(_prev_link));                              // unwind state
-  __ leave();                                                       // pop the frame
-  __ mov(rsp, rax);                                                 // unwind stack to remove args
-
-  // Resume the interpreter. The current frame contains the current interpreter
-  // state object.
-  //
-
-  __ bind(resume_interpreter);
-
-  // state == interpreterState object for method we are resuming
-
-  __ movl(STATE(_msg), (int)BytecodeInterpreter::method_resume);
-  __ lea(rsp, Address(rsp, -wordSize));                            // prepush stack (result if any already present)
-  __ movptr(STATE(_stack), rsp);                                   // inform interpreter of new stack depth (parameters removed,
-                                                                   // result if any on stack already)
-  __ movptr(rsp, STATE(_stack_limit));                             // restore expression stack to full depth
-  __ jmp(call_interpreter_2);                                      // No need to bang
-
-  // interpreter returning to native code (call_stub/c1/c2)
-  // convert result and unwind initial activation
-  // rax - result index
-
-  __ bind(return_to_initial_caller);
-  ExternalAddress stack_to_native((address)CppInterpreter::_stack_to_native_abi);
-  // Address index(noreg, rax, Address::times_ptr);
-
-  __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_ptr)));
-  __ call(rax);                                                    // call result converter
-
-  Label unwind_initial_activation;
-  __ bind(unwind_initial_activation);
-
-  // RETURN TO CALL_STUB/C1/C2 code (result if any in rax/rdx ST(0))
-
-  /* Current stack picture
-
-        [ incoming parameters ]
-        [ extra locals ]
-        [ return address to CALL_STUB/C1/C2]
-  fp -> [ CALL_STUB/C1/C2 fp ]
-        BytecodeInterpreter object
-        expression stack
-  sp ->
-
-  */
-
-  // return restoring the stack to the original sender_sp value
-
-  __ movptr(rcx, STATE(_sender_sp));
-  __ leave();
-  __ pop(rdi);                                                        // get return address
-  // set stack to sender's sp
-  __ mov(rsp, rcx);
-  __ jmp(rdi);                                                        // return to call_stub
-
-  // OSR request, adjust return address to make current frame into adapter frame
-  // and enter OSR nmethod
-
-  __ bind(do_OSR);
-
-  Label remove_initial_frame;
-
-  // We are going to pop this frame. Is there another interpreter frame underneath
-  // it or is it callstub/compiled?
-
-  // Move buffer to the expected parameter location
-  __ movptr(rcx, STATE(_result._osr._osr_buf));
-
-  __ movptr(rax, STATE(_result._osr._osr_entry));
-
-  __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD);            // returning from "recursive" interpreter call?
-  __ jcc(Assembler::equal, remove_initial_frame);              // back to native code (call_stub/c1/c2)
-
-  __ movptr(sender_sp_on_entry, STATE(_sender_sp));            // get sender's sp in expected register
-  __ leave();                                                  // pop the frame
-  __ mov(rsp, sender_sp_on_entry);                             // trim any stack expansion
-
-
-  // We know we are calling compiled code, so push a specialized return address.
-  // The method uses a specialized entry; push a return so we look like call stub setup.
-  // This path will handle the fact that the result is returned in registers and not
-  // on the java stack.
-
-  __ pushptr(return_from_native_method.addr());
-
-  __ jmp(rax);
-
-  __ bind(remove_initial_frame);
-
-  __ movptr(rdx, STATE(_sender_sp));
-  __ leave();
-  // get real return
-  __ pop(rsi);
-  // set stack to sender's sp
-  __ mov(rsp, rdx);
-  // repush real return
-  __ push(rsi);
-  // Enter OSR nmethod
-  __ jmp(rax);
-
-
-
-
-  // Call a new method. All we do is (temporarily) trim the expression stack,
-  // push a return address to bring us back to here, and leap to the new entry.
-
-  __ bind(call_method);
-
-  // stack points to the next free location and not the top element on the expression stack;
-  // the method expects sp to be pointing to the topmost element
-
-  __ movptr(rsp, STATE(_stack));                                     // pop args to c++ interpreter, set sp to java stack top
-  __ lea(rsp, Address(rsp, wordSize));
-
-  __ movptr(rbx, STATE(_result._to_call._callee));                   // get method to execute
-
-  // don't need a return address if reinvoking interpreter
-
-  // Make it look like call_stub calling conventions
-
-  // Get (potential) receiver
-  // get size of parameters in words
-  __ movptr(rcx, constMethod);
-  __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
-
-  ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
-  __ pushptr(recursive.addr());                                      // make it look good in the debugger
-
-  InternalAddress entry(entry_point);
-  __ cmpptr(STATE(_result._to_call._callee_entry_point), entry.addr()); // returning to interpreter?
-  __ jcc(Assembler::equal, re_dispatch);                             // yes
-
-  __ pop(rax);                                                       // pop dummy address
-
-
-  // get specialized entry
-  __ movptr(rax, STATE(_result._to_call._callee_entry_point));
-  // set sender SP
-  __ mov(sender_sp_on_entry, rsp);
-
-  // The method uses a specialized entry; push a return so we look like call stub setup.
-  // This path will handle the fact that the result is returned in registers and not
-  // on the java stack.
-
-  __ pushptr(return_from_native_method.addr());
-
-  __ jmp(rax);
-
-  __ bind(bad_msg);
-  __ stop("Bad message from interpreter");
-
-  // Interpreted method "returned" with an exception; pass it on...
-  // Pass result, unwind activation and continue/return to interpreter/call_stub.
-  // We handle the result (if any) differently based on return to interpreter or call_stub.
-
-  Label unwind_initial_with_pending_exception;
-
-  __ bind(throw_exception);
-  __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD);                 // returning from recursive interpreter call?
-  __ jcc(Assembler::equal, unwind_initial_with_pending_exception);  // no, back to native code (call_stub/c1/c2)
-  __ movptr(rax, STATE(_locals));                                   // pop parameters get new stack value
-  __ addptr(rax, wordSize);                                         // account for prepush before we return
-  __ jmp(unwind_recursive_activation);
-
-  __ bind(unwind_initial_with_pending_exception);
-
-  // We will unwind the current (initial) interpreter frame and forward
-  // the exception to the caller. We must put the exception in the
-  // expected register and clear pending exception and then forward.
-
-  __ jmp(unwind_and_forward);
-
-  interpreter_frame_manager = entry_point;
-  return entry_point;
-}
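The frame manager above is essentially a loop: run the C++ interpreter, look at the message it left in the state object, and either allocate monitors, push a new activation, unwind, throw, or jump to OSR code. A simplified, hypothetical model of that loop (the Msg enum and State struct are stand-ins, not the removed BytecodeInterpreter types):

    #include <cstdio>

    enum Msg { no_request, more_monitors, call_method, return_from_method,
               throwing_exception, do_osr, method_resume };

    struct State {           // stand-in for interpreterState
      Msg    msg;
      State* prev_link;      // caller's state, NULL for the initial activation
    };

    // Stand-in for BytecodeInterpreter::run(): executes bytecodes until it needs the
    // frame manager, then records why in the state and returns.
    static void run(State* s) {
      static int step = 0;
      s->msg = (step++ == 0) ? call_method : return_from_method;
    }

    static void frame_manager(State* caller) {
      State self = { method_resume, caller };
      for (;;) {
        run(&self);
        switch (self.msg) {
          case more_monitors:      /* shuffle stack, allocate monitor */      break;
          case call_method:        frame_manager(&self); /* "recursive" */    break;
          case return_from_method: return;   // unwind to prev_link or call_stub
          case throwing_exception: return;   // forward to caller
          case do_osr:             return;   // jump to OSR nmethod
          default:                 std::puts("Bad message from interpreter"); return;
        }
      }
    }

    int main() { frame_manager(nullptr); return 0; }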
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
- : CppInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-// Deoptimization helpers for C++ interpreter
-
-// How much stack a method activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-
-  const int stub_code = 4;  // see generate_call_stub
-  // Save space for one monitor to get into the interpreted method in case
-  // the method is synchronized
-  int monitor_size    = method->is_synchronized() ?
-                                1*frame::interpreter_frame_monitor_size() : 0;
-
-  // total static overhead size. Account for interpreter state object, return
-  // address, saved rbp and 2 words for a "static long no_params() method" issue.
-
-  const int overhead_size = sizeof(BytecodeInterpreter)/wordSize +
-    ( frame::sender_sp_offset - frame::link_offset) + 2;
-
-  const int method_stack = (method->max_locals() + method->max_stack()) *
-                           Interpreter::stackElementWords;
-  return overhead_size + method_stack + stub_code;
-}
-
-// returns the activation size.
-static int size_activation_helper(int extra_locals_size, int monitor_size) {
-  return (extra_locals_size +                  // the additional space for locals
-          2*BytesPerWord +                     // return address and saved rbp
-          2*BytesPerWord +                     // "static long no_params() method" issue
-          sizeof(BytecodeInterpreter) +               // interpreterState
-          monitor_size);                       // monitors
-}
-
-void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
-                                           frame* caller,
-                                           frame* current,
-                                           Method* method,
-                                           intptr_t* locals,
-                                           intptr_t* stack,
-                                           intptr_t* stack_base,
-                                           intptr_t* monitor_base,
-                                           intptr_t* frame_bottom,
-                                           bool is_top_frame
-                                           )
-{
-  // What about any vtable?
-  //
-  to_fill->_thread = JavaThread::current();
-  // This gets filled in later but make it something recognizable for now
-  to_fill->_bcp = method->code_base();
-  to_fill->_locals = locals;
-  to_fill->_constants = method->constants()->cache();
-  to_fill->_method = method;
-  to_fill->_mdx = NULL;
-  to_fill->_stack = stack;
-  if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) {
-    to_fill->_msg = deopt_resume2;
-  } else {
-    to_fill->_msg = method_resume;
-  }
-  to_fill->_result._to_call._bcp_advance = 0;
-  to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
-  to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
-  to_fill->_prev_link = NULL;
-
-  to_fill->_sender_sp = caller->unextended_sp();
-
-  if (caller->is_interpreted_frame()) {
-    interpreterState prev  = caller->get_interpreterState();
-    to_fill->_prev_link = prev;
-    // *current->register_addr(GR_Iprev_state) = (intptr_t) prev;
-    // Make the prev callee look proper
-    prev->_result._to_call._callee = method;
-    if (*prev->_bcp == Bytecodes::_invokeinterface) {
-      prev->_result._to_call._bcp_advance = 5;
-    } else {
-      prev->_result._to_call._bcp_advance = 3;
-    }
-  }
-  to_fill->_oop_temp = NULL;
-  to_fill->_stack_base = stack_base;
-  // Need +1 here because stack_base points to the word just above the first expr stack entry
-  // and stack_limit is supposed to point to the word just below the last expr stack entry.
-  // See generate_compute_interpreter_state.
-  to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
-  to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
-
-  to_fill->_self_link = to_fill;
-  assert(stack >= to_fill->_stack_limit && stack < to_fill->_stack_base,
-         "Stack top out of range");
-}
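The 3-versus-5 bcp advance recorded for the caller above is simply the length of the invoke bytecode being stepped over; invokeinterface carries an extra count byte and a trailing zero byte. A tiny sketch (the helper name is invented; the byte counts are the JVM spec values):

    #include <cstdio>

    enum Bytecode { invokevirtual = 0xb6, invokespecial = 0xb7,
                    invokestatic  = 0xb8, invokeinterface = 0xb9 };

    static int bcp_advance(Bytecode bc) {
      // opcode + 2-byte index, plus count byte and zero byte for invokeinterface
      return (bc == invokeinterface) ? 5 : 3;
    }

    int main() {
      std::printf("invokevirtual advances %d, invokeinterface advances %d\n",
                  bcp_advance(invokevirtual), bcp_advance(invokeinterface));
      return 0;
    }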
-
-
-static int frame_size_helper(int max_stack,
-                             int tempcount,
-                             int moncount,
-                             int callee_param_count,
-                             int callee_locals,
-                             bool is_top_frame,
-                             int& monitor_size,
-                             int& full_frame_size) {
-  int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
-  monitor_size = sizeof(BasicObjectLock) * moncount;
-
-  // First calculate the frame size without any java expression stack
-  int short_frame_size = size_activation_helper(extra_locals_size,
-                                                monitor_size);
-
-  // Now with full size expression stack
-  full_frame_size = short_frame_size + max_stack * BytesPerWord;
-
-  // and now with only live portion of the expression stack
-  short_frame_size = short_frame_size + tempcount * BytesPerWord;
-
-  // the size the activation is right now. Only top frame is full size
-  int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
-  return frame_size;
-}
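To make the arithmetic in frame_size_helper() and size_activation_helper() concrete, here is the same computation with made-up input values; the state and monitor sizes below are placeholders, not the real sizeof values:

    #include <cstdio>

    int main() {
      const int BytesPerWord = 8;
      const int interp_state_bytes = 26 * BytesPerWord;  // placeholder for sizeof(BytecodeInterpreter)

      int max_stack = 6, tempcount = 2, moncount = 1;
      int callee_param_count = 1, callee_locals = 3;

      int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
      int monitor_size = 2 * BytesPerWord * moncount;     // placeholder BasicObjectLock size

      int short_frame = extra_locals_size
                      + 2 * BytesPerWord                  // return address + saved rbp
                      + 2 * BytesPerWord                  // "static long no_params()" slack
                      + interp_state_bytes
                      + monitor_size;

      int full_frame  = short_frame + max_stack * BytesPerWord;  // full expression stack (top frame)
      short_frame    += tempcount * BytesPerWord;                 // only the live portion (inner frames)

      std::printf("top frame: %d bytes, inner frame: %d bytes\n", full_frame, short_frame);
      return 0;
    }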
-
-int AbstractInterpreter::size_activation(int max_stack,
-                                         int tempcount,
-                                         int extra_args,
-                                         int moncount,
-                                         int callee_param_count,
-                                         int callee_locals,
-                                         bool is_top_frame) {
-  assert(extra_args == 0, "FIX ME");
-  // NOTE: return size is in words not bytes
-
-  // Calculate the amount our frame will be adjusted by the callee. For the top frame
-  // this is zero.
-
-  // NOTE: ia64 seems to do this wrong (or at least backwards) in that it
-  // calculates the extra locals based on itself, not on what the callee does
-  // to it. So it ignores the last_frame_adjust value. Seems suspicious as far
-  // as getting sender_sp correct.
-
-  int unused_monitor_size = 0;
-  int unused_full_frame_size = 0;
-  return frame_size_helper(max_stack, tempcount, moncount, callee_param_count, callee_locals,
-                           is_top_frame, unused_monitor_size, unused_full_frame_size)/BytesPerWord;
-}
-
-void AbstractInterpreter::layout_activation(Method* method,
-                                            int tempcount,  //
-                                            int popframe_extra_args,
-                                            int moncount,
-                                            int caller_actual_parameters,
-                                            int callee_param_count,
-                                            int callee_locals,
-                                            frame* caller,
-                                            frame* interpreter_frame,
-                                            bool is_top_frame,
-                                            bool is_bottom_frame) {
-
-  assert(popframe_extra_args == 0, "FIX ME");
-  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
-  // does as far as allocating an interpreter frame.
-  // Set up the method, locals, and monitors.
-  // The frame interpreter_frame is guaranteed to be the right size,
-  // as determined by a previous call to the size_activation() method.
-  // It is also guaranteed to be walkable even though it is in a skeletal state
-  // NOTE: tempcount is the current size of the java expression stack. For top most
-  //       frames we will allocate a full sized expression stack and not the cut-back
-  //       version that non-top frames have.
-
-  int monitor_size = 0;
-  int full_frame_size = 0;
-  int frame_size = frame_size_helper(method->max_stack(), tempcount, moncount, callee_param_count, callee_locals,
-                                     is_top_frame, monitor_size, full_frame_size);
-
-#ifdef ASSERT
-  assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
-#endif
-
-  // MUCHO HACK
-
-  intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
-
-  /* Now fill in the interpreterState object */
-
-  // The state object is the first thing on the frame and easily located
-
-  interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
-
-
-  // Find the locals pointer. This is rather simple on x86 because there is no
-  // confusing rounding at the callee to account for. We can trivially locate
-  // our locals based on the current fp().
-  // Note: the + 2 is for handling the "static long no_params() method" issue.
-  // (too bad I don't really remember that issue well...)
-
-  intptr_t* locals;
-  // If the caller is interpreted we need to make sure that locals points to the first
-  // argument that the caller passed and not to an area where the stack might have been extended,
-  // because the stack-to-stack converter needs a proper locals value in order to remove the
-  // arguments from the caller and place the result in the proper location. Hmm maybe it'd be
-  // simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code
-  // adjust the stack?? HMMM QQQ
-  //
-  if (caller->is_interpreted_frame()) {
-    // locals must agree with the caller because it will be used to set the
-    // caller's tos when we return.
-    interpreterState prev  = caller->get_interpreterState();
-    // stack() is prepushed.
-    locals = prev->stack() + method->size_of_parameters();
-    // locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
-    if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
-      // os::breakpoint();
-    }
-  } else {
-    // this is where a c2i would have placed locals (except for the +2)
-    locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
-  }
-
-  intptr_t* monitor_base = (intptr_t*) cur_state;
-  intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
-  /* +1 because stack is always prepushed */
-  intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
-
-
-  BytecodeInterpreter::layout_interpreterState(cur_state,
-                                               caller,
-                                               interpreter_frame,
-                                               method,
-                                               locals,
-                                               stack,
-                                               stack_base,
-                                               monitor_base,
-                                               frame_bottom,
-                                               is_top_frame);
-
-  // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
-}
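The pointer carving in layout_activation() can be visualized with plain integers standing in for addresses: the interpreterState sits just below fp, monitors grow down from it, and the prepushed expression stack starts below the monitors. A hedged sketch with invented constants:

    #include <cstdio>

    int main() {
      typedef unsigned long addr_t;
      const int wordSize     = 8;
      const int state_size   = 26 * wordSize;    // placeholder sizeof(BytecodeInterpreter)
      const int monitor_size = 1 * 2 * wordSize; // one monitor, two words
      const int tempcount    = 3;                // live expression-stack words

      addr_t fp = 0x7000UL;                                  // interpreter_frame->fp()
      addr_t cur_state    = fp - state_size;                 // state object just below fp
      addr_t monitor_base = cur_state;                       // monitors grow down from the state
      addr_t stack_base   = monitor_base - monitor_size;     // expression stack starts below monitors
      addr_t stack        = stack_base - (tempcount + 1) * wordSize;  // +1 because prepushed

      std::printf("fp=%#lx state=%#lx monitors=%#lx stack_base=%#lx tos=%#lx\n",
                  fp, cur_state, monitor_base, stack_base, stack);
      return 0;
    }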
-
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  switch (method_kind(m)) {
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     :
-      return false;
-    default:
-      return true;
-  }
-}
-
-
-#endif // CC_INTERP (all)
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_CPPINTERPRETER_X86_HPP
-#define CPU_X86_VM_CPPINTERPRETER_X86_HPP
-
-
-  protected:
-
-  // Size of interpreter code.  Increase if too small.  Interpreter will
-  // fail with a guarantee ("not enough space for interpreter generation")
-  // if too small.
-  // Run with +PrintInterpreter to get the VM to print out the size.
-  // Max size with JVMTI
-  const static int InterpreterCodeSize = 168 * 1024;
-
-#endif // CPU_X86_VM_CPPINTERPRETER_X86_HPP
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -314,26 +314,6 @@
 }
 
 // sender_sp
-#ifdef CC_INTERP
-intptr_t* frame::interpreter_frame_sender_sp() const {
-  assert(is_interpreted_frame(), "interpreted frame expected");
-  // QQQ why does this specialized method exist if frame::sender_sp() does the same thing?
-  // seems odd and if we always know interpreted vs. non then sender_sp() is really
-  // doing too much work.
-  return get_interpreterState()->sender_sp();
-}
-
-// monitor elements
-
-BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
-  return get_interpreterState()->monitor_base();
-}
-
-BasicObjectLock* frame::interpreter_frame_monitor_end() const {
-  return (BasicObjectLock*) get_interpreterState()->stack_base();
-}
-
-#else // CC_INTERP
 
 intptr_t* frame::interpreter_frame_sender_sp() const {
   assert(is_interpreted_frame(), "interpreted frame expected");
@@ -368,7 +348,6 @@
 void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
     *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
 }
-#endif // CC_INTERP
 
 frame frame::sender_for_entry_frame(RegisterMap* map) const {
   assert(map != NULL, "map must be set");
@@ -524,9 +503,6 @@
 }
 
 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
-// QQQ
-#ifdef CC_INTERP
-#else
   assert(is_interpreted_frame(), "Not an interpreted frame");
   // These are reasonable sanity checks
   if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
@@ -545,7 +521,6 @@
   }
 
   // do some validation of frame elements
-
   // first the method
 
   Method* m = *interpreter_frame_method_addr();
@@ -580,17 +555,10 @@
   if (locals > thread->stack_base() || locals < (address) fp()) return false;
 
   // We'd have to be pretty unlucky to be mislead at this point
-
-#endif // CC_INTERP
   return true;
 }
 
 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
-#ifdef CC_INTERP
-  // Needed for JVMTI. The result should always be in the
-  // interpreterState object
-  interpreterState istate = get_interpreterState();
-#endif // CC_INTERP
   assert(is_interpreted_frame(), "interpreted frame expected");
   Method* method = interpreter_frame_method();
   BasicType type = method->result_type();
@@ -620,11 +588,7 @@
     case T_ARRAY   : {
       oop obj;
       if (method->is_native()) {
-#ifdef CC_INTERP
-        obj = istate->_oop_temp;
-#else
         obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
-#endif // CC_INTERP
       } else {
         oop* obj_p = (oop*)tos_addr;
         obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
@@ -673,7 +637,6 @@
 
 void frame::describe_pd(FrameValues& values, int frame_no) {
   if (is_interpreted_frame()) {
-#ifndef CC_INTERP
     DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
     DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
     DESCRIBE_FP_OFFSET(interpreter_frame_method);
@@ -692,7 +655,6 @@
     }
 #endif // AMD64
   }
-#endif
 }
 #endif // !PRODUCT
 
--- a/hotspot/src/cpu/x86/vm/frame_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/frame_x86.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -101,8 +101,6 @@
     // non-interpreter frames
     sender_sp_offset                                 =  2,
 
-#ifndef CC_INTERP
-
     // Interpreter frames
     interpreter_frame_result_handler_offset          =  3, // for native calls only
     interpreter_frame_oop_temp_offset                =  2, // for native calls only
@@ -120,8 +118,6 @@
     interpreter_frame_monitor_block_top_offset       = interpreter_frame_initial_sp_offset,
     interpreter_frame_monitor_block_bottom_offset    = interpreter_frame_initial_sp_offset,
 
-#endif // CC_INTERP
-
     // Entry frames
 #ifdef AMD64
 #ifdef _WIN64
@@ -193,13 +189,7 @@
   // helper to update a map with callee-saved RBP
   static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);
 
-#ifndef CC_INTERP
   // deoptimization support
   void interpreter_frame_set_last_sp(intptr_t* sp);
-#endif // CC_INTERP
-
-#ifdef CC_INTERP
-  inline interpreterState get_interpreterState() const;
-#endif // CC_INTERP
 
 #endif // CPU_X86_VM_FRAME_X86_HPP
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -151,59 +151,6 @@
 inline address* frame::sender_pc_addr()      const { return (address*) addr_at( return_addr_offset); }
 inline address  frame::sender_pc()           const { return *sender_pc_addr(); }
 
-#ifdef CC_INTERP
-
-inline interpreterState frame::get_interpreterState() const {
-  return ((interpreterState)addr_at( -((int)sizeof(BytecodeInterpreter))/wordSize ));
-}
-
-inline intptr_t*    frame::sender_sp()        const {
-  // Hmm this seems awfully expensive QQQ, is this really called with interpreted frames?
-  if (is_interpreted_frame()) {
-    assert(false, "should never happen");
-    return get_interpreterState()->sender_sp();
-  } else {
-    return            addr_at(sender_sp_offset);
-  }
-}
-
-inline intptr_t** frame::interpreter_frame_locals_addr() const {
-  assert(is_interpreted_frame(), "must be interpreted");
-  return &(get_interpreterState()->_locals);
-}
-
-inline intptr_t* frame::interpreter_frame_bcp_addr() const {
-  assert(is_interpreted_frame(), "must be interpreted");
-  return (intptr_t*) &(get_interpreterState()->_bcp);
-}
-
-
-// Constant pool cache
-
-inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
-  assert(is_interpreted_frame(), "must be interpreted");
-  return &(get_interpreterState()->_constants);
-}
-
-// Method
-
-inline Method** frame::interpreter_frame_method_addr() const {
-  assert(is_interpreted_frame(), "must be interpreted");
-  return &(get_interpreterState()->_method);
-}
-
-inline intptr_t* frame::interpreter_frame_mdp_addr() const {
-  assert(is_interpreted_frame(), "must be interpreted");
-  return (intptr_t*) &(get_interpreterState()->_mdx);
-}
-
-// top of expression stack
-inline intptr_t* frame::interpreter_frame_tos_address() const {
-  assert(is_interpreted_frame(), "wrong frame type");
-  return get_interpreterState()->_stack + 1;
-}
-
-#else /* asm interpreter */
 inline intptr_t*    frame::sender_sp()        const { return            addr_at(   sender_sp_offset); }
 
 inline intptr_t** frame::interpreter_frame_locals_addr() const {
@@ -255,8 +202,6 @@
   return (oop *)(fp() + interpreter_frame_oop_temp_offset);
 }
 
-#endif /* CC_INTERP */
-
 inline int frame::pd_oop_map_offset_adjustment() const {
   return 0;
 }
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -45,7 +45,6 @@
   jump(RuntimeAddress(entry));
 }
 
-#ifndef CC_INTERP
 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
   Label update, next, none;
 
@@ -246,16 +245,7 @@
     bind(profile_continue);
   }
 }
-#endif
 
-#ifdef CC_INTERP
-void InterpreterMacroAssembler::get_method(Register reg) {
-  movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
-  movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
-}
-#endif // CC_INTERP
-
-#ifndef CC_INTERP
 void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                   int number_of_arguments) {
   // interpreter specific
@@ -1046,7 +1036,6 @@
   pop(ret_addr);                     // get return address
   mov(rsp, rbx);                     // set sp to sender sp
 }
-#endif // !CC_INTERP
 
 void InterpreterMacroAssembler::get_method_counters(Register method,
                                                     Register mcs, Label& skip) {
@@ -1227,7 +1216,7 @@
     restore_bcp();
   }
 }
-#ifndef CC_INTERP
+
 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                          Label& zero_continue) {
   assert(ProfileInterpreter, "must be profiling interpreter");
@@ -1886,7 +1875,6 @@
   andl(scratch, mask);
   jcc(cond, *where);
 }
-#endif // CC_INTERP
 
 void InterpreterMacroAssembler::notify_method_entry() {
   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
@@ -1938,9 +1926,8 @@
     // is changed then the interpreter_frame_result implementation will
     // need to be updated too.
 
-    // For c++ interpreter the result is always stored at a known location in the frame
-    // template interpreter will leave it on the top of the stack.
-    NOT_CC_INTERP(push(state);)
+    // template interpreter will leave the result on the top of the stack.
+    push(state);
     NOT_LP64(get_thread(rthread);)
     movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
     testl(rdx, rdx);
@@ -1948,16 +1935,16 @@
     call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
     bind(L);
-    NOT_CC_INTERP(pop(state));
+    pop(state);
   }
 
   {
     SkipIfEqual skip(this, &DTraceMethodProbes, false);
-    NOT_CC_INTERP(push(state));
+    push(state);
     NOT_LP64(get_thread(rthread);)
     get_method(rarg);
     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                  rthread, rarg);
-    NOT_CC_INTERP(pop(state));
+    pop(state);
   }
 }
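Illustration (not part of the patch): the NOT_CC_INTERP(...) wrappers deleted above follow HotSpot's usual conditional-compilation macro style; with CC_INTERP gone on these ports, the wrapped statements can stand on their own. A standalone sketch of the assumed macro shape:

    #include <cstdio>

    // #define CC_INTERP                  // the C++-interpreter build used to define this
    #ifdef CC_INTERP
    #define NOT_CC_INTERP(code)           // expands to nothing under CC_INTERP
    #else
    #define NOT_CC_INTERP(code) code      // otherwise expands to the code itself
    #endif

    int main() {
      NOT_CC_INTERP(std::puts("template-interpreter-only path");)
      return 0;
    }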
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -36,7 +36,6 @@
 
 class InterpreterMacroAssembler: public MacroAssembler {
 
-#ifndef CC_INTERP
  protected:
   // Interpreter specific version of call_VM_base
   virtual void call_VM_leaf_base(address entry_point,
@@ -54,7 +53,6 @@
 
   // base routine for all dispatches
   void dispatch_base(TosState state, address* table, bool verifyoop = true);
-#endif // CC_INTERP
 
  public:
   InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code),
@@ -65,15 +63,6 @@
 
   void load_earlyret_value(TosState state);
 
-#ifdef CC_INTERP
-  void save_bcp()                                          { /*  not needed in c++ interpreter and harmless */ }
-  void restore_bcp()                                       { /*  not needed in c++ interpreter and harmless */ }
-
-  // Helpers for runtime call arguments/results
-  void get_method(Register reg);
-
-#else
-
   // Interpreter-specific registers
   void save_bcp() {
     movptr(Address(rbp, frame::interpreter_frame_bcp_offset * wordSize), _bcp_register);
@@ -219,15 +208,12 @@
                          bool throw_monitor_exception = true,
                          bool install_monitor_exception = true,
                          bool notify_jvmdi = true);
-#endif // CC_INTERP
   void get_method_counters(Register method, Register mcs, Label& skip);
 
   // Object locking
   void lock_object  (Register lock_reg);
   void unlock_object(Register lock_reg);
 
-#ifndef CC_INTERP
-
   // Interpreter profiling operations
   void set_method_data_pointer_for_bcp();
   void test_method_data_pointer(Register mdp, Label& zero_continue);
@@ -285,8 +271,6 @@
   // only if +VerifyFPU  && (state == ftos || state == dtos)
   void verify_FPU(int stack_depth, TosState state = ftos);
 
-#endif // !CC_INTERP
-
   typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
 
   // support for jvmti/dtrace
@@ -299,12 +283,10 @@
   Register _bcp_register; // register that contains the bcp
 
  public:
-#ifndef CC_INTERP
   void profile_obj_type(Register obj, const Address& mdo_addr);
   void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
   void profile_return_type(Register mdp, Register ret, Register tmp);
   void profile_parameters_type(Register mdp, Register tmp1, Register tmp2);
-#endif /* !CC_INTERP */
 
 };
 
--- a/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,26 +25,24 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 
 #define __ _masm->
 
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
-address InterpreterGenerator::generate_abstract_entry(void) {
+address TemplateInterpreterGenerator::generate_abstract_entry(void) {
 
   address entry_point = __ pc();
 
   // abstract method entry
 
-#ifndef CC_INTERP
   //  pop return address, reset last_sp to NULL
   __ empty_expression_stack();
   __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
-#endif
 
   // throw exception
   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
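Illustration (not part of the patch): the rename above is the recurring move in this changeset; entry generators declared on the removed InterpreterGenerator subclass become members of TemplateInterpreterGenerator itself. A standalone sketch of folding such a one-off subclass into its parent (class and method names are hypothetical):

    #include <iostream>
    #include <string>

    class Generator {                      // stands in for TemplateInterpreterGenerator
     public:
      // Previously declared on a thin subclass; now lives directly on the parent.
      std::string generate_abstract_entry() { return "throw AbstractMethodError stub"; }
      std::string generate_math_entry()     { return "intrinsic math stub"; }
    };

    int main() {
      Generator g;                         // callers construct the parent directly
      std::cout << g.generate_abstract_entry() << '\n'
                << g.generate_math_entry()     << '\n';
      return 0;
    }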
--- a/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP
-#define CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP
-
-
-// Generation of Interpreter
-//
-  friend class AbstractInterpreterGenerator;
-
- private:
-
-  address generate_normal_entry(bool synchronized);
-  address generate_native_entry(bool synchronized);
-  address generate_abstract_entry(void);
-  address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_accessor_entry(void) { return NULL; }
-  address generate_empty_entry(void) { return NULL; }
-  address generate_Reference_get_entry();
-  address generate_CRC32_update_entry();
-  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
-  address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind);
-#ifndef _LP64
-  address generate_Float_intBitsToFloat_entry();
-  address generate_Float_floatToRawIntBits_entry();
-  address generate_Double_longBitsToDouble_entry();
-  address generate_Double_doubleToRawLongBits_entry();
-#endif
-  void generate_stack_overflow_check(void);
-
-  void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
-  void generate_counter_overflow(Label* do_continue);
-
-#endif // CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP
--- a/hotspot/src/cpu/x86/vm/interpreter_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_INTERPRETER_X86_HPP
-#define CPU_X86_VM_INTERPRETER_X86_HPP
-
- public:
-  static Address::ScaleFactor stackElementScale() {
-    return NOT_LP64(Address::times_4) LP64_ONLY(Address::times_8);
-  }
-
-  // Offset from rsp (which points to the last stack element)
-  static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
-
-  // Stack index relative to tos (which points at value)
-  static int expr_index_at(int i)        { return stackElementWords * i; }
-
-  // Already negated by c++ interpreter
-  static int local_index_at(int i) {
-    assert(i <= 0, "local direction already negated");
-    return stackElementWords * i;
-  }
-
-#endif // CPU_X86_VM_INTERPRETER_X86_HPP
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -26,9 +26,9 @@
 #include "asm/macroAssembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
@@ -66,7 +66,7 @@
 }
 
 
-address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
 
   // rbx,: Method*
   // rcx: scratch
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -26,9 +26,9 @@
 #include "asm/macroAssembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
@@ -199,7 +199,7 @@
 // Various method entries
 //
 
-address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
 
   // rbx,: Method*
   // rcx: scratch
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -2525,11 +2525,9 @@
   // Only interpreter should have to clear fp
   reset_last_Java_frame(java_thread, true, false);
 
-#ifndef CC_INTERP
    // C++ interp handles this in the interpreter
   check_and_handle_popframe(java_thread);
   check_and_handle_earlyret(java_thread);
-#endif /* CC_INTERP */
 
   if (check_exceptions) {
     // check for pending exceptions (java_thread is set upon return)
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,16 +48,9 @@
   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
   // may customize this version by overriding it for its purposes (e.g., to save/restore
   // additional registers when doing a VM call).
-#ifdef CC_INTERP
-  // c++ interpreter never wants to use interp_masm version of call_VM
-  #define VIRTUAL
-#else
-  #define VIRTUAL virtual
-#endif
-
 #define COMMA ,
 
-  VIRTUAL void call_VM_leaf_base(
+  virtual void call_VM_leaf_base(
     address entry_point,               // the entry point
     int     number_of_arguments        // the number of arguments to pop after the call
   );
@@ -70,7 +63,7 @@
   // returns the register which contains the thread upon return. If a thread register has been
   // specified, the return value will correspond to that register. If no last_java_sp is specified
   // (noreg) then rsp will be used instead.
-  VIRTUAL void call_VM_base(           // returns the register containing the thread upon return
+  virtual void call_VM_base(           // returns the register containing the thread upon return
     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
     Register java_thread,              // the thread if computed before     ; use noreg otherwise
     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
@@ -1422,8 +1415,6 @@
   void byte_array_inflate(Register src, Register dst, Register len,
                           XMMRegister tmp1, Register tmp2);
 
-#undef VIRTUAL
-
 };
 
 /**
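Illustration (not part of the patch): with the VIRTUAL macro gone, call_VM_leaf_base and call_VM_base are unconditionally virtual, so the interpreter's macro assembler can always override them. A standalone sketch of that virtual-hook shape (simplified, hypothetical names):

    #include <iostream>

    struct MacroAsm {
      virtual ~MacroAsm() = default;
      virtual void call_VM_leaf_base() { std::cout << "generic VM leaf call\n"; }
    };

    struct InterpMacroAsm : MacroAsm {            // interpreter-specific customization
      void call_VM_leaf_base() override { std::cout << "interpreter VM leaf call\n"; }
    };

    int main() {
      InterpMacroAsm ia;
      MacroAsm* m = &ia;
      m->call_VM_leaf_base();                     // dispatches to the override
      return 0;
    }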
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -2652,30 +2652,14 @@
   Label loop;
   __ bind(loop);
   __ movptr(rbx, Address(rsi, 0));      // Load frame size
-#ifdef CC_INTERP
-  __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
-#ifdef ASSERT
-  __ push(0xDEADDEAD);                  // Make a recognizable pattern
-  __ push(0xDEADDEAD);
-#else /* ASSERT */
-  __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
-#endif /* ASSERT */
-#else /* CC_INTERP */
   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
-#endif /* CC_INTERP */
   __ pushptr(Address(rcx, 0));          // save return address
   __ enter();                           // save old & set new rbp,
   __ subptr(rsp, rbx);                  // Prolog!
   __ movptr(rbx, sp_temp);              // sender's sp
-#ifdef CC_INTERP
-  __ movptr(Address(rbp,
-                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
-          rbx); // Make it walkable
-#else /* CC_INTERP */
   // This value is corrected by layout_activation_impl
   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
-#endif /* CC_INTERP */
   __ movptr(sp_temp, rsp);              // pass to next frame
   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
@@ -2894,30 +2878,14 @@
   Label loop;
   __ bind(loop);
   __ movptr(rbx, Address(rsi, 0));      // Load frame size
-#ifdef CC_INTERP
-  __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
-#ifdef ASSERT
-  __ push(0xDEADDEAD);                  // Make a recognizable pattern
-  __ push(0xDEADDEAD);                  // (parm to RecursiveInterpreter...)
-#else /* ASSERT */
-  __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
-#endif /* ASSERT */
-#else /* CC_INTERP */
   __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
-#endif /* CC_INTERP */
   __ pushptr(Address(rcx, 0));          // save return address
   __ enter();                           // save old & set new rbp,
   __ subptr(rsp, rbx);                  // Prolog!
   __ movptr(rbx, sp_temp);              // sender's sp
-#ifdef CC_INTERP
-  __ movptr(Address(rbp,
-                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
-          rbx); // Make it walkable
-#else /* CC_INTERP */
   // This value is corrected by layout_activation_impl
   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
-#endif /* CC_INTERP */
   __ movptr(sp_temp, rsp);              // pass to next frame
   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
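Illustration (not part of the patch): the loops simplified above lay out one skeletal interpreter frame per entry of two parallel arrays (frame sizes and return pcs). A standalone simulation of that layout on a fake downward-growing stack, with made-up sizes:

    #include <cstdint>
    #include <cstdio>

    int main() {
      intptr_t stack[64];
      intptr_t* sp = stack + 64;                    // stack grows toward lower addresses
      const intptr_t frame_sizes[] = { 6, 4, 5 };   // in words, illustrative values
      const intptr_t return_pcs[]  = { 0x1111, 0x2222, 0x3333 };

      intptr_t* fp = nullptr;
      for (int i = 0; i < 3; i++) {
        *--sp = return_pcs[i];                      // "pushptr(return address)"
        *--sp = (intptr_t)fp;                       // "enter(): save old fp ..."
        fp = sp;                                    // "... and set the new one"
        sp -= frame_sizes[i] - 2;                   // "subptr(rsp, rbx): Prolog" (pc/fp already pushed)
        std::printf("frame %d: fp=%p sp=%p\n", i, (void*)fp, (void*)sp);
      }
      return 0;
    }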
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -3021,29 +3021,13 @@
   Label loop;
   __ bind(loop);
   __ movptr(rbx, Address(rsi, 0));      // Load frame size
-#ifdef CC_INTERP
-  __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
-#ifdef ASSERT
-  __ push(0xDEADDEAD);                  // Make a recognizable pattern
-  __ push(0xDEADDEAD);
-#else /* ASSERT */
-  __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
-#endif /* ASSERT */
-#else
   __ subptr(rbx, 2*wordSize);           // We'll push pc and ebp by hand
-#endif // CC_INTERP
   __ pushptr(Address(rcx, 0));          // Save return address
   __ enter();                           // Save old & set new ebp
   __ subptr(rsp, rbx);                  // Prolog
-#ifdef CC_INTERP
-  __ movptr(Address(rbp,
-                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
-            sender_sp); // Make it walkable
-#else /* CC_INTERP */
   // This value is corrected by layout_activation_impl
   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
-#endif /* CC_INTERP */
   __ mov(sender_sp, rsp);               // Pass sender_sp to next frame
   __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
   __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
@@ -3242,16 +3226,10 @@
   __ pushptr(Address(rcx, 0));     // Save return address
   __ enter();                      // Save old & set new rbp
   __ subptr(rsp, rbx);             // Prolog
-#ifdef CC_INTERP
-  __ movptr(Address(rbp,
-                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
-            sender_sp); // Make it walkable
-#else // CC_INTERP
   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
             sender_sp);            // Make it walkable
   // This value is corrected by layout_activation_impl
   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
-#endif // CC_INTERP
   __ mov(sender_sp, rsp);          // Pass sender_sp to next frame
   __ addptr(rsi, wordSize);        // Bump array pointer (sizes)
   __ addptr(rcx, wordSize);        // Bump array pointer (pcs)
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -25,10 +25,10 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interp_masm.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
@@ -49,8 +49,6 @@
 
 #define __ _masm->
 
-#ifndef CC_INTERP
-
 // Global Register Names
 static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
 static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
@@ -361,7 +359,7 @@
 // rbx: method
 // rcx: invocation counter
 //
-void InterpreterGenerator::generate_counter_incr(
+void TemplateInterpreterGenerator::generate_counter_incr(
         Label* overflow,
         Label* profile_method,
         Label* profile_method_continue) {
@@ -436,7 +434,7 @@
   }
 }
 
-void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
+void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
 
   // Asm interpreter on entry
   // r14/rdi - locals
@@ -466,7 +464,7 @@
   __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
   // Preserve invariant that r13/r14 contain bcp/locals of sender frame
   // and jump to the interpreted entry.
-  __ jmp(*do_continue, relocInfo::none);
+  __ jmp(do_continue, relocInfo::none);
 }
 
 // See if we've got enough room on the stack for locals plus overhead.
@@ -483,7 +481,7 @@
 //
 // Kills:
 //      rax
-void InterpreterGenerator::generate_stack_overflow_check(void) {
+void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
 
   // monitor entry size: see picture of stack in frame_x86.hpp
   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
@@ -687,7 +685,7 @@
 // End of helpers
 
 // Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
+address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
   // Code: _aload_0, _getfield, _areturn
   // parameter size = 1
@@ -783,7 +781,7 @@
 // Interpreter stub for calling a native method. (asm interpreter)
 // This sets up a somewhat different looking stack for calling the
 // native method than the typical interpreter frame setup.
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
+address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // determine code generation flags
   bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
 
@@ -1300,7 +1298,7 @@
   if (inc_counter) {
     // Handle overflow of counter and compile method
     __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
+    generate_counter_overflow(continue_after_compile);
   }
 
   return entry_point;
@@ -1309,7 +1307,7 @@
 //
 // Generic interpreted method entry to (asm) interpreter
 //
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
   // determine code generation flags
   bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
 
@@ -1471,7 +1469,7 @@
     }
     // Handle overflow of counter and compile method
     __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
+    generate_counter_overflow(continue_after_compile);
   }
 
   return entry_point;
@@ -1767,18 +1765,6 @@
   generate_and_dispatch(t);
 }
 
-
-//-----------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
-  : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
 //-----------------------------------------------------------------------------
 
 // Non-product code
@@ -1871,4 +1857,3 @@
   __ bind(L);
 }
 #endif // !PRODUCT
-#endif // ! CC_INTERP
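Illustration (not part of the patch): generate_counter_overflow now takes the continuation label by reference instead of by pointer, so call sites pass the label directly and the body jumps without dereferencing. A standalone sketch of that pointer-to-reference change (Label here is a stand-in type):

    #include <iostream>

    struct Label { const char* name; };

    // before: void generate_counter_overflow(Label* do_continue) { /* jmp(*do_continue) */ }
    void generate_counter_overflow(Label& do_continue) {
      std::cout << "jmp " << do_continue.name << '\n';    // no dereference needed
    }

    int main() {
      Label continue_after_compile = { "continue_after_compile" };
      generate_counter_overflow(continue_after_compile);  // was: &continue_after_compile
      return 0;
    }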
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_TEMPLATEINTERPRETERGENERATOR_X86_HPP
-#define CPU_X86_VM_TEMPLATEINTERPRETERGENERATOR_X86_HPP
-
- protected:
-
- void generate_fixed_frame(bool native_call);
-
- // address generate_asm_interpreter_entry(bool synchronized);
-
-#endif // CPU_X86_VM_TEMPLATEINTERPRETERGENERATOR_X86_HPP
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_32.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_32.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -24,20 +24,19 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
+#include "interpreter/interp_masm.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "runtime/arguments.hpp"
 
 #define __ _masm->
 
 
-#ifndef CC_INTERP
-
 /**
  * Method entry for static native methods:
  *   int java.util.zip.CRC32.update(int crc, int b)
  */
-address InterpreterGenerator::generate_CRC32_update_entry() {
+address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
   if (UseCRC32Intrinsics) {
     address entry = __ pc();
 
@@ -89,7 +88,7 @@
  *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
  *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
  */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
   if (UseCRC32Intrinsics) {
     address entry = __ pc();
 
@@ -155,7 +154,7 @@
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
 */
-address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
   if (UseCRC32CIntrinsics) {
     address entry = __ pc();
     // Load parameters
@@ -201,7 +200,7 @@
  * Method entry for static native method:
  *    java.lang.Float.intBitsToFloat(int bits)
  */
-address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
+address TemplateInterpreterGenerator::generate_Float_intBitsToFloat_entry() {
   if (UseSSE >= 1) {
     address entry = __ pc();
 
@@ -227,7 +226,7 @@
  * Method entry for static native method:
  *    java.lang.Float.floatToRawIntBits(float value)
  */
-address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
+address TemplateInterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
   if (UseSSE >= 1) {
     address entry = __ pc();
 
@@ -254,7 +253,7 @@
  * Method entry for static native method:
  *    java.lang.Double.longBitsToDouble(long bits)
  */
-address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
+address TemplateInterpreterGenerator::generate_Double_longBitsToDouble_entry() {
    if (UseSSE >= 2) {
      address entry = __ pc();
 
@@ -280,7 +279,7 @@
  * Method entry for static native method:
  *    java.lang.Double.doubleToRawLongBits(double value)
  */
-address InterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
+address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
   if (UseSSE >= 2) {
     address entry = __ pc();
 
@@ -302,4 +301,3 @@
 
   return NULL;
 }
-#endif // CC_INTERP
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_64.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_64.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -24,19 +24,18 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
+#include "interpreter/interp_masm.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "runtime/arguments.hpp"
 
 #define __ _masm->
 
-#ifndef CC_INTERP
-
 /**
  * Method entry for static native methods:
  *   int java.util.zip.CRC32.update(int crc, int b)
  */
-address InterpreterGenerator::generate_CRC32_update_entry() {
+address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
   if (UseCRC32Intrinsics) {
     address entry = __ pc();
 
@@ -88,7 +87,7 @@
  *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
  *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
  */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
   if (UseCRC32Intrinsics) {
     address entry = __ pc();
 
@@ -149,7 +148,7 @@
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
 */
-address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
   if (UseCRC32CIntrinsics) {
     address entry = __ pc();
     // Load parameters
@@ -194,4 +193,3 @@
 
   return NULL;
 }
-#endif // ! CC_INTERP
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -27,7 +27,16 @@
 #include "interpreter/interpreter.hpp"
 #include "runtime/frame.inline.hpp"
 
-#ifndef CC_INTERP
+// Size of interpreter code.  Increase if too small.  Interpreter will
+// fail with a guarantee ("not enough space for interpreter generation")
+// if too small.
+// Run with +PrintInterpreter to get the VM to print out the size.
+// Max size with JVMTI
+#ifdef AMD64
+int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;
+#else
+int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
+#endif // AMD64
 
 // asm based interpreter deoptimization helpers
 int AbstractInterpreter::size_activation(int max_stack,
@@ -38,7 +47,7 @@
                                          int callee_locals,
                                          bool is_top_frame) {
   // Note: This calculation must exactly parallel the frame setup
-  // in InterpreterGenerator::generate_fixed_frame.
+  // in TemplateInterpreterGenerator::generate_fixed_frame.
 
   // fixed size of an interpreter frame:
   int overhead = frame::sender_sp_offset -
@@ -198,5 +207,3 @@
                            Interpreter::stackElementWords;
   return (overhead_size + method_stack + stub_code);
 }
-
-#endif // CC_INTERP
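Illustration (not part of the patch): with templateInterpreter_x86.hpp gone, the per-port code-buffer size is defined here as an ordinary static data member rather than a header-local `const static int`. A standalone sketch of that declaration/definition split (names and the 64-bit test are illustrative):

    #include <iostream>

    struct Interp {
      static int code_size;                 // declared once in the shared header
    };

    #ifdef __LP64__
    int Interp::code_size = 256 * 1024;     // definition chosen per platform in the .cpp
    #else
    int Interp::code_size = 224 * 1024;
    #endif

    int main() {
      std::cout << Interp::code_size << " bytes reserved for interpreter code\n";
      return 0;
    }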
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
-#define CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
-
-
-  protected:
-
-  // Size of interpreter code.  Increase if too small.  Interpreter will
-  // fail with a guarantee ("not enough space for interpreter generation");
-  // if too small.
-  // Run with +PrintInterpreter to get the VM to print out the size.
-  // Max size with JVMTI
-#ifdef AMD64
-  const static int InterpreterCodeSize = 256 * 1024;
-#else
-  const static int InterpreterCodeSize = 224 * 1024;
-#endif // AMD64
-
-#endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
--- a/hotspot/src/cpu/x86/vm/templateTable_x86.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -38,8 +38,6 @@
 #include "runtime/synchronizer.hpp"
 #include "utilities/macros.hpp"
 
-#ifndef CC_INTERP
-
 #define __ _masm->
 
 // Global Register Names
@@ -4341,5 +4339,3 @@
   __ load_unsigned_byte(rbx, at_bcp(3));
   __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
 }
-#endif /* !CC_INTERP */
-
--- a/hotspot/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_ZERO_VM_CPPINTERPRETERGENERATOR_ZERO_HPP
-#define CPU_ZERO_VM_CPPINTERPRETERGENERATOR_ZERO_HPP
-
- protected:
-  MacroAssembler* assembler() const {
-    return _masm;
-  }
-
- public:
-  static address generate_entry_impl(MacroAssembler* masm, address entry_point) {
-    ZeroEntry *entry = (ZeroEntry *) masm->pc();
-    masm->advance(sizeof(ZeroEntry));
-    entry->set_entry_point(entry_point);
-    return (address) entry;
-  }
-
- protected:
-  address generate_entry(address entry_point) {
-        return generate_entry_impl(assembler(), entry_point);
-  }
-
-#endif // CPU_ZERO_VM_CPPINTERPRETERGENERATOR_ZERO_HPP
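Illustration (not part of the patch): the generate_entry_impl helper removed from this Zero header (it now lives with CppInterpreterGenerator) builds an "entry" by carving a small struct holding a C function pointer out of the code buffer instead of emitting machine code. A standalone sketch of that pattern with simplified stand-in types:

    #include <cstddef>
    #include <cstdio>

    typedef void (*entry_fn)(void);

    struct ZeroishEntry {                   // stands in for ZeroEntry
      entry_fn entry_point;
    };

    struct Buffer {                         // stands in for the assembler's code buffer
      alignas(ZeroishEntry) unsigned char storage[256];
      std::size_t used = 0;
      unsigned char* advance(std::size_t n) {
        unsigned char* p = storage + used;  // hand out the next chunk of the buffer
        used += n;
        return p;
      }
    };

    static void normal_entry() { std::puts("interpret the method"); }

    ZeroishEntry* generate_entry(Buffer& buf, entry_fn fn) {
      ZeroishEntry* e = (ZeroishEntry*)buf.advance(sizeof(ZeroishEntry));
      e->entry_point = fn;                  // record where calls through this entry land
      return e;
    }

    int main() {
      Buffer buf;
      ZeroishEntry* e = generate_entry(buf, normal_entry);
      e->entry_point();                     // "invoke" the method through the stub
      return 0;
    }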
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -27,8 +27,8 @@
 #include "asm/assembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/cppInterpreter.hpp"
+#include "interpreter/cppInterpreterGenerator.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
@@ -788,21 +788,21 @@
   return t;
 }
 
-address InterpreterGenerator::generate_empty_entry() {
+address CppInterpreterGenerator::generate_empty_entry() {
   if (!UseFastEmptyMethods)
     return NULL;
 
   return generate_entry((address) CppInterpreter::empty_entry);
 }
 
-address InterpreterGenerator::generate_accessor_entry() {
+address CppInterpreterGenerator::generate_accessor_entry() {
   if (!UseFastAccessorMethods)
     return NULL;
 
   return generate_entry((address) CppInterpreter::accessor_entry);
 }
 
-address InterpreterGenerator::generate_Reference_get_entry(void) {
+address CppInterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
   if (UseG1GC) {
    // We need to have a routine that generates code to:
@@ -822,20 +822,15 @@
   return NULL;
 }
 
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
+address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
   return generate_entry((address) CppInterpreter::native_entry);
 }
 
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
   return generate_entry((address) CppInterpreter::normal_entry);
 }
 
 
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
- : CppInterpreterGenerator(code) {
-   generate_all();
-}
-
 // Deoptimization helpers
 
 InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
@@ -980,31 +975,4 @@
 bool CppInterpreter::contains(address pc) {
   return false; // make frame::print_value_on work
 }
-
-// Result handlers and convertors
-
-address CppInterpreterGenerator::generate_result_handler_for(
-    BasicType type) {
-  assembler()->advance(1);
-  return ShouldNotCallThisStub();
-}
-
-address CppInterpreterGenerator::generate_tosca_to_stack_converter(
-    BasicType type) {
-  assembler()->advance(1);
-  return ShouldNotCallThisStub();
-}
-
-address CppInterpreterGenerator::generate_stack_to_stack_converter(
-    BasicType type) {
-  assembler()->advance(1);
-  return ShouldNotCallThisStub();
-}
-
-address CppInterpreterGenerator::generate_stack_to_native_abi_converter(
-    BasicType type) {
-  assembler()->advance(1);
-  return ShouldNotCallThisStub();
-}
-
 #endif // CC_INTERP
--- a/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -122,6 +122,11 @@
 inline intptr_t* frame::interpreter_frame_tos_address() const {
   return get_interpreterState()->_stack + 1;
 }
+
+inline oop* frame::interpreter_frame_temp_oop_addr() const {
+  interpreterState istate = get_interpreterState();
+  return (oop *)&istate->_oop_temp;
+}
 #endif // CC_INTERP
 
 inline int frame::interpreter_frame_monitor_size() {
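Illustration (not part of the patch): the new interpreter_frame_temp_oop_addr simply exposes the address of the interpreter state's oop temporary so callers can read or update it in place. A standalone sketch with simplified stand-in types:

    #include <cstdio>

    typedef void* oop;                      // stand-in for the VM's oop type

    struct InterpState { oop _oop_temp; };  // stands in for the BytecodeInterpreter state

    struct ZeroFrame {
      InterpState* istate;
      oop* interpreter_frame_temp_oop_addr() const { return &istate->_oop_temp; }
    };

    int main() {
      InterpState s = { nullptr };
      ZeroFrame f = { &s };
      *f.interpreter_frame_temp_oop_addr() = (oop)0x1234;   // caller updates the slot in place
      std::printf("oop temp now %p\n", s._oop_temp);
      return 0;
    }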
--- a/hotspot/src/cpu/zero/vm/interpreterGenerator_zero.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
-#define CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
-
-  // Generation of Interpreter
-  //
-  friend class AbstractInterpreterGenerator;
-
- private:
-  address generate_normal_entry(bool synchronized);
-  address generate_native_entry(bool synchronized);
-  address generate_abstract_entry();
-  address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry();
-  address generate_accessor_entry();
-  address generate_Reference_get_entry();
-
-  // Not supported
-  address generate_CRC32_update_entry() { return NULL; }
-  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
-  address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
-#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
--- a/hotspot/src/cpu/zero/vm/interpreter_zero.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/zero/vm/interpreter_zero.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -26,8 +26,8 @@
 #include "precompiled.hpp"
 #include "asm/assembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/cppInterpreterGenerator.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/templateTable.hpp"
 #include "oops/arrayOop.hpp"
@@ -57,7 +57,7 @@
   return (address) InterpreterRuntime::slow_signature_handler;
 }
 
-address InterpreterGenerator::generate_math_entry(
+address CppInterpreterGenerator::generate_math_entry(
     AbstractInterpreter::MethodKind kind) {
   if (!InlineIntrinsics)
     return NULL;
@@ -66,7 +66,7 @@
   return NULL;
 }
 
-address InterpreterGenerator::generate_abstract_entry() {
+address CppInterpreterGenerator::generate_abstract_entry() {
   return generate_entry((address) ShouldNotCallThisEntry());
 }
 
--- a/hotspot/src/cpu/zero/vm/interpreter_zero.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_ZERO_VM_INTERPRETER_ZERO_HPP
-#define CPU_ZERO_VM_INTERPRETER_ZERO_HPP
-
- public:
-  static void invoke_method(Method* method, address entry_point, TRAPS) {
-    ((ZeroEntry *) entry_point)->invoke(method, THREAD);
-  }
-  static void invoke_osr(Method* method,
-                         address   entry_point,
-                         address   osr_buf,
-                         TRAPS) {
-    ((ZeroEntry *) entry_point)->invoke_osr(method, osr_buf, THREAD);
-  }
-
- public:
-  static int expr_index_at(int i) {
-    return stackElementWords * i;
-  }
-
-  static int expr_offset_in_bytes(int i) {
-    return stackElementSize * i;
-  }
-
-  static int local_index_at(int i) {
-    assert(i <= 0, "local direction already negated");
-    return stackElementWords * i;
-  }
-
-#endif // CPU_ZERO_VM_INTERPRETER_ZERO_HPP
--- a/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -24,7 +24,7 @@
  */
 
 #include "precompiled.hpp"
-#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/cppInterpreterGenerator.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -167,16 +167,16 @@
     // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
     // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
     // They all allow an appendix argument.
-    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invalid);
+    return CppInterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invalid);
   case vmIntrinsics::_invokeBasic:
-    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invokeBasic);
+    return CppInterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invokeBasic);
   case vmIntrinsics::_linkToStatic:
   case vmIntrinsics::_linkToSpecial:
-    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToStaticOrSpecial);
+    return CppInterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToStaticOrSpecial);
   case vmIntrinsics::_linkToInterface:
-    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToInterface);
+    return CppInterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToInterface);
   case vmIntrinsics::_linkToVirtual:
-    return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToVirtual);
+    return CppInterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToVirtual);
   default:
     ShouldNotReachHere();
     return NULL;
--- a/hotspot/src/cpu/zero/vm/templateInterpreterGenerator_zero.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_ZERO_VM_TEMPLATEINTERPRETERGENERATOR_ZERO_HPP
-#define CPU_ZERO_VM_TEMPLATEINTERPRETERGENERATOR_ZERO_HPP
-
-// This file is intentionally empty
-
-#endif // CPU_ZERO_VM_TEMPLATEINTERPRETERGENERATOR_ZERO_HPP
--- a/hotspot/src/cpu/zero/vm/templateInterpreter_zero.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-
-// This file is intentionally empty
--- a/hotspot/src/cpu/zero/vm/templateInterpreter_zero.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_ZERO_VM_TEMPLATEINTERPRETER_ZERO_HPP
-#define CPU_ZERO_VM_TEMPLATEINTERPRETER_ZERO_HPP
-
-// This file is intentionally empty
-
-#endif // CPU_ZERO_VM_TEMPLATEINTERPRETER_ZERO_HPP
--- a/hotspot/src/cpu/zero/vm/templateTable_zero.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/templateTable.hpp"
-#include "memory/universe.inline.hpp"
-#include "oops/methodData.hpp"
-#include "oops/objArrayKlass.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/methodHandles.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-
-// This file is intentionally empty
--- a/hotspot/src/cpu/zero/vm/templateTable_zero.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_ZERO_VM_TEMPLATETABLE_ZERO_HPP
-#define CPU_ZERO_VM_TEMPLATETABLE_ZERO_HPP
-
-// This file is intentionally empty
-
-#endif // CPU_ZERO_VM_TEMPLATETABLE_ZERO_HPP
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -48,22 +48,13 @@
 //                                                      Also code for populating interpreter
 //                                                      frames created during deoptimization.
 //
-// For both template and c++ interpreter. There are common files for aspects of the interpreter
-// that are generic to both interpreters. This is the layout:
-//
-// abstractInterpreter.hpp: generic description of the interpreter.
-// interpreter*:            generic frame creation and handling.
-//
-
-//------------------------------------------------------------------------------------------------------------------------
-// The C++ interface to the bytecode interpreter(s).
 
 class InterpreterMacroAssembler;
 
 class AbstractInterpreter: AllStatic {
   friend class VMStructs;
-  friend class Interpreter;
   friend class CppInterpreterGenerator;
+  friend class TemplateInterpreterGenerator;
  public:
   enum MethodKind {
     zerolocals,                                                 // method needs locals initialization
@@ -128,7 +119,6 @@
   static address    _rethrow_exception_entry;                   // rethrows an activation in previous frame
 
   friend class      AbstractInterpreterGenerator;
-  friend class              InterpreterGenerator;
   friend class      InterpreterMacroAssembler;
 
  public:
@@ -213,6 +203,29 @@
   const static int stackElementSize    = stackElementWords * wordSize;
   const static int logStackElementSize = LogBytesPerWord;
 
+  static int expr_index_at(int i) {
+    return stackElementWords * i;
+  }
+
+  static int expr_offset_in_bytes(int i) {
+#if !defined(ZERO) && (defined(PPC) || defined(SPARC))
+    return stackElementSize * i + wordSize;  // both point to one word past TOS
+#else
+    return stackElementSize * i;
+#endif
+  }
+
+  static int local_index_at(int i) {
+    assert(i <= 0, "local direction already negated");
+    return stackElementWords * i;
+  }
+
+#if !defined(ZERO) && (defined(IA32) || defined(AMD64))
+  static Address::ScaleFactor stackElementScale() {
+    return NOT_LP64(Address::times_4) LP64_ONLY(Address::times_8);
+  }
+#endif
+
   // Local values relative to locals[n]
   static int  local_offset_in_bytes(int n) {
     return ((frame::interpreter_frame_expression_stack_direction() * n) * stackElementSize);
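For illustration, a standalone sketch (not code from this changeset) of the slot-to-offset arithmetic behind the helpers consolidated into AbstractInterpreter above, assuming an LP64 platform where stackElementWords is 1 and wordSize is 8:

// Illustrative only -- mirrors the arithmetic of the helpers above.
static const int wordSize          = 8;   // assumption: 64-bit word
static const int stackElementWords = 1;   // one word per expression-stack slot
static const int stackElementSize  = stackElementWords * wordSize;

static int expr_offset_in_bytes(int i) {
  return stackElementSize * i;            // x86/AArch64 flavour; PPC/SPARC add wordSize
}

static int local_index_at(int i) {
  return stackElementWords * i;           // callers pass i <= 0 (direction already negated)
}

int main() {
  // expression-stack slot 2 lives 16 bytes from the top-of-stack pointer
  return (expr_offset_in_bytes(2) == 16 && local_index_at(-1) == -1) ? 0 : 1;
}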
--- a/hotspot/src/share/vm/interpreter/bytecodeHistogram.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/bytecodeHistogram.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,6 @@
   NOT_PRODUCT(static int _counters[Bytecodes::number_of_codes];)   // a counter for each bytecode
 
   friend class TemplateInterpreterGenerator;
-  friend class         InterpreterGenerator;
   friend class         BytecodeInterpreter;
 
  public:
@@ -87,7 +86,6 @@
   NOT_PRODUCT(static int  _counters[number_of_pairs];)  // a counter for each pair
 
   friend class TemplateInterpreterGenerator;
-  friend class         InterpreterGenerator;
 
  public:
   // Initialization
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,6 @@
 friend class SharedRuntime;
 friend class AbstractInterpreterGenerator;
 friend class CppInterpreterGenerator;
-friend class InterpreterGenerator;
 friend class InterpreterMacroAssembler;
 friend class frame;
 friend class VMStructs;
@@ -572,24 +571,10 @@
 void print();
 #endif // PRODUCT
 
-    // Platform fields/methods
-#ifdef TARGET_ARCH_x86
-# include "bytecodeInterpreter_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "bytecodeInterpreter_sparc.hpp"
-#endif
 #ifdef TARGET_ARCH_zero
 # include "bytecodeInterpreter_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "bytecodeInterpreter_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "bytecodeInterpreter_ppc.hpp"
-#endif
-#ifdef TARGET_ARCH_aarch64
-# include "bytecodeInterpreter_aarch64.hpp"
+#else
+#error "Only Zero Bytecode Interpreter is supported"
 #endif
 
 
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,24 +42,10 @@
 #define VERIFY_OOP(o)
 #endif
 
-// Platform dependent data manipulation
-#ifdef TARGET_ARCH_x86
-# include "bytecodeInterpreter_x86.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "bytecodeInterpreter_sparc.inline.hpp"
-#endif
 #ifdef TARGET_ARCH_zero
 # include "bytecodeInterpreter_zero.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "bytecodeInterpreter_arm.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "bytecodeInterpreter_ppc.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_aarch64
-# include "bytecodeInterpreter_aarch64.inline.hpp"
+#else
+#error "Only Zero Bytecode Interpreter is supported"
 #endif
 
 #endif // CC_INTERP
--- a/hotspot/src/share/vm/interpreter/cppInterpreter.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/cppInterpreter.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,12 +24,17 @@
 
 #include "precompiled.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
+#include "interpreter/cppInterpreterGenerator.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 
 #ifdef CC_INTERP
-# define __ _masm->
+
+#ifdef ZERO
+# include "entry_zero.hpp"
+#else
+#error "Only Zero CppInterpreter is supported"
+#endif
 
 void CppInterpreter::initialize() {
   if (_code != NULL) return;
@@ -42,7 +47,7 @@
     NOT_PRODUCT(code_size *= 4;)  // debug uses extra interpreter code space
     _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL,
                           "Interpreter");
-    InterpreterGenerator g(_code);
+    CppInterpreterGenerator g(_code);
     if (PrintInterpreter) print();
   }
 
@@ -56,11 +61,20 @@
 }
 
 
-address    CppInterpreter::_tosca_to_stack         [AbstractInterpreter::number_of_result_handlers];
-address    CppInterpreter::_stack_to_stack         [AbstractInterpreter::number_of_result_handlers];
-address    CppInterpreter::_stack_to_native_abi    [AbstractInterpreter::number_of_result_handlers];
+void CppInterpreter::invoke_method(Method* method, address entry_point, TRAPS) {
+  ((ZeroEntry *) entry_point)->invoke(method, THREAD);
+}
+
+void CppInterpreter::invoke_osr(Method* method,
+                                address   entry_point,
+                                address   osr_buf,
+                                TRAPS) {
+  ((ZeroEntry *) entry_point)->invoke_osr(method, osr_buf, THREAD);
+}
+
 
 CppInterpreterGenerator::CppInterpreterGenerator(StubQueue* _code): AbstractInterpreterGenerator(_code) {
+  generate_all();
 }
 
 static const BasicType types[Interpreter::number_of_result_handlers] = {
@@ -79,36 +93,8 @@
 void CppInterpreterGenerator::generate_all() {
   AbstractInterpreterGenerator::generate_all();
 
-  { CodeletMark cm(_masm, "result handlers for native calls");
-    // The various result converter stublets.
-    int is_generated[Interpreter::number_of_result_handlers];
-    memset(is_generated, 0, sizeof(is_generated));
-    int _tosca_to_stack_is_generated[Interpreter::number_of_result_handlers];
-    int _stack_to_stack_is_generated[Interpreter::number_of_result_handlers];
-    int _stack_to_native_abi_is_generated[Interpreter::number_of_result_handlers];
 
-    memset(_tosca_to_stack_is_generated, 0, sizeof(_tosca_to_stack_is_generated));
-    memset(_stack_to_stack_is_generated, 0, sizeof(_stack_to_stack_is_generated));
-    memset(_stack_to_native_abi_is_generated, 0, sizeof(_stack_to_native_abi_is_generated));
-    for (int i = 0; i < Interpreter::number_of_result_handlers; i++) {
-      BasicType type = types[i];
-      if (!is_generated[Interpreter::BasicType_as_index(type)]++) {
-        Interpreter::_native_abi_to_tosca[Interpreter::BasicType_as_index(type)] = generate_result_handler_for(type);
-      }
-      if (!_tosca_to_stack_is_generated[Interpreter::BasicType_as_index(type)]++) {
-        Interpreter::_tosca_to_stack[Interpreter::BasicType_as_index(type)] = generate_tosca_to_stack_converter(type);
-      }
-      if (!_stack_to_stack_is_generated[Interpreter::BasicType_as_index(type)]++) {
-        Interpreter::_stack_to_stack[Interpreter::BasicType_as_index(type)] = generate_stack_to_stack_converter(type);
-      }
-      if (!_stack_to_native_abi_is_generated[Interpreter::BasicType_as_index(type)]++) {
-        Interpreter::_stack_to_native_abi[Interpreter::BasicType_as_index(type)] = generate_stack_to_native_abi_converter(type);
-      }
-    }
-  }
-
-
-#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind)
+#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind)
 
   { CodeletMark cm(_masm, "(kind = frame_manager)");
     // all non-native method kinds
@@ -138,7 +124,63 @@
 
 
 #undef method_entry
+}
 
+InterpreterCodelet* CppInterpreter::codelet_containing(address pc) {
+  // FIXME: I'm pretty sure _code is null and this is never called, which is why it's copied.
+  return (InterpreterCodelet*)_code->stub_containing(pc);
 }
 
+// Generate method entries
+address CppInterpreterGenerator::generate_method_entry(
+                                        AbstractInterpreter::MethodKind kind) {
+  // determine code generation flags
+  bool native = false;
+  bool synchronized = false;
+  address entry_point = NULL;
+
+  switch (kind) {
+  case Interpreter::zerolocals             :                                          break;
+  case Interpreter::zerolocals_synchronized:                synchronized = true;      break;
+  case Interpreter::native                 : native = true;                           break;
+  case Interpreter::native_synchronized    : native = true; synchronized = true;      break;
+  case Interpreter::empty                  : entry_point = generate_empty_entry();    break;
+  case Interpreter::accessor               : entry_point = generate_accessor_entry(); break;
+  case Interpreter::abstract               : entry_point = generate_abstract_entry(); break;
+
+  case Interpreter::java_lang_math_sin     : // fall thru
+  case Interpreter::java_lang_math_cos     : // fall thru
+  case Interpreter::java_lang_math_tan     : // fall thru
+  case Interpreter::java_lang_math_abs     : // fall thru
+  case Interpreter::java_lang_math_log     : // fall thru
+  case Interpreter::java_lang_math_log10   : // fall thru
+  case Interpreter::java_lang_math_sqrt    : // fall thru
+  case Interpreter::java_lang_math_pow     : // fall thru
+  case Interpreter::java_lang_math_exp     : entry_point = generate_math_entry(kind);      break;
+  case Interpreter::java_lang_ref_reference_get
+                                           : entry_point = generate_Reference_get_entry(); break;
+  default:
+    fatal("unexpected method kind: %d", kind);
+    break;
+  }
+
+  if (entry_point) {
+    return entry_point;
+  }
+
+  // We expect the normal and native entry points to be generated first so we can reuse them.
+  if (native) {
+    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::native_synchronized : Interpreter::native);
+    if (entry_point == NULL) {
+      entry_point = generate_native_entry(synchronized);
+    }
+  } else {
+    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::zerolocals_synchronized : Interpreter::zerolocals);
+    if (entry_point == NULL) {
+      entry_point = generate_normal_entry(synchronized);
+    }
+  }
+
+  return entry_point;
+}
 #endif // CC_INTERP
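For illustration, a standalone sketch (hypothetical Fake* names, not HotSpot types) of the dispatch shape behind CppInterpreter::invoke_method above: under Zero an interpreter entry point is a small record holding a C++ function pointer, so invoking a method is a cast followed by an ordinary call.

#include <cstdint>

// Hypothetical stand-ins to show the shape only.
typedef void (*entry_fn)(void* method, intptr_t unused, void* thread);

struct FakeZeroEntry {
  entry_fn _entry_point;
  void invoke(void* method, void* thread) const {
    _entry_point(method, 0, thread);   // plain C++ call; no generated machine code
  }
};

static void fake_invoke_method(void* method, unsigned char* entry_point, void* thread) {
  ((FakeZeroEntry*) entry_point)->invoke(method, thread);
}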
--- a/hotspot/src/share/vm/interpreter/cppInterpreter.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/cppInterpreter.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,40 +26,24 @@
 #define SHARE_VM_INTERPRETER_CPPINTERPRETER_HPP
 
 #include "interpreter/abstractInterpreter.hpp"
+#ifdef CC_INTERP
 
-#ifdef CC_INTERP
+class InterpreterCodelet;
 
 // This file contains the platform-independent parts
 // of the c++ interpreter
 
 class CppInterpreter: public AbstractInterpreter {
   friend class VMStructs;
-  friend class Interpreter; // contains()
-  friend class InterpreterGenerator; // result handlers
-  friend class CppInterpreterGenerator; // result handlers
- public:
-
-
- protected:
-
-  // tosca result -> stack result
-  static address    _tosca_to_stack[number_of_result_handlers];  // converts tosca to C++ interpreter stack result
-  // stack result -> stack result
-  static address    _stack_to_stack[number_of_result_handlers];  // pass result between C++ interpreter calls
-  // stack result -> native abi result
-  static address    _stack_to_native_abi[number_of_result_handlers];  // converts C++ interpreter results to native abi
-
-  // this is to allow frame and only frame to use contains().
-  friend class      frame;
-
  public:
   // Initialization/debugging
   static void       initialize();
   // this only returns whether a pc is within generated code for the interpreter.
 
-  // This is a moderately dubious interface for the c++ interpreter. Only
+  // This is a moderately dubious set of interfaces for the c++ interpreter. Only
   // frame code and debug.cpp should be using it.
   static bool       contains(address pc);
+  static InterpreterCodelet* codelet_containing(address pc);
 
  public:
 
@@ -68,38 +52,17 @@
   static void notice_safepoints() {}
   static void ignore_safepoints() {}
 
-  static address    native_result_to_tosca()                    { return (address)_native_abi_to_tosca; } // aka result handler
-  static address    tosca_result_to_stack()                     { return (address)_tosca_to_stack; }
-  static address    stack_result_to_stack()                     { return (address)_stack_to_stack; }
-  static address    stack_result_to_native()                    { return (address)_stack_to_native_abi; }
-
-  static address    native_result_to_tosca(int index)           { return _native_abi_to_tosca[index]; } // aka result handler
-  static address    tosca_result_to_stack(int index)            { return _tosca_to_stack[index]; }
-  static address    stack_result_to_stack(int index)            { return _stack_to_stack[index]; }
-  static address    stack_result_to_native(int index)           { return _stack_to_native_abi[index]; }
-
   static address    return_entry  (TosState state, int length, Bytecodes::Code code);
   static address    deopt_entry   (TosState state, int length);
 
-#ifdef TARGET_ARCH_x86
-# include "cppInterpreter_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "cppInterpreter_sparc.hpp"
-#endif
+  static void invoke_method(Method* method, address entry_point, TRAPS);
+  static void invoke_osr(Method* method,
+                         address   entry_point,
+                         address   osr_buf,
+                         TRAPS);
 #ifdef TARGET_ARCH_zero
 # include "cppInterpreter_zero.hpp"
 #endif
-#ifdef TARGET_ARCH_arm
-# include "cppInterpreter_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "cppInterpreter_ppc.hpp"
-#endif
-#ifdef TARGET_ARCH_aarch64
-# include "cppInterpreter_aarch64.hpp"
-#endif
-
 
 };
 
--- a/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,46 +29,48 @@
 // of the template interpreter generator.
 
 #ifdef CC_INTERP
-#ifdef TARGET_ARCH_zero
+#ifdef ZERO
 # include "entry_zero.hpp"
 # include "interpreter/interp_masm.hpp"
 #endif
 
 class CppInterpreterGenerator: public AbstractInterpreterGenerator {
-  protected:
-  // shared code sequences
-  // Converter for native abi result to tosca result
-  address generate_result_handler_for(BasicType type);
-  address generate_tosca_to_stack_converter(BasicType type);
-  address generate_stack_to_stack_converter(BasicType type);
-  address generate_stack_to_native_abi_converter(BasicType type);
+
+ private:
+  void generate_all();
 
-  void generate_all();
+  address generate_method_entry(AbstractInterpreter::MethodKind kind);
+  address generate_normal_entry(bool synchronized);
+  address generate_native_entry(bool synchronized);
+  address generate_abstract_entry();
+  address generate_math_entry(AbstractInterpreter::MethodKind kind);
+  address generate_empty_entry();
+  address generate_accessor_entry();
+  address generate_Reference_get_entry();
 
  public:
   CppInterpreterGenerator(StubQueue* _code);
 
-#ifdef TARGET_ARCH_x86
-# include "cppInterpreterGenerator_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "cppInterpreterGenerator_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "cppInterpreterGenerator_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "cppInterpreterGenerator_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "cppInterpreterGenerator_ppc.hpp"
-#endif
-#ifdef TARGET_ARCH_aarch64
-# include "cppInterpreterGenerator_aarch64.hpp"
-#endif
+#ifdef ZERO
+ protected:
+  MacroAssembler* assembler() const {
+    return _masm;
+  }
 
+ public:
+  static address generate_entry_impl(MacroAssembler* masm, address entry_point) {
+    ZeroEntry *entry = (ZeroEntry *) masm->pc();
+    masm->advance(sizeof(ZeroEntry));
+    entry->set_entry_point(entry_point);
+    return (address) entry;
+  }
+
+ protected:
+  address generate_entry(address entry_point) {
+    return generate_entry_impl(assembler(), entry_point);
+  }
+#endif // ZERO
 };
 
 #endif // CC_INTERP
-
 #endif // SHARE_VM_INTERPRETER_CPPINTERPRETERGENERATOR_HPP
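For illustration, a standalone sketch (hypothetical Fake* types) of the allocation pattern used by generate_entry_impl above: no instructions are emitted; an entry record is bump-allocated in the stub buffer via pc()/advance() and its target is filled in.

#include <cstddef>

// Hypothetical buffer that mimics the pc()/advance() interface used above.
struct FakeBuffer {
  unsigned char* _pc;
  unsigned char* pc() const  { return _pc; }
  void advance(size_t n)     { _pc += n; }
};

struct FakeEntry {
  void* _entry_point;
  void set_entry_point(void* p) { _entry_point = p; }
};

static unsigned char* fake_generate_entry(FakeBuffer* buf, void* entry_point) {
  FakeEntry* entry = (FakeEntry*) buf->pc();   // record starts at the current "pc"
  buf->advance(sizeof(FakeEntry));             // reserve its space in the buffer
  entry->set_entry_point(entry_point);
  return (unsigned char*) entry;               // the record's address is the "entry"
}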
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -29,7 +29,6 @@
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/templateTable.hpp"
@@ -282,7 +281,7 @@
   // Special intrinsic method?
   // Note: This test must come _after_ the test for native methods,
   //       otherwise we will run into problems with JDK 1.2, see also
-  //       InterpreterGenerator::generate_method_entry() for
+  //       TemplateInterpreterGenerator::generate_method_entry()
   //       for details.
   switch (m->intrinsic_id()) {
     case vmIntrinsics::_dsin  : return java_lang_math_sin  ;
@@ -548,87 +547,3 @@
     Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
   }
 }
-
-// Generate method entries
-address InterpreterGenerator::generate_method_entry(
-                                        AbstractInterpreter::MethodKind kind) {
-  // determine code generation flags
-  bool native = false;
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-  case Interpreter::zerolocals             :                                          break;
-  case Interpreter::zerolocals_synchronized:                synchronized = true;      break;
-  case Interpreter::native                 : native = true;                           break;
-  case Interpreter::native_synchronized    : native = true; synchronized = true;      break;
-  case Interpreter::empty                  : entry_point = generate_empty_entry();    break;
-  case Interpreter::accessor               : entry_point = generate_accessor_entry(); break;
-  case Interpreter::abstract               : entry_point = generate_abstract_entry(); break;
-
-  case Interpreter::java_lang_math_sin     : // fall thru
-  case Interpreter::java_lang_math_cos     : // fall thru
-  case Interpreter::java_lang_math_tan     : // fall thru
-  case Interpreter::java_lang_math_abs     : // fall thru
-  case Interpreter::java_lang_math_log     : // fall thru
-  case Interpreter::java_lang_math_log10   : // fall thru
-  case Interpreter::java_lang_math_sqrt    : // fall thru
-  case Interpreter::java_lang_math_pow     : // fall thru
-  case Interpreter::java_lang_math_exp     : entry_point = generate_math_entry(kind);      break;
-  case Interpreter::java_lang_ref_reference_get
-                                           : entry_point = generate_Reference_get_entry(); break;
-#ifndef CC_INTERP
-  case Interpreter::java_util_zip_CRC32_update
-                                           : native = true; entry_point = generate_CRC32_update_entry();  break;
-  case Interpreter::java_util_zip_CRC32_updateBytes
-                                           : // fall thru
-  case Interpreter::java_util_zip_CRC32_updateByteBuffer
-                                           : native = true; entry_point = generate_CRC32_updateBytes_entry(kind); break;
-  case Interpreter::java_util_zip_CRC32C_updateBytes
-                                           : // fall thru
-  case Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer
-                                           : entry_point = generate_CRC32C_updateBytes_entry(kind); break;
-#if defined(TARGET_ARCH_x86) && !defined(_LP64)
-  // On x86_32 platforms, a special entry is generated for the following four methods.
-  // On other platforms the normal entry is used to enter these methods.
-  case Interpreter::java_lang_Float_intBitsToFloat
-                                           : native = true; entry_point = generate_Float_intBitsToFloat_entry(); break;
-  case Interpreter::java_lang_Float_floatToRawIntBits
-                                           : native = true; entry_point = generate_Float_floatToRawIntBits_entry(); break;
-  case Interpreter::java_lang_Double_longBitsToDouble
-                                           : native = true; entry_point = generate_Double_longBitsToDouble_entry(); break;
-  case Interpreter::java_lang_Double_doubleToRawLongBits
-                                           : native = true; entry_point = generate_Double_doubleToRawLongBits_entry(); break;
-#else
-  case Interpreter::java_lang_Float_intBitsToFloat:
-  case Interpreter::java_lang_Float_floatToRawIntBits:
-  case Interpreter::java_lang_Double_longBitsToDouble:
-  case Interpreter::java_lang_Double_doubleToRawLongBits:
-    native = true;
-    break;
-#endif // defined(TARGET_ARCH_x86) && !defined(_LP64)
-#endif // CC_INTERP
-  default:
-    fatal("unexpected method kind: %d", kind);
-    break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-
-  // We expect the normal and native entry points to be generated first so we can reuse them.
-  if (native) {
-    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::native_synchronized : Interpreter::native);
-    if (entry_point == NULL) {
-      entry_point = generate_native_entry(synchronized);
-    }
-  } else {
-    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::zerolocals_synchronized : Interpreter::zerolocals);
-    if (entry_point == NULL) {
-      entry_point = generate_normal_entry(synchronized);
-    }
-  }
-
-  return entry_point;
-}
--- a/hotspot/src/share/vm/interpreter/interpreter.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/interpreter.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -29,9 +29,6 @@
 #include "interpreter/cppInterpreter.hpp"
 #include "interpreter/templateInterpreter.hpp"
 #include "memory/resourceArea.hpp"
-#ifdef TARGET_ARCH_zero
-# include "entry_zero.hpp"
-#endif
 
 // This file contains the platform-independent parts
 // of the interpreter and the interpreter generator.
@@ -116,34 +113,9 @@
   ~CodeletMark();
 };
 
-// Wrapper classes to produce Interpreter/InterpreterGenerator from either
+// Wrapper typedef to use the name Interpreter to mean either
 // the c++ interpreter or the template interpreter.
 
-class Interpreter: public CC_INTERP_ONLY(CppInterpreter) NOT_CC_INTERP(TemplateInterpreter) {
-
- public:
-  // Debugging/printing
-  static InterpreterCodelet* codelet_containing(address pc) { return (InterpreterCodelet*)_code->stub_containing(pc); }
-
-#ifdef TARGET_ARCH_x86
-# include "interpreter_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "interpreter_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "interpreter_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "interpreter_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "interpreter_ppc.hpp"
-#endif
-#ifdef TARGET_ARCH_aarch64
-# include "interpreter_aarch64.hpp"
-#endif
-
-};
+typedef CC_INTERP_ONLY(CppInterpreter) NOT_CC_INTERP(TemplateInterpreter) Interpreter;
 
 #endif // SHARE_VM_INTERPRETER_INTERPRETER_HPP
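For illustration, a simplified sketch of the build-selection macros behind the new Interpreter typedef (the real definitions live in utilities/macros.hpp; this only mirrors their effect): exactly one class name survives preprocessing, so Interpreter aliases CppInterpreter in CC_INTERP (Zero) builds and TemplateInterpreter otherwise.

// Simplified versions of the build-selection macros, for illustration.
#ifdef CC_INTERP
#define CC_INTERP_ONLY(code) code
#define NOT_CC_INTERP(code)
#else
#define CC_INTERP_ONLY(code)
#define NOT_CC_INTERP(code)  code
#endif

class CppInterpreter;
class TemplateInterpreter;

// Expands to exactly one of:
//   typedef CppInterpreter      Interpreter;   (CC_INTERP builds, i.e. Zero)
//   typedef TemplateInterpreter Interpreter;   (all other builds)
typedef CC_INTERP_ONLY(CppInterpreter) NOT_CC_INTERP(TemplateInterpreter) Interpreter;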
--- a/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_INTERPRETER_INTERPRETERGENERATOR_HPP
-#define SHARE_VM_INTERPRETER_INTERPRETERGENERATOR_HPP
-
-#include "interpreter/cppInterpreter.hpp"
-#include "interpreter/cppInterpreterGenerator.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateInterpreter.hpp"
-#include "interpreter/templateInterpreterGenerator.hpp"
-
-// This file contains the platform-independent parts
-// of the interpreter generator.
-
-
-class InterpreterGenerator: public CC_INTERP_ONLY(CppInterpreterGenerator)
-                                   NOT_CC_INTERP(TemplateInterpreterGenerator) {
-
- public:
-
-  InterpreterGenerator(StubQueue* _code);
-  // entry point generator
-  address generate_method_entry(AbstractInterpreter::MethodKind kind);
-
-#ifdef TARGET_ARCH_x86
-# include "interpreterGenerator_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "interpreterGenerator_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "interpreterGenerator_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "interpreterGenerator_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "interpreterGenerator_ppc.hpp"
-#endif
-#ifdef TARGET_ARCH_aarch64
-# include "interpreterGenerator_aarch64.hpp"
-#endif
-
-
-};
-
-#endif // SHARE_VM_INTERPRETER_INTERPRETERGENERATOR_HPP
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -115,7 +115,7 @@
   static void    note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci);
   static void    note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci);
   static void    note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci);
-  // A dummy for makros that shall not profile traps.
+  // A dummy for macros that shall not profile traps.
   static void    note_no_trap(JavaThread* thread, Method *method, int trap_bci) {}
 #endif // CC_INTERP
 
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -25,10 +25,10 @@
 #include "precompiled.hpp"
 #include "code/codeCacheExtensions.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/templateInterpreter.hpp"
+#include "interpreter/templateInterpreterGenerator.hpp"
 #include "interpreter/templateTable.hpp"
 
 #ifndef CC_INTERP
@@ -59,7 +59,7 @@
 #endif
     _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL,
                           "Interpreter");
-    InterpreterGenerator g(_code);
+    TemplateInterpreterGenerator g(_code);
   }
   if (PrintInterpreter) {
     if (CodeCacheExtensions::saving_generated_interpreter() &&
@@ -222,6 +222,7 @@
 TemplateInterpreterGenerator::TemplateInterpreterGenerator(StubQueue* _code): AbstractInterpreterGenerator(_code) {
   _unimplemented_bytecode    = NULL;
   _illegal_bytecode_sequence = NULL;
+  generate_all();
 }
 
 static const BasicType types[Interpreter::number_of_result_handlers] = {
@@ -392,7 +393,7 @@
 
 #define method_entry(kind)                                              \
       { CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
-        Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind); \
+        Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
       }
 
       // all non-native method kinds
@@ -719,4 +720,89 @@
   }
 }
 
+InterpreterCodelet* TemplateInterpreter::codelet_containing(address pc) {
+  return (InterpreterCodelet*)_code->stub_containing(pc);
+}
+
+// Generate method entries
+address TemplateInterpreterGenerator::generate_method_entry(
+                                        AbstractInterpreter::MethodKind kind) {
+  // determine code generation flags
+  bool native = false;
+  bool synchronized = false;
+  address entry_point = NULL;
+
+  switch (kind) {
+  case Interpreter::zerolocals             :                                          break;
+  case Interpreter::zerolocals_synchronized:                synchronized = true;      break;
+  case Interpreter::native                 : native = true;                           break;
+  case Interpreter::native_synchronized    : native = true; synchronized = true;      break;
+  case Interpreter::empty                  : break;
+  case Interpreter::accessor               : break;
+  case Interpreter::abstract               : entry_point = generate_abstract_entry(); break;
+
+  case Interpreter::java_lang_math_sin     : // fall thru
+  case Interpreter::java_lang_math_cos     : // fall thru
+  case Interpreter::java_lang_math_tan     : // fall thru
+  case Interpreter::java_lang_math_abs     : // fall thru
+  case Interpreter::java_lang_math_log     : // fall thru
+  case Interpreter::java_lang_math_log10   : // fall thru
+  case Interpreter::java_lang_math_sqrt    : // fall thru
+  case Interpreter::java_lang_math_pow     : // fall thru
+  case Interpreter::java_lang_math_exp     : entry_point = generate_math_entry(kind);      break;
+  case Interpreter::java_lang_ref_reference_get
+                                           : entry_point = generate_Reference_get_entry(); break;
+  case Interpreter::java_util_zip_CRC32_update
+                                           : native = true; entry_point = generate_CRC32_update_entry();  break;
+  case Interpreter::java_util_zip_CRC32_updateBytes
+                                           : // fall thru
+  case Interpreter::java_util_zip_CRC32_updateByteBuffer
+                                           : native = true; entry_point = generate_CRC32_updateBytes_entry(kind); break;
+  case Interpreter::java_util_zip_CRC32C_updateBytes
+                                           : // fall thru
+  case Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer
+                                           : entry_point = generate_CRC32C_updateBytes_entry(kind); break;
+#ifdef IA32
+  // On x86_32 platforms, a special entry is generated for the following four methods.
+  // On other platforms the normal entry is used to enter these methods.
+  case Interpreter::java_lang_Float_intBitsToFloat
+                                           : native = true; entry_point = generate_Float_intBitsToFloat_entry(); break;
+  case Interpreter::java_lang_Float_floatToRawIntBits
+                                           : native = true; entry_point = generate_Float_floatToRawIntBits_entry(); break;
+  case Interpreter::java_lang_Double_longBitsToDouble
+                                           : native = true; entry_point = generate_Double_longBitsToDouble_entry(); break;
+  case Interpreter::java_lang_Double_doubleToRawLongBits
+                                           : native = true; entry_point = generate_Double_doubleToRawLongBits_entry(); break;
+#else
+  case Interpreter::java_lang_Float_intBitsToFloat:
+  case Interpreter::java_lang_Float_floatToRawIntBits:
+  case Interpreter::java_lang_Double_longBitsToDouble:
+  case Interpreter::java_lang_Double_doubleToRawLongBits:
+    native = true;
+    break;
+#endif // IA32
+  default:
+    fatal("unexpected method kind: %d", kind);
+    break;
+  }
+
+  if (entry_point) {
+    return entry_point;
+  }
+
+  // We expect the normal and native entry points to be generated first so we can reuse them.
+  if (native) {
+    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::native_synchronized : Interpreter::native);
+    if (entry_point == NULL) {
+      entry_point = generate_native_entry(synchronized);
+    }
+  } else {
+    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::zerolocals_synchronized : Interpreter::zerolocals);
+    if (entry_point == NULL) {
+      entry_point = generate_normal_entry(synchronized);
+    }
+  }
+
+  return entry_point;
+}
 #endif // !CC_INTERP
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -34,6 +34,7 @@
 #ifndef CC_INTERP
 
 class InterpreterMacroAssembler;
+class InterpreterCodelet;
 
 //------------------------------------------------------------------------------------------------------------------------
 // A little wrapper class to group tosca-specific entry points into a unit.
@@ -85,7 +86,6 @@
   friend class VMStructs;
   friend class InterpreterMacroAssembler;
   friend class TemplateInterpreterGenerator;
-  friend class InterpreterGenerator;
   friend class TemplateTable;
   friend class CodeCacheExtensions;
   // friend class Interpreter;
@@ -137,6 +137,9 @@
   static void       initialize();
   // this only returns whether a pc is within generated code for the interpreter.
   static bool       contains(address pc)                        { return _code != NULL && _code->contains(pc); }
+  // Debugging/printing
+  static InterpreterCodelet* codelet_containing(address pc);
+
 
  public:
 
@@ -188,26 +191,15 @@
   // Compute the address for reexecution
   static address deopt_reexecute_entry(Method* method, address bcp);
 
-#ifdef TARGET_ARCH_x86
-# include "templateInterpreter_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "templateInterpreter_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "templateInterpreter_zero.hpp"
+  // Size of interpreter code.  Max size with JVMTI
+  static int InterpreterCodeSize;
+
+#ifdef PPC
+ public:
+  // PPC-only: Support abs and sqrt like in compiler.
+  // For others we can use a normal (native) entry.
+  static bool math_entry_available(AbstractInterpreter::MethodKind kind);
 #endif
-#ifdef TARGET_ARCH_arm
-# include "templateInterpreter_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "templateInterpreter_ppc.hpp"
-#endif
-#ifdef TARGET_ARCH_aarch64
-# include "templateInterpreter_aarch64.hpp"
-#endif
-
-
 };
 
 #endif // !CC_INTERP
--- a/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -82,29 +82,51 @@
 
   void generate_all();
 
+  // entry point generator
+  address generate_method_entry(AbstractInterpreter::MethodKind kind);
+
+  address generate_normal_entry(bool synchronized);
+  address generate_native_entry(bool synchronized);
+  address generate_abstract_entry(void);
+  address generate_math_entry(AbstractInterpreter::MethodKind kind);
+  address generate_Reference_get_entry();
+  address generate_CRC32_update_entry();
+  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
+  address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind);
+#ifdef IA32
+  address generate_Float_intBitsToFloat_entry();
+  address generate_Float_floatToRawIntBits_entry();
+  address generate_Double_longBitsToDouble_entry();
+  address generate_Double_doubleToRawLongBits_entry();
+#endif // IA32
+  void generate_stack_overflow_check(void);
+
+  void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
+  void generate_counter_overflow(Label& continue_entry);
+
+  void generate_fixed_frame(bool native_call);
+#ifdef SPARC
+  void generate_stack_overflow_check(Register Rframe_size, Register Rscratch,
+                                     Register Rscratch2);
+  void save_native_result(void);
+  void restore_native_result(void);
+#endif // SPARC
+
+#ifdef AARCH64
+  void bang_stack_shadow_pages(bool native_call);
+  void generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs);
+#endif // AARCH64
+
+#ifdef PPC
+  void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
+  void unlock_method(bool check_exceptions = true);
+
+  void generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals);
+  void generate_stack_overflow_check(Register Rframe_size, Register Rscratch1);
+#endif // PPC
+
  public:
   TemplateInterpreterGenerator(StubQueue* _code);
-
-#ifdef TARGET_ARCH_x86
-# include "templateInterpreterGenerator_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "templateInterpreterGenerator_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "templateInterpreterGenerator_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "templateInterpreterGenerator_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "templateInterpreterGenerator_ppc.hpp"
-#endif
-#ifdef TARGET_ARCH_aarch64
-# include "templateInterpreterGenerator_aarch64.hpp"
-#endif
-
-
 };
 
 #endif // !CC_INTERP
--- a/hotspot/src/share/vm/interpreter/templateTable.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/interpreter/templateTable.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -355,8 +355,6 @@
 # include "templateTable_x86.hpp"
 #elif defined TARGET_ARCH_MODEL_sparc
 # include "templateTable_sparc.hpp"
-#elif defined TARGET_ARCH_MODEL_zero
-# include "templateTable_zero.hpp"
 #elif defined TARGET_ARCH_MODEL_ppc_64
 # include "templateTable_ppc_64.hpp"
 #elif defined TARGET_ARCH_MODEL_aarch64
--- a/hotspot/src/share/vm/prims/methodHandles.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -31,6 +31,12 @@
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
 
+#ifdef TARGET_ARCH_zero
+# include "entry_zero.hpp"
+#endif
+
+
+
 class MacroAssembler;
 class Label;
 
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Tue Dec 22 11:11:29 2015 -0500
@@ -4197,7 +4197,7 @@
     UseBiasedLocking = false;
   }
 
-#ifdef ZERO
+#ifdef CC_INTERP
   // Clear flags not supported on zero.
   FLAG_SET_DEFAULT(ProfileInterpreter, false);
   FLAG_SET_DEFAULT(UseBiasedLocking, false);
--- a/hotspot/src/share/vm/runtime/frame.inline.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/runtime/frame.inline.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,13 +50,6 @@
   return is_entry_frame() && entry_frame_is_first();
 }
 
-#ifdef CC_INTERP
-inline oop* frame::interpreter_frame_temp_oop_addr() const {
-  interpreterState istate = get_interpreterState();
-  return (oop *)&istate->_oop_temp;
-}
-#endif // CC_INTERP
-
 // here are the platform-dependent bodies:
 
 #ifdef TARGET_ARCH_x86
--- a/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp	Tue Dec 22 11:03:37 2015 +0100
+++ b/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp	Tue Dec 22 11:11:29 2015 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,6 @@
 friend class StubAssembler;
 friend class CallRuntimeDirectNode;
 friend class MacroAssembler;
-friend class InterpreterGenerator;
 friend class LIR_Assembler;
 friend class GraphKit;
 friend class StubGenerator;