Merge

author:    jprovino
date:      Mon, 07 Dec 2015 17:04:42 +0000
changeset: 34657:c2e1cc3f503f
parent:    34512:1fcd0a08036b (current diff)
parent:    34655:d48cb26b07c2 (diff)
child:     34658:7ffe56626bc2

files:
hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
hotspot/src/os/aix/vm/thread_aix.inline.hpp
hotspot/src/os/bsd/vm/thread_bsd.inline.hpp
hotspot/src/os/linux/vm/thread_linux.inline.hpp
hotspot/src/os/solaris/vm/thread_solaris.inline.hpp
hotspot/src/os/windows/vm/thread_windows.inline.hpp
hotspot/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.cpp
hotspot/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp
hotspot/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp
hotspot/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.hpp
hotspot/src/os_cpu/bsd_zero/vm/threadLS_bsd_zero.cpp
hotspot/src/os_cpu/bsd_zero/vm/threadLS_bsd_zero.hpp
hotspot/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.cpp
hotspot/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.hpp
hotspot/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.cpp
hotspot/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.hpp
hotspot/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.cpp
hotspot/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.hpp
hotspot/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp
hotspot/src/os_cpu/linux_x86/vm/threadLS_linux_x86.hpp
hotspot/src/os_cpu/linux_zero/vm/threadLS_linux_zero.cpp
hotspot/src/os_cpu/linux_zero/vm/threadLS_linux_zero.hpp
hotspot/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp
hotspot/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp
hotspot/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp
hotspot/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp
hotspot/src/os_cpu/windows_x86/vm/threadLS_windows_x86.cpp
hotspot/src/os_cpu/windows_x86/vm/threadLS_windows_x86.hpp
hotspot/src/share/vm/gc/g1/g1RootClosures.inline.hpp
hotspot/src/share/vm/prims/jvm.cpp
hotspot/src/share/vm/runtime/java.cpp
hotspot/src/share/vm/runtime/threadLocalStorage.cpp
hotspot/test/runtime/logging/SafepointTestMain.java
hotspot/test/runtime/logging/VMOperationTestMain.java
hotspot/test/testlibrary/jdk/test/lib/Platform.java
--- a/hotspot/make/aix/makefiles/xlc.make	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/make/aix/makefiles/xlc.make	Mon Dec 07 17:04:42 2015 +0000
@@ -74,6 +74,9 @@
 CFLAGS += -qnortti
 CFLAGS += -qnoeh
 
+# for compiler-level tls
+CFLAGS += -qtls=default
+
 CFLAGS += -D_REENTRANT
 # no xlc counterpart for -fcheck-new
 # CFLAGS += -fcheck-new
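The new -qtls flag matters because this merge retires HotSpot's hand-rolled ThreadLocalStorage layer (the threadLS_* and thread_*.inline.hpp files removed above) in favour of compiler-level thread-local variables. A minimal sketch of the pattern, with illustrative names rather than the exact HotSpot declarations:

    // Compiler-level TLS: one slot per OS thread, read with a plain load.
    // 'Thread' and 'current_thread' are illustrative names only.
    #include <cstddef>

    class Thread;
    static __thread Thread* current_thread = NULL;  // what -qtls enables

    Thread* sketch_thread_current() {
      return current_thread;   // no pthread_getspecific() round trip
    }

    void sketch_on_attach(Thread* t) {
      current_thread = t;      // set once when a thread enters the VM
    }
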
--- a/hotspot/make/linux/makefiles/gcc.make	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/make/linux/makefiles/gcc.make	Mon Dec 07 17:04:42 2015 +0000
@@ -260,6 +260,9 @@
 
 OPT_CFLAGS = $(OPT_CFLAGS/$(OPT_CFLAGS_DEFAULT)) $(OPT_EXTRAS)
 
+# Variable tracking size limit exceeded for VMStructs::init() 
+OPT_CFLAGS/vmStructs.o += -fno-var-tracking-assignments
+
 # The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp
 # if we use expensive-optimizations
 ifeq ($(BUILDARCH), ia64)
--- a/hotspot/src/cpu/aarch64/vm/interpreter_aarch64.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/interpreter_aarch64.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -39,7 +39,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -259,20 +258,3 @@
 
   return entry_point;
 }
-
-
-void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
-
-  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
-  // the days we had adapter frames. When we deoptimize a situation where a
-  // compiled caller calls a compiled caller will have registers it expects
-  // to survive the call to the callee. If we deoptimize the callee the only
-  // way we can restore these registers is to have the oldest interpreter
-  // frame that we create restore these values. That is what this routine
-  // will accomplish.
-
-  // At the moment we have modified c2 to not have any callee save registers
-  // so this problem does not exist and this routine is just a place holder.
-
-  assert(f->is_interpreted_frame(), "must be interpreted");
-}
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -41,6 +41,7 @@
 #include "runtime/icache.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.hpp"
 
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1CollectedHeap.inline.hpp"
@@ -4653,3 +4654,23 @@
     BIND(DONE);
       sub(result, result, len); // Return index where we stopped
 }
+
+// get_thread() can be called anywhere inside generated code so we
+// need to save whatever non-callee save context might get clobbered
+// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
+// the call setup code.
+//
+// aarch64_get_thread_helper() clobbers only r0, r1, and flags.
+//
+void MacroAssembler::get_thread(Register dst) {
+  RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
+  push(saved_regs, sp);
+
+  mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
+  blrt(lr, 1, 0, 1);
+  if (dst != c_rarg0) {
+    mov(dst, c_rarg0);
+  }
+
+  pop(saved_regs, sp);
+}
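The stub above treats JavaThread::aarch64_get_thread_helper() as a leaf call that clobbers only r0, r1 and the flags, which is why it saves just those registers plus lr. A hedged sketch of the shape of such a helper (the real body lives in the os_cpu code; the TLS slot name is an assumption):

    // C++ side of get_thread(): a single thread-local load, cheap enough
    // to be callable from anywhere in generated code.
    class Thread;
    static __thread Thread* current_thread;

    extern "C" Thread* sketch_get_thread_helper() {
      return current_thread;
    }
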
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,1925 @@
+/*
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateTable.hpp"
+#include "interpreter/bytecodeTracer.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#include <sys/types.h>
+
+#ifndef PRODUCT
+#include "oops/method.hpp"
+#endif // !PRODUCT
+
+#ifdef BUILTIN_SIM
+#include "../../../../../../simulator/simulator.hpp"
+#endif
+
+#define __ _masm->
+
+#ifndef CC_INTERP
+
+//-----------------------------------------------------------------------------
+
+extern "C" void entry(CodeBuffer*);
+
+//-----------------------------------------------------------------------------
+
+address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
+  address entry = __ pc();
+
+#ifdef ASSERT
+  {
+    Label L;
+    __ ldr(rscratch1, Address(rfp,
+                       frame::interpreter_frame_monitor_block_top_offset *
+                       wordSize));
+    __ mov(rscratch2, sp);
+    __ cmp(rscratch1, rscratch2); // maximal rsp for current rfp (stack
+                           // grows negative)
+    __ br(Assembler::HS, L); // check if frame is complete
+    __ stop ("interpreter frame not set up");
+    __ bind(L);
+  }
+#endif // ASSERT
+  // Restore bcp under the assumption that the current frame is still
+  // interpreted
+  __ restore_bcp();
+
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // throw exception
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::throw_StackOverflowError));
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
+        const char* name) {
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // setup parameters
+  // ??? convention: expect aberrant index in register r1
+  __ movw(c_rarg2, r1);
+  __ mov(c_rarg1, (address)name);
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::
+                              throw_ArrayIndexOutOfBoundsException),
+             c_rarg1, c_rarg2);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
+  address entry = __ pc();
+
+  // object is at TOS
+  __ pop(c_rarg1);
+
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::
+                              throw_ClassCastException),
+             c_rarg1);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_exception_handler_common(
+        const char* name, const char* message, bool pass_oop) {
+  assert(!pass_oop || message == NULL, "either oop or message but not both");
+  address entry = __ pc();
+  if (pass_oop) {
+    // object is at TOS
+    __ pop(c_rarg2);
+  }
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // setup parameters
+  __ lea(c_rarg1, Address((address)name));
+  if (pass_oop) {
+    __ call_VM(r0, CAST_FROM_FN_PTR(address,
+                                    InterpreterRuntime::
+                                    create_klass_exception),
+               c_rarg1, c_rarg2);
+  } else {
+    // Kind of lame: ExternalAddress can't take NULL because
+    // external_word_Relocation will assert.
+    if (message != NULL) {
+      __ lea(c_rarg2, Address((address)message));
+    } else {
+      __ mov(c_rarg2, NULL_WORD);
+    }
+    __ call_VM(r0,
+               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
+               c_rarg1, c_rarg2);
+  }
+  // throw exception
+  __ b(address(Interpreter::throw_exception_entry()));
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
+  address entry = __ pc();
+  // NULL last_sp until next java call
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ dispatch_next(state);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+  address entry = __ pc();
+
+  // Restore stack bottom in case i2c adjusted stack
+  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // and NULL it as marker that esp is now tos until next java call
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ restore_bcp();
+  __ restore_locals();
+  __ restore_constant_pool_cache();
+  __ get_method(rmethod);
+
+  // Pop N words from the stack
+  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
+  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);
+
+  __ add(esp, esp, r1, Assembler::LSL, 3);
+
+  // Restore machine SP
+  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
+  __ ldr(rscratch2,
+         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
+  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
+  __ andr(sp, rscratch1, -16);
+
+#ifndef PRODUCT
+  // tell the simulator that the method has been reentered
+  if (NotifySimulator) {
+    __ notify(Assembler::method_reentry);
+  }
+#endif
+  __ get_dispatch();
+  __ dispatch_next(state, step);
+
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
+                                                               int step) {
+  address entry = __ pc();
+  __ restore_bcp();
+  __ restore_locals();
+  __ restore_constant_pool_cache();
+  __ get_method(rmethod);
+
+  // handle exceptions
+  {
+    Label L;
+    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
+    __ cbz(rscratch1, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+
+  __ get_dispatch();
+
+  // Calculate stack limit
+  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
+  __ ldr(rscratch2,
+         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
+  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
+  __ andr(sp, rscratch1, -16);
+
+  // Restore expression stack pointer
+  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // NULL last_sp until next java call
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+
+  __ dispatch_next(state, step);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_result_handler_for(
+        BasicType type) {
+  address entry = __ pc();
+  switch (type) {
+  case T_BOOLEAN: __ uxtb(r0, r0);        break;
+  case T_CHAR   : __ uxth(r0, r0);        break;
+  case T_BYTE   : __ sxtb(r0, r0);        break;
+  case T_SHORT  : __ sxth(r0, r0);        break;
+  case T_INT    : __ uxtw(r0, r0);        break;  // FIXME: We almost certainly don't need this
+  case T_LONG   : /* nothing to do */        break;
+  case T_VOID   : /* nothing to do */        break;
+  case T_FLOAT  : /* nothing to do */        break;
+  case T_DOUBLE : /* nothing to do */        break;
+  case T_OBJECT :
+    // retrieve result from frame
+    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
+    // and verify it
+    __ verify_oop(r0);
+    break;
+  default       : ShouldNotReachHere();
+  }
+  __ ret(lr);                                  // return from result handler
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_safept_entry_for(
+        TosState state,
+        address runtime_entry) {
+  address entry = __ pc();
+  __ push(state);
+  __ call_VM(noreg, runtime_entry);
+  __ membar(Assembler::AnyAny);
+  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
+  return entry;
+}
+
+// Helpers for commoning out cases in the various types of method entries.
+//
+
+
+// increment invocation count & check for overflow
+//
+// Note: checking for negative value instead of overflow
+//       so we have a 'sticky' overflow test
+//
+// rmethod: method
+//
+void InterpreterGenerator::generate_counter_incr(
+        Label* overflow,
+        Label* profile_method,
+        Label* profile_method_continue) {
+  Label done;
+  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
+  if (TieredCompilation) {
+    int increment = InvocationCounter::count_increment;
+    Label no_mdo;
+    if (ProfileInterpreter) {
+      // Are we profiling?
+      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
+      __ cbz(r0, no_mdo);
+      // Increment counter in the MDO
+      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
+                                                in_bytes(InvocationCounter::counter_offset()));
+      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
+      __ b(done);
+    }
+    __ bind(no_mdo);
+    // Increment counter in MethodCounters
+    const Address invocation_counter(rscratch2,
+                  MethodCounters::invocation_counter_offset() +
+                  InvocationCounter::counter_offset());
+    __ get_method_counters(rmethod, rscratch2, done);
+    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
+    __ bind(done);
+  } else { // not TieredCompilation
+    const Address backedge_counter(rscratch2,
+                  MethodCounters::backedge_counter_offset() +
+                  InvocationCounter::counter_offset());
+    const Address invocation_counter(rscratch2,
+                  MethodCounters::invocation_counter_offset() +
+                  InvocationCounter::counter_offset());
+
+    __ get_method_counters(rmethod, rscratch2, done);
+
+    if (ProfileInterpreter) { // %%% Merge this into MethodData*
+      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
+      __ addw(r1, r1, 1);
+      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
+    }
+    // Update standard invocation counters
+    __ ldrw(r1, invocation_counter);
+    __ ldrw(r0, backedge_counter);
+
+    __ addw(r1, r1, InvocationCounter::count_increment);
+    __ andw(r0, r0, InvocationCounter::count_mask_value);
+
+    __ strw(r1, invocation_counter);
+    __ addw(r0, r0, r1);                // add both counters
+
+    // profile_method is non-null only for interpreted methods, so
+    // profile_method != NULL == !native_call
+
+    if (ProfileInterpreter && profile_method != NULL) {
+      // Test to see if we should create a method data oop
+      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
+      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
+      __ cmpw(r0, rscratch2);
+      __ br(Assembler::LT, *profile_method_continue);
+
+      // if no method data exists, go to profile_method
+      __ test_method_data_pointer(r0, *profile_method);
+    }
+
+    {
+      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
+      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
+      __ cmpw(r0, rscratch2);
+      __ br(Assembler::HS, *overflow);
+    }
+    __ bind(done);
+  }
+}
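The "sticky" overflow test mentioned above can be shown in plain C++. This is a sketch only: InvocationCounter keeps its count above a few status bits, and the constants below merely approximate that layout.

    // Sticky counter test: once invocation+backedge crosses the limit it
    // stays crossed, so a missed sample cannot lose the overflow event.
    const unsigned count_increment  = 1u << 3;        // count above status bits
    const unsigned count_mask_value = ~7u;            // strip the status bits

    bool sketch_should_compile(unsigned& invocation, unsigned backedge,
                               unsigned limit) {
      invocation += count_increment;                  // addw above
      unsigned sum = (backedge & count_mask_value) + invocation;
      return sum >= limit;                            // br(Assembler::HS, *overflow)
    }
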
+
+void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
+
+  // Asm interpreter on entry
+  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
+  // Everything as it was on entry
+
+  // InterpreterRuntime::frequency_counter_overflow takes two
+  // arguments, the first (thread) is passed by call_VM, the second
+  // indicates if the counter overflow occurs at a backwards branch
+  // (NULL bcp).  We pass zero for it.  The call returns the address
+  // of the verified entry point for the method or NULL if the
+  // compilation did not complete (either went background or bailed
+  // out).
+  __ mov(c_rarg1, 0);
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::frequency_counter_overflow),
+             c_rarg1);
+
+  __ b(*do_continue);
+}
+
+// See if we've got enough room on the stack for locals plus overhead.
+// The expression stack grows down incrementally, so the normal guard
+// page mechanism will work for that.
+//
+// NOTE: The additional locals are also always pushed (this wasn't
+// obvious in generate_method_entry), so the guard should work for them
+// too.
+//
+// Args:
+//      r3: number of additional locals this frame needs (what we must check)
+//      rmethod: Method*
+//
+// Kills:
+//      r0
+void InterpreterGenerator::generate_stack_overflow_check(void) {
+
+  // monitor entry size: see picture of stack set
+  // (generate_method_entry) and frame_amd64.hpp
+  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+
+  // total overhead size: entry_size + (saved rbp through expr stack
+  // bottom).  be sure to change this if you add/subtract anything
+  // to/from the overhead area
+  const int overhead_size =
+    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
+
+  const int page_size = os::vm_page_size();
+
+  Label after_frame_check;
+
+  // see if the frame is greater than one page in size. If so,
+  // then we need to verify there is enough stack space remaining
+  // for the additional locals.
+  //
+  // Note that we use SUBS rather than CMP here because the immediate
+  // field of this instruction may overflow.  SUBS can cope with this
+  // because it is a macro that will expand to some number of MOV
+  // instructions and a register operation.
+  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
+  __ br(Assembler::LS, after_frame_check);
+
+  // compute rsp as if this were going to be the last frame on
+  // the stack before the red zone
+
+  const Address stack_base(rthread, Thread::stack_base_offset());
+  const Address stack_size(rthread, Thread::stack_size_offset());
+
+  // locals + overhead, in bytes
+  __ mov(r0, overhead_size);
+  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.
+
+  __ ldr(rscratch1, stack_base);
+  __ ldr(rscratch2, stack_size);
+
+#ifdef ASSERT
+  Label stack_base_okay, stack_size_okay;
+  // verify that thread stack base is non-zero
+  __ cbnz(rscratch1, stack_base_okay);
+  __ stop("stack base is zero");
+  __ bind(stack_base_okay);
+  // verify that thread stack size is non-zero
+  __ cbnz(rscratch2, stack_size_okay);
+  __ stop("stack size is zero");
+  __ bind(stack_size_okay);
+#endif
+
+  // Add stack base to locals and subtract stack size
+  __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
+  __ add(r0, r0, rscratch1);
+
+  // Use the maximum number of pages we might bang.
+  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
+                                                                              (StackRedPages+StackYellowPages);
+
+  // add in the red and yellow zone sizes
+  __ add(r0, r0, max_pages * page_size * 2);
+
+  // check against the current stack bottom
+  __ cmp(sp, r0);
+  __ br(Assembler::HI, after_frame_check);
+
+  // Remove the incoming args, peeling the machine SP back to where it
+  // was in the caller.  This is not strictly necessary, but unless we
+  // do so the stack frame may have a garbage FP; this ensures a
+  // correct call stack that we can always unwind.  The ANDR should be
+  // unnecessary because the sender SP in r13 is always aligned, but
+  // it doesn't hurt.
+  __ andr(sp, r13, -16);
+
+  // Note: the restored frame is not necessarily interpreted.
+  // Use the shared runtime version of the StackOverflowError.
+  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
+  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
+
+  // all done with frame size check
+  __ bind(after_frame_check);
+}
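The comparison the generated code performs amounts to asking whether locals plus frame overhead still fit between SP and the guard zones at the low end of the stack. A C++ restatement under the same assumptions (all sizes in bytes; the doubled guard-page term mirrors the max_pages * page_size * 2 above):

    // Does the new frame fit above the red/yellow/shadow guard pages?
    bool sketch_frame_fits(char* sp, char* stack_base, unsigned long stack_size,
                           unsigned long locals_bytes, unsigned long overhead_bytes,
                           int max_pages, int page_size) {
      char* limit = (stack_base - stack_size)       // lowest valid stack address
                  + 2UL * max_pages * page_size     // red/yellow zone slack
                  + locals_bytes + overhead_bytes;
      return sp > limit;                            // br(Assembler::HI, after_frame_check)
    }
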
+
+// Allocate monitor and lock method (asm interpreter)
+//
+// Args:
+//      rmethod: Method*
+//      rlocals: locals
+//
+// Kills:
+//      r0
+//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
+//      rscratch1, rscratch2 (scratch regs)
+void TemplateInterpreterGenerator::lock_method() {
+  // synchronize method
+  const Address access_flags(rmethod, Method::access_flags_offset());
+  const Address monitor_block_top(
+        rfp,
+        frame::interpreter_frame_monitor_block_top_offset * wordSize);
+  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+
+#ifdef ASSERT
+  {
+    Label L;
+    __ ldrw(r0, access_flags);
+    __ tst(r0, JVM_ACC_SYNCHRONIZED);
+    __ br(Assembler::NE, L);
+    __ stop("method doesn't need synchronization");
+    __ bind(L);
+  }
+#endif // ASSERT
+
+  // get synchronization object
+  {
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    Label done;
+    __ ldrw(r0, access_flags);
+    __ tst(r0, JVM_ACC_STATIC);
+    // get receiver (assume this is frequent case)
+    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
+    __ br(Assembler::EQ, done);
+    __ ldr(r0, Address(rmethod, Method::const_offset()));
+    __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
+    __ ldr(r0, Address(r0,
+                           ConstantPool::pool_holder_offset_in_bytes()));
+    __ ldr(r0, Address(r0, mirror_offset));
+
+#ifdef ASSERT
+    {
+      Label L;
+      __ cbnz(r0, L);
+      __ stop("synchronization object is NULL");
+      __ bind(L);
+    }
+#endif // ASSERT
+
+    __ bind(done);
+  }
+
+  // add space for monitor & lock
+  __ sub(sp, sp, entry_size); // add space for a monitor entry
+  __ sub(esp, esp, entry_size);
+  __ mov(rscratch1, esp);
+  __ str(rscratch1, monitor_block_top);  // set new monitor block top
+  // store object
+  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
+  __ mov(c_rarg1, esp); // object address
+  __ lock_object(c_rarg1);
+}
+
+// Generate a fixed interpreter frame. This is identical setup for
+// interpreted methods and for native methods hence the shared code.
+//
+// Args:
+//      lr: return address
+//      rmethod: Method*
+//      rlocals: pointer to locals
+//      rcpool: cp cache
+//      stack_pointer: previous sp
+void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
+  // initialize fixed part of activation frame
+  if (native_call) {
+    __ sub(esp, sp, 12 * wordSize);
+    __ mov(rbcp, zr);
+    __ stp(esp, zr, Address(__ pre(sp, -12 * wordSize)));
+    // add 2 zero-initialized slots for native calls
+    __ stp(zr, zr, Address(sp, 10 * wordSize));
+  } else {
+    __ sub(esp, sp, 10 * wordSize);
+    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));      // get ConstMethod
+    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
+    __ stp(esp, rbcp, Address(__ pre(sp, -10 * wordSize)));
+  }
+
+  if (ProfileInterpreter) {
+    Label method_data_continue;
+    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
+    __ cbz(rscratch1, method_data_continue);
+    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
+    __ bind(method_data_continue);
+    __ stp(rscratch1, rmethod, Address(sp, 4 * wordSize));  // save Method* and mdp (method data pointer)
+  } else {
+    __ stp(zr, rmethod, Address(sp, 4 * wordSize));        // save Method* (no mdp)
+  }
+
+  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
+  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
+  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
+  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));
+
+  __ stp(rfp, lr, Address(sp, 8 * wordSize));
+  __ lea(rfp, Address(sp, 8 * wordSize));
+
+  // set sender sp
+  // leave last_sp as null
+  __ stp(zr, r13, Address(sp, 6 * wordSize));
+
+  // Move SP out of the way
+  if (! native_call) {
+    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
+    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
+    __ andr(sp, rscratch1, -16);
+  }
+}
+
+// End of helpers
+
+// Various method entries
+//------------------------------------------------------------------------------------------------------------------------
+//
+//
+
+// Method entry for java.lang.ref.Reference.get.
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#if INCLUDE_ALL_GCS
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. The "intrinsified" code for G1 (or any SATB based GC),
+  //    2. The slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:-
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
+  // This code is based on generate_accessor_entry.
+  //
+  // rmethod: Method*
+  // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
+
+  address entry = __ pc();
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  if (UseG1GC) {
+    Label slow_path;
+    const Register local_0 = c_rarg0;
+    // Check if local 0 != NULL
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ ldr(local_0, Address(esp, 0));
+    __ cbz(local_0, slow_path);
+
+
+    // Load the value of the referent field.
+    const Address field_address(local_0, referent_offset);
+    __ load_heap_oop(local_0, field_address);
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer.
+    __ enter(); // g1_write may call runtime
+    __ g1_write_barrier_pre(noreg /* obj */,
+                            local_0 /* pre_val */,
+                            rthread /* thread */,
+                            rscratch2 /* tmp */,
+                            true /* tosca_live */,
+                            true /* expand_call */);
+    __ leave();
+    // areturn
+    __ andr(sp, r13, -16);  // done with stack
+    __ ret(lr);
+
+    // generate a vanilla interpreter entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
+    return entry;
+  }
+#endif // INCLUDE_ALL_GCS
+
+  // If G1 is not enabled then attempt to go through the accessor entry point
+  // Reference.get is an accessor
+  return generate_accessor_entry();
+}
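The pre-barrier generated here boils down to logging the old referent value into the thread's SATB queue so concurrent marking still sees it. A sketch of that logging step (field names are illustrative, not G1's exact SATBMarkQueue layout, and the real barrier first checks that marking is active):

    #include <cstddef>

    struct SketchSatbQueue {
      void** buf;      // log buffer
      size_t index;    // bytes remaining; 0 means the buffer is full
    };

    void sketch_satb_log(SketchSatbQueue* q, void* old_value) {
      if (old_value == NULL) return;   // nothing to log
      if (q->index == 0) return;       // full: the VM takes a runtime slow path
      q->index -= sizeof(void*);
      q->buf[q->index / sizeof(void*)] = old_value;
    }
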
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rmethod: Method*
+    // r13: senderSP must be preserved for the slow path
+    // esp: args
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    unsigned long offset;
+    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
+    __ ldrw(rscratch1, Address(rscratch1, offset));
+    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
+    __ cbnz(rscratch1, slow_path);
+
+    // We don't generate a local frame and don't align the stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = c_rarg0;  // crc
+    const Register val = c_rarg1;  // source java byte value
+    const Register tbl = c_rarg2;  // scratch
+
+    // Arguments are reversed on java expression stack
+    __ ldrw(val, Address(esp, 0));              // byte value
+    __ ldrw(crc, Address(esp, wordSize));       // Initial CRC
+
+    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
+    __ add(tbl, tbl, offset);
+
+    __ ornw(crc, zr, crc); // ~crc
+    __ update_byte_crc32(crc, val, tbl);
+    __ ornw(crc, zr, crc); // ~crc
+
+    // result in c_rarg0
+
+    __ andr(sp, r13, -16);
+    __ ret(lr);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
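The ornw/update/ornw sequence is the standard one-byte step of the reflected CRC-32, with the pre- and post-inversion that java.util.zip.CRC32 specifies. In C++ terms (crc_table stands in for the 256-entry table behind StubRoutines::crc_table_addr()):

    // One-byte CRC32 update matching the generated ~crc ... ~crc framing.
    extern const unsigned crc_table[256];   // assumed standard CRC-32 table

    unsigned sketch_crc32_update(unsigned crc, unsigned char b) {
      crc = ~crc;                                        // ornw(crc, zr, crc)
      crc = (crc >> 8) ^ crc_table[(crc ^ b) & 0xFFu];   // update_byte_crc32
      return ~crc;                                       // invert back
    }
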
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rmethod: Method*
+    // r13: senderSP must be preserved for the slow path
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    unsigned long offset;
+    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
+    __ ldrw(rscratch1, Address(rscratch1, offset));
+    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
+    __ cbnz(rscratch1, slow_path);
+
+    // We don't generate a local frame and don't align the stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = c_rarg0;  // crc
+    const Register buf = c_rarg1;  // source java byte array address
+    const Register len = c_rarg2;  // length
+    const Register off = len;      // offset (never overlaps with 'len')
+
+    // Arguments are reversed on java expression stack
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+      __ ldr(buf, Address(esp, 2*wordSize)); // long buf
+      __ ldrw(off, Address(esp, wordSize)); // offset
+      __ add(buf, buf, off); // + offset
+      __ ldrw(crc,   Address(esp, 4*wordSize)); // Initial CRC
+    } else {
+      __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
+      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ ldrw(off, Address(esp, wordSize)); // offset
+      __ add(buf, buf, off); // + offset
+      __ ldrw(crc,   Address(esp, 3*wordSize)); // Initial CRC
+    }
+    // Can now load 'len' since we're finished with 'off'
+    __ ldrw(len, Address(esp, 0x0)); // Length
+
+    __ andr(sp, r13, -16); // Restore the caller's SP
+
+    // We are frameless so we can just jump to the stub.
+    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
+
+void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
+  // Bang each page in the shadow zone. We can't assume it's been done for
+  // an interpreter frame with greater than a page of locals, so each page
+  // needs to be checked.  Only true for non-native.
+  if (UseStackBanging) {
+    const int start_page = native_call ? StackShadowPages : 1;
+    const int page_size = os::vm_page_size();
+    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
+      __ sub(rscratch2, sp, pages*page_size);
+      __ str(zr, Address(rscratch2));
+    }
+  }
+}
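In C++ terms the generated loop simply touches one word in each page of the shadow zone below SP, so any guard-page fault happens now, at a well-defined point, rather than deep inside the frame. A sketch under the same assumptions:

    // Touch every shadow page below 'sp' (the str zr, [rscratch2] above).
    void sketch_bang_shadow_pages(char* sp, int shadow_pages, int page_size) {
      for (int page = 1; page <= shadow_pages; page++) {
        *(volatile char*)(sp - (long)page * page_size) = 0;
      }
    }
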
+
+
+// Interpreter stub for calling a native method. (asm interpreter)
+// This sets up a somewhat different looking stack for calling the
+// native method than the typical interpreter frame setup.
+address InterpreterGenerator::generate_native_entry(bool synchronized) {
+  // determine code generation flags
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // r1: Method*
+  // rscratch1: sender sp
+
+  address entry_point = __ pc();
+
+  const Address constMethod       (rmethod, Method::const_offset());
+  const Address access_flags      (rmethod, Method::access_flags_offset());
+  const Address size_of_parameters(r2, ConstMethod::
+                                       size_of_parameters_offset());
+
+  // get parameter size (always needed)
+  __ ldr(r2, constMethod);
+  __ load_unsigned_short(r2, size_of_parameters);
+
+  // native calls don't need the stack size check since they have no
+  // expression stack and the arguments are already on the stack and
+  // we only add a handful of words to the stack
+
+  // rmethod: Method*
+  // r2: size of parameters
+  // rscratch1: sender sp
+
+  // for natives the size of locals is zero
+
+  // compute beginning of parameters (rlocals)
+  __ add(rlocals, esp, r2, ext::uxtx, 3);
+  __ add(rlocals, rlocals, -wordSize);
+
+  // Pull SP back to minimum size: this avoids holes in the stack
+  __ andr(sp, esp, -16);
+
+  // initialize fixed part of activation frame
+  generate_fixed_frame(true);
+#ifndef PRODUCT
+  // tell the simulator that a method has been entered
+  if (NotifySimulator) {
+    __ notify(Assembler::method_entry);
+  }
+#endif
+
+  // make sure method is native & not abstract
+#ifdef ASSERT
+  __ ldrw(r0, access_flags);
+  {
+    Label L;
+    __ tst(r0, JVM_ACC_NATIVE);
+    __ br(Assembler::NE, L);
+    __ stop("tried to execute non-native method as native");
+    __ bind(L);
+  }
+  {
+    Label L;
+    __ tst(r0, JVM_ACC_ABSTRACT);
+    __ br(Assembler::EQ, L);
+    __ stop("tried to execute abstract method in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // Since at this point in the method invocation the exception
+  // handler would try to exit the monitor of synchronized methods
+  // which hasn't been entered yet, we set the thread local variable
+  // _do_not_unlock_if_synchronized to true. The remove_activation
+  // will check this flag.
+
+  const Address do_not_unlock_if_synchronized(rthread,
+        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
+  __ mov(rscratch2, true);
+  __ strb(rscratch2, do_not_unlock_if_synchronized);
+
+  // increment invocation count & check for overflow
+  Label invocation_counter_overflow;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+  }
+
+  Label continue_after_compile;
+  __ bind(continue_after_compile);
+
+  bang_stack_shadow_pages(true);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  __ strb(zr, do_not_unlock_if_synchronized);
+
+  // check for synchronized methods
+  // Must happen AFTER invocation_counter check and stack overflow check,
+  // so the method is not locked if the counter overflows.
+  if (synchronized) {
+    lock_method();
+  } else {
+    // no synchronization necessary
+#ifdef ASSERT
+    {
+      Label L;
+      __ ldrw(r0, access_flags);
+      __ tst(r0, JVM_ACC_SYNCHRONIZED);
+      __ br(Assembler::EQ, L);
+      __ stop("method needs synchronization");
+      __ bind(L);
+    }
+#endif
+  }
+
+  // start execution
+#ifdef ASSERT
+  {
+    Label L;
+    const Address monitor_block_top(rfp,
+                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
+    __ ldr(rscratch1, monitor_block_top);
+    __ cmp(esp, rscratch1);
+    __ br(Assembler::EQ, L);
+    __ stop("broken stack frame setup in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // jvmti support
+  __ notify_method_entry();
+
+  // work registers
+  const Register t = r17;
+  const Register result_handler = r19;
+
+  // allocate space for parameters
+  __ ldr(t, Address(rmethod, Method::const_offset()));
+  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
+
+  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
+  __ andr(sp, rscratch1, -16);
+  __ mov(esp, rscratch1);
+
+  // get signature handler
+  {
+    Label L;
+    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
+    __ cbnz(t, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::prepare_native_call),
+               rmethod);
+    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
+    __ bind(L);
+  }
+
+  // call signature handler
+  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
+         "adjust this code");
+  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
+         "adjust this code");
+  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
+          "adjust this code");
+
+  // The generated handlers do not touch rmethod (the method).
+  // However, large signatures cannot be cached and are generated
+  // each time here.  The slow-path generator can do a GC on return,
+  // so we must reload it after the call.
+  __ blr(t);
+  __ get_method(rmethod);        // slow path can do a GC, reload rmethod
+
+
+  // result handler is in r0
+  // set result handler
+  __ mov(result_handler, r0);
+  // pass mirror handle if static call
+  {
+    Label L;
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
+    __ tst(t, JVM_ACC_STATIC);
+    __ br(Assembler::EQ, L);
+    // get mirror
+    __ ldr(t, Address(rmethod, Method::const_offset()));
+    __ ldr(t, Address(t, ConstMethod::constants_offset()));
+    __ ldr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
+    __ ldr(t, Address(t, mirror_offset));
+    // copy mirror into activation frame
+    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
+    // pass handle to mirror
+    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
+    __ bind(L);
+  }
+
+  // get native function entry point in r10
+  {
+    Label L;
+    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
+    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
+    __ mov(rscratch2, unsatisfied);
+    __ ldr(rscratch2, rscratch2);
+    __ cmp(r10, rscratch2);
+    __ br(Assembler::NE, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::prepare_native_call),
+               rmethod);
+    __ get_method(rmethod);
+    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
+    __ bind(L);
+  }
+
+  // pass JNIEnv
+  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
+
+  // It is enough that the pc() points into the right code
+  // segment. It does not have to be the correct return pc.
+  __ set_last_Java_frame(esp, rfp, (address)NULL, rscratch1);
+
+  // change thread state
+#ifdef ASSERT
+  {
+    Label L;
+    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
+    __ cmp(t, _thread_in_Java);
+    __ br(Assembler::EQ, L);
+    __ stop("Wrong thread state in native stub");
+    __ bind(L);
+  }
+#endif
+
+  // Change state to native
+  __ mov(rscratch1, _thread_in_native);
+  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
+  __ stlrw(rscratch1, rscratch2);
+
+  // Call the native method.
+  __ blrt(r10, rscratch1);
+  __ maybe_isb();
+  __ get_method(rmethod);
+  // result potentially in r0 or v0
+
+  // make room for the pushes we're about to do
+  __ sub(rscratch1, esp, 4 * wordSize);
+  __ andr(sp, rscratch1, -16);
+
+  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
+  // in order to extract the result of a method call. If the order of these
+  // pushes changes or anything else is added to the stack, then the code in
+  // interpreter_frame_result must also change.
+  __ push(dtos);
+  __ push(ltos);
+
+  // change thread state
+  __ mov(rscratch1, _thread_in_native_trans);
+  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
+  __ stlrw(rscratch1, rscratch2);
+
+  if (os::is_MP()) {
+    if (UseMembar) {
+      // Force this write out before the read below
+      __ dsb(Assembler::SY);
+    } else {
+      // Write serialization page so VM thread can do a pseudo remote membar.
+      // We use the current thread pointer to calculate a thread specific
+      // offset to write to within the page. This minimizes bus traffic
+      // due to cache line collision.
+      __ serialize_memory(rthread, rscratch2);
+    }
+  }
+
+  // check for safepoint operation in progress and/or pending suspend requests
+  {
+    Label Continue;
+    {
+      unsigned long offset;
+      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
+      __ ldrw(rscratch2, Address(rscratch2, offset));
+    }
+    assert(SafepointSynchronize::_not_synchronized == 0,
+           "SafepointSynchronize::_not_synchronized");
+    Label L;
+    __ cbnz(rscratch2, L);
+    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
+    __ cbz(rscratch2, Continue);
+    __ bind(L);
+
+    // Don't use call_VM as it will see a possible pending exception
+    // and forward it and never return here preventing us from
+    // clearing _last_native_pc down below. So we do a runtime call by
+    // hand.
+    //
+    __ mov(c_rarg0, rthread);
+    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
+    __ blrt(rscratch2, 1, 0, 0);
+    __ maybe_isb();
+    __ get_method(rmethod);
+    __ reinit_heapbase();
+    __ bind(Continue);
+  }
+
+  // change thread state
+  __ mov(rscratch1, _thread_in_Java);
+  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
+  __ stlrw(rscratch1, rscratch2);
+
+  // reset_last_Java_frame
+  __ reset_last_Java_frame(true, true);
+
+  // reset handle block
+  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
+  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));
+
+  // If result is an oop unbox and store it in frame where gc will see it
+  // and result handler will pick it up
+
+  {
+    Label no_oop, store_result;
+    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
+    __ cmp(t, result_handler);
+    __ br(Assembler::NE, no_oop);
+    // retrieve result
+    __ pop(ltos);
+    __ cbz(r0, store_result);
+    __ ldr(r0, Address(r0, 0));
+    __ bind(store_result);
+    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
+    // keep stack depth as expected by pushing oop which will eventually be discarded
+    __ push(ltos);
+    __ bind(no_oop);
+  }
+
+  {
+    Label no_reguard;
+    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
+    __ ldrb(rscratch1, Address(rscratch1));
+    __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled);
+    __ br(Assembler::NE, no_reguard);
+
+    __ pusha(); // XXX only save smashed registers
+    __ mov(c_rarg0, rthread);
+    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
+    __ blrt(rscratch2, 0, 0, 0);
+    __ popa(); // XXX only restore smashed registers
+    __ bind(no_reguard);
+  }
+
+  // The method register is junk from after the thread_in_native transition
+  // until here.  Also can't call_VM until the bcp has been
+  // restored.  Need bcp for throwing exception below so get it now.
+  __ get_method(rmethod);
+
+  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
+  // rbcp == code_base()
+  __ ldr(rbcp, Address(rmethod, Method::const_offset()));   // get ConstMethod*
+  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
+  // handle exceptions (exception handling will handle unlocking!)
+  {
+    Label L;
+    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
+    __ cbz(rscratch1, L);
+    // Note: At some point we may want to unify this with the code
+    // used in call_VM_base(); i.e., we should use the
+    // StubRoutines::forward_exception code. For now this doesn't work
+    // here because the rsp is not correctly set at this point.
+    __ MacroAssembler::call_VM(noreg,
+                               CAST_FROM_FN_PTR(address,
+                               InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+
+  // do unlocking if necessary
+  {
+    Label L;
+    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
+    __ tst(t, JVM_ACC_SYNCHRONIZED);
+    __ br(Assembler::EQ, L);
+    // the code below should be shared with interpreter macro
+    // assembler implementation
+    {
+      Label unlock;
+      // BasicObjectLock will be first in list, since this is a
+      // synchronized method. However, need to check that the object
+      // has not been unlocked by an explicit monitorexit bytecode.
+
+      // monitor expected in c_rarg1 for the slow unlock path
+      __ lea (c_rarg1, Address(rfp,   // address of first monitor
+                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
+                                          wordSize - sizeof(BasicObjectLock))));
+
+      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
+      __ cbnz(t, unlock);
+
+      // Entry already unlocked, need to throw exception
+      __ MacroAssembler::call_VM(noreg,
+                                 CAST_FROM_FN_PTR(address,
+                   InterpreterRuntime::throw_illegal_monitor_state_exception));
+      __ should_not_reach_here();
+
+      __ bind(unlock);
+      __ unlock_object(c_rarg1);
+    }
+    __ bind(L);
+  }
+
+  // jvmti support
+  // Note: This must happen _after_ handling/throwing any exceptions since
+  //       the exception handler code notifies the runtime of method exits
+  //       too. If this happens before, method entry/exit notifications are
+  //       not properly paired (was bug - gri 11/22/99).
+  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
+
+  // restore potential result in r0:d0, call result handler to
+  // restore potential result in ST0 & handle result
+
+  __ pop(ltos);
+  __ pop(dtos);
+
+  __ blr(result_handler);
+
+  // remove activation
+  __ ldr(esp, Address(rfp,
+                    frame::interpreter_frame_sender_sp_offset *
+                    wordSize)); // get sender sp
+  // remove frame anchor
+  __ leave();
+
+  // restore sender sp
+  __ mov(sp, esp);
+
+  __ ret(lr);
+
+  if (inc_counter) {
+    // Handle overflow of counter and compile method
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(&continue_after_compile);
+  }
+
+  return entry_point;
+}
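The long entry above is largely a thread-state protocol wrapped around the actual blrt. A compressed sketch of that protocol (state and helper names are illustrative; HotSpot's real fields live in JavaThread):

    #include <atomic>

    enum SketchState { in_Java, in_native, in_native_trans };

    struct SketchThread {
      std::atomic<int> state;
      std::atomic<int> suspend_flags;
    };

    bool sketch_safepoint_pending();               // assumed VM query

    void sketch_call_native(SketchThread* t, void (*fn)()) {
      t->state.store(in_native, std::memory_order_release);        // stlrw
      fn();                                                        // blrt r10
      t->state.store(in_native_trans, std::memory_order_release);
      std::atomic_thread_fence(std::memory_order_seq_cst);         // dsb(SY)
      if (sketch_safepoint_pending() || t->suspend_flags.load() != 0) {
        // runtime call made "by hand" so a pending exception is not
        // forwarded before _last_native_pc is cleared (see above)
      }
      t->state.store(in_Java, std::memory_order_release);
    }
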
+
+//
+// Generic interpreted method entry to (asm) interpreter
+//
+address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+  // determine code generation flags
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // rscratch1: sender sp
+  address entry_point = __ pc();
+
+  const Address constMethod(rmethod, Method::const_offset());
+  const Address access_flags(rmethod, Method::access_flags_offset());
+  const Address size_of_parameters(r3,
+                                   ConstMethod::size_of_parameters_offset());
+  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());
+
+  // get parameter size (always needed)
+  // need to load the const method first
+  __ ldr(r3, constMethod);
+  __ load_unsigned_short(r2, size_of_parameters);
+
+  // r2: size of parameters
+
+  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
+  __ sub(r3, r3, r2); // r3 = no. of additional locals
+
+  // see if we've got enough room on the stack for locals plus overhead.
+  generate_stack_overflow_check();
+
+  // compute beginning of parameters (rlocals)
+  __ add(rlocals, esp, r2, ext::uxtx, 3);
+  __ sub(rlocals, rlocals, wordSize);
+
+  // Make room for locals
+  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
+  __ andr(sp, rscratch1, -16);
+
+  // r3 - # of additional locals
+  // allocate space for locals
+  // explicitly initialize locals
+  {
+    Label exit, loop;
+    __ ands(zr, r3, r3);
+    __ br(Assembler::LE, exit); // do nothing if r3 <= 0
+    __ bind(loop);
+    __ str(zr, Address(__ post(rscratch1, wordSize)));
+    __ sub(r3, r3, 1); // until everything initialized
+    __ cbnz(r3, loop);
+    __ bind(exit);
+  }
+
+  // And the base dispatch table
+  __ get_dispatch();
+
+  // initialize fixed part of activation frame
+  generate_fixed_frame(false);
+#ifndef PRODUCT
+  // tell the simulator that a method has been entered
+  if (NotifySimulator) {
+    __ notify(Assembler::method_entry);
+  }
+#endif
+  // make sure method is not native & not abstract
+#ifdef ASSERT
+  __ ldrw(r0, access_flags);
+  {
+    Label L;
+    __ tst(r0, JVM_ACC_NATIVE);
+    __ br(Assembler::EQ, L);
+    __ stop("tried to execute native method as non-native");
+    __ bind(L);
+  }
+  {
+    Label L;
+    __ tst(r0, JVM_ACC_ABSTRACT);
+    __ br(Assembler::EQ, L);
+    __ stop("tried to execute abstract method in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // Since at this point in the method invocation the exception
+  // handler would try to exit the monitor of synchronized methods
+  // which hasn't been entered yet, we set the thread local variable
+  // _do_not_unlock_if_synchronized to true. The remove_activation
+  // will check this flag.
+
+  const Address do_not_unlock_if_synchronized(rthread,
+        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
+  __ mov(rscratch2, true);
+  __ strb(rscratch2, do_not_unlock_if_synchronized);
+
+  // increment invocation count & check for overflow
+  Label invocation_counter_overflow;
+  Label profile_method;
+  Label profile_method_continue;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow,
+                          &profile_method,
+                          &profile_method_continue);
+    if (ProfileInterpreter) {
+      __ bind(profile_method_continue);
+    }
+  }
+
+  Label continue_after_compile;
+  __ bind(continue_after_compile);
+
+  bang_stack_shadow_pages(false);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  __ strb(zr, do_not_unlock_if_synchronized);
+
+  // check for synchronized methods
+  // Must happen AFTER invocation_counter check and stack overflow check,
+  // so the method is not locked if the counter overflows.
+  if (synchronized) {
+    // Allocate monitor and lock method
+    lock_method();
+  } else {
+    // no synchronization necessary
+#ifdef ASSERT
+    {
+      Label L;
+      __ ldrw(r0, access_flags);
+      __ tst(r0, JVM_ACC_SYNCHRONIZED);
+      __ br(Assembler::EQ, L);
+      __ stop("method needs synchronization");
+      __ bind(L);
+    }
+#endif
+  }
+
+  // start execution
+#ifdef ASSERT
+  {
+    Label L;
+    const Address monitor_block_top(rfp,
+                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
+    __ ldr(rscratch1, monitor_block_top);
+    __ cmp(esp, rscratch1);
+    __ br(Assembler::EQ, L);
+    __ stop("broken stack frame setup in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // jvmti support
+  __ notify_method_entry();
+
+  __ dispatch_next(vtos);
+
+  // invocation counter overflow
+  if (inc_counter) {
+    if (ProfileInterpreter) {
+      // We have decided to profile this method in the interpreter
+      __ bind(profile_method);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
+      // reload the method; this is probably redundant here
+      __ get_method(r1);
+      __ b(profile_method_continue);
+    }
+    // Handle overflow of counter and compile method
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(&continue_after_compile);
+  }
+
+  return entry_point;
+}
+
+//-----------------------------------------------------------------------------
+// Exceptions
+
+void TemplateInterpreterGenerator::generate_throw_exception() {
+  // Entry point in previous activation (i.e., if the caller was
+  // interpreted)
+  Interpreter::_rethrow_exception_entry = __ pc();
+  // Restore sp to interpreter_frame_last_sp even though we are going
+  // to empty the expression stack for the exception processing.
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // r0: exception
+  // r3: return address/pc that threw exception
+  __ restore_bcp();    // rbcp points to call/send
+  __ restore_locals();
+  __ restore_constant_pool_cache();
+  __ reinit_heapbase();  // restore rheapbase as heapbase.
+  __ get_dispatch();
+
+#ifndef PRODUCT
+  // tell the simulator that the caller method has been reentered
+  if (NotifySimulator) {
+    __ get_method(rmethod);
+    __ notify(Assembler::method_reentry);
+  }
+#endif
+  // Entry point for exceptions thrown within interpreter code
+  Interpreter::_throw_exception_entry = __ pc();
+  // If we came here via a NullPointerException on the receiver of a
+  // method, rmethod may be corrupt.
+  __ get_method(rmethod);
+  // expression stack is undefined here
+  // r0: exception
+  // rbcp: exception bcp
+  __ verify_oop(r0);
+  __ mov(c_rarg1, r0);
+
+  // expression stack must be empty before entering the VM in case of
+  // an exception
+  __ empty_expression_stack();
+  // find exception handler address and preserve exception oop
+  __ call_VM(r3,
+             CAST_FROM_FN_PTR(address,
+                          InterpreterRuntime::exception_handler_for_exception),
+             c_rarg1);
+
+  // Calculate stack limit
+  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
+  __ ldr(rscratch2,
+         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
+  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
+  __ andr(sp, rscratch1, -16);
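+  // Roughly: sp = align_down(interpreter_frame_initial_sp
+  //                          - (max_stack + monitor_size + 4) * wordSize, 16)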
+
+  // r0: exception handler entry point
+  // r3: preserved exception oop
+  // rbcp: bcp for exception handler
+  __ push_ptr(r3); // push exception which is now the only value on the stack
+  __ br(r0); // jump to exception handler (may be _remove_activation_entry!)
+
+  // If the exception is not handled in the current frame the frame is
+  // removed and the exception is rethrown (i.e. exception
+  // continuation is _rethrow_exception).
+  //
+  // Note: At this point the bci still refers to the instruction
+  // which caused the exception and the expression stack is
+  // empty. Thus, for any VM calls at this point, GC will find a legal
+  // oop map (with empty expression stack).
+
+  //
+  // JVMTI PopFrame support
+  //
+
+  Interpreter::_remove_activation_preserving_args_entry = __ pc();
+  __ empty_expression_stack();
+  // Set the popframe_processing bit in pending_popframe_condition
+  // indicating that we are currently handling popframe, so that
+  // call_VMs that may happen later do not trigger new popframe
+  // handling cycles.
+  __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
+  __ orr(r3, r3, JavaThread::popframe_processing_bit);
+  __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
+
+  {
+    // Check to see whether we are returning to a deoptimized frame.
+    // (The PopFrame call ensures that the caller of the popped frame is
+    // either interpreted or compiled and deoptimizes it if compiled.)
+    // In this case, we can't call dispatch_next() after the frame is
+    // popped, but instead must save the incoming arguments and restore
+    // them after deoptimization has occurred.
+    //
+    // Note that we don't compare the return PC against the
+    // deoptimization blob's unpack entry because of the presence of
+    // adapter frames in C2.
+    Label caller_not_deoptimized;
+    __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                               InterpreterRuntime::interpreter_contains), c_rarg1);
+    __ cbnz(r0, caller_not_deoptimized);
+
+    // Compute size of arguments for saving when returning to
+    // deoptimized caller
+    __ get_method(r0);
+    __ ldr(r0, Address(r0, Method::const_offset()));
+    __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
+                                                    size_of_parameters_offset())));
+    __ lsl(r0, r0, Interpreter::logStackElementSize);
+    __ restore_locals(); // XXX do we need this?
+    __ sub(rlocals, rlocals, r0);
+    __ add(rlocals, rlocals, wordSize);
+    // Save these arguments
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                                           Deoptimization::
+                                           popframe_preserve_args),
+                          rthread, r0, rlocals);
+
+    __ remove_activation(vtos,
+                         /* throw_monitor_exception */ false,
+                         /* install_monitor_exception */ false,
+                         /* notify_jvmdi */ false);
+
+    // Inform deoptimization that it is responsible for restoring
+    // these arguments
+    __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
+    __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
+
+    // Continue in deoptimization handler
+    __ ret(lr);
+
+    __ bind(caller_not_deoptimized);
+  }
+
+  __ remove_activation(vtos,
+                       /* throw_monitor_exception */ false,
+                       /* install_monitor_exception */ false,
+                       /* notify_jvmdi */ false);
+
+  // Restore the last_sp and null it out
+  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+
+  __ restore_bcp();
+  __ restore_locals();
+  __ restore_constant_pool_cache();
+  __ get_method(rmethod);
+
+  // The method data pointer was incremented already during
+  // call profiling. We have to restore the mdp for the current bcp.
+  if (ProfileInterpreter) {
+    __ set_method_data_pointer_for_bcp();
+  }
+
+  // Clear the popframe condition flag
+  __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
+  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
+
+#if INCLUDE_JVMTI
+  {
+    Label L_done;
+
+    __ ldrb(rscratch1, Address(rbcp, 0));
+    __ cmpw(rscratch1, Bytecodes::_invokestatic);
+    __ br(Assembler::NE, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ ldr(c_rarg0, Address(rlocals, 0));
+    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);
+
+    __ cbz(r0, L_done);
+
+    __ str(r0, Address(esp, 0));
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
+  // Restore machine SP
+  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
+  __ ldr(rscratch2,
+         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
+  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
+  __ andr(sp, rscratch1, -16);
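+  // (Same stack-limit computation as in the exception-handler path above.)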
+
+  __ dispatch_next(vtos);
+  // end of PopFrame support
+
+  Interpreter::_remove_activation_entry = __ pc();
+
+  // preserve exception over this code sequence
+  __ pop_ptr(r0);
+  __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
+  // remove the activation (without doing throws on illegalMonitorExceptions)
+  __ remove_activation(vtos, false, true, false);
+  // restore exception
+  __ get_vm_result(r0, rthread);
+
+  // In between activations - previous activation type unknown yet
+  // compute continuation point - the continuation point expects the
+  // following registers set up:
+  //
+  // r0: exception
+  // lr: return address/pc that threw exception
+  // rsp: expression stack of caller
+  // rfp: fp of caller
+  // FIXME: There's no point saving LR here because VM calls don't trash it
+  __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                          SharedRuntime::exception_handler_for_return_address),
+                        rthread, lr);
+  __ mov(r1, r0);                               // save exception handler
+  __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
+  // We might be returning to a deopt handler that expects r3 to
+  // contain the exception pc
+  __ mov(r3, lr);
+  // Note that an "issuing PC" is actually the next PC after the call
+  __ br(r1);                                    // jump to exception
+                                                // handler of caller
+}
+
+
+//
+// JVMTI ForceEarlyReturn support
+//
+address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
+  address entry = __ pc();
+
+  __ restore_bcp();
+  __ restore_locals();
+  __ empty_expression_stack();
+  __ load_earlyret_value(state);
+
+  __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
+  Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());
+
+  // Clear the earlyret state
+  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
+  __ str(zr, cond_addr);
+
+  __ remove_activation(state,
+                       false, /* throw_monitor_exception */
+                       false, /* install_monitor_exception */
+                       true); /* notify_jvmdi */
+  __ ret(lr);
+
+  return entry;
+} // end of ForceEarlyReturn support
+
+
+
+//-----------------------------------------------------------------------------
+// Helper for vtos entry point generation
+
+void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
+                                                         address& bep,
+                                                         address& cep,
+                                                         address& sep,
+                                                         address& aep,
+                                                         address& iep,
+                                                         address& lep,
+                                                         address& fep,
+                                                         address& dep,
+                                                         address& vep) {
+  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
+  Label L;
+  aep = __ pc();  __ push_ptr();  __ b(L);
+  fep = __ pc();  __ push_f();    __ b(L);
+  dep = __ pc();  __ push_d();    __ b(L);
+  lep = __ pc();  __ push_l();    __ b(L);
+  bep = cep = sep =
+  iep = __ pc();  __ push_i();
+  vep = __ pc();
+  __ bind(L);
+  generate_and_dispatch(t);
+}
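+
+// Note on the entry points above: each non-vtos entry spills its
+// top-of-stack value (push_ptr/push_f/push_d/push_l/push_i) and then
+// falls through, or branches, to the shared vtos entry point, so a
+// single template body serves all tos states.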
+
+//-----------------------------------------------------------------------------
+// Generation of individual instructions
+
+// helpers for generate_and_dispatch
+
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+  : TemplateInterpreterGenerator(code) {
+   generate_all(); // down here so it can be "virtual"
+}
+
+//-----------------------------------------------------------------------------
+
+// Non-product code
+#ifndef PRODUCT
+address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
+  address entry = __ pc();
+
+  __ push(lr);
+  __ push(state);
+  __ push(RegSet::range(r0, r15), sp);
+  __ mov(c_rarg2, r0);  // Pass itos
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
+             c_rarg1, c_rarg2, c_rarg3);
+  __ pop(RegSet::range(r0, r15), sp);
+  __ pop(state);
+  __ pop(lr);
+  __ ret(lr);                                   // return from result handler
+
+  return entry;
+}
+
+void TemplateInterpreterGenerator::count_bytecode() {
+  Register rscratch3 = r0;
+  __ push(rscratch1);
+  __ push(rscratch2);
+  __ push(rscratch3);
+  Label L;
+  __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
+  __ bind(L);
+  __ ldxr(rscratch1, rscratch2);
+  __ add(rscratch1, rscratch1, 1);
+  __ stxr(rscratch3, rscratch1, rscratch2);
+  __ cbnzw(rscratch3, L);
+  __ pop(rscratch3);
+  __ pop(rscratch2);
+  __ pop(rscratch1);
+}
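+
+// The ldxr/stxr pair above is an atomic increment: stxr succeeds only
+// if the location was untouched since the matching ldxr, writing 0 on
+// success (1 on failure) into rscratch3, so the cbnzw retries on
+// contention. Roughly:
+//   do { v = load_exclusive(counter); }
+//   while (store_exclusive(counter, v + 1) != 0);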
+
+void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }
+
+void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }
+
+
+void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
+  // Call a little run-time stub to avoid blow-up for each bytecode.
+  // The run-time stub saves the right registers, depending on
+  // the tosca in-state for the given template.
+
+  assert(Interpreter::trace_code(t->tos_in()) != NULL,
+         "entry must have been generated");
+  __ bl(Interpreter::trace_code(t->tos_in()));
+  __ reinit_heapbase();
+}
+
+
+void TemplateInterpreterGenerator::stop_interpreter_at() {
+  Label L;
+  __ push(rscratch1);
+  __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
+  __ ldr(rscratch1, Address(rscratch1));
+  __ mov(rscratch2, StopInterpreterAt);
+  __ cmpw(rscratch1, rscratch2);
+  __ br(Assembler::NE, L);
+  __ brk(0);
+  __ bind(L);
+  __ pop(rscratch1);
+}
+
+#ifdef BUILTIN_SIM
+
+#include <sys/mman.h>
+#include <unistd.h>
+#include <errno.h>
+
+extern "C" {
+  static int PAGESIZE = getpagesize();
+  int is_mapped_address(u_int64_t address)
+  {
+    address = (address & ~((u_int64_t)PAGESIZE - 1));
+    if (msync((void *)address, PAGESIZE, MS_ASYNC) == 0) {
+      return true;
+    }
+    if (errno != ENOMEM) {
+      return true;
+    }
+    return false;
+  }
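+  // Probe trick: msync() on an address in an unmapped page fails with
+  // ENOMEM, so success, or failure with any other errno, implies the
+  // page is mapped.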
+
+  void bccheck1(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
+  {
+    if (method != 0) {
+      method[0] = '\0';
+    }
+    if (bcidx != 0) {
+      *bcidx = -2;
+    }
+    if (decode != 0) {
+      decode[0] = 0;
+    }
+
+    if (framesize != 0) {
+      *framesize = -1;
+    }
+
+    if (Interpreter::contains((address)pc)) {
+      AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
+      Method* meth;
+      address bcp;
+      if (fp) {
+#define FRAME_SLOT_METHOD 3
+#define FRAME_SLOT_BCP 7
+        meth = (Method*)sim->getMemory()->loadU64(fp - (FRAME_SLOT_METHOD << 3));
+        bcp = (address)sim->getMemory()->loadU64(fp - (FRAME_SLOT_BCP << 3));
+#undef FRAME_SLOT_METHOD
+#undef FRAME_SLOT_BCP
+      } else {
+        meth = (Method*)sim->getCPUState().xreg(RMETHOD, 0);
+        bcp = (address)sim->getCPUState().xreg(RBCP, 0);
+      }
+      if (meth->is_native()) {
+        return;
+      }
+      if (method && meth->is_method()) {
+        ResourceMark rm;
+        method[0] = 'I';
+        method[1] = ' ';
+        meth->name_and_sig_as_C_string(method + 2, 398);
+      }
+      if (bcidx) {
+        if (meth->contains(bcp)) {
+          *bcidx = meth->bci_from(bcp);
+        } else {
+          *bcidx = -2;
+        }
+      }
+      if (decode) {
+        if (!BytecodeTracer::closure()) {
+          BytecodeTracer::set_closure(BytecodeTracer::std_closure());
+        }
+        stringStream str(decode, 400);
+        BytecodeTracer::trace(meth, bcp, &str);
+      }
+    } else {
+      if (method) {
+        CodeBlob *cb = CodeCache::find_blob((address)pc);
+        if (cb != NULL) {
+          if (cb->is_nmethod()) {
+            ResourceMark rm;
+            nmethod* nm = (nmethod*)cb;
+            method[0] = 'C';
+            method[1] = ' ';
+            nm->method()->name_and_sig_as_C_string(method + 2, 398);
+          } else if (cb->is_adapter_blob()) {
+            strcpy(method, "B adapter blob");
+          } else if (cb->is_runtime_stub()) {
+            strcpy(method, "B runtime stub");
+          } else if (cb->is_exception_stub()) {
+            strcpy(method, "B exception stub");
+          } else if (cb->is_deoptimization_stub()) {
+            strcpy(method, "B deoptimization stub");
+          } else if (cb->is_safepoint_stub()) {
+            strcpy(method, "B safepoint stub");
+          } else if (cb->is_uncommon_trap_stub()) {
+            strcpy(method, "B uncommon trap stub");
+          } else if (cb->contains((address)StubRoutines::call_stub())) {
+            strcpy(method, "B call stub");
+          } else {
+            strcpy(method, "B unknown blob : ");
+            strcat(method, cb->name());
+          }
+          if (framesize != NULL) {
+            *framesize = cb->frame_size();
+          }
+        }
+      }
+    }
+  }
+
+
+  JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
+  {
+    bccheck1(pc, fp, method, bcidx, framesize, decode);
+  }
+}
+
+#endif // BUILTIN_SIM
+#endif // !PRODUCT
+#endif // ! CC_INTERP
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -24,238 +24,12 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateTable.hpp"
-#include "interpreter/bytecodeTracer.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#include <sys/types.h>
-
-#ifndef PRODUCT
+#include "oops/constMethod.hpp"
 #include "oops/method.hpp"
-#endif // !PRODUCT
-
-#ifdef BUILTIN_SIM
-#include "../../../../../../simulator/simulator.hpp"
-#endif
-
-#define __ _masm->
-
-#ifndef CC_INTERP
-
-//-----------------------------------------------------------------------------
-
-extern "C" void entry(CodeBuffer*);
-
-//-----------------------------------------------------------------------------
-
-address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
-  address entry = __ pc();
-
-#ifdef ASSERT
-  {
-    Label L;
-    __ ldr(rscratch1, Address(rfp,
-                       frame::interpreter_frame_monitor_block_top_offset *
-                       wordSize));
-    __ mov(rscratch2, sp);
-    __ cmp(rscratch1, rscratch2); // maximal rsp for current rfp (stack
-                           // grows negative)
-    __ br(Assembler::HS, L); // check if frame is complete
-    __ stop ("interpreter frame not set up");
-    __ bind(L);
-  }
-#endif // ASSERT
-  // Restore bcp under the assumption that the current frame is still
-  // interpreted
-  __ restore_bcp();
-
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // throw exception
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_StackOverflowError));
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
-        const char* name) {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // setup parameters
-  // ??? convention: expect aberrant index in register r1
-  __ movw(c_rarg2, r1);
-  __ mov(c_rarg1, (address)name);
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_ArrayIndexOutOfBoundsException),
-             c_rarg1, c_rarg2);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
-  address entry = __ pc();
-
-  // object is at TOS
-  __ pop(c_rarg1);
-
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_ClassCastException),
-             c_rarg1);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_exception_handler_common(
-        const char* name, const char* message, bool pass_oop) {
-  assert(!pass_oop || message == NULL, "either oop or message but not both");
-  address entry = __ pc();
-  if (pass_oop) {
-    // object is at TOS
-    __ pop(c_rarg2);
-  }
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // setup parameters
-  __ lea(c_rarg1, Address((address)name));
-  if (pass_oop) {
-    __ call_VM(r0, CAST_FROM_FN_PTR(address,
-                                    InterpreterRuntime::
-                                    create_klass_exception),
-               c_rarg1, c_rarg2);
-  } else {
-    // kind of lame ExternalAddress can't take NULL because
-    // external_word_Relocation will assert.
-    if (message != NULL) {
-      __ lea(c_rarg2, Address((address)message));
-    } else {
-      __ mov(c_rarg2, NULL_WORD);
-    }
-    __ call_VM(r0,
-               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
-               c_rarg1, c_rarg2);
-  }
-  // throw exception
-  __ b(address(Interpreter::throw_exception_entry()));
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  // NULL last_sp until next java call
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ dispatch_next(state);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
-  address entry = __ pc();
-
-  // Restore stack bottom in case i2c adjusted stack
-  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // and NULL it as marker that esp is now tos until next java call
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ restore_bcp();
-  __ restore_locals();
-  __ restore_constant_pool_cache();
-  __ get_method(rmethod);
-
-  // Pop N words from the stack
-  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
-  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-  __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);
-
-  __ add(esp, esp, r1, Assembler::LSL, 3);
-
-  // Restore machine SP
-  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
-  __ ldr(rscratch2,
-         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
-  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
-  __ andr(sp, rscratch1, -16);
-
-#ifndef PRODUCT
-  // tell the simulator that the method has been reentered
-  if (NotifySimulator) {
-    __ notify(Assembler::method_reentry);
-  }
-#endif
-  __ get_dispatch();
-  __ dispatch_next(state, step);
-
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
-                                                               int step) {
-  address entry = __ pc();
-  __ restore_bcp();
-  __ restore_locals();
-  __ restore_constant_pool_cache();
-  __ get_method(rmethod);
-
-  // handle exceptions
-  {
-    Label L;
-    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
-    __ cbz(rscratch1, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-
-  __ get_dispatch();
-
-  // Calculate stack limit
-  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
-  __ ldr(rscratch2,
-         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
-  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
-  __ andr(sp, rscratch1, -16);
-
-  // Restore expression stack pointer
-  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // NULL last_sp until next java call
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-
-  __ dispatch_next(state, step);
-  return entry;
-}
+#include "runtime/frame.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 
 
 int AbstractInterpreter::BasicType_as_index(BasicType type) {
@@ -279,1195 +53,6 @@
   return i;
 }
 
-
-address TemplateInterpreterGenerator::generate_result_handler_for(
-        BasicType type) {
-    address entry = __ pc();
-  switch (type) {
-  case T_BOOLEAN: __ uxtb(r0, r0);        break;
-  case T_CHAR   : __ uxth(r0, r0);       break;
-  case T_BYTE   : __ sxtb(r0, r0);        break;
-  case T_SHORT  : __ sxth(r0, r0);        break;
-  case T_INT    : __ uxtw(r0, r0);        break;  // FIXME: We almost certainly don't need this
-  case T_LONG   : /* nothing to do */        break;
-  case T_VOID   : /* nothing to do */        break;
-  case T_FLOAT  : /* nothing to do */        break;
-  case T_DOUBLE : /* nothing to do */        break;
-  case T_OBJECT :
-    // retrieve result from frame
-    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
-    // and verify it
-    __ verify_oop(r0);
-    break;
-  default       : ShouldNotReachHere();
-  }
-  __ ret(lr);                                  // return from result handler
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_safept_entry_for(
-        TosState state,
-        address runtime_entry) {
-  address entry = __ pc();
-  __ push(state);
-  __ call_VM(noreg, runtime_entry);
-  __ membar(Assembler::AnyAny);
-  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
-  return entry;
-}
-
-// Helpers for commoning out cases in the various type of method entries.
-//
-
-
-// increment invocation count & check for overflow
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test
-//
-// rmethod: method
-//
-void InterpreterGenerator::generate_counter_incr(
-        Label* overflow,
-        Label* profile_method,
-        Label* profile_method_continue) {
-  Label done;
-  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
-  if (TieredCompilation) {
-    int increment = InvocationCounter::count_increment;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      // Are we profiling?
-      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
-      __ cbz(r0, no_mdo);
-      // Increment counter in the MDO
-      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
-                                                in_bytes(InvocationCounter::counter_offset()));
-      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
-      __ b(done);
-    }
-    __ bind(no_mdo);
-    // Increment counter in MethodCounters
-    const Address invocation_counter(rscratch2,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-    __ get_method_counters(rmethod, rscratch2, done);
-    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
-    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
-    __ bind(done);
-  } else { // not TieredCompilation
-    const Address backedge_counter(rscratch2,
-                  MethodCounters::backedge_counter_offset() +
-                  InvocationCounter::counter_offset());
-    const Address invocation_counter(rscratch2,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-
-    __ get_method_counters(rmethod, rscratch2, done);
-
-    if (ProfileInterpreter) { // %%% Merge this into MethodData*
-      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
-      __ addw(r1, r1, 1);
-      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
-    }
-    // Update standard invocation counters
-    __ ldrw(r1, invocation_counter);
-    __ ldrw(r0, backedge_counter);
-
-    __ addw(r1, r1, InvocationCounter::count_increment);
-    __ andw(r0, r0, InvocationCounter::count_mask_value);
-
-    __ strw(r1, invocation_counter);
-    __ addw(r0, r0, r1);                // add both counters
-
-    // profile_method is non-null only for interpreted method so
-    // profile_method != NULL == !native_call
-
-    if (ProfileInterpreter && profile_method != NULL) {
-      // Test to see if we should create a method data oop
-      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
-      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
-      __ cmpw(r0, rscratch2);
-      __ br(Assembler::LT, *profile_method_continue);
-
-      // if no method data exists, go to profile_method
-      __ test_method_data_pointer(r0, *profile_method);
-    }
-
-    {
-      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
-      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
-      __ cmpw(r0, rscratch2);
-      __ br(Assembler::HS, *overflow);
-    }
-    __ bind(done);
-  }
-}
-
-void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
-
-  // Asm interpreter on entry
-  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
-  // Everything as it was on entry
-
-  // InterpreterRuntime::frequency_counter_overflow takes two
-  // arguments, the first (thread) is passed by call_VM, the second
-  // indicates if the counter overflow occurs at a backwards branch
-  // (NULL bcp).  We pass zero for it.  The call returns the address
-  // of the verified entry point for the method or NULL if the
-  // compilation did not complete (either went background or bailed
-  // out).
-  __ mov(c_rarg1, 0);
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::frequency_counter_overflow),
-             c_rarg1);
-
-  __ b(*do_continue);
-}
-
-// See if we've got enough room on the stack for locals plus overhead.
-// The expression stack grows down incrementally, so the normal guard
-// page mechanism will work for that.
-//
-// NOTE: Since the additional locals are also always pushed (wasn't
-// obvious in generate_method_entry) so the guard should work for them
-// too.
-//
-// Args:
-//      r3: number of additional locals this frame needs (what we must check)
-//      rmethod: Method*
-//
-// Kills:
-//      r0
-void InterpreterGenerator::generate_stack_overflow_check(void) {
-
-  // monitor entry size: see picture of stack set
-  // (generate_method_entry) and frame_amd64.hpp
-  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
-
-  // total overhead size: entry_size + (saved rbp through expr stack
-  // bottom).  be sure to change this if you add/subtract anything
-  // to/from the overhead area
-  const int overhead_size =
-    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
-
-  const int page_size = os::vm_page_size();
-
-  Label after_frame_check;
-
-  // see if the frame is greater than one page in size. If so,
-  // then we need to verify there is enough stack space remaining
-  // for the additional locals.
-  //
-  // Note that we use SUBS rather than CMP here because the immediate
-  // field of this instruction may overflow.  SUBS can cope with this
-  // because it is a macro that will expand to some number of MOV
-  // instructions and a register operation.
-  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
-  __ br(Assembler::LS, after_frame_check);
-
-  // compute rsp as if this were going to be the last frame on
-  // the stack before the red zone
-
-  const Address stack_base(rthread, Thread::stack_base_offset());
-  const Address stack_size(rthread, Thread::stack_size_offset());
-
-  // locals + overhead, in bytes
-  __ mov(r0, overhead_size);
-  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.
-
-  __ ldr(rscratch1, stack_base);
-  __ ldr(rscratch2, stack_size);
-
-#ifdef ASSERT
-  Label stack_base_okay, stack_size_okay;
-  // verify that thread stack base is non-zero
-  __ cbnz(rscratch1, stack_base_okay);
-  __ stop("stack base is zero");
-  __ bind(stack_base_okay);
-  // verify that thread stack size is non-zero
-  __ cbnz(rscratch2, stack_size_okay);
-  __ stop("stack size is zero");
-  __ bind(stack_size_okay);
-#endif
-
-  // Add stack base to locals and subtract stack size
-  __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
-  __ add(r0, r0, rscratch1);
-
-  // Use the maximum number of pages we might bang.
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
-                                                                              (StackRedPages+StackYellowPages);
-
-  // add in the red and yellow zone sizes
-  __ add(r0, r0, max_pages * page_size * 2);
-
-  // check against the current stack bottom
-  __ cmp(sp, r0);
-  __ br(Assembler::HI, after_frame_check);
-
-  // Remove the incoming args, peeling the machine SP back to where it
-  // was in the caller.  This is not strictly necessary, but unless we
-  // do so the stack frame may have a garbage FP; this ensures a
-  // correct call stack that we can always unwind.  The ANDR should be
-  // unnecessary because the sender SP in r13 is always aligned, but
-  // it doesn't hurt.
-  __ andr(sp, r13, -16);
-
-  // Note: the restored frame is not necessarily interpreted.
-  // Use the shared runtime version of the StackOverflowError.
-  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
-  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
-
-  // all done with frame size check
-  __ bind(after_frame_check);
-}
-
-// Allocate monitor and lock method (asm interpreter)
-//
-// Args:
-//      rmethod: Method*
-//      rlocals: locals
-//
-// Kills:
-//      r0
-//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
-//      rscratch1, rscratch2 (scratch regs)
-void TemplateInterpreterGenerator::lock_method() {
-  // synchronize method
-  const Address access_flags(rmethod, Method::access_flags_offset());
-  const Address monitor_block_top(
-        rfp,
-        frame::interpreter_frame_monitor_block_top_offset * wordSize);
-  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
-
-#ifdef ASSERT
-  {
-    Label L;
-    __ ldrw(r0, access_flags);
-    __ tst(r0, JVM_ACC_SYNCHRONIZED);
-    __ br(Assembler::NE, L);
-    __ stop("method doesn't need synchronization");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  // get synchronization object
-  {
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    Label done;
-    __ ldrw(r0, access_flags);
-    __ tst(r0, JVM_ACC_STATIC);
-    // get receiver (assume this is frequent case)
-    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
-    __ br(Assembler::EQ, done);
-    __ ldr(r0, Address(rmethod, Method::const_offset()));
-    __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
-    __ ldr(r0, Address(r0,
-                           ConstantPool::pool_holder_offset_in_bytes()));
-    __ ldr(r0, Address(r0, mirror_offset));
-
-#ifdef ASSERT
-    {
-      Label L;
-      __ cbnz(r0, L);
-      __ stop("synchronization object is NULL");
-      __ bind(L);
-    }
-#endif // ASSERT
-
-    __ bind(done);
-  }
-
-  // add space for monitor & lock
-  __ sub(sp, sp, entry_size); // add space for a monitor entry
-  __ sub(esp, esp, entry_size);
-  __ mov(rscratch1, esp);
-  __ str(rscratch1, monitor_block_top);  // set new monitor block top
-  // store object
-  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
-  __ mov(c_rarg1, esp); // object address
-  __ lock_object(c_rarg1);
-}
-
-// Generate a fixed interpreter frame. This is identical setup for
-// interpreted methods and for native methods hence the shared code.
-//
-// Args:
-//      lr: return address
-//      rmethod: Method*
-//      rlocals: pointer to locals
-//      rcpool: cp cache
-//      stack_pointer: previous sp
-void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
-  // initialize fixed part of activation frame
-  if (native_call) {
-    __ sub(esp, sp, 12 *  wordSize);
-    __ mov(rbcp, zr);
-    __ stp(esp, zr, Address(__ pre(sp, -12 * wordSize)));
-    // add 2 zero-initialized slots for native calls
-    __ stp(zr, zr, Address(sp, 10 * wordSize));
-  } else {
-    __ sub(esp, sp, 10 *  wordSize);
-    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));      // get ConstMethod
-    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
-    __ stp(esp, rbcp, Address(__ pre(sp, -10 * wordSize)));
-  }
-
-  if (ProfileInterpreter) {
-    Label method_data_continue;
-    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
-    __ cbz(rscratch1, method_data_continue);
-    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
-    __ bind(method_data_continue);
-    __ stp(rscratch1, rmethod, Address(sp, 4 * wordSize));  // save Method* and mdp (method data pointer)
-  } else {
-    __ stp(zr, rmethod, Address(sp, 4 * wordSize));        // save Method* (no mdp)
-  }
-
-  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
-  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
-  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
-  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));
-
-  __ stp(rfp, lr, Address(sp, 8 * wordSize));
-  __ lea(rfp, Address(sp, 8 * wordSize));
-
-  // set sender sp
-  // leave last_sp as null
-  __ stp(zr, r13, Address(sp, 6 * wordSize));
-
-  // Move SP out of the way
-  if (! native_call) {
-    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
-    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
-    __ andr(sp, rscratch1, -16);
-  }
-}
-
-// End of helpers
-
-// Various method entries
-//------------------------------------------------------------------------------------------------------------------------
-//
-//
-
-// Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  // Code: _aload_0, _getfield, _areturn
-  // parameter size = 1
-  //
-  // The code that gets generated by this routine is split into 2 parts:
-  //    1. The "intrinsified" code for G1 (or any SATB based GC),
-  //    2. The slow path - which is an expansion of the regular method entry.
-  //
-  // Notes:-
-  // * In the G1 code we do not check whether we need to block for
-  //   a safepoint. If G1 is enabled then we must execute the specialized
-  //   code for Reference.get (except when the Reference object is null)
-  //   so that we can log the value in the referent field with an SATB
-  //   update buffer.
-  //   If the code for the getfield template is modified so that the
-  //   G1 pre-barrier code is executed when the current method is
-  //   Reference.get() then going through the normal method entry
-  //   will be fine.
-  // * The G1 code can, however, check the receiver object (the instance
-  //   of java.lang.Reference) and jump to the slow path if null. If the
-  //   Reference object is null then we obviously cannot fetch the referent
-  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
-  //   regular method entry code to generate the NPE.
-  //
-  // This code is based on generate_accessor_enty.
-  //
-  // rmethod: Method*
-  // r13: senderSP must preserve for slow path, set SP to it on fast path
-
-  address entry = __ pc();
-
-  const int referent_offset = java_lang_ref_Reference::referent_offset;
-  guarantee(referent_offset > 0, "referent offset not initialized");
-
-  if (UseG1GC) {
-    Label slow_path;
-    const Register local_0 = c_rarg0;
-    // Check if local 0 != NULL
-    // If the receiver is null then it is OK to jump to the slow path.
-    __ ldr(local_0, Address(esp, 0));
-    __ cbz(local_0, slow_path);
-
-
-    // Load the value of the referent field.
-    const Address field_address(local_0, referent_offset);
-    __ load_heap_oop(local_0, field_address);
-
-    // Generate the G1 pre-barrier code to log the value of
-    // the referent field in an SATB buffer.
-    __ enter(); // g1_write may call runtime
-    __ g1_write_barrier_pre(noreg /* obj */,
-                            local_0 /* pre_val */,
-                            rthread /* thread */,
-                            rscratch2 /* tmp */,
-                            true /* tosca_live */,
-                            true /* expand_call */);
-    __ leave();
-    // areturn
-    __ andr(sp, r13, -16);  // done with stack
-    __ ret(lr);
-
-    // generate a vanilla interpreter entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
-    return entry;
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the accessor entry point
-  // Reference.get is an accessor
-  return generate_accessor_entry();
-}
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.update(int crc, int b)
- */
-address InterpreterGenerator::generate_CRC32_update_entry() {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rmethod: Method*
-    // r13: senderSP must preserved for slow path
-    // esp: args
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    unsigned long offset;
-    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
-    __ ldrw(rscratch1, Address(rscratch1, offset));
-    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
-    __ cbnz(rscratch1, slow_path);
-
-    // We don't generate local frame and don't align stack because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = c_rarg0;  // crc
-    const Register val = c_rarg1;  // source java byte value
-    const Register tbl = c_rarg2;  // scratch
-
-    // Arguments are reversed on java expression stack
-    __ ldrw(val, Address(esp, 0));              // byte value
-    __ ldrw(crc, Address(esp, wordSize));       // Initial CRC
-
-    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
-    __ add(tbl, tbl, offset);
-
-    __ ornw(crc, zr, crc); // ~crc
-    __ update_byte_crc32(crc, val, tbl);
-    __ ornw(crc, zr, crc); // ~crc
-
-    // result in c_rarg0
-
-    __ andr(sp, r13, -16);
-    __ ret(lr);
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
- *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
- */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rmethod,: Method*
-    // r13: senderSP must preserved for slow path
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    unsigned long offset;
-    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
-    __ ldrw(rscratch1, Address(rscratch1, offset));
-    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
-    __ cbnz(rscratch1, slow_path);
-
-    // We don't generate local frame and don't align stack because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = c_rarg0;  // crc
-    const Register buf = c_rarg1;  // source java byte array address
-    const Register len = c_rarg2;  // length
-    const Register off = len;      // offset (never overlaps with 'len')
-
-    // Arguments are reversed on java expression stack
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
-      __ ldr(buf, Address(esp, 2*wordSize)); // long buf
-      __ ldrw(off, Address(esp, wordSize)); // offset
-      __ add(buf, buf, off); // + offset
-      __ ldrw(crc,   Address(esp, 4*wordSize)); // Initial CRC
-    } else {
-      __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
-      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ ldrw(off, Address(esp, wordSize)); // offset
-      __ add(buf, buf, off); // + offset
-      __ ldrw(crc,   Address(esp, 3*wordSize)); // Initial CRC
-    }
-    // Can now load 'len' since we're finished with 'off'
-    __ ldrw(len, Address(esp, 0x0)); // Length
-
-    __ andr(sp, r13, -16); // Restore the caller's SP
-
-    // We are frameless so we can just jump to the stub.
-    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
-void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
-  // Bang each page in the shadow zone. We can't assume it's been done for
-  // an interpreter frame with greater than a page of locals, so each page
-  // needs to be checked.  Only true for non-native.
-  if (UseStackBanging) {
-    const int start_page = native_call ? StackShadowPages : 1;
-    const int page_size = os::vm_page_size();
-    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
-      __ sub(rscratch2, sp, pages*page_size);
-      __ str(zr, Address(rscratch2));
-    }
-  }
-}
-
-
-// Interpreter stub for calling a native method. (asm interpreter)
-// This sets up a somewhat different looking stack for calling the
-// native method than the typical interpreter frame setup.
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // r1: Method*
-  // rscratch1: sender sp
-
-  address entry_point = __ pc();
-
-  const Address constMethod       (rmethod, Method::const_offset());
-  const Address access_flags      (rmethod, Method::access_flags_offset());
-  const Address size_of_parameters(r2, ConstMethod::
-                                       size_of_parameters_offset());
-
-  // get parameter size (always needed)
-  __ ldr(r2, constMethod);
-  __ load_unsigned_short(r2, size_of_parameters);
-
-  // native calls don't need the stack size check since they have no
-  // expression stack and the arguments are already on the stack and
-  // we only add a handful of words to the stack
-
-  // rmethod: Method*
-  // r2: size of parameters
-  // rscratch1: sender sp
-
-  // for natives the size of locals is zero
-
-  // compute beginning of parameters (rlocals)
-  __ add(rlocals, esp, r2, ext::uxtx, 3);
-  __ add(rlocals, rlocals, -wordSize);
-
-  // Pull SP back to minimum size: this avoids holes in the stack
-  __ andr(sp, esp, -16);
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(true);
-#ifndef PRODUCT
-  // tell the simulator that a method has been entered
-  if (NotifySimulator) {
-    __ notify(Assembler::method_entry);
-  }
-#endif
-
-  // make sure method is native & not abstract
-#ifdef ASSERT
-  __ ldrw(r0, access_flags);
-  {
-    Label L;
-    __ tst(r0, JVM_ACC_NATIVE);
-    __ br(Assembler::NE, L);
-    __ stop("tried to execute non-native method as native");
-    __ bind(L);
-  }
-  {
-    Label L;
-    __ tst(r0, JVM_ACC_ABSTRACT);
-    __ br(Assembler::EQ, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception
-  // handler would try to exit the monitor of synchronized methods
-  // which hasn't been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation
-  // will check this flag.
-
-   const Address do_not_unlock_if_synchronized(rthread,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ mov(rscratch2, true);
-  __ strb(rscratch2, do_not_unlock_if_synchronized);
-
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-  }
-
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(true);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ strb(zr, do_not_unlock_if_synchronized);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so method is not locked if overflows.
-  if (synchronized) {
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-    {
-      Label L;
-      __ ldrw(r0, access_flags);
-      __ tst(r0, JVM_ACC_SYNCHRONIZED);
-      __ br(Assembler::EQ, L);
-      __ stop("method needs synchronization");
-      __ bind(L);
-    }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  {
-    Label L;
-    const Address monitor_block_top(rfp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ ldr(rscratch1, monitor_block_top);
-    __ cmp(esp, rscratch1);
-    __ br(Assembler::EQ, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti support
-  __ notify_method_entry();
-
-  // work registers
-  const Register t = r17;
-  const Register result_handler = r19;
-
-  // allocate space for parameters
-  __ ldr(t, Address(rmethod, Method::const_offset()));
-  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
-
-  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
-  __ andr(sp, rscratch1, -16);
-  __ mov(esp, rscratch1);
-
-  // get signature handler
-  {
-    Label L;
-    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
-    __ cbnz(t, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::prepare_native_call),
-               rmethod);
-    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
-    __ bind(L);
-  }
-
-  // call signature handler
-  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
-         "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
-         "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
-          "adjust this code");
-
-  // The generated handlers do not touch rmethod (the method).
-  // However, large signatures cannot be cached and are generated
-  // each time here.  The slow-path generator can do a GC on return,
-  // so we must reload it after the call.
-  __ blr(t);
-  __ get_method(rmethod);        // slow path can do a GC, reload rmethod
-
-
-  // result handler is in r0
-  // set result handler
-  __ mov(result_handler, r0);
-  // pass mirror handle if static call
-  {
-    Label L;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
-    __ tst(t, JVM_ACC_STATIC);
-    __ br(Assembler::EQ, L);
-    // get mirror
-    __ ldr(t, Address(rmethod, Method::const_offset()));
-    __ ldr(t, Address(t, ConstMethod::constants_offset()));
-    __ ldr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
-    __ ldr(t, Address(t, mirror_offset));
-    // copy mirror into activation frame
-    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
-    // pass handle to mirror
-    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
-    __ bind(L);
-  }
-
-  // get native function entry point in r10
-  {
-    Label L;
-    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
-    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
-    __ mov(rscratch2, unsatisfied);
-    __ ldr(rscratch2, rscratch2);
-    __ cmp(r10, rscratch2);
-    __ br(Assembler::NE, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::prepare_native_call),
-               rmethod);
-    __ get_method(rmethod);
-    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
-    __ bind(L);
-  }
-
-  // pass JNIEnv
-  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
-
-  // It is enough that the pc() points into the right code
-  // segment. It does not have to be the correct return pc.
-  __ set_last_Java_frame(esp, rfp, (address)NULL, rscratch1);
-
-  // change thread state
-#ifdef ASSERT
-  {
-    Label L;
-    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
-    __ cmp(t, _thread_in_Java);
-    __ br(Assembler::EQ, L);
-    __ stop("Wrong thread state in native stub");
-    __ bind(L);
-  }
-#endif
-
-  // Change state to native
-  __ mov(rscratch1, _thread_in_native);
-  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
-  __ stlrw(rscratch1, rscratch2);
-
-  // Call the native method.
-  __ blrt(r10, rscratch1);
-  __ maybe_isb();
-  __ get_method(rmethod);
-  // result potentially in r0 or v0
-
-  // make room for the pushes we're about to do
-  __ sub(rscratch1, esp, 4 * wordSize);
-  __ andr(sp, rscratch1, -16);
-
-  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
-  // in order to extract the result of a method call. If the order of these
-  // pushes change or anything else is added to the stack then the code in
-  // interpreter_frame_result must also change.
-  __ push(dtos);
-  __ push(ltos);
-
-  // change thread state
-  __ mov(rscratch1, _thread_in_native_trans);
-  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
-  __ stlrw(rscratch1, rscratch2);
-
-  if (os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ dsb(Assembler::SY);
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(rthread, rscratch2);
-    }
-  }
-
-  // check for safepoint operation in progress and/or pending suspend requests
-  {
-    Label Continue;
-    {
-      unsigned long offset;
-      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
-      __ ldrw(rscratch2, Address(rscratch2, offset));
-    }
-    assert(SafepointSynchronize::_not_synchronized == 0,
-           "SafepointSynchronize::_not_synchronized");
-    Label L;
-    __ cbnz(rscratch2, L);
-    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
-    __ cbz(rscratch2, Continue);
-    __ bind(L);
-
-    // Don't use call_VM as it will see a possible pending exception
-    // and forward it, never returning here and preventing us from
-    // clearing _last_native_pc down below. So we do the runtime call by
-    // hand.
-    //
-    __ mov(c_rarg0, rthread);
-    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
-    __ blrt(rscratch2, 1, 0, 0);
-    __ maybe_isb();
-    __ get_method(rmethod);
-    __ reinit_heapbase();
-    __ bind(Continue);
-  }
-
-  // change thread state
-  __ mov(rscratch1, _thread_in_Java);
-  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
-  __ stlrw(rscratch1, rscratch2);
-
-  // reset_last_Java_frame
-  __ reset_last_Java_frame(true, true);
-
-  // reset handle block
-  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
-  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));
-
-  // If result is an oop unbox and store it in frame where gc will see it
-  // and result handler will pick it up
-
-  {
-    Label no_oop, store_result;
-    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
-    __ cmp(t, result_handler);
-    __ br(Assembler::NE, no_oop);
-    // retrieve result
-    __ pop(ltos);
-    __ cbz(r0, store_result);
-    __ ldr(r0, Address(r0, 0));
-    __ bind(store_result);
-    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
-    // keep stack depth as expected by pushing oop which will eventually be discarded
-    __ push(ltos);
-    __ bind(no_oop);
-  }
-
-  {
-    Label no_reguard;
-    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
-    __ ldrb(rscratch1, Address(rscratch1));
-    __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled);
-    __ br(Assembler::NE, no_reguard);
-
-    __ pusha(); // XXX only save smashed registers
-    __ mov(c_rarg0, rthread);
-    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
-    __ blrt(rscratch2, 0, 0, 0);
-    __ popa(); // XXX only restore smashed registers
-    __ bind(no_reguard);
-  }
-
-  // The method register is junk from after the thread_in_native transition
-  // until here.  Also can't call_VM until the bcp has been
-  // restored.  Need bcp for throwing exception below so get it now.
-  __ get_method(rmethod);
-
-  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
-  // rbcp == code_base()
-  __ ldr(rbcp, Address(rmethod, Method::const_offset()));   // get ConstMethod*
-  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
-  // handle exceptions (exception handling will handle unlocking!)
-  {
-    Label L;
-    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
-    __ cbz(rscratch1, L);
-    // Note: At some point we may want to unify this with the code
-    // used in call_VM_base(); i.e., we should use the
-    // StubRoutines::forward_exception code. For now this doesn't work
-    // here because the rsp is not correctly set at this point.
-    __ MacroAssembler::call_VM(noreg,
-                               CAST_FROM_FN_PTR(address,
-                               InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-
-  // do unlocking if necessary
-  {
-    Label L;
-    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
-    __ tst(t, JVM_ACC_SYNCHRONIZED);
-    __ br(Assembler::EQ, L);
-    // the code below should be shared with interpreter macro
-    // assembler implementation
-    {
-      Label unlock;
-      // BasicObjectLock will be first in list, since this is a
-      // synchronized method. However, need to check that the object
-      // has not been unlocked by an explicit monitorexit bytecode.
-
-      // monitor expected in c_rarg1 for the slow unlock path
-      __ lea (c_rarg1, Address(rfp,   // address of first monitor
-                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
-                                          wordSize - sizeof(BasicObjectLock))));
-
-      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
-      __ cbnz(t, unlock);
-
-      // Entry already unlocked, need to throw exception
-      __ MacroAssembler::call_VM(noreg,
-                                 CAST_FROM_FN_PTR(address,
-                   InterpreterRuntime::throw_illegal_monitor_state_exception));
-      __ should_not_reach_here();
-
-      __ bind(unlock);
-      __ unlock_object(c_rarg1);
-    }
-    __ bind(L);
-  }
-
-  // jvmti support
-  // Note: This must happen _after_ handling/throwing any exceptions since
-  //       the exception handler code notifies the runtime of method exits
-  //       too. If this happens before, method entry/exit notifications are
-  //       not properly paired (was bug - gri 11/22/99).
-  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
-
-  // restore potential result in r0:d0, then call the result handler to
-  // convert the value to the expected Java type and handle the result
-
-  __ pop(ltos);
-  __ pop(dtos);
-
-  __ blr(result_handler);
-
-  // remove activation
-  __ ldr(esp, Address(rfp,
-                    frame::interpreter_frame_sender_sp_offset *
-                    wordSize)); // get sender sp
-  // remove frame anchor
-  __ leave();
-
-  // restore sender sp
-  __ mov(sp, esp);
-
-  __ ret(lr);
-
-  if (inc_counter) {
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
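
The entry above walks the thread through the VM's thread-state protocol: _thread_in_Java before the call, _thread_in_native during it, and _thread_in_native_trans on the way back while safepoints and suspend requests are checked. A minimal C++ sketch of that protocol follows; the thread type and most helpers are illustrative stand-ins (only JavaThread::check_special_condition_for_native_trans is a real VM entry point).

// Illustrative sketch only; not VM code.
enum ThreadState { _thread_in_Java, _thread_in_native, _thread_in_native_trans };

struct ThreadSketch {          // stand-in for JavaThread
  ThreadState state;
  int         suspend_flags;
};

void call_native_sketch(ThreadSketch* t, void (*native_fn)(), bool safepoint_pending) {
  t->state = _thread_in_native;        // release-store, as stlrw does above
  native_fn();                         // the JNI call itself
  t->state = _thread_in_native_trans;  // still "native" as far as GC is concerned
  // full fence (UseMembar) or serialization-page write goes here
  if (safepoint_pending || t->suspend_flags != 0) {
    // JavaThread::check_special_condition_for_native_trans(t); // may block
  }
  t->state = _thread_in_Java;          // safe to touch Java state again
}
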
-
-//
-// Generic interpreted method entry to (asm) interpreter
-//
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // rscratch1: sender sp
-  address entry_point = __ pc();
-
-  const Address constMethod(rmethod, Method::const_offset());
-  const Address access_flags(rmethod, Method::access_flags_offset());
-  const Address size_of_parameters(r3,
-                                   ConstMethod::size_of_parameters_offset());
-  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());
-
-  // get parameter size (always needed)
-  // need to load the const method first
-  __ ldr(r3, constMethod);
-  __ load_unsigned_short(r2, size_of_parameters);
-
-  // r2: size of parameters
-
-  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
-  __ sub(r3, r3, r2); // r3 = no. of additional locals
-
-  // see if we've got enough room on the stack for locals plus overhead.
-  generate_stack_overflow_check();
-
-  // compute beginning of parameters (rlocals)
-  __ add(rlocals, esp, r2, ext::uxtx, 3);
-  __ sub(rlocals, rlocals, wordSize);
-
-  // Make room for locals
-  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
-  __ andr(sp, rscratch1, -16);
-
-  // r3 - # of additional locals
-  // allocate space for locals
-  // explicitly initialize locals
-  {
-    Label exit, loop;
-    __ ands(zr, r3, r3);
-    __ br(Assembler::LE, exit); // do nothing if r3 <= 0
-    __ bind(loop);
-    __ str(zr, Address(__ post(rscratch1, wordSize)));
-    __ sub(r3, r3, 1); // until everything initialized
-    __ cbnz(r3, loop);
-    __ bind(exit);
-  }
-
-  // Load the base of the dispatch table
-  __ get_dispatch();
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(false);
-#ifndef PRODUCT
-  // tell the simulator that a method has been entered
-  if (NotifySimulator) {
-    __ notify(Assembler::method_entry);
-  }
-#endif
-  // make sure method is not native & not abstract
-#ifdef ASSERT
-  __ ldrw(r0, access_flags);
-  {
-    Label L;
-    __ tst(r0, JVM_ACC_NATIVE);
-    __ br(Assembler::EQ, L);
-    __ stop("tried to execute native method as non-native");
-    __ bind(L);
-  }
-  {
-    Label L;
-    __ tst(r0, JVM_ACC_ABSTRACT);
-    __ br(Assembler::EQ, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception
-  // handler would try to exit the monitor of a synchronized method
-  // which hasn't been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation code
-  // will check this flag.
-
-  const Address do_not_unlock_if_synchronized(rthread,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ mov(rscratch2, true);
-  __ strb(rscratch2, do_not_unlock_if_synchronized);
-
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  Label profile_method;
-  Label profile_method_continue;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow,
-                          &profile_method,
-                          &profile_method_continue);
-    if (ProfileInterpreter) {
-      __ bind(profile_method_continue);
-    }
-  }
-
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(false);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ strb(zr, do_not_unlock_if_synchronized);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so the method is not locked if the counter overflows.
-  if (synchronized) {
-    // Allocate monitor and lock method
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-    {
-      Label L;
-      __ ldrw(r0, access_flags);
-      __ tst(r0, JVM_ACC_SYNCHRONIZED);
-      __ br(Assembler::EQ, L);
-      __ stop("method needs synchronization");
-      __ bind(L);
-    }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  {
-    Label L;
-    const Address monitor_block_top(rfp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ ldr(rscratch1, monitor_block_top);
-    __ cmp(esp, rscratch1);
-    __ br(Assembler::EQ, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti support
-  __ notify_method_entry();
-
-  __ dispatch_next(vtos);
-
-  // invocation counter overflow
-  if (inc_counter) {
-    if (ProfileInterpreter) {
-      // We have decided to profile this method in the interpreter
-      __ bind(profile_method);
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-      __ set_method_data_pointer_for_bcp();
-      // this reload of the method is likely unnecessary
-      __ get_method(r1);
-      __ b(profile_method_continue);
-    }
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
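
The explicit locals initialization in the entry above (the str/sub/cbnz loop) simply zero-fills the local slots that are not parameters. A scalar sketch, with hypothetical names for the two values the loop keeps in rscratch1 and r3:

// Illustrative scalar form of the zero-fill loop above.
void zero_locals_sketch(long* slot /* rscratch1 */, int extra_locals /* r3 */) {
  for (int i = 0; i < extra_locals; i++) {
    *slot++ = 0;   // str zr, [rscratch1], #wordSize
  }
}
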
-
 // These should never be compiled since the interpreter will prefer
 // the compiled version to the intrinsic version.
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
@@ -1593,483 +178,3 @@
   *interpreter_frame->interpreter_frame_cache_addr() =
     method->constants()->cache();
 }
-
-
-//-----------------------------------------------------------------------------
-// Exceptions
-
-void TemplateInterpreterGenerator::generate_throw_exception() {
-  // Entry point in previous activation (i.e., if the caller was
-  // interpreted)
-  Interpreter::_rethrow_exception_entry = __ pc();
-  // Restore sp to interpreter_frame_last_sp even though we are going
-  // to empty the expression stack for the exception processing.
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // r0: exception
-  // r3: return address/pc that threw exception
-  __ restore_bcp();    // rbcp points to call/send
-  __ restore_locals();
-  __ restore_constant_pool_cache();
-  __ reinit_heapbase();  // restore rheapbase as heapbase.
-  __ get_dispatch();
-
-#ifndef PRODUCT
-  // tell the simulator that the caller method has been reentered
-  if (NotifySimulator) {
-    __ get_method(rmethod);
-    __ notify(Assembler::method_reentry);
-  }
-#endif
-  // Entry point for exceptions thrown within interpreter code
-  Interpreter::_throw_exception_entry = __ pc();
-  // If we came here via a NullPointerException on the receiver of a
-  // method, rmethod may be corrupt.
-  __ get_method(rmethod);
-  // expression stack is undefined here
-  // r0: exception
-  // rbcp: exception bcp
-  __ verify_oop(r0);
-  __ mov(c_rarg1, r0);
-
-  // expression stack must be empty before entering the VM in case of
-  // an exception
-  __ empty_expression_stack();
-  // find exception handler address and preserve exception oop
-  __ call_VM(r3,
-             CAST_FROM_FN_PTR(address,
-                          InterpreterRuntime::exception_handler_for_exception),
-             c_rarg1);
-
-  // Calculate stack limit
-  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
-  __ ldr(rscratch2,
-         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
-  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
-  __ andr(sp, rscratch1, -16);
-
-  // r0: exception handler entry point
-  // r3: preserved exception oop
-  // rbcp: bcp for exception handler
-  __ push_ptr(r3); // push exception which is now the only value on the stack
-  __ br(r0); // jump to exception handler (may be _remove_activation_entry!)
-
-  // If the exception is not handled in the current frame the frame is
-  // removed and the exception is rethrown (i.e. exception
-  // continuation is _rethrow_exception).
-  //
-  // Note: At this point the bci still refers to the instruction
-  // which caused the exception and the expression stack is
-  // empty. Thus, for any VM calls at this point, GC will find a legal
-  // oop map (with empty expression stack).
-
-  //
-  // JVMTI PopFrame support
-  //
-
-  Interpreter::_remove_activation_preserving_args_entry = __ pc();
-  __ empty_expression_stack();
-  // Set the popframe_processing bit in pending_popframe_condition
-  // indicating that we are currently handling popframe, so that
-  // call_VMs that may happen later do not trigger new popframe
-  // handling cycles.
-  __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
-  __ orr(r3, r3, JavaThread::popframe_processing_bit);
-  __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
-
-  {
-    // Check to see whether we are returning to a deoptimized frame.
-    // (The PopFrame call ensures that the caller of the popped frame is
-    // either interpreted or compiled and deoptimizes it if compiled.)
-    // In this case, we can't call dispatch_next() after the frame is
-    // popped, but instead must save the incoming arguments and restore
-    // them after deoptimization has occurred.
-    //
-    // Note that we don't compare the return PC against the
-    // deoptimization blob's unpack entry because of the presence of
-    // adapter frames in C2.
-    Label caller_not_deoptimized;
-    __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                               InterpreterRuntime::interpreter_contains), c_rarg1);
-    __ cbnz(r0, caller_not_deoptimized);
-
-    // Compute size of arguments for saving when returning to
-    // deoptimized caller
-    __ get_method(r0);
-    __ ldr(r0, Address(r0, Method::const_offset()));
-    __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
-                                                    size_of_parameters_offset())));
-    __ lsl(r0, r0, Interpreter::logStackElementSize);
-    __ restore_locals(); // XXX do we need this?
-    __ sub(rlocals, rlocals, r0);
-    __ add(rlocals, rlocals, wordSize);
-    // Save these arguments
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                                           Deoptimization::
-                                           popframe_preserve_args),
-                          rthread, r0, rlocals);
-
-    __ remove_activation(vtos,
-                         /* throw_monitor_exception */ false,
-                         /* install_monitor_exception */ false,
-                         /* notify_jvmdi */ false);
-
-    // Inform deoptimization that it is responsible for restoring
-    // these arguments
-    __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
-    __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
-
-    // Continue in deoptimization handler
-    __ ret(lr);
-
-    __ bind(caller_not_deoptimized);
-  }
-
-  __ remove_activation(vtos,
-                       /* throw_monitor_exception */ false,
-                       /* install_monitor_exception */ false,
-                       /* notify_jvmdi */ false);
-
-  // Restore the last_sp and null it out
-  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-
-  __ restore_bcp();
-  __ restore_locals();
-  __ restore_constant_pool_cache();
-  __ get_method(rmethod);
-
-  // The method data pointer was incremented already during
-  // call profiling. We have to restore the mdp for the current bcp.
-  if (ProfileInterpreter) {
-    __ set_method_data_pointer_for_bcp();
-  }
-
-  // Clear the popframe condition flag
-  __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
-  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
-
-#if INCLUDE_JVMTI
-  {
-    Label L_done;
-
-    __ ldrb(rscratch1, Address(rbcp, 0));
-    __ cmpw(rscratch1, Bytecodes::_invokestatic);
-    __ br(Assembler::EQ, L_done);
-
-    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
-    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
-
-    __ ldr(c_rarg0, Address(rlocals, 0));
-    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);
-
-    __ cbz(r0, L_done);
-
-    __ str(r0, Address(esp, 0));
-    __ bind(L_done);
-  }
-#endif // INCLUDE_JVMTI
-
-  // Restore machine SP
-  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
-  __ ldr(rscratch2,
-         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
-  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
-  __ andr(sp, rscratch1, -16);
-
-  __ dispatch_next(vtos);
-  // end of PopFrame support
-
-  Interpreter::_remove_activation_entry = __ pc();
-
-  // preserve exception over this code sequence
-  __ pop_ptr(r0);
-  __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
-  // remove the activation (without doing throws on illegalMonitorExceptions)
-  __ remove_activation(vtos, false, true, false);
-  // restore exception
-  __ get_vm_result(r0, rthread);
-
-  // In between activations - previous activation type unknown yet
-  // compute continuation point - the continuation point expects the
-  // following registers set up:
-  //
-  // r0: exception
-  // lr: return address/pc that threw exception
-  // rsp: expression stack of caller
-  // rfp: fp of caller
-  // FIXME: There's no point saving LR here because VM calls don't trash it
-  __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                          SharedRuntime::exception_handler_for_return_address),
-                        rthread, lr);
-  __ mov(r1, r0);                               // save exception handler
-  __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
-  // We might be returning to a deopt handler that expects r3 to
-  // contain the exception pc
-  __ mov(r3, lr);
-  // Note that an "issuing PC" is actually the next PC after the call
-  __ br(r1);                                    // jump to exception
-                                                // handler of caller
-}
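
The "Calculate stack limit" and "Restore machine SP" sequences above compute the same bound: initial_sp minus (max_stack + monitor area + 4 slack slots) words, rounded down to 16 bytes. A sketch with assumed example values (max_stack = 10 slots, one monitor of 2 slots, 8-byte words):

#include <cstdint>

// Illustrative form of the sub/andr sequence above.
intptr_t machine_sp_sketch(intptr_t initial_sp, int max_stack, int monitor_slots) {
  int slots = max_stack + monitor_slots + 4;        // 4 slots of slack
  return (initial_sp - (intptr_t)slots * 8) & -16;
  // e.g. 10 + 2 + 4 = 16 slots -> 128 bytes below initial_sp, 16-byte aligned
}
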
-
-
-//
-// JVMTI ForceEarlyReturn support
-//
-address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
-  address entry = __ pc();
-
-  __ restore_bcp();
-  __ restore_locals();
-  __ empty_expression_stack();
-  __ load_earlyret_value(state);
-
-  __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
-  Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());
-
-  // Clear the earlyret state
-  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
-  __ str(zr, cond_addr);
-
-  __ remove_activation(state,
-                       false, /* throw_monitor_exception */
-                       false, /* install_monitor_exception */
-                       true); /* notify_jvmdi */
-  __ ret(lr);
-
-  return entry;
-} // end of ForceEarlyReturn support
-
-
-
-//-----------------------------------------------------------------------------
-// Helper for vtos entry point generation
-
-void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
-                                                         address& bep,
-                                                         address& cep,
-                                                         address& sep,
-                                                         address& aep,
-                                                         address& iep,
-                                                         address& lep,
-                                                         address& fep,
-                                                         address& dep,
-                                                         address& vep) {
-  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
-  Label L;
-  aep = __ pc();  __ push_ptr();  __ b(L);
-  fep = __ pc();  __ push_f();    __ b(L);
-  dep = __ pc();  __ push_d();    __ b(L);
-  lep = __ pc();  __ push_l();    __ b(L);
-  bep = cep = sep =
-  iep = __ pc();  __ push_i();
-  vep = __ pc();
-  __ bind(L);
-  generate_and_dispatch(t);
-}
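
Seen as control flow, the entries generated above all funnel into one dispatch point; the byte, char, and short entries alias the int entry because they share its top-of-stack representation:

// Control-flow sketch of set_vtos_entry_points:
//   aep: push_ptr(); goto L;    fep: push_f(); goto L;
//   dep: push_d();   goto L;    lep: push_l(); goto L;
//   bep = cep = sep = iep: push_i();   // falls through to vep
//   vep: (nothing to push)
//   L:   generate_and_dispatch(t);
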
-
-//-----------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
-  : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-//-----------------------------------------------------------------------------
-
-// Non-product code
-#ifndef PRODUCT
-address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
-  address entry = __ pc();
-
-  __ push(lr);
-  __ push(state);
-  __ push(RegSet::range(r0, r15), sp);
-  __ mov(c_rarg2, r0);  // Pass itos
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
-             c_rarg1, c_rarg2, c_rarg3);
-  __ pop(RegSet::range(r0, r15), sp);
-  __ pop(state);
-  __ pop(lr);
-  __ ret(lr);                                   // return from result handler
-
-  return entry;
-}
-
-void TemplateInterpreterGenerator::count_bytecode() {
-  Register rscratch3 = r0;
-  __ push(rscratch1);
-  __ push(rscratch2);
-  __ push(rscratch3);
-  Label L;
-  __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
-  __ bind(L);
-  __ ldxr(rscratch1, rscratch2);
-  __ add(rscratch1, rscratch1, 1);
-  __ stxr(rscratch3, rscratch1, rscratch2);
-  __ cbnzw(rscratch3, L);
-  __ pop(rscratch3);
-  __ pop(rscratch2);
-  __ pop(rscratch1);
-}
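
count_bytecode hand-rolls an atomic increment with a load-exclusive/store-exclusive retry loop (ldxr/stxr, retrying while the store-conditional fails). For orientation, a self-contained C++ equivalent of what the loop achieves:

#include <atomic>

std::atomic<long> bytecode_counter{0};  // stands in for BytecodeCounter::_counter_value

void count_bytecode_sketch() {
  // stxr fails, and the loop above retries, if another CPU touched the
  // location between the exclusive load and the exclusive store.
  bytecode_counter.fetch_add(1, std::memory_order_relaxed);
}
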
-
-void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }
-
-void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }
-
-
-void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
-  // Call a little run-time stub to avoid blow-up for each bytecode.
-  // The run-time stub saves the right registers, depending on
-  // the tosca in-state for the given template.
-
-  assert(Interpreter::trace_code(t->tos_in()) != NULL,
-         "entry must have been generated");
-  __ bl(Interpreter::trace_code(t->tos_in()));
-  __ reinit_heapbase();
-}
-
-
-void TemplateInterpreterGenerator::stop_interpreter_at() {
-  Label L;
-  __ push(rscratch1);
-  __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
-  __ ldr(rscratch1, Address(rscratch1));
-  __ mov(rscratch2, StopInterpreterAt);
-  __ cmpw(rscratch1, rscratch2);
-  __ br(Assembler::NE, L);
-  __ brk(0);
-  __ bind(L);
-  __ pop(rscratch1);
-}
-
-#ifdef BUILTIN_SIM
-
-#include <sys/mman.h>
-#include <unistd.h>
-
-extern "C" {
-  static int PAGESIZE = getpagesize();
-  int is_mapped_address(u_int64_t address)
-  {
-    address = (address & ~((u_int64_t)PAGESIZE - 1));
-    if (msync((void *)address, PAGESIZE, MS_ASYNC) == 0) {
-      return true;
-    }
-    if (errno != ENOMEM) {
-      return true;
-    }
-    return false;
-  }
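
The msync probe above leans on a POSIX detail: msync fails with ENOMEM when part of the range is not mapped, so success, or failure with any other errno, means the page exists. A hypothetical caller, for illustration:

// Hypothetical use: refuse to decode a pc that isn't backed by mapped memory.
void maybe_decode_sketch(u_int64_t pc) {
  if (!is_mapped_address(pc)) {
    return;   // msync reported ENOMEM: nothing mapped at this page
  }
  // ... safe to read and disassemble at pc ...
}
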
-
-  void bccheck1(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
-  {
-    if (method != 0) {
-      method[0] = '\0';
-    }
-    if (bcidx != 0) {
-      *bcidx = -2;
-    }
-    if (decode != 0) {
-      decode[0] = 0;
-    }
-
-    if (framesize != 0) {
-      *framesize = -1;
-    }
-
-    if (Interpreter::contains((address)pc)) {
-      AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
-      Method* meth;
-      address bcp;
-      if (fp) {
-#define FRAME_SLOT_METHOD 3
-#define FRAME_SLOT_BCP 7
-        meth = (Method*)sim->getMemory()->loadU64(fp - (FRAME_SLOT_METHOD << 3));
-        bcp = (address)sim->getMemory()->loadU64(fp - (FRAME_SLOT_BCP << 3));
-#undef FRAME_SLOT_METHOD
-#undef FRAME_SLOT_BCP
-      } else {
-        meth = (Method*)sim->getCPUState().xreg(RMETHOD, 0);
-        bcp = (address)sim->getCPUState().xreg(RBCP, 0);
-      }
-      if (meth->is_native()) {
-        return;
-      }
-      if (method && meth->is_method()) {
-        ResourceMark rm;
-        method[0] = 'I';
-        method[1] = ' ';
-        meth->name_and_sig_as_C_string(method + 2, 398);
-      }
-      if (bcidx) {
-        if (meth->contains(bcp)) {
-          *bcidx = meth->bci_from(bcp);
-        } else {
-          *bcidx = -2;
-        }
-      }
-      if (decode) {
-        if (!BytecodeTracer::closure()) {
-          BytecodeTracer::set_closure(BytecodeTracer::std_closure());
-        }
-        stringStream str(decode, 400);
-        BytecodeTracer::trace(meth, bcp, &str);
-      }
-    } else {
-      if (method) {
-        CodeBlob *cb = CodeCache::find_blob((address)pc);
-        if (cb != NULL) {
-          if (cb->is_nmethod()) {
-            ResourceMark rm;
-            nmethod* nm = (nmethod*)cb;
-            method[0] = 'C';
-            method[1] = ' ';
-            nm->method()->name_and_sig_as_C_string(method + 2, 398);
-          } else if (cb->is_adapter_blob()) {
-            strcpy(method, "B adapter blob");
-          } else if (cb->is_runtime_stub()) {
-            strcpy(method, "B runtime stub");
-          } else if (cb->is_exception_stub()) {
-            strcpy(method, "B exception stub");
-          } else if (cb->is_deoptimization_stub()) {
-            strcpy(method, "B deoptimization stub");
-          } else if (cb->is_safepoint_stub()) {
-            strcpy(method, "B safepoint stub");
-          } else if (cb->is_uncommon_trap_stub()) {
-            strcpy(method, "B uncommon trap stub");
-          } else if (cb->contains((address)StubRoutines::call_stub())) {
-            strcpy(method, "B call stub");
-          } else {
-            strcpy(method, "B unknown blob : ");
-            strcat(method, cb->name());
-          }
-          if (framesize != NULL) {
-            *framesize = cb->frame_size();
-          }
-        }
-      }
-    }
-  }
-
-
-  JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
-  {
-    bccheck1(pc, fp, method, bcidx, framesize, decode);
-  }
-}
-
-#endif // BUILTIN_SIM
-#endif // !PRODUCT
-#endif // ! CC_INTERP
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -39,7 +39,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -61,26 +60,6 @@
 
 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
-  return i;
-}
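
The removed BasicType_as_index maps Java basic types onto result-handler slots; T_OBJECT and T_ARRAY deliberately share slot 9, since both produce reference results. A small usage sketch (assumes the HotSpot headers are in scope):

// Illustrative: both reference-typed results select the same handler slot.
void reference_results_share_a_slot_sketch() {
  int obj_idx = AbstractInterpreter::BasicType_as_index(T_OBJECT);  // 9
  int arr_idx = AbstractInterpreter::BasicType_as_index(T_ARRAY);   // 9, same slot
  assert(obj_idx == arr_idx, "references share one result handler");
}
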
-
 address AbstractInterpreterGenerator::generate_slow_signature_handler() {
   // Slow_signature handler that respects the PPC C calling conventions.
   //
@@ -579,18 +558,3 @@
 
   return NULL;
 }
-
-void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
-  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
-  // the days we had adapter frames. When we deoptimize a situation where a
-  // compiled caller calls a compiled callee, the caller will have registers it
-  // expects to survive the call to the callee. If we deoptimize the callee the only
-  // way we can restore these registers is to have the oldest interpreter
-  // frame that we create restore these values. That is what this routine
-  // will accomplish.
-
-  // At the moment we have modified c2 to not have any callee save registers
-  // so this problem does not exist and this routine is just a place holder.
-
-  assert(f->is_interpreted_frame(), "must be interpreted");
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,1802 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#ifndef CC_INTERP
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateTable.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+#undef __
+#define __ _masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label)        __ bind(label); BLOCK_COMMENT(#label ":")
+
+//-----------------------------------------------------------------------------
+
+// Actually we should never reach here since we do stack overflow checks before pushing any frame.
+address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
+  address entry = __ pc();
+  __ unimplemented("generate_StackOverflowError_handler");
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
+  address entry = __ pc();
+  __ empty_expression_stack();
+  __ load_const_optimized(R4_ARG2, (address) name);
+  // Index is in R17_tos.
+  __ mr(R5_ARG3, R17_tos);
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
+  return entry;
+}
+
+#if 0
+// Call special ClassCastException constructor taking object to cast
+// and target class as arguments.
+address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
+  address entry = __ pc();
+
+  // Expression stack must be empty before entering the VM if an
+  // exception happened.
+  __ empty_expression_stack();
+
+  // Thread will be loaded to R3_ARG1.
+  // Target class oop is in register R5_ARG3 by convention!
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
+  // Above call must not return here since exception pending.
+  DEBUG_ONLY(__ should_not_reach_here();)
+  return entry;
+}
+#endif
+
+address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
+  address entry = __ pc();
+  // Expression stack must be empty before entering the VM if an
+  // exception happened.
+  __ empty_expression_stack();
+
+  // Load exception object.
+  // Thread will be loaded to R3_ARG1.
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
+#ifdef ASSERT
+  // Above call must not return here since exception pending.
+  __ should_not_reach_here();
+#endif
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
+  address entry = __ pc();
+  //__ untested("generate_exception_handler_common");
+  Register Rexception = R17_tos;
+
+  // Expression stack must be empty before entering the VM if an exception happened.
+  __ empty_expression_stack();
+
+  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
+  if (pass_oop) {
+    __ mr(R5_ARG3, Rexception);
+    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
+  } else {
+    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
+    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
+  }
+
+  // Throw exception.
+  __ mr(R3_ARG1, Rexception);
+  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
+  __ mtctr(R11_scratch1);
+  __ bctr();
+
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
+  address entry = __ pc();
+  __ unimplemented("generate_continuation_for");
+  return entry;
+}
+
+// This entry is returned to when a call returns to the interpreter.
+// When we arrive here, we expect that the callee stack frame is already popped.
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+  address entry = __ pc();
+
+  // Move the value out of the return register back to the TOS cache of current frame.
+  switch (state) {
+    case ltos:
+    case btos:
+    case ctos:
+    case stos:
+    case atos:
+    case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
+    case ftos:
+    case dtos: __ fmr(F15_ftos, F1_RET); break; // F1_RET -> TOS cache
+    case vtos: break;                           // Nothing to do, this was a void return.
+    default  : ShouldNotReachHere();
+  }
+
+  __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
+  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
+  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+
+  // Compiled code destroys templateTableBase, reload.
+  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
+
+  if (state == atos) {
+    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
+  }
+
+  const Register cache = R11_scratch1;
+  const Register size  = R12_scratch2;
+  __ get_cache_and_index_at_bcp(cache, 1, index_size);
+
+  // Get least significant byte of 64 bit value:
+#if defined(VM_LITTLE_ENDIAN)
+  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
+#else
+  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
+#endif
+  __ sldi(size, size, Interpreter::logStackElementSize);
+  __ add(R15_esp, R15_esp, size);
+  __ dispatch_next(state, step);
+  return entry;
+}
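
The endian-guarded lbz above extracts the least significant byte of the 64-bit flags word; its byte offset is 0 on little-endian and 7 on big-endian PPC. The same selection, self-contained in C++:

#include <cstdint>
#include <cstring>

// Illustrative: read the least significant byte of a 64-bit field.
unsigned char flags_lsb_sketch(uint64_t flags) {
  unsigned char bytes[8];
  std::memcpy(bytes, &flags, sizeof(bytes));
#if defined(VM_LITTLE_ENDIAN)
  return bytes[0];   // LSB at byte offset 0
#else
  return bytes[7];   // LSB at byte offset 7
#endif
}
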
+
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
+  address entry = __ pc();
+  // If state != vtos, we're returning from a native method, which put its result
+  // into the result register. So move the value out of the return register back
+  // to the TOS cache of the current frame.
+
+  switch (state) {
+    case ltos:
+    case btos:
+    case ctos:
+    case stos:
+    case atos:
+    case itos: __ mr(R17_tos, R3_RET); break;   // GR_RET -> TOS cache
+    case ftos:
+    case dtos: __ fmr(F15_ftos, F1_RET); break; // F1_RET -> TOS cache
+    case vtos: break;                           // Nothing to do, this was a void return.
+    default  : ShouldNotReachHere();
+  }
+
+  // Load LcpoolCache @@@ should be already set!
+  __ get_constant_pool_cache(R27_constPoolCache);
+
+  // Handle a pending exception, fall through if none.
+  __ check_and_forward_exception(R11_scratch1, R12_scratch2);
+
+  // Start executing bytecodes.
+  __ dispatch_next(state, step);
+
+  return entry;
+}
+
+// A result handler converts the native result into java format.
+// Use the shared code between c++ and template interpreter.
+address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
+  return AbstractInterpreterGenerator::generate_result_handler_for(type);
+}
+
+address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
+  address entry = __ pc();
+
+  __ push(state);
+  __ call_VM(noreg, runtime_entry);
+  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
+
+  return entry;
+}
+
+// Helpers for commoning out cases in the various type of method entries.
+
+// Increment invocation count & check for overflow.
+//
+// Note: checking for negative value instead of overflow
+//       so we have a 'sticky' overflow test.
+//
+void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
+  // Note: In tiered we increment either the counters in the method or in the MDO, depending on whether we're profiling.
+  Register Rscratch1   = R11_scratch1;
+  Register Rscratch2   = R12_scratch2;
+  Register R3_counters = R3_ARG1;
+  Label done;
+
+  if (TieredCompilation) {
+    const int increment = InvocationCounter::count_increment;
+    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+    Label no_mdo;
+    if (ProfileInterpreter) {
+      const Register Rmdo = Rscratch1;
+      // If no method data exists, go to profile_continue.
+      __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
+      __ cmpdi(CCR0, Rmdo, 0);
+      __ beq(CCR0, no_mdo);
+
+      // Increment backedge counter in the MDO.
+      const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
+      __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
+      __ addi(Rscratch2, Rscratch2, increment);
+      __ stw(Rscratch2, mdo_bc_offs, Rmdo);
+      __ load_const_optimized(Rscratch1, mask, R0);
+      __ and_(Rscratch1, Rscratch2, Rscratch1);
+      __ bne(CCR0, done);
+      __ b(*overflow);
+    }
+
+    // Increment counter in MethodCounters*.
+    const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
+    __ bind(no_mdo);
+    __ get_method_counters(R19_method, R3_counters, done);
+    __ lwz(Rscratch2, mo_bc_offs, R3_counters);
+    __ addi(Rscratch2, Rscratch2, increment);
+    __ stw(Rscratch2, mo_bc_offs, R3_counters);
+    __ load_const_optimized(Rscratch1, mask, R0);
+    __ and_(Rscratch1, Rscratch2, Rscratch1);
+    __ beq(CCR0, *overflow);
+
+    __ bind(done);
+
+  } else {
+
+    // Update standard invocation counters.
+    Register Rsum_ivc_bec = R4_ARG2;
+    __ get_method_counters(R19_method, R3_counters, done);
+    __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
+    // Increment interpreter invocation counter.
+    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
+      __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
+      __ addi(R12_scratch2, R12_scratch2, 1);
+      __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
+    }
+    // Check if we must create a method data obj.
+    if (ProfileInterpreter && profile_method != NULL) {
+      const Register profile_limit = Rscratch1;
+      int pl_offs = __ load_const_optimized(profile_limit, &InvocationCounter::InterpreterProfileLimit, R0, true);
+      __ lwz(profile_limit, pl_offs, profile_limit);
+      // Test to see if we should create a method data oop.
+      __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
+      __ blt(CCR0, *profile_method_continue);
+      // If no method data exists, go to profile_method.
+      __ test_method_data_pointer(*profile_method);
+    }
+    // Finally check for counter overflow.
+    if (overflow) {
+      const Register invocation_limit = Rscratch1;
+      int il_offs = __ load_const_optimized(invocation_limit, &InvocationCounter::InterpreterInvocationLimit, R0, true);
+      __ lwz(invocation_limit, il_offs, invocation_limit);
+      assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit), "unexpected field size");
+      __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
+      __ bge(CCR0, *overflow);
+    }
+
+    __ bind(done);
+  }
+}
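
In the tiered path above, the overflow test is the masked compare after the add: the overflow branch is taken whenever the low Tier0InvokeNotifyFreqLog bits of the shifted counter field wrap to zero. In scalar form, with the mask passed in:

// Illustrative scalar form of the lwz/addi/stw/and_ sequence above.
bool increment_and_check_sketch(int* counter, int increment, int mask) {
  *counter += increment;
  return (*counter & mask) == 0;   // true -> take the overflow path
}
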
+
+// Generate code to initiate compilation on invocation counter overflow.
+void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
+  // Generate code to initiate compilation on the counter overflow.
+
+  // InterpreterRuntime::frequency_counter_overflow takes one argument,
+  // which indicates whether the counter overflow occurs at a backward branch (NULL bcp).
+  // We pass zero in.
+  // The call returns the address of the verified entry point for the method or NULL
+  // if the compilation did not complete (either went background or bailed out).
+  //
+  // Unlike the C++ interpreter above: Check exceptions!
+  // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
+  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.
+
+  __ li(R4_ARG2, 0);
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
+
+  // Returns verified_entry_point or NULL.
+  // We ignore it in any case.
+  __ b(continue_entry);
+}
+
+void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
+  assert_different_registers(Rmem_frame_size, Rscratch1);
+  __ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
+}
+
+void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
+  __ unlock_object(R26_monitor, check_exceptions);
+}
+
+// Lock the current method, interpreter register window must be set up!
+void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
+  const Register Robj_to_lock = Rscratch2;
+
+  {
+    if (!flags_preloaded) {
+      __ lwz(Rflags, method_(access_flags));
+    }
+
+#ifdef ASSERT
+    // Check if the method needs synchronization.
+    {
+      Label Lok;
+      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
+      __ btrue(CCR0, Lok);
+      __ stop("method doesn't need synchronization");
+      __ bind(Lok);
+    }
+#endif // ASSERT
+  }
+
+  // Get synchronization object to Rscratch2.
+  {
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    Label Lstatic;
+    Label Ldone;
+
+    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
+    __ btrue(CCR0, Lstatic);
+
+    // Non-static case: load receiver obj from stack and we're done.
+    __ ld(Robj_to_lock, R18_locals);
+    __ b(Ldone);
+
+    __ bind(Lstatic); // Static case: Lock the java mirror
+    __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
+    __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
+    __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
+    __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);
+
+    __ bind(Ldone);
+    __ verify_oop(Robj_to_lock);
+  }
+
+  // Got the oop to lock => execute!
+  __ add_monitor_to_stack(true, Rscratch1, R0);
+
+  __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
+  __ lock_object(R26_monitor, Robj_to_lock);
+}
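
The static case above reaches the class mirror through a four-load chain: Method* to ConstMethod* to ConstantPool* to the pool-holder Klass*, then to its java.lang.Class mirror. A sketch in terms of HotSpot's C++ accessors (the assembly works on the raw field offsets instead):

// Sketch of the Lstatic load chain, using the accessor equivalents.
oop mirror_of_sketch(Method* m) {
  ConstMethod*  cm     = m->constMethod();   // Method::const_offset()
  ConstantPool* cp     = cm->constants();    // ConstMethod::constants_offset()
  Klass*        holder = cp->pool_holder();  // pool_holder_offset_in_bytes()
  return holder->java_mirror();              // Klass::java_mirror_offset()
}
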
+
+// Generate a fixed interpreter frame for pure interpreter
+// and I2N native transition frames.
+//
+// Before (stack grows downwards):
+//
+//         |  ...         |
+//         |------------- |
+//         |  java arg0   |
+//         |  ...         |
+//         |  java argn   |
+//         |              |   <-   R15_esp
+//         |              |
+//         |--------------|
+//         | abi_112      |
+//         |              |   <-   R1_SP
+//         |==============|
+//
+//
+// After:
+//
+//         |  ...         |
+//         |  java arg0   |<-   R18_locals
+//         |  ...         |
+//         |  java argn   |
+//         |--------------|
+//         |              |
+//         |  java locals |
+//         |              |
+//         |--------------|
+//         |  abi_48      |
+//         |==============|
+//         |              |
+//         |   istate     |
+//         |              |
+//         |--------------|
+//         |   monitor    |<-   R26_monitor
+//         |--------------|
+//         |              |<-   R15_esp
+//         | expression   |
+//         | stack        |
+//         |              |
+//         |--------------|
+//         |              |
+//         | abi_112      |<-   R1_SP
+//         |==============|
+//
+// The topmost frame needs an abi space of 112 bytes. This space is needed
+// because we call into C, and the C function may spill its arguments to the
+// caller frame. When we call into Java, we don't need these spill slots. In
+// order to save space on the stack, we resize the caller frame. However, the
+// java locals reside in the caller frame, so it has to be enlarged. The frame_size for the
+// current frame was calculated based on max_stack as size for the expression
+// stack. At the call, just a part of the expression stack might be used.
+// We don't want to waste this space and cut the frame back accordingly.
+// The resulting amount for resizing is calculated as follows:
+// resize =   (number_of_locals - number_of_arguments) * slot_size
+//          + (R1_SP - R15_esp) + 48
+//
+// The size for the callee frame is calculated:
+// framesize = 112 + max_stack + monitor + state_size
+//
+// maxstack:   Max number of slots on the expression stack, loaded from the method.
+// monitor:    We statically reserve room for one monitor object.
+// state_size: We save the current state of the interpreter to this area.
+//
+void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
+  Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
+           top_frame_size      = R7_ARG5,
+           Rconst_method       = R8_ARG6;
+
+  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);
+
+  __ ld(Rconst_method, method_(const));
+  __ lhz(Rsize_of_parameters /* number of params */,
+         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
+  if (native_call) {
+    // If we're calling a native method, we reserve space for the worst-case signature
+    // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
+    // We add two slots to the parameter_count, one for the jni
+    // environment and one for a possible native mirror.
+    Label skip_native_calculate_max_stack;
+    __ addi(top_frame_size, Rsize_of_parameters, 2);
+    __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
+    __ bge(CCR0, skip_native_calculate_max_stack);
+    __ li(top_frame_size, Argument::n_register_parameters);
+    __ bind(skip_native_calculate_max_stack);
+    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
+    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
+    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
+    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
+  } else {
+    __ lhz(Rsize_of_locals /* number of params */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
+    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
+    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
+    __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
+    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
+    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
+    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
+    __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
+  }
+
+  // Compute top frame size.
+  __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);
+
+  // Cut back area between esp and max_stack.
+  __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);
+
+  __ round_to(top_frame_size, frame::alignment_in_bytes);
+  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
+  // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
+  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.
+
+  {
+    // --------------------------------------------------------------------------
+    // Stack overflow check
+
+    Label cont;
+    __ add(R11_scratch1, parent_frame_resize, top_frame_size);
+    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
+  }
+
+  // Set up interpreter state registers.
+
+  __ add(R18_locals, R15_esp, Rsize_of_parameters);
+  __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
+  __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);
+
+  // Set method data pointer.
+  if (ProfileInterpreter) {
+    Label zero_continue;
+    __ ld(R28_mdx, method_(method_data));
+    __ cmpdi(CCR0, R28_mdx, 0);
+    __ beq(CCR0, zero_continue);
+    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
+    __ bind(zero_continue);
+  }
+
+  if (native_call) {
+    __ li(R14_bcp, 0); // Must initialize.
+  } else {
+    __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
+  }
+
+  // Resize parent frame.
+  __ mflr(R12_scratch2);
+  __ neg(parent_frame_resize, parent_frame_resize);
+  __ resize_frame(parent_frame_resize, R11_scratch1);
+  __ std(R12_scratch2, _abi(lr), R1_SP);
+
+  __ addi(R26_monitor, R1_SP, - frame::ijava_state_size);
+  __ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
+
+  // Store values.
+  // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
+  // in InterpreterMacroAssembler::call_from_interpreter.
+  __ std(R19_method, _ijava_state_neg(method), R1_SP);
+  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
+  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
+  __ std(R18_locals, _ijava_state_neg(locals), R1_SP);
+
+  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
+  // be found in the frame after save_interpreter_state is done. This is always true
+  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
+  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
+  // (Enhanced Stack Trace).
+  // The signal handler does not save the interpreter state into the frame.
+  __ li(R0, 0);
+#ifdef ASSERT
+  // Fill remaining slots with constants.
+  __ load_const_optimized(R11_scratch1, 0x5afe);
+  __ load_const_optimized(R12_scratch2, 0xdead);
+#endif
+  // We have to initialize some frame slots for native calls (accessed by GC).
+  if (native_call) {
+    __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
+    __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
+    if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
+  }
+#ifdef ASSERT
+  else {
+    __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
+    __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
+    __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
+  }
+  __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
+  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
+  __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
+  __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
+#endif
+  __ subf(R12_scratch2, top_frame_size, R1_SP);
+  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
+  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);
+
+  // Push top frame.
+  __ push_frame(top_frame_size, R11_scratch1);
+}
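
Worked once with assumed numbers (2 parameters, 5 locals, 8-byte slots, R1_SP - R15_esp = 40 bytes), the resize formula from the comment block above gives:

// resize = (number_of_locals - number_of_arguments) * slot_size
//          + (R1_SP - R15_esp) + 48
//        = (5 - 2) * 8 + 40 + 48
//        = 112 bytes, then rounded up to frame::alignment_in_bytes
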
+
+// End of helpers
+
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+  if (!TemplateInterpreter::math_entry_available(kind)) {
+    NOT_PRODUCT(__ should_not_reach_here();)
+    return NULL;
+  }
+
+  address entry = __ pc();
+
+  __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);
+
+  // Pop c2i arguments (if any) off when we return.
+#ifdef ASSERT
+  __ ld(R9_ARG7, 0, R1_SP);
+  __ ld(R10_ARG8, 0, R21_sender_SP);
+  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
+  __ asm_assert_eq("backlink", 0x545);
+#endif // ASSERT
+  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+
+  if (kind == Interpreter::java_lang_math_sqrt) {
+    __ fsqrt(F1_RET, F1_RET);
+  } else if (kind == Interpreter::java_lang_math_abs) {
+    __ fabs(F1_RET, F1_RET);
+  } else {
+    ShouldNotReachHere();
+  }
+
+  // And we're done.
+  __ blr();
+
+  __ flush();
+
+  return entry;
+}
+
+// Interpreter stub for calling a native method. (asm interpreter)
+// This sets up a somewhat different looking stack for calling the
+// native method than the typical interpreter frame setup.
+//
+// On entry:
+//   R19_method    - method
+//   R16_thread    - JavaThread*
+//   R15_esp       - intptr_t* sender tos
+//
+//   abstract stack (grows up)
+//     [  IJava (caller of JNI callee)  ]  <-- ASP
+//        ...
+address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
+
+  address entry = __ pc();
+
+  const bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // -----------------------------------------------------------------------------
+  // Allocate a new frame that represents the native callee (i2n frame).
+  // This is not a full-blown interpreter frame, but in particular, the
+  // following registers are valid after this:
+  // - R19_method
+  // - R18_locals (points to start of arguments to native function)
+  //
+  //   abstract stack (grows up)
+  //     [  IJava (caller of JNI callee)  ]  <-- ASP
+  //        ...
+
+  const Register signature_handler_fd = R11_scratch1;
+  const Register pending_exception    = R0;
+  const Register result_handler_addr  = R31;
+  const Register native_method_fd     = R11_scratch1;
+  const Register access_flags         = R22_tmp2;
+  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
+  const Register sync_state           = R12_scratch2;
+  const Register sync_state_addr      = sync_state;   // Address is dead after use.
+  const Register suspend_flags        = R11_scratch1;
+
+  //=============================================================================
+  // Allocate new frame and initialize interpreter state.
+
+  Label exception_return;
+  Label exception_return_sync_check;
+  Label stack_overflow_return;
+
+  // Generate new interpreter state and jump to stack_overflow_return in case of
+  // a stack overflow.
+  //generate_compute_interpreter_state(stack_overflow_return);
+
+  Register size_of_parameters = R22_tmp2;
+
+  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);
+
+  //=============================================================================
+  // Increment invocation counter. On overflow, entry to JNI method
+  // will be compiled.
+  Label invocation_counter_overflow, continue_after_compile;
+  if (inc_counter) {
+    if (synchronized) {
+      // Since at this point in the method invocation the exception handler
+      // would try to exit the monitor of a synchronized method which hasn't
+      // been entered yet, we set the thread-local variable
+      // _do_not_unlock_if_synchronized to true. If any exception is thrown
+      // by the runtime, the exception handling code, i.e.
+      // unlock_if_synchronized_method, will check this thread-local flag.
+      // The flag forces an unwind in the topmost interpreter frame without
+      // performing an unlock while doing so (see the sketch below).
+      __ li(R0, 1);
+      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+    }
+    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+
+    BIND(continue_after_compile);
+    // Reset the _do_not_unlock_if_synchronized flag.
+    if (synchronized) {
+      __ li(R0, 0);
+      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+    }
+  }
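+
+  // A minimal C++-level sketch of the protocol above (illustrative only, not
+  // generated code; the accessor names are assumptions in the style of
+  // JavaThread's accessors):
+  //
+  //   if (synchronized) thread->set_do_not_unlock_if_synchronized(true);
+  //   increment_invocation_counter();  // may branch to the overflow handler
+  //   if (synchronized) thread->set_do_not_unlock_if_synchronized(false);
+  //
+  // Any exception raised while the flag is set unwinds the topmost
+  // interpreter frame without attempting to unlock the not-yet-locked monitor.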
+
+  // access_flags = method->access_flags();
+  // Load access flags.
+  assert(access_flags->is_nonvolatile(),
+         "access_flags must be in a non-volatile register");
+  // Type check.
+  assert(4 == sizeof(AccessFlags), "unexpected field size");
+  __ lwz(access_flags, method_(access_flags));
+
+  // We don't want to reload R19_method and access_flags after calls
+  // to some helper functions.
+  assert(R19_method->is_nonvolatile(),
+         "R19_method must be a non-volatile register");
+
+  // Check for synchronized methods. Must happen AFTER invocation counter
+  // check, so method is not locked if counter overflows.
+
+  if (synchronized) {
+    lock_method(access_flags, R11_scratch1, R12_scratch2, true);
+
+    // Update monitor in state.
+    __ ld(R11_scratch1, 0, R1_SP);
+    __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
+  }
+
+  // jvmti/jvmpi support
+  __ notify_method_entry();
+
+  //=============================================================================
+  // Get and call the signature handler.
+
+  __ ld(signature_handler_fd, method_(signature_handler));
+  Label call_signature_handler;
+
+  __ cmpdi(CCR0, signature_handler_fd, 0);
+  __ bne(CCR0, call_signature_handler);
+
+  // Method has never been called. Either generate a specialized
+  // handler or point to the slow one.
+  //
+  // Pass parameter 'false' to avoid exception check in call_VM.
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);
+
+  // Check for an exception while looking up the target method. If we
+  // incurred one, bail.
+  __ ld(pending_exception, thread_(pending_exception));
+  __ cmpdi(CCR0, pending_exception, 0);
+  __ bne(CCR0, exception_return_sync_check); // Has pending exception.
+
+  // Reload signature handler; it may have been created/assigned in the meantime.
+  __ ld(signature_handler_fd, method_(signature_handler));
+  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).
+
+  BIND(call_signature_handler);
+
+  // Before we call the signature handler we push a new frame to
+  // protect the interpreter frame volatile registers when we return
+  // from jni but before we can get back to Java.
+
+  // First set the frame anchor while the SP/FP registers are
+  // convenient and the slow signature handler can use this same frame
+  // anchor.
+
+  // We have a TOP_IJAVA_FRAME here, which belongs to us.
+  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
+
+  // Now the interpreter frame (and its call chain) have been
+  // invalidated and flushed. We are now protected against eager
+  // being enabled in native code. Even if it goes eager the
+  // registers will be reloaded as clean and we will invalidate after
+  // the call so no spurious flush should be possible.
+
+  // Call signature handler and pass locals address.
+  //
+  // Our signature handlers copy required arguments to the C stack
+  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
+  __ mr(R3_ARG1, R18_locals);
+#if !defined(ABI_ELFv2)
+  __ ld(signature_handler_fd, 0, signature_handler_fd);
+#endif
+
+  __ call_stub(signature_handler_fd);
+
+  // Remove the register parameter varargs slots we allocated in
+  // compute_interpreter_state. SP+16 ends up pointing to the ABI
+  // outgoing argument area.
+  //
+  // Not needed on PPC64.
+  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);
+
+  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
+  // Save across call to native method.
+  __ mr(result_handler_addr, R3_RET);
+
+  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.
+
+  // Set up fixed parameters and call the native method.
+  // If the method is static, get mirror into R4_ARG2.
+  {
+    Label method_is_not_static;
+    // access_flags is non-volatile and still valid; no need to restore it.
+
+    // Test the static bit of the access flags.
+    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
+    __ bfalse(CCR0, method_is_not_static);
+
+    // constants = method->constants();
+    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
+    __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
+    // pool_holder = method->constants()->pool_holder();
+    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
+          R11_scratch1/*constants*/);
+
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+
+    // mirror = pool_holder->klass_part()->java_mirror();
+    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
+    // state->_native_mirror = mirror;
+
+    __ ld(R11_scratch1, 0, R1_SP);
+    __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
+    // R4_ARG2 = &state->_oop_temp;
+    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
+    BIND(method_is_not_static);
+  }
+
+  // At this point, arguments have been copied off the stack into
+  // their JNI positions. Oops are boxed in-place on the stack, with
+  // handles copied to arguments. The result handler address is in a
+  // register.
+
+  // Pass JNIEnv address as first parameter.
+  __ addir(R3_ARG1, thread_(jni_environment));
+
+  // Load the native_method entry before we change the thread state.
+  __ ld(native_method_fd, method_(native_function));
+
+  //=============================================================================
+  // Transition from _thread_in_Java to _thread_in_native. As soon as
+  // we make this change the safepoint code needs to be certain that
+  // the last Java frame we established is good. The pc in that frame
+  // just needs to be near here, not an actual return address.
+
+  // We use release_store_fence to update values like the thread state, where
+  // we don't want the current thread to continue until all our prior memory
+  // accesses (including the new thread state) are visible to other threads.
+  __ li(R0, _thread_in_native);
+  __ release();
+
+  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
+  __ stw(R0, thread_(thread_state));
+
+  if (UseMembar) {
+    __ fence();
+  }
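+
+  // Illustrative C++ analogue of the transition above (a sketch assuming an
+  // OrderAccess-style API, not the code actually emitted):
+  //
+  //   OrderAccess::release();                       // drain prior stores
+  //   thread->set_thread_state(_thread_in_native);  // plain 32-bit store
+  //   if (UseMembar) { OrderAccess::fence(); }      // full two-way barrier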
+
+  //=============================================================================
+  // Call the native method. Argument registers must not have been
+  // overwritten since "__ call_stub(signature_handler);" (except for
+  // ARG1 and ARG2 for static methods).
+  __ call_c(native_method_fd);
+
+  __ li(R0, 0);
+  __ ld(R11_scratch1, 0, R1_SP);
+  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
+  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
+  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset
+
+  // Note: C++ interpreter needs the following here:
+  // The frame_manager_lr field, which we use for setting the last
+  // java frame, gets overwritten by the signature handler. Restore
+  // it now.
+  //__ get_PC_trash_LR(R11_scratch1);
+  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+
+  // Because of GC R19_method may no longer be valid.
+
+  // Block, if necessary, before resuming in _thread_in_Java state.
+  // In order for GC to work, don't clear the last_Java_sp until after
+  // blocking.
+
+  //=============================================================================
+  // Switch thread to "native transition" state before reading the
+  // synchronization state. This additional state is necessary
+  // because reading and testing the synchronization state is not
+  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
+  // in _thread_in_native state, loads _not_synchronized and is
+  // preempted. VM thread changes sync state to synchronizing and
+  // suspends threads for GC. Thread A is resumed to finish this
+  // native method, but doesn't block here since it didn't see any
+  // synchronization in progress, and escapes.
+
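+  // In pseudo-C++, the transition-and-check protocol emitted below is roughly
+  // (a sketch; the field and helper names approximate the VM's, and
+  // fence_or_serialize_page_write() is a hypothetical placeholder for the
+  // UseMembar handling below):
+  //
+  //   release_store(&thread->_thread_state, _thread_in_native_trans);
+  //   fence_or_serialize_page_write();
+  //   if (SafepointSynchronize::_state != SafepointSynchronize::_not_synchronized ||
+  //       thread->_suspend_flags != 0) {
+  //     JavaThread::check_special_condition_for_native_trans(thread);
+  //   }
+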
+  // We use release_store_fence to update values like the thread state, where
+  // we don't want the current thread to continue until all our prior memory
+  // accesses (including the new thread state) are visible to other threads.
+  __ li(R0/*thread_state*/, _thread_in_native_trans);
+  __ release();
+  __ stw(R0/*thread_state*/, thread_(thread_state));
+  if (UseMembar) {
+    __ fence();
+  }
+  // Write serialization page so that the VM thread can do a pseudo remote
+  // membar. We use the current thread pointer to calculate a thread
+  // specific offset to write to within the page. This minimizes bus
+  // traffic due to cache line collision.
+  else {
+    __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
+  }
+
+  // Now, before we return to Java, we must check for a current safepoint
+  // (a new safepoint cannot start since we entered native_trans).
+  // We must check here because a current safepoint could be modifying
+  // the caller's registers right this moment.
+
+  // Acquire isn't strictly necessary here because of the fence, but
+  // sync_state is declared to be volatile, so we do it anyway
+  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
+  int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
+
+  // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
+  __ lwz(sync_state, sync_state_offs, sync_state_addr);
+
+  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
+  __ lwz(suspend_flags, thread_(suspend_flags));
+
+  Label sync_check_done;
+  Label do_safepoint;
+  // No synchronization in progress nor yet synchronized.
+  __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
+  // Not suspended.
+  __ cmpwi(CCR1, suspend_flags, 0);
+
+  __ bne(CCR0, do_safepoint);
+  __ beq(CCR1, sync_check_done);
+  __ bind(do_safepoint);
+  __ isync();
+  // Block. We do the call directly and leave the current
+  // last_Java_frame setup undisturbed. We must save any possible
+  // native result across the call. No oop is present.
+
+  __ mr(R3_ARG1, R16_thread);
+#if defined(ABI_ELFv2)
+  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+            relocInfo::none);
+#else
+  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
+            relocInfo::none);
+#endif
+
+  __ bind(sync_check_done);
+
+  //=============================================================================
+  // <<<<<< Back in Interpreter Frame >>>>>
+
+  // We are in thread_in_native_trans here and back in the normal
+  // interpreter frame. We don't have to do anything special about
+  // safepoints and we can switch to Java mode anytime we are ready.
+
+  // Note: frame::interpreter_frame_result has a dependency on how the
+  // method result is saved across the call to post_method_exit. For
+  // native methods it assumes that the non-FPU/non-void result is
+  // saved in _native_lresult and a FPU result in _native_fresult. If
+  // this changes then the interpreter_frame_result implementation
+  // will need to be updated too.
+
+  // On PPC64, we have stored the result directly after the native call.
+
+  //=============================================================================
+  // Back in Java
+
+  // We use release_store_fence to update values like the thread state, where
+  // we don't want the current thread to continue until all our prior memory
+  // accesses (including the new thread state) are visible to other threads.
+  __ li(R0/*thread_state*/, _thread_in_Java);
+  __ release();
+  __ stw(R0/*thread_state*/, thread_(thread_state));
+  if (UseMembar) {
+    __ fence();
+  }
+
+  __ reset_last_Java_frame();
+
+  // Jvmdi/jvmpi support. Whether we've got an exception pending or
+  // not, and whether unlocking throws an exception or not, we notify
+  // on native method exit. If we do have an exception, we'll end up
+  // in the caller's context to handle it, so if we don't do the
+  // notify here, we'll drop it on the floor.
+  __ notify_method_exit(true/*native method*/,
+                        ilgl /*illegal state (not used for native methods)*/,
+                        InterpreterMacroAssembler::NotifyJVMTI,
+                        false /*check_exceptions*/);
+
+  //=============================================================================
+  // Handle exceptions
+
+  if (synchronized) {
+    // Don't check for exceptions since we're still in the i2n frame. Do that
+    // manually afterwards.
+    unlock_method(false);
+  }
+
+  // Reset active handles after returning from native.
+  // thread->active_handles()->clear();
+  __ ld(active_handles, thread_(active_handles));
+  // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
+  __ li(R0, 0);
+  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
+
+  Label exception_return_sync_check_already_unlocked;
+  __ ld(R0/*pending_exception*/, thread_(pending_exception));
+  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
+  __ bne(CCR0, exception_return_sync_check_already_unlocked);
+
+  //-----------------------------------------------------------------------------
+  // No exception pending.
+
+  // Move native method result back into proper registers and return.
+  // Invoke result handler (may unbox/promote).
+  __ ld(R11_scratch1, 0, R1_SP);
+  __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
+  __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
+  __ call_stub(result_handler_addr);
+
+  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
+
+  // Must use the return pc which was loaded from the caller's frame
+  // as the VM uses return-pc-patching for deoptimization.
+  __ mtlr(R0);
+  __ blr();
+
+  //-----------------------------------------------------------------------------
+  // An exception is pending. We call into the runtime only if the
+  // caller was not interpreted. If it was interpreted the
+  // interpreter will do the correct thing. If it isn't interpreted
+  // (call stub/compiled code) we will change our return and continue.
+
+  BIND(exception_return_sync_check);
+
+  if (synchronized) {
+    // Don't check for exceptions since we're still in the i2n frame. Do that
+    // manually afterwards.
+    unlock_method(false);
+  }
+  BIND(exception_return_sync_check_already_unlocked);
+
+  const Register return_pc = R31;
+
+  __ ld(return_pc, 0, R1_SP);
+  __ ld(return_pc, _abi(lr), return_pc);
+
+  // Get the address of the exception handler.
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
+                  R16_thread,
+                  return_pc /* return pc */);
+  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);
+
+  // Load the PC of the exception handler into LR.
+  __ mtlr(R3_RET);
+
+  // Load exception into R3_ARG1 and clear pending exception in thread.
+  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
+  __ li(R4_ARG2, 0);
+  __ std(R4_ARG2, thread_(pending_exception));
+
+  // Load the original return pc into R4_ARG2.
+  __ mr(R4_ARG2/*issuing_pc*/, return_pc);
+
+  // Return to exception handler.
+  __ blr();
+
+  //=============================================================================
+  // Counter overflow.
+
+  if (inc_counter) {
+    // Handle invocation counter overflow.
+    __ bind(invocation_counter_overflow);
+
+    generate_counter_overflow(continue_after_compile);
+  }
+
+  return entry;
+}
+
+// Generic interpreted method entry to (asm) interpreter.
+//
+address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
+  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+  address entry = __ pc();
+  // Generate the code to allocate the interpreter stack frame.
+  Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
+           Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.
+
+  generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
+
+  // --------------------------------------------------------------------------
+  // Zero out non-parameter locals.
+  // Note: *Always* zero out non-parameter locals as Sparc does. It's not
+  // worth asking the flag; just do it.
+  Register Rslot_addr = R6_ARG4,
+           Rnum       = R7_ARG5;
+  Label Lno_locals, Lzero_loop;
+
+  // Set up the zeroing loop.
+  __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
+  __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
+  __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
+  __ beq(CCR0, Lno_locals);
+  __ li(R0, 0);
+  __ mtctr(Rnum);
+
+  // The zero locals loop.
+  __ bind(Lzero_loop);
+  __ std(R0, 0, Rslot_addr);
+  __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
+  __ bdnz(Lzero_loop);
+
+  __ bind(Lno_locals);
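+
+  // In C++ terms, the loop above is simply (illustrative sketch; counts are
+  // in stack slots):
+  //
+  //   for (int i = num_locals - num_parameters; i > 0; i--) {
+  //     *slot_addr-- = 0;  // slots are filled towards lower addresses
+  //   }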
+
+  // --------------------------------------------------------------------------
+  // Counter increment and overflow check.
+  Label invocation_counter_overflow,
+        profile_method,
+        profile_method_continue;
+  if (inc_counter || ProfileInterpreter) {
+
+    Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
+    if (synchronized) {
+      // Since at this point in the method invocation the exception handler
+      // would try to exit the monitor of a synchronized method which hasn't
+      // been entered yet, we set the thread-local variable
+      // _do_not_unlock_if_synchronized to true. If any exception is thrown
+      // by the runtime, the exception handling code, i.e.
+      // unlock_if_synchronized_method, will check this thread-local flag.
+      // The flag forces an unwind in the topmost interpreter frame without
+      // performing an unlock while doing so.
+      __ li(R0, 1);
+      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+    }
+
+    // Argument and return type profiling.
+    __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);
+
+    // Increment invocation counter and check for overflow.
+    if (inc_counter) {
+      generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
+    }
+
+    __ bind(profile_method_continue);
+
+    // Reset the _do_not_unlock_if_synchronized flag.
+    if (synchronized) {
+      __ li(R0, 0);
+      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+    }
+  }
+
+  // --------------------------------------------------------------------------
+  // Locking of synchronized methods. Must happen AFTER invocation_counter
+  // check and stack overflow check, so the method is not locked if the counter overflows.
+  if (synchronized) {
+    lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
+  }
+#ifdef ASSERT
+  else {
+    Label Lok;
+    __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
+    __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
+    __ asm_assert_eq("method needs synchronization", 0x8521);
+    __ bind(Lok);
+  }
+#endif // ASSERT
+
+  __ verify_thread();
+
+  // --------------------------------------------------------------------------
+  // JVMTI support
+  __ notify_method_entry();
+
+  // --------------------------------------------------------------------------
+  // Start executing instructions.
+  __ dispatch_next(vtos);
+
+  // --------------------------------------------------------------------------
+  // Out of line counter overflow and MDO creation code.
+  if (ProfileInterpreter) {
+    // We have decided to profile this method in the interpreter.
+    __ bind(profile_method);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+    __ set_method_data_pointer_for_bcp();
+    __ b(profile_method_continue);
+  }
+
+  if (inc_counter) {
+    // Handle invocation counter overflow.
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(profile_method_continue);
+  }
+  return entry;
+}
+
+// CRC32 Intrinsics.
+//
+// Contract on scratch and work registers.
+// =======================================
+//
+// On ppc, the register set {R2..R12} is available in the interpreter as scratch/work registers.
+// You should, however, keep in mind that {R3_ARG1..R10_ARG8} is the C-ABI argument register set.
+// You can't rely on these registers across calls.
+//
+// The generators for CRC32_update and for CRC32_updateBytes use the
+// scratch/work register set internally, passing the work registers
+// as arguments to the MacroAssembler emitters as required.
+//
+// R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments.
+// Their contents are not constant but may change according to the requirements
+// of the emitted code.
+//
+// All other registers from the scratch/work register set are used "internally"
+// and contain garbage (i.e. unpredictable values) once blr() is reached.
+// Basically, only R3_RET contains a defined value which is the function result.
+//
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+  if (UseCRC32Intrinsics) {
+    address start = __ pc();  // Remember stub start address (it is the return value).
+    Label slow_path;
+
+    // Safepoint check
+    const Register sync_state = R11_scratch1;
+    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
+    __ lwz(sync_state, sync_state_offs, sync_state);
+    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
+    __ bne(CCR0, slow_path);
+
+    // We don't generate a local frame and don't align the stack, because
+    // we don't even call stub code (we generate the code inline)
+    // and there is no safepoint on this path.
+
+    // Load java parameters.
+    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
+    const Register argP    = R15_esp;
+    const Register crc     = R3_ARG1;  // crc value
+    const Register data    = R4_ARG2;  // address of java byte value (kernel_crc32 needs address)
+    const Register dataLen = R5_ARG3;  // source data len (1 byte). Not used because we call the single-byte emitter.
+    const Register table   = R6_ARG4;  // address of crc32 table
+    const Register tmp     = dataLen;  // Reuse unused len register to show we don't actually need a separate tmp here.
+
+    BLOCK_COMMENT("CRC32_update {");
+
+    // Arguments are reversed on java expression stack
+#ifdef VM_LITTLE_ENDIAN
+    __ addi(data, argP, 0+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
+                                       // Being passed as an int, the single byte is at offset +0.
+#else
+    __ addi(data, argP, 3+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
+                                       // Being passed from java as an int, the single byte is at offset +3.
+#endif
+    __ lwz(crc,  2*wordSize, argP);    // Current crc state, zero extend to 64 bit to have a clean register.
+
+    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
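+    // What the single-byte emitter below computes, sketched in C++ (the
+    // placement of the ~crc pre-/post-inversion inside the kernel is an
+    // assumption, not taken from the emitter):
+    //
+    //   uint32_t crc32_update_byte(uint32_t crc, uint8_t b, const uint32_t* table) {
+    //     crc = ~crc;
+    //     crc = table[(crc ^ b) & 0xff] ^ (crc >> 8);
+    //     return ~crc;
+    //   }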
+    __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);
+
+    // Restore caller sp for c2i case and return.
+    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+    __ blr();
+
+    // Generate a vanilla native entry as the slow path.
+    BLOCK_COMMENT("} CRC32_update");
+    BIND(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
+    return start;
+  }
+
+  return NULL;
+}
+
+// CRC32 Intrinsics.
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32Intrinsics) {
+    address start = __ pc();  // Remember stub start address (it is the return value).
+    Label slow_path;
+
+    // Safepoint check
+    const Register sync_state = R11_scratch1;
+    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
+    __ lwz(sync_state, sync_state_offs, sync_state);
+    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
+    __ bne(CCR0, slow_path);
+
+    // We don't generate a local frame and don't align the stack, because
+    // we don't even call stub code (we generate the code inline)
+    // and there is no safepoint on this path.
+
+    // Load parameters.
+    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
+    const Register argP    = R15_esp;
+    const Register crc     = R3_ARG1;  // crc value
+    const Register data    = R4_ARG2;  // address of java byte array
+    const Register dataLen = R5_ARG3;  // source data len
+    const Register table   = R6_ARG4;  // address of crc32 table
+
+    const Register t0      = R9;       // scratch registers for crc calculation
+    const Register t1      = R10;
+    const Register t2      = R11;
+    const Register t3      = R12;
+
+    const Register tc0     = R2;       // registers to hold pre-calculated column addresses
+    const Register tc1     = R7;
+    const Register tc2     = R8;
+    const Register tc3     = table;    // table address is reconstructed at the end of kernel_crc32_* emitters
+
+    const Register tmp     = t0;       // Only used very locally to calculate byte buffer address.
+
+    // Arguments are reversed on java expression stack.
+    // Calculate address of start element.
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer direct".
+      BLOCK_COMMENT("CRC32_updateByteBuffer {");
+      // crc     @ (SP + 5W) (32bit)
+      // buf     @ (SP + 3W) (64bit ptr to long array)
+      // off     @ (SP + 2W) (32bit)
+      // dataLen @ (SP + 1W) (32bit)
+      // data = buf + off
+      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
+      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
+      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
+      __ lwz( crc,     5*wordSize, argP);  // current crc state
+      __ add( data, data, tmp);            // Add byte buffer offset.
+    } else {                                                         // Used for "updateBytes update".
+      BLOCK_COMMENT("CRC32_updateBytes {");
+      // crc     @ (SP + 4W) (32bit)
+      // buf     @ (SP + 3W) (64bit ptr to byte array)
+      // off     @ (SP + 2W) (32bit)
+      // dataLen @ (SP + 1W) (32bit)
+      // data = buf + off + base_offset
+      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
+      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
+      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
+      __ add( data, data, tmp);            // add byte buffer offset
+      __ lwz( crc,     4*wordSize, argP);  // current crc state
+      __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
+    }
+
+    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
+
+    // Performance measurements show the 1word and 2word variants to be almost equivalent,
+    // with a slight advantage for the 1word variant. We chose the 1word variant for
+    // code compactness.
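+    //
+    // Assuming the common slicing-by-4 scheme, one iteration of the 1word
+    // kernel conceptually folds 4 input bytes at once (illustrative sketch;
+    // tc0..tc3 are the precomputed column base addresses set up above):
+    //
+    //   crc ^= load_u32_le(data); data += 4;
+    //   crc  = tc3[ crc        & 0xff] ^ tc2[(crc >>  8) & 0xff]
+    //        ^ tc1[(crc >> 16) & 0xff] ^ tc0[(crc >> 24)       ];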
+    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);
+
+    // Restore caller sp for c2i case and return.
+    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+    __ blr();
+
+    // Generate a vanilla native entry as the slow path.
+    BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
+    BIND(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
+    return start;
+  }
+
+  return NULL;
+}
+
+// =============================================================================
+// Exceptions
+
+void TemplateInterpreterGenerator::generate_throw_exception() {
+  Register Rexception    = R17_tos,
+           Rcontinuation = R3_RET;
+
+  // --------------------------------------------------------------------------
+  // Entry point if a method returns with a pending exception (rethrow).
+  Interpreter::_rethrow_exception_entry = __ pc();
+  {
+    __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
+    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
+    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+
+    // Compiled code destroys templateTableBase, reload.
+    __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
+  }
+
+  // Entry point if an interpreted method throws an exception (throw).
+  Interpreter::_throw_exception_entry = __ pc();
+  {
+    __ mr(Rexception, R3_RET);
+
+    __ verify_thread();
+    __ verify_oop(Rexception);
+
+    // Expression stack must be empty before entering the VM in case of an exception.
+    __ empty_expression_stack();
+    // Find exception handler address and preserve exception oop.
+    // Call C routine to find handler and jump to it.
+    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
+    __ mtctr(Rcontinuation);
+    // Push exception for exception handler bytecodes.
+    __ push_ptr(Rexception);
+
+    // Jump to the exception handler (this may be the remove-activation entry!).
+    __ bctr();
+  }
+
+  // If the exception is not handled in the current frame the frame is
+  // removed and the exception is rethrown (i.e. exception
+  // continuation is _rethrow_exception).
+  //
+  // Note: At this point the bci is still that of the instruction which
+  // caused the exception, and the expression stack is
+  // empty. Thus, for any VM calls at this point, GC will find a legal
+  // oop map (with empty expression stack).
+
+  // In current activation
+  // tos: exception
+  // bcp: exception bcp
+
+  // --------------------------------------------------------------------------
+  // JVMTI PopFrame support
+
+  Interpreter::_remove_activation_preserving_args_entry = __ pc();
+  {
+    // Set the popframe_processing bit in popframe_condition indicating that we are
+    // currently handling popframe, so that call_VMs that may happen later do not
+    // trigger new popframe handling cycles.
+    __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+    __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
+    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+
+    // Empty the expression stack, as in normal exception handling.
+    __ empty_expression_stack();
+    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
+
+    // Check to see whether we are returning to a deoptimized frame.
+    // (The PopFrame call ensures that the caller of the popped frame is
+    // either interpreted or compiled and deoptimizes it if compiled.)
+    // Note that we don't compare the return PC against the
+    // deoptimization blob's unpack entry because of the presence of
+    // adapter frames in C2.
+    Label Lcaller_not_deoptimized;
+    Register return_pc = R3_ARG1;
+    __ ld(return_pc, 0, R1_SP);
+    __ ld(return_pc, _abi(lr), return_pc);
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
+    __ cmpdi(CCR0, R3_RET, 0);
+    __ bne(CCR0, Lcaller_not_deoptimized);
+
+    // The deoptimized case.
+    // In this case, we can't call dispatch_next() after the frame is
+    // popped, but instead must save the incoming arguments and restore
+    // them after deoptimization has occurred.
+    __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
+    __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
+    __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
+    __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
+    __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
+    // Save these arguments.
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);
+
+    // Inform deoptimization that it is responsible for restoring these arguments.
+    __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
+    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+
+    // Return from the current method into the deoptimization blob. We will
+    // eventually end up in the deopt interpreter entry; deoptimization has
+    // prepared everything such that we will reexecute the call that called us.
+    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
+    __ mtlr(return_pc);
+    __ blr();
+
+    // The non-deoptimized case.
+    __ bind(Lcaller_not_deoptimized);
+
+    // Clear the popframe condition flag.
+    __ li(R0, 0);
+    __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+
+    // Get out of the current method and re-execute the call that called us.
+    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
+    __ restore_interpreter_state(R11_scratch1);
+    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
+    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+    if (ProfileInterpreter) {
+      __ set_method_data_pointer_for_bcp();
+      __ ld(R11_scratch1, 0, R1_SP);
+      __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
+    }
+#if INCLUDE_JVMTI
+    Label L_done;
+
+    __ lbz(R11_scratch1, 0, R14_bcp);
+    __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
+    __ bne(CCR0, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+    __ ld(R4_ARG2, 0, R18_locals);
+    __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
+    __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
+    __ cmpdi(CCR0, R4_ARG2, 0);
+    __ beq(CCR0, L_done);
+    __ std(R4_ARG2, wordSize, R15_esp);
+    __ bind(L_done);
+#endif // INCLUDE_JVMTI
+    __ dispatch_next(vtos);
+  }
+  // end of JVMTI PopFrame support
+
+  // --------------------------------------------------------------------------
+  // Remove activation exception entry.
+  // This is jumped to if an interpreted method can't handle an exception itself
+  // (we come from the throw/rethrow exception entry above). We're going to call
+  // into the VM to find the exception handler in the caller, pop the current
+  // frame and return the handler we calculated.
+  Interpreter::_remove_activation_entry = __ pc();
+  {
+    __ pop_ptr(Rexception);
+    __ verify_thread();
+    __ verify_oop(Rexception);
+    __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);
+
+    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true);
+    __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);
+
+    __ get_vm_result(Rexception);
+
+    // We are done with this activation frame; find out where to go next.
+    // The continuation point will be an exception handler, which expects
+    // the following registers set up:
+    //
+    // RET:  exception oop
+    // ARG2: Issuing PC (see generate_exception_blob()), only used if the caller is compiled.
+
+    Register return_pc = R31; // Needs to survive the runtime call.
+    __ ld(return_pc, 0, R1_SP);
+    __ ld(return_pc, _abi(lr), return_pc);
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);
+
+    // Remove the current activation.
+    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
+
+    __ mr(R4_ARG2, return_pc);
+    __ mtlr(R3_RET);
+    __ mr(R3_RET, Rexception);
+    __ blr();
+  }
+}
+
+// JVMTI ForceEarlyReturn support.
+// Returns "in the middle" of a method with a "fake" return value.
+address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
+
+  Register Rscratch1 = R11_scratch1,
+           Rscratch2 = R12_scratch2;
+
+  address entry = __ pc();
+  __ empty_expression_stack();
+
+  __ load_earlyret_value(state, Rscratch1);
+
+  __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
+  // Clear the earlyret state.
+  __ li(R0, 0);
+  __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);
+
+  __ remove_activation(state, false, false);
+  // Copied from TemplateTable::_return.
+  // Restoration of lr done by remove_activation.
+  switch (state) {
+    case ltos:
+    case btos:
+    case ctos:
+    case stos:
+    case atos:
+    case itos: __ mr(R3_RET, R17_tos); break;
+    case ftos:
+    case dtos: __ fmr(F1_RET, F15_ftos); break;
+    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
+               // to become visible before the reference to the object gets stored anywhere.
+               __ membar(Assembler::StoreStore); break;
+    default  : ShouldNotReachHere();
+  }
+  __ blr();
+
+  return entry;
+} // end of ForceEarlyReturn support
+
+//-----------------------------------------------------------------------------
+// Helper for vtos entry point generation
+
+void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
+                                                         address& bep,
+                                                         address& cep,
+                                                         address& sep,
+                                                         address& aep,
+                                                         address& iep,
+                                                         address& lep,
+                                                         address& fep,
+                                                         address& dep,
+                                                         address& vep) {
+  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
+  Label L;
+
+  aep = __ pc();  __ push_ptr();  __ b(L);
+  fep = __ pc();  __ push_f();    __ b(L);
+  dep = __ pc();  __ push_d();    __ b(L);
+  lep = __ pc();  __ push_l();    __ b(L);
+  __ align(32, 12, 24); // align L
+  bep = cep = sep =
+  iep = __ pc();  __ push_i();
+  vep = __ pc();
+  __ bind(L);
+  generate_and_dispatch(t);
+}
+
+//-----------------------------------------------------------------------------
+// Generation of individual instructions
+
+// helpers for generate_and_dispatch
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+  : TemplateInterpreterGenerator(code) {
+  generate_all(); // Down here so it can be "virtual".
+}
+
+//-----------------------------------------------------------------------------
+
+// Non-product code
+#ifndef PRODUCT
+address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
+  //__ flush_bundle();
+  address entry = __ pc();
+
+  const char *bname = NULL;
+  uint tsize = 0;
+  switch(state) {
+  case ftos:
+    bname = "trace_code_ftos {";
+    tsize = 2;
+    break;
+  case btos:
+    bname = "trace_code_btos {";
+    tsize = 2;
+    break;
+  case ctos:
+    bname = "trace_code_ctos {";
+    tsize = 2;
+    break;
+  case stos:
+    bname = "trace_code_stos {";
+    tsize = 2;
+    break;
+  case itos:
+    bname = "trace_code_itos {";
+    tsize = 2;
+    break;
+  case ltos:
+    bname = "trace_code_ltos {";
+    tsize = 3;
+    break;
+  case atos:
+    bname = "trace_code_atos {";
+    tsize = 2;
+    break;
+  case vtos:
+    // Note: In case of vtos, the topmost stack value could be an int or a double.
+    // In case of a double (2 slots) we won't see the 2nd stack value.
+    // Maybe we should simply print the topmost 3 stack slots to cope with the problem.
+    bname = "trace_code_vtos {";
+    tsize = 2;
+
+    break;
+  case dtos:
+    bname = "trace_code_dtos {";
+    tsize = 3;
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+  BLOCK_COMMENT(bname);
+
+  // Support short-cut for TraceBytecodesAt.
+  // To speed things up, don't call into the VM if we don't want to trace.
+  Label Lskip_vm_call;
+  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
+    int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
+    int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
+    __ ld(R11_scratch1, offs1, R11_scratch1);
+    __ lwa(R12_scratch2, offs2, R12_scratch2);
+    __ cmpd(CCR0, R12_scratch2, R11_scratch1);
+    __ blt(CCR0, Lskip_vm_call);
+  }
+
+  __ push(state);
+  // Load 2 topmost expression stack values.
+  __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
+  __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
+  __ mflr(R31);
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
+  __ mtlr(R31);
+  __ pop(state);
+
+  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
+    __ bind(Lskip_vm_call);
+  }
+  __ blr();
+  BLOCK_COMMENT("} trace_code");
+  return entry;
+}
+
+void TemplateInterpreterGenerator::count_bytecode() {
+  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
+  __ lwz(R12_scratch2, offs, R11_scratch1);
+  __ addi(R12_scratch2, R12_scratch2, 1);
+  __ stw(R12_scratch2, offs, R11_scratch1);
+}
+
+void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
+  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
+  __ lwz(R12_scratch2, offs, R11_scratch1);
+  __ addi(R12_scratch2, R12_scratch2, 1);
+  __ stw(R12_scratch2, offs, R11_scratch1);
+}
+
+void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
+  const Register addr = R11_scratch1,
+                 tmp  = R12_scratch2;
+  // Get index, shift out old bytecode, bring in new bytecode, and store it.
+  // _index = (_index >> log2_number_of_codes) |
+  //          (bytecode << log2_number_of_codes);
+  int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
+  __ lwz(tmp, offs1, addr);
+  __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
+  __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
+  __ stw(tmp, offs1, addr);
+
+  // Bump bucket contents.
+  // _counters[_index] ++;
+  int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
+  __ sldi(tmp, tmp, LogBytesPerInt);
+  __ add(addr, tmp, addr);
+  __ lwz(tmp, offs2, addr);
+  __ addi(tmp, tmp, 1);
+  __ stw(tmp, offs2, addr);
+}
+
+void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
+  // Call a little run-time stub to avoid blow-up for each bytecode.
+  // The run-time stub saves the right registers, depending on
+  // the tosca in-state for the given template.
+
+  assert(Interpreter::trace_code(t->tos_in()) != NULL,
+         "entry must have been generated");
+
+  // Note: we destroy LR here.
+  __ bl(Interpreter::trace_code(t->tos_in()));
+}
+
+void TemplateInterpreterGenerator::stop_interpreter_at() {
+  Label L;
+  int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
+  int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
+  __ ld(R11_scratch1, offs1, R11_scratch1);
+  __ lwa(R12_scratch2, offs2, R12_scratch2);
+  __ cmpd(CCR0, R12_scratch2, R11_scratch1);
+  __ bne(CCR0, L);
+  __ illtrap();
+  __ bind(L);
+}
+
+#endif // !PRODUCT
+#endif // !CC_INTERP
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, 2015 SAP AG. All rights reserved.
+ * Copyright (c) 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,1389 +24,38 @@
  */
 
 #include "precompiled.hpp"
-#ifndef CC_INTERP
-#include "asm/macroAssembler.inline.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
+#include "oops/constMethod.hpp"
 #include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
-#undef __
-#define __ _masm->
 
-#ifdef PRODUCT
-#define BLOCK_COMMENT(str) /* nothing */
-#else
-#define BLOCK_COMMENT(str) __ block_comment(str)
-#endif
-
-#define BIND(label)        __ bind(label); BLOCK_COMMENT(#label ":")
-
-//-----------------------------------------------------------------------------
-
-// Actually we should never reach here since we do stack overflow checks before pushing any frame.
-address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
-  address entry = __ pc();
-  __ unimplemented("generate_StackOverflowError_handler");
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
-  address entry = __ pc();
-  __ empty_expression_stack();
-  __ load_const_optimized(R4_ARG2, (address) name);
-  // Index is in R17_tos.
-  __ mr(R5_ARG3, R17_tos);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
-  return entry;
-}
-
-#if 0
-// Call special ClassCastException constructor taking object to cast
-// and target class as arguments.
-address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
-  address entry = __ pc();
-
-  // Expression stack must be empty before entering the VM if an
-  // exception happened.
-  __ empty_expression_stack();
-
-  // Thread will be loaded to R3_ARG1.
-  // Target class oop is in register R5_ARG3 by convention!
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
-  // Above call must not return here since exception pending.
-  DEBUG_ONLY(__ should_not_reach_here();)
-  return entry;
-}
-#endif
-
-address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
-  address entry = __ pc();
-  // Expression stack must be empty before entering the VM if an
-  // exception happened.
-  __ empty_expression_stack();
-
-  // Load exception object.
-  // Thread will be loaded to R3_ARG1.
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
-#ifdef ASSERT
-  // Above call must not return here since exception pending.
-  __ should_not_reach_here();
-#endif
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
-  address entry = __ pc();
-  //__ untested("generate_exception_handler_common");
-  Register Rexception = R17_tos;
-
-  // Expression stack must be empty before entering the VM if an exception happened.
-  __ empty_expression_stack();
-
-  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
-  if (pass_oop) {
-    __ mr(R5_ARG3, Rexception);
-    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
-  } else {
-    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
-    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
-  }
-
-  // Throw exception.
-  __ mr(R3_ARG1, Rexception);
-  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
-  __ mtctr(R11_scratch1);
-  __ bctr();
-
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  __ unimplemented("generate_continuation_for");
-  return entry;
-}
-
-// This entry is returned to when a call returns to the interpreter.
-// When we arrive here, we expect that the callee stack frame is already popped.
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
-  address entry = __ pc();
-
-  // Move the value out of the return register back to the TOS cache of current frame.
-  switch (state) {
-    case ltos:
-    case btos:
-    case ctos:
-    case stos:
-    case atos:
-    case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
-    case ftos:
-    case dtos: __ fmr(F15_ftos, F1_RET); break; // TOS cache -> GR_FRET
-    case vtos: break;                           // Nothing to do, this was a void return.
-    default  : ShouldNotReachHere();
-  }
-
-  __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
-  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
-  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
-
-  // Compiled code destroys templateTableBase, reload.
-  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
-
-  if (state == atos) {
-    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
-  }
-
-  const Register cache = R11_scratch1;
-  const Register size  = R12_scratch2;
-  __ get_cache_and_index_at_bcp(cache, 1, index_size);
-
-  // Get least significant byte of 64 bit value:
-#if defined(VM_LITTLE_ENDIAN)
-  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
-#else
-  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
-#endif
-  __ sldi(size, size, Interpreter::logStackElementSize);
-  __ add(R15_esp, R15_esp, size);
-  __ dispatch_next(state, step);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
-  address entry = __ pc();
-  // If state != vtos, we're returning from a native method, which put it's result
-  // into the result register. So move the value out of the return register back
-  // to the TOS cache of current frame.
-
-  switch (state) {
-    case ltos:
-    case btos:
-    case ctos:
-    case stos:
-    case atos:
-    case itos: __ mr(R17_tos, R3_RET); break;   // GR_RET -> TOS cache
-    case ftos:
-    case dtos: __ fmr(F15_ftos, F1_RET); break; // TOS cache -> GR_FRET
-    case vtos: break;                           // Nothing to do, this was a void return.
-    default  : ShouldNotReachHere();
-  }
-
-  // Load LcpoolCache @@@ should be already set!
-  __ get_constant_pool_cache(R27_constPoolCache);
-
-  // Handle a pending exception, fall through if none.
-  __ check_and_forward_exception(R11_scratch1, R12_scratch2);
-
-  // Start executing bytecodes.
-  __ dispatch_next(state, step);
-
-  return entry;
-}
-
-// A result handler converts the native result into java format.
-// Use the shared code between c++ and template interpreter.
-address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
-  return AbstractInterpreterGenerator::generate_result_handler_for(type);
-}
-
-address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
-  address entry = __ pc();
-
-  __ push(state);
-  __ call_VM(noreg, runtime_entry);
-  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
-
-  return entry;
-}
-
-// Helpers for commoning out cases in the various type of method entries.
-
-// Increment invocation count & check for overflow.
-//
-// Note: we check for a negative value instead of an exact overflow, so that
-//       the overflow test is 'sticky': once triggered, it stays triggered.
-//
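-// As an illustration of the intent (assumed, not stated in the original): a
-// hardware overflow flag would be set only by the one increment that
-// overflows, while a signed compare against the limit keeps evaluating true
-// on every later check, so the trigger cannot be missed.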
-void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-  // Note: In tiered we increment either the counters in the method or in the MDO, depending on whether we're profiling or not.
-  Register Rscratch1   = R11_scratch1;
-  Register Rscratch2   = R12_scratch2;
-  Register R3_counters = R3_ARG1;
-  Label done;
-
-  if (TieredCompilation) {
-    const int increment = InvocationCounter::count_increment;
-    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      const Register Rmdo = Rscratch1;
-      // If no method data exists, branch to no_mdo and use the MethodCounters* instead.
-      __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
-      __ cmpdi(CCR0, Rmdo, 0);
-      __ beq(CCR0, no_mdo);
-
-      // Increment backedge counter in the MDO.
-      const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
-      __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
-      __ addi(Rscratch2, Rscratch2, increment);
-      __ stw(Rscratch2, mdo_bc_offs, Rmdo);
-      __ load_const_optimized(Rscratch1, mask, R0);
-      __ and_(Rscratch1, Rscratch2, Rscratch1);
-      __ bne(CCR0, done);
-      __ b(*overflow);
-    }
-
-    // Increment counter in MethodCounters*.
-    const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
-    __ bind(no_mdo);
-    __ get_method_counters(R19_method, R3_counters, done);
-    __ lwz(Rscratch2, mo_bc_offs, R3_counters);
-    __ addi(Rscratch2, Rscratch2, increment);
-    __ stw(Rscratch2, mo_bc_offs, R3_counters);
-    __ load_const_optimized(Rscratch1, mask, R0);
-    __ and_(Rscratch1, Rscratch2, Rscratch1);
-    __ beq(CCR0, *overflow);
-
-    __ bind(done);
-
-  } else {
-
-    // Update standard invocation counters.
-    Register Rsum_ivc_bec = R4_ARG2;
-    __ get_method_counters(R19_method, R3_counters, done);
-    __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
-    // Increment interpreter invocation counter.
-    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
-      __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
-      __ addi(R12_scratch2, R12_scratch2, 1);
-      __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
-    }
-    // Check if we must create a method data obj.
-    if (ProfileInterpreter && profile_method != NULL) {
-      const Register profile_limit = Rscratch1;
-      int pl_offs = __ load_const_optimized(profile_limit, &InvocationCounter::InterpreterProfileLimit, R0, true);
-      __ lwz(profile_limit, pl_offs, profile_limit);
-      // Test to see if we should create a method data oop.
-      __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
-      __ blt(CCR0, *profile_method_continue);
-      // If no method data exists, go to profile_method.
-      __ test_method_data_pointer(*profile_method);
-    }
-    // Finally check for counter overflow.
-    if (overflow) {
-      const Register invocation_limit = Rscratch1;
-      int il_offs = __ load_const_optimized(invocation_limit, &InvocationCounter::InterpreterInvocationLimit, R0, true);
-      __ lwz(invocation_limit, il_offs, invocation_limit);
-      assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit), "unexpected field size");
-      __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
-      __ bge(CCR0, *overflow);
-    }
-
-    __ bind(done);
-  }
-}
-
-// Generate code to initiate compilation on invocation counter overflow.
-void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
-  // Generate code to initiate compilation on the counter overflow.
-
-  // InterpreterRuntime::frequency_counter_overflow takes one argument,
-  // which indicates whether the counter overflow occurred at a backward
-  // branch (NULL bcp). We pass zero in.
-  // The call returns the address of the verified entry point for the method or NULL
-  // if the compilation did not complete (either went background or bailed out).
-  //
-  // Unlike the C++ interpreter above: Check exceptions!
-  // Assumption: The caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
-  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.
-
-  __ li(R4_ARG2, 0);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
-
-  // Returns verified_entry_point or NULL.
-  // We ignore it in any case.
-  __ b(continue_entry);
-}
-
-void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
-  assert_different_registers(Rmem_frame_size, Rscratch1);
-  __ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
-}
-
-void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
-  __ unlock_object(R26_monitor, check_exceptions);
-}
-
-// Lock the current method, interpreter register window must be set up!
-void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
-  const Register Robj_to_lock = Rscratch2;
-
-  {
-    if (!flags_preloaded) {
-      __ lwz(Rflags, method_(access_flags));
-    }
-
-#ifdef ASSERT
-    // Check if the method needs synchronization.
-    {
-      Label Lok;
-      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
-      __ btrue(CCR0, Lok);
-      __ stop("method doesn't need synchronization");
-      __ bind(Lok);
-    }
-#endif // ASSERT
-  }
-
-  // Get synchronization object to Rscratch2.
-  {
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    Label Lstatic;
-    Label Ldone;
-
-    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
-    __ btrue(CCR0, Lstatic);
-
-    // Non-static case: load receiver obj from stack and we're done.
-    __ ld(Robj_to_lock, R18_locals);
-    __ b(Ldone);
-
-    __ bind(Lstatic); // Static case: Lock the java mirror
-    __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
-    __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
-    __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
-    __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);
-
-    __ bind(Ldone);
-    __ verify_oop(Robj_to_lock);
-  }
-
-  // Got the oop to lock => execute!
-  __ add_monitor_to_stack(true, Rscratch1, R0);
-
-  __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
-  __ lock_object(R26_monitor, Robj_to_lock);
-}
-
-// Generate a fixed interpreter frame for pure interpreter
-// and I2N native transition frames.
-//
-// Before (stack grows downwards):
-//
-//         |  ...         |
-//         |------------- |
-//         |  java arg0   |
-//         |  ...         |
-//         |  java argn   |
-//         |              |   <-   R15_esp
-//         |              |
-//         |--------------|
-//         | abi_112      |
-//         |              |   <-   R1_SP
-//         |==============|
-//
-//
-// After:
-//
-//         |  ...         |
-//         |  java arg0   |<-   R18_locals
-//         |  ...         |
-//         |  java argn   |
-//         |--------------|
-//         |              |
-//         |  java locals |
-//         |              |
-//         |--------------|
-//         |  abi_48      |
-//         |==============|
-//         |              |
-//         |   istate     |
-//         |              |
-//         |--------------|
-//         |   monitor    |<-   R26_monitor
-//         |--------------|
-//         |              |<-   R15_esp
-//         | expression   |
-//         | stack        |
-//         |              |
-//         |--------------|
-//         |              |
-//         | abi_112      |<-   R1_SP
-//         |==============|
-//
-// The topmost frame needs an ABI space of 112 bytes, because we call into C
-// and the C function may spill its arguments to the caller's frame. When we
-// call into Java, we don't need these spill slots. In order to save space on
-// the stack, we resize the caller's frame. However, the Java locals reside in
-// the caller's frame, so that frame has to be enlarged. The frame_size for the
-// current frame was calculated based on max_stack as the size of the expression
-// stack. At the call, only a part of the expression stack might be in use.
-// We don't want to waste this space and cut the frame back accordingly.
-// The resulting amount for resizing is calculated as follows:
-// resize =   (number_of_locals - number_of_arguments) * slot_size
-//          + (R1_SP - R15_esp) + 48
-//
-// The size for the callee frame is calculated:
-// framesize = 112 + max_stack + monitor + state_size
-//
-// maxstack:   Max number of slots on the expression stack, loaded from the method.
-// monitor:    We statically reserve room for one monitor object.
-// state_size: We save the current state of the interpreter to this area.
-//
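-// Hypothetical example with illustrative numbers (slot_size is 8 on PPC64):
-// a method with 8 locals, 3 of them parameters, gives
-//   resize    = (8 - 3) * 8 + (R1_SP - R15_esp) + 48
-// and, with max_stack = 10 slots,
-//   framesize = 112 + 10 * 8 + monitor + state_size,
-// each rounded up to the frame alignment.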
-void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
-  Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
-           top_frame_size      = R7_ARG5,
-           Rconst_method       = R8_ARG6;
-
-  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);
-
-  __ ld(Rconst_method, method_(const));
-  __ lhz(Rsize_of_parameters /* number of params */,
-         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
-  if (native_call) {
-    // If we're calling a native method, we reserve space for the worst-case signature
-    // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
-    // We add two slots to the parameter_count, one for the jni
-    // environment and one for a possible native mirror.
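-    // For example (illustrative, assuming 8 C-ABI argument registers as on
-    // PPC64): a native method with 3 declared parameters needs 3 + 2 = 5
-    // slots, but we still reserve max(8, 5) = 8; with 10 parameters we
-    // reserve max(8, 12) = 12.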
-    Label skip_native_calculate_max_stack;
-    __ addi(top_frame_size, Rsize_of_parameters, 2);
-    __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
-    __ bge(CCR0, skip_native_calculate_max_stack);
-    __ li(top_frame_size, Argument::n_register_parameters);
-    __ bind(skip_native_calculate_max_stack);
-    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
-    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
-    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
-    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
-  } else {
-    __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
-    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
-    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
-    __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
-    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
-    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
-    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
-    __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
-  }
-
-  // Compute top frame size.
-  __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);
-
-  // Cut back area between esp and max_stack.
-  __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);
-
-  __ round_to(top_frame_size, frame::alignment_in_bytes);
-  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
-  // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
-  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.
-
-  {
-    // --------------------------------------------------------------------------
-    // Stack overflow check
-
-    Label cont;
-    __ add(R11_scratch1, parent_frame_resize, top_frame_size);
-    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
-  }
-
-  // Set up interpreter state registers.
-
-  __ add(R18_locals, R15_esp, Rsize_of_parameters);
-  __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
-  __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);
-
-  // Set method data pointer.
-  if (ProfileInterpreter) {
-    Label zero_continue;
-    __ ld(R28_mdx, method_(method_data));
-    __ cmpdi(CCR0, R28_mdx, 0);
-    __ beq(CCR0, zero_continue);
-    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
-    __ bind(zero_continue);
-  }
-
-  if (native_call) {
-    __ li(R14_bcp, 0); // Must initialize.
-  } else {
-    __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
-  }
-
-  // Resize parent frame.
-  __ mflr(R12_scratch2);
-  __ neg(parent_frame_resize, parent_frame_resize);
-  __ resize_frame(parent_frame_resize, R11_scratch1);
-  __ std(R12_scratch2, _abi(lr), R1_SP);
-
-  __ addi(R26_monitor, R1_SP, - frame::ijava_state_size);
-  __ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
-
-  // Store values.
-  // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
-  // in InterpreterMacroAssembler::call_from_interpreter.
-  __ std(R19_method, _ijava_state_neg(method), R1_SP);
-  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
-  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
-  __ std(R18_locals, _ijava_state_neg(locals), R1_SP);
-
-  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
-  // be found in the frame after save_interpreter_state is done. This is always true
-  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
-  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
-  // (Enhanced Stack Trace).
-  // The signal handler does not save the interpreter state into the frame.
-  __ li(R0, 0);
-#ifdef ASSERT
-  // Fill remaining slots with constants.
-  __ load_const_optimized(R11_scratch1, 0x5afe);
-  __ load_const_optimized(R12_scratch2, 0xdead);
-#endif
-  // We have to initialize some frame slots for native calls (accessed by GC).
-  if (native_call) {
-    __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
-    __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
-    if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
-  }
-#ifdef ASSERT
-  else {
-    __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
-    __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
-    __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
-  }
-  __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
-  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
-  __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
-  __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
-#endif
-  __ subf(R12_scratch2, top_frame_size, R1_SP);
-  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
-  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);
-
-  // Push top frame.
-  __ push_frame(top_frame_size, R11_scratch1);
-}
-
-// End of helpers
-
-
-// Support abs and sqrt like the compiler does.
-// For others we can use a normal (native) entry.
-
-inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
-  if (!InlineIntrinsics) return false;
-
-  return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
-          (kind==Interpreter::java_lang_math_abs));
-}
-
-address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
-  if (!math_entry_available(kind)) {
-    NOT_PRODUCT(__ should_not_reach_here();)
-    return NULL;
-  }
-
-  address entry = __ pc();
-
-  __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);
-
-  // Pop c2i arguments (if any) off when we return.
-#ifdef ASSERT
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x545);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-  if (kind == Interpreter::java_lang_math_sqrt) {
-    __ fsqrt(F1_RET, F1_RET);
-  } else if (kind == Interpreter::java_lang_math_abs) {
-    __ fabs(F1_RET, F1_RET);
-  } else {
-    ShouldNotReachHere();
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
   }
-
-  // And we're done.
-  __ blr();
-
-  __ flush();
-
-  return entry;
-}
-
-// Interpreter stub for calling a native method. (asm interpreter)
-// This sets up a somewhat different looking stack for calling the
-// native method than the typical interpreter frame setup.
-//
-// On entry:
-//   R19_method    - method
-//   R16_thread    - JavaThread*
-//   R15_esp       - intptr_t* sender tos
-//
-//   abstract stack (grows up)
-//     [  IJava (caller of JNI callee)  ]  <-- ASP
-//        ...
-address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
-
-  address entry = __ pc();
-
-  const bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // -----------------------------------------------------------------------------
-  // Allocate a new frame that represents the native callee (i2n frame).
-  // This is not a full-blown interpreter frame, but in particular, the
-  // following registers are valid after this:
-  // - R19_method
-  // - R18_locals (points to the start of the arguments to the native function)
-  //
-  //   abstract stack (grows up)
-  //     [  IJava (caller of JNI callee)  ]  <-- ASP
-  //        ...
-
-  const Register signature_handler_fd = R11_scratch1;
-  const Register pending_exception    = R0;
-  const Register result_handler_addr  = R31;
-  const Register native_method_fd     = R11_scratch1;
-  const Register access_flags         = R22_tmp2;
-  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
-  const Register sync_state           = R12_scratch2;
-  const Register sync_state_addr      = sync_state;   // Address is dead after use.
-  const Register suspend_flags        = R11_scratch1;
-
-  //=============================================================================
-  // Allocate new frame and initialize interpreter state.
-
-  Label exception_return;
-  Label exception_return_sync_check;
-  Label stack_overflow_return;
-
-  // Generate new interpreter state and jump to stack_overflow_return in case of
-  // a stack overflow.
-  //generate_compute_interpreter_state(stack_overflow_return);
-
-  Register size_of_parameters = R22_tmp2;
-
-  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);
-
-  //=============================================================================
-  // Increment invocation counter. On overflow, entry to JNI method
-  // will be compiled.
-  Label invocation_counter_overflow, continue_after_compile;
-  if (inc_counter) {
-    if (synchronized) {
-      // Since at this point in the method invocation the exception handler
-      // would try to exit the monitor of a synchronized method which hasn't
-      // been entered yet, we set the thread-local variable
-      // _do_not_unlock_if_synchronized to true. If any exception is thrown by
-      // the runtime, the exception handling code, i.e. unlock_if_synchronized_method,
-      // will check this thread-local flag.
-      // The flag forces an unwind in the topmost interpreter frame without
-      // performing an unlock while doing so.
-      __ li(R0, 1);
-      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
-    }
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-
-    BIND(continue_after_compile);
-    // Reset the _do_not_unlock_if_synchronized flag.
-    if (synchronized) {
-      __ li(R0, 0);
-      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
-    }
-  }
-
-  // access_flags = method->access_flags();
-  // Load access flags.
-  assert(access_flags->is_nonvolatile(),
-         "access_flags must be in a non-volatile register");
-  // Type check.
-  assert(4 == sizeof(AccessFlags), "unexpected field size");
-  __ lwz(access_flags, method_(access_flags));
-
-  // We don't want to reload R19_method and access_flags after calls
-  // to some helper functions.
-  assert(R19_method->is_nonvolatile(),
-         "R19_method must be a non-volatile register");
-
-  // Check for synchronized methods. Must happen AFTER invocation counter
-  // check, so method is not locked if counter overflows.
-
-  if (synchronized) {
-    lock_method(access_flags, R11_scratch1, R12_scratch2, true);
-
-    // Update monitor in state.
-    __ ld(R11_scratch1, 0, R1_SP);
-    __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
-  }
-
-  // jvmti/jvmpi support
-  __ notify_method_entry();
-
-  //=============================================================================
-  // Get and call the signature handler.
-
-  __ ld(signature_handler_fd, method_(signature_handler));
-  Label call_signature_handler;
-
-  __ cmpdi(CCR0, signature_handler_fd, 0);
-  __ bne(CCR0, call_signature_handler);
-
-  // Method has never been called. Either generate a specialized
-  // handler or point to the slow one.
-  //
-  // Pass parameter 'false' to avoid exception check in call_VM.
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);
-
-  // Check for an exception while looking up the target method. If we
-  // incurred one, bail.
-  __ ld(pending_exception, thread_(pending_exception));
-  __ cmpdi(CCR0, pending_exception, 0);
-  __ bne(CCR0, exception_return_sync_check); // Has pending exception.
-
-  // Reload the signature handler; it may have been created/assigned in the meantime.
-  __ ld(signature_handler_fd, method_(signature_handler));
-  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).
-
-  BIND(call_signature_handler);
-
-  // Before we call the signature handler we push a new frame to
-  // protect the interpreter frame volatile registers when we return
-  // from jni but before we can get back to Java.
-
-  // First set the frame anchor while the SP/FP registers are
-  // convenient and the slow signature handler can use this same frame
-  // anchor.
-
-  // We have a TOP_IJAVA_FRAME here, which belongs to us.
-  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
-
-  // Now the interpreter frame (and its call chain) have been
-  // invalidated and flushed. We are now protected against eager
-  // being enabled in native code. Even if it goes eager the
-  // registers will be reloaded as clean and we will invalidate after
-  // the call so no spurious flush should be possible.
-
-  // Call signature handler and pass locals address.
-  //
-  // Our signature handlers copy required arguments to the C stack
-  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
-  __ mr(R3_ARG1, R18_locals);
-#if !defined(ABI_ELFv2)
-  __ ld(signature_handler_fd, 0, signature_handler_fd);
-#endif
-
-  __ call_stub(signature_handler_fd);
-
-  // Remove the register parameter varargs slots we allocated in
-  // compute_interpreter_state. SP+16 ends up pointing to the ABI
-  // outgoing argument area.
-  //
-  // Not needed on PPC64.
-  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);
-
-  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
-  // Save across call to native method.
-  __ mr(result_handler_addr, R3_RET);
-
-  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.
-
-  // Set up fixed parameters and call the native method.
-  // If the method is static, get mirror into R4_ARG2.
-  {
-    Label method_is_not_static;
-    // Access_flags is non-volatile and still, no need to restore it.
-
-    // Restore access flags.
-    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
-    __ bfalse(CCR0, method_is_not_static);
-
-    // constants = method->constants();
-    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
-    __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
-    // pool_holder = method->constants()->pool_holder();
-    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
-          R11_scratch1/*constants*/);
-
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-
-    // mirror = pool_holder->klass_part()->java_mirror();
-    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
-    // state->_native_mirror = mirror;
-
-    __ ld(R11_scratch1, 0, R1_SP);
-    __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
-    // R4_ARG2 = &state->_oop_temp;
-    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
-    BIND(method_is_not_static);
-  }
-
-  // At this point, arguments have been copied off the stack into
-  // their JNI positions. Oops are boxed in-place on the stack, with
-  // handles copied to arguments. The result handler address is in a
-  // register.
-
-  // Pass JNIEnv address as first parameter.
-  __ addir(R3_ARG1, thread_(jni_environment));
-
-  // Load the native_method entry before we change the thread state.
-  __ ld(native_method_fd, method_(native_function));
-
-  //=============================================================================
-  // Transition from _thread_in_Java to _thread_in_native. As soon as
-  // we make this change the safepoint code needs to be certain that
-  // the last Java frame we established is good. The pc in that frame
-  // just needs to be near here not an actual return address.
-
-  // We use release_store_fence to update values like the thread state, where
-  // we don't want the current thread to continue until all our prior memory
-  // accesses (including the new thread state) are visible to other threads.
-  __ li(R0, _thread_in_native);
-  __ release();
-
-  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
-  __ stw(R0, thread_(thread_state));
-
-  if (UseMembar) {
-    __ fence();
-  }
-
-  //=============================================================================
-  // Call the native method. Argument registers must not have been
-  // overwritten since "__ call_stub(signature_handler);" (except for
-  // ARG1 and ARG2 for static methods).
-  __ call_c(native_method_fd);
-
-  __ li(R0, 0);
-  __ ld(R11_scratch1, 0, R1_SP);
-  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
-  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
-  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset
-
-  // Note: C++ interpreter needs the following here:
-  // The frame_manager_lr field, which we use for setting the last
-  // java frame, gets overwritten by the signature handler. Restore
-  // it now.
-  //__ get_PC_trash_LR(R11_scratch1);
-  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
-
-  // Because of GC R19_method may no longer be valid.
-
-  // Block, if necessary, before resuming in _thread_in_Java state.
-  // In order for GC to work, don't clear the last_Java_sp until after
-  // blocking.
-
-  //=============================================================================
-  // Switch thread to "native transition" state before reading the
-  // synchronization state. This additional state is necessary
-  // because reading and testing the synchronization state is not
-  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
-  // in _thread_in_native state, loads _not_synchronized and is
-  // preempted. VM thread changes sync state to synchronizing and
-  // suspends threads for GC. Thread A is resumed to finish this
-  // native method, but doesn't block here since it didn't see any
-  // synchronization in progress, and escapes.
-
-  // We use release_store_fence to update values like the thread state, where
-  // we don't want the current thread to continue until all our prior memory
-  // accesses (including the new thread state) are visible to other threads.
-  __ li(R0/*thread_state*/, _thread_in_native_trans);
-  __ release();
-  __ stw(R0/*thread_state*/, thread_(thread_state));
-  if (UseMembar) {
-    __ fence();
-  }
-  // Write serialization page so that the VM thread can do a pseudo remote
-  // membar. We use the current thread pointer to calculate a thread
-  // specific offset to write to within the page. This minimizes bus
-  // traffic due to cache line collision.
-  else {
-    __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
-  }
-
-  // Now, before we return to Java, we must check for a current safepoint
-  // (a new safepoint cannot start since we entered native_trans).
-  // We must check here because a current safepoint could be modifying
-  // the caller's registers right this moment.
-
-  // Acquire isn't strictly necessary here because of the fence, but
-  // sync_state is declared to be volatile, so we do it anyway
-  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
-  int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
-
-  // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
-  __ lwz(sync_state, sync_state_offs, sync_state_addr);
-
-  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
-  __ lwz(suspend_flags, thread_(suspend_flags));
-
-  Label sync_check_done;
-  Label do_safepoint;
-  // No synchronization in progress nor yet synchronized.
-  __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
-  // Not suspended.
-  __ cmpwi(CCR1, suspend_flags, 0);
-
-  __ bne(CCR0, do_safepoint);
-  __ beq(CCR1, sync_check_done);
-  __ bind(do_safepoint);
-  __ isync();
-  // Block. We do the call directly and leave the current
-  // last_Java_frame setup undisturbed. We must save any possible
-  // native result across the call. No oop is present.
-
-  __ mr(R3_ARG1, R16_thread);
-#if defined(ABI_ELFv2)
-  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
-            relocInfo::none);
-#else
-  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
-            relocInfo::none);
-#endif
-
-  __ bind(sync_check_done);
-
-  //=============================================================================
-  // <<<<<< Back in Interpreter Frame >>>>>
-
-  // We are in thread_in_native_trans here and back in the normal
-  // interpreter frame. We don't have to do anything special about
-  // safepoints and we can switch to Java mode anytime we are ready.
-
-  // Note: frame::interpreter_frame_result has a dependency on how the
-  // method result is saved across the call to post_method_exit. For
-  // native methods it assumes that the non-FPU/non-void result is
-  // saved in _native_lresult and a FPU result in _native_fresult. If
-  // this changes then the interpreter_frame_result implementation
-  // will need to be updated too.
-
-  // On PPC64, we have stored the result directly after the native call.
-
-  //=============================================================================
-  // Back in Java
-
-  // We use release_store_fence to update values like the thread state, where
-  // we don't want the current thread to continue until all our prior memory
-  // accesses (including the new thread state) are visible to other threads.
-  __ li(R0/*thread_state*/, _thread_in_Java);
-  __ release();
-  __ stw(R0/*thread_state*/, thread_(thread_state));
-  if (UseMembar) {
-    __ fence();
-  }
-
-  __ reset_last_Java_frame();
-
-  // Jvmti/jvmpi support. Whether we've got an exception pending or
-  // not, and whether unlocking throws an exception or not, we notify
-  // on native method exit. If we do have an exception, we'll end up
-  // in the caller's context to handle it, so if we don't do the
-  // notify here, we'll drop it on the floor.
-  __ notify_method_exit(true/*native method*/,
-                        ilgl /*illegal state (not used for native methods)*/,
-                        InterpreterMacroAssembler::NotifyJVMTI,
-                        false /*check_exceptions*/);
-
-  //=============================================================================
-  // Handle exceptions
-
-  if (synchronized) {
-    // Don't check for exceptions since we're still in the i2n frame. Do that
-    // manually afterwards.
-    unlock_method(false);
-  }
-
-  // Reset active handles after returning from native.
-  // thread->active_handles()->clear();
-  __ ld(active_handles, thread_(active_handles));
-  // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
-  __ li(R0, 0);
-  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
-
-  Label exception_return_sync_check_already_unlocked;
-  __ ld(R0/*pending_exception*/, thread_(pending_exception));
-  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
-  __ bne(CCR0, exception_return_sync_check_already_unlocked);
-
-  //-----------------------------------------------------------------------------
-  // No exception pending.
-
-  // Move native method result back into proper registers and return.
-  // Invoke result handler (may unbox/promote).
-  __ ld(R11_scratch1, 0, R1_SP);
-  __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
-  __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
-  __ call_stub(result_handler_addr);
-
-  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
-
-  // Must use the return pc which was loaded from the caller's frame
-  // as the VM uses return-pc-patching for deoptimization.
-  __ mtlr(R0);
-  __ blr();
-
-  //-----------------------------------------------------------------------------
-  // An exception is pending. We call into the runtime only if the
-  // caller was not interpreted. If it was interpreted the
-  // interpreter will do the correct thing. If it isn't interpreted
-  // (call stub/compiled code) we will change our return and continue.
-
-  BIND(exception_return_sync_check);
-
-  if (synchronized) {
-    // Don't check for exceptions since we're still in the i2n frame. Do that
-    // manually afterwards.
-    unlock_method(false);
-  }
-  BIND(exception_return_sync_check_already_unlocked);
-
-  const Register return_pc = R31;
-
-  __ ld(return_pc, 0, R1_SP);
-  __ ld(return_pc, _abi(lr), return_pc);
-
-  // Get the address of the exception handler.
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
-                  R16_thread,
-                  return_pc /* return pc */);
-  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);
-
-  // Load the PC of the exception handler into LR.
-  __ mtlr(R3_RET);
-
-  // Load exception into R3_ARG1 and clear pending exception in thread.
-  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
-  __ li(R4_ARG2, 0);
-  __ std(R4_ARG2, thread_(pending_exception));
-
-  // Load the original return pc into R4_ARG2.
-  __ mr(R4_ARG2/*issuing_pc*/, return_pc);
-
-  // Return to exception handler.
-  __ blr();
-
-  //=============================================================================
-  // Counter overflow.
-
-  if (inc_counter) {
-    // Handle invocation counter overflow.
-    __ bind(invocation_counter_overflow);
-
-    generate_counter_overflow(continue_after_compile);
-  }
-
-  return entry;
-}
-
-// Generic interpreted method entry to (asm) interpreter.
-//
-address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
-  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-  address entry = __ pc();
-  // Generate the code to allocate the interpreter stack frame.
-  Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
-           Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.
-
-  generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
-
-  // --------------------------------------------------------------------------
-  // Zero out non-parameter locals.
-  // Note: *Always* zero out non-parameter locals, as Sparc does. It's not
-  // worth querying the flag; just do it.
-  Register Rslot_addr = R6_ARG4,
-           Rnum       = R7_ARG5;
-  Label Lno_locals, Lzero_loop;
-
-  // Set up the zeroing loop.
-  __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
-  __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
-  __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
-  __ beq(CCR0, Lno_locals);
-  __ li(R0, 0);
-  __ mtctr(Rnum);
-
-  // The zero locals loop.
-  __ bind(Lzero_loop);
-  __ std(R0, 0, Rslot_addr);
-  __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
-  __ bdnz(Lzero_loop);
-
-  __ bind(Lno_locals);
-
-  // --------------------------------------------------------------------------
-  // Counter increment and overflow check.
-  Label invocation_counter_overflow,
-        profile_method,
-        profile_method_continue;
-  if (inc_counter || ProfileInterpreter) {
-
-    Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
-    if (synchronized) {
-      // Since at this point in the method invocation the exception handler
-      // would try to exit the monitor of a synchronized method which hasn't
-      // been entered yet, we set the thread-local variable
-      // _do_not_unlock_if_synchronized to true. If any exception is thrown by
-      // the runtime, the exception handling code, i.e. unlock_if_synchronized_method,
-      // will check this thread-local flag.
-      // The flag forces an unwind in the topmost interpreter frame without
-      // performing an unlock while doing so.
-      __ li(R0, 1);
-      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
-    }
-
-    // Argument and return type profiling.
-    __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);
-
-    // Increment invocation counter and check for overflow.
-    if (inc_counter) {
-      generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
-    }
-
-    __ bind(profile_method_continue);
-
-    // Reset the _do_not_unlock_if_synchronized flag.
-    if (synchronized) {
-      __ li(R0, 0);
-      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
-    }
-  }
-
-  // --------------------------------------------------------------------------
-  // Locking of synchronized methods. Must happen AFTER the invocation counter
-  // check and the stack overflow check, so the method is not locked if a counter overflows.
-  if (synchronized) {
-    lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
-  }
-#ifdef ASSERT
-  else {
-    Label Lok;
-    __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
-    __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
-    __ asm_assert_eq("method needs synchronization", 0x8521);
-    __ bind(Lok);
-  }
-#endif // ASSERT
-
-  __ verify_thread();
-
-  // --------------------------------------------------------------------------
-  // JVMTI support
-  __ notify_method_entry();
-
-  // --------------------------------------------------------------------------
-  // Start executing instructions.
-  __ dispatch_next(vtos);
-
-  // --------------------------------------------------------------------------
-  // Out of line counter overflow and MDO creation code.
-  if (ProfileInterpreter) {
-    // We have decided to profile this method in the interpreter.
-    __ bind(profile_method);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-    __ set_method_data_pointer_for_bcp();
-    __ b(profile_method_continue);
-  }
-
-  if (inc_counter) {
-    // Handle invocation counter overflow.
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(profile_method_continue);
-  }
-  return entry;
-}
-
-// CRC32 Intrinsics.
-//
-// Contract on scratch and work registers.
-// =======================================
-//
-// On ppc, the register set {R2..R12} is available in the interpreter as scratch/work registers.
-// You should, however, keep in mind that {R3_ARG1..R10_ARG8} is the C-ABI argument register set.
-// You can't rely on these registers across calls.
-//
-// The generators for CRC32_update and for CRC32_updateBytes use the
-// scratch/work register set internally, passing the work registers
-// as arguments to the MacroAssembler emitters as required.
-//
-// R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments.
-// Their contents are not constant but may change according to the requirements
-// of the emitted code.
-//
-// All other registers from the scratch/work register set are used "internally"
-// and contain garbage (i.e. unpredictable values) once blr() is reached.
-// Basically, only R3_RET contains a defined value which is the function result.
-//
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.update(int crc, int b)
- */
-address InterpreterGenerator::generate_CRC32_update_entry() {
-  if (UseCRC32Intrinsics) {
-    address start = __ pc();  // Remember stub start address (it is the return value).
-    Label slow_path;
-
-    // Safepoint check
-    const Register sync_state = R11_scratch1;
-    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
-    __ lwz(sync_state, sync_state_offs, sync_state);
-    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
-    __ bne(CCR0, slow_path);
-
-    // We don't generate a local frame and don't align the stack, because
-    // we don't even call stub code (we generate the code inline)
-    // and there is no safepoint on this path.
-
-    // Load java parameters.
-    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
-    const Register argP    = R15_esp;
-    const Register crc     = R3_ARG1;  // crc value
-    const Register data    = R4_ARG2;  // address of java byte value (kernel_crc32 needs address)
-    const Register dataLen = R5_ARG3;  // source data len (1 byte). Not used because calling the single-byte emitter.
-    const Register table   = R6_ARG4;  // address of crc32 table
-    const Register tmp     = dataLen;  // Reuse unused len register to show we don't actually need a separate tmp here.
-
-    BLOCK_COMMENT("CRC32_update {");
-
-    // Arguments are reversed on java expression stack
-#ifdef VM_LITTLE_ENDIAN
-    __ addi(data, argP, 0+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
-                                       // Being passed as an int, the single byte is at offset +0.
-#else
-    __ addi(data, argP, 3+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
-                                       // Being passed from java as an int, the single byte is at offset +3.
-#endif
-    __ lwz(crc,  2*wordSize, argP);    // Current crc state, zero extend to 64 bit to have a clean register.
-
-    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
-    __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);
-
-    // Restore caller sp for c2i case and return.
-    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-    __ blr();
-
-    // Generate a vanilla native entry as the slow path.
-    BLOCK_COMMENT("} CRC32_update");
-    BIND(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
-    return start;
-  }
-
-  return NULL;
-}
-
-// CRC32 Intrinsics.
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
- *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
- */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32Intrinsics) {
-    address start = __ pc();  // Remember stub start address (it is the return value).
-    Label slow_path;
-
-    // Safepoint check
-    const Register sync_state = R11_scratch1;
-    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
-    __ lwz(sync_state, sync_state_offs, sync_state);
-    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
-    __ bne(CCR0, slow_path);
-
-    // We don't generate a local frame and don't align the stack, because
-    // we don't even call stub code (we generate the code inline)
-    // and there is no safepoint on this path.
-
-    // Load parameters.
-    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
-    const Register argP    = R15_esp;
-    const Register crc     = R3_ARG1;  // crc value
-    const Register data    = R4_ARG2;  // address of java byte array
-    const Register dataLen = R5_ARG3;  // source data len
-    const Register table   = R6_ARG4;  // address of crc32 table
-
-    const Register t0      = R9;       // scratch registers for crc calculation
-    const Register t1      = R10;
-    const Register t2      = R11;
-    const Register t3      = R12;
-
-    const Register tc0     = R2;       // registers to hold pre-calculated column addresses
-    const Register tc1     = R7;
-    const Register tc2     = R8;
-    const Register tc3     = table;    // table address is reconstructed at the end of kernel_crc32_* emitters
-
-    const Register tmp     = t0;       // Only used very locally to calculate byte buffer address.
-
-    // Arguments are reversed on java expression stack.
-    // Calculate address of start element.
-    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer direct".
-      BLOCK_COMMENT("CRC32_updateByteBuffer {");
-      // crc     @ (SP + 5W) (32bit)
-      // buf     @ (SP + 3W) (64bit ptr to long array)
-      // off     @ (SP + 2W) (32bit)
-      // dataLen @ (SP + 1W) (32bit)
-      // data = buf + off
-      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
-      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
-      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
-      __ lwz( crc,     5*wordSize, argP);  // current crc state
-      __ add( data, data, tmp);            // Add byte buffer offset.
-    } else {                                                         // Used for "updateBytes update".
-      BLOCK_COMMENT("CRC32_updateBytes {");
-      // crc     @ (SP + 4W) (32bit)
-      // buf     @ (SP + 3W) (64bit ptr to byte array)
-      // off     @ (SP + 2W) (32bit)
-      // dataLen @ (SP + 1W) (32bit)
-      // data = buf + off + base_offset
-      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
-      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
-      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
-      __ add( data, data, tmp);            // add byte buffer offset
-      __ lwz( crc,     4*wordSize, argP);  // current crc state
-      __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
-    }
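-    // Illustrative example (hypothetical values): with an array base offset of
-    // 16 bytes, buf = 0x1000 and off = 5 yield data = 0x1000 + 5 + 16 = 0x1015
-    // for the updateBytes case above.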
-
-    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
-
-    // Performance measurements show the 1word and 2word variants to be almost equivalent,
-    // with very light advantages for the 1word variant. We chose the 1word variant for
-    // code compactness.
-    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);
-
-    // Restore caller sp for c2i case and return.
-    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-    __ blr();
-
-    // Generate a vanilla native entry as the slow path.
-    BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
-    BIND(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
-    return start;
-  }
-
-  return NULL;
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
 }
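 
 // The index computed above selects among the interpreter's native result
 // handlers (note the assert against number_of_result_handlers); T_OBJECT and
 // T_ARRAY share one entry since both results are oops.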
 
 // These should never be compiled since the interpreter will prefer
 // the compiled version to the intrinsic version.
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  return !math_entry_available(method_kind(m));
+  return !TemplateInterpreter::math_entry_available(method_kind(m));
 }
 
 // How much stack a method activation needs in stack slots.
@@ -1505,411 +154,14 @@
   }
 }
 
-// =============================================================================
-// Exceptions
-
-void TemplateInterpreterGenerator::generate_throw_exception() {
-  Register Rexception    = R17_tos,
-           Rcontinuation = R3_RET;
-
-  // --------------------------------------------------------------------------
-  // Entry point if a method returns with a pending exception (rethrow).
-  Interpreter::_rethrow_exception_entry = __ pc();
-  {
-    __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
-    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
-    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
-
-    // Compiled code destroys templateTableBase, reload.
-    __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
-  }
-
-  // Entry point if an interpreted method throws an exception (throw).
-  Interpreter::_throw_exception_entry = __ pc();
-  {
-    __ mr(Rexception, R3_RET);
-
-    __ verify_thread();
-    __ verify_oop(Rexception);
-
-    // Expression stack must be empty before entering the VM in case of an exception.
-    __ empty_expression_stack();
-    // Find exception handler address and preserve exception oop.
-    // Call C routine to find handler and jump to it.
-    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
-    __ mtctr(Rcontinuation);
-    // Push exception for exception handler bytecodes.
-    __ push_ptr(Rexception);
-
-    // Jump to the exception handler (may be the remove-activation entry!).
-    __ bctr();
-  }
-
-  // If the exception is not handled in the current frame the frame is
-  // removed and the exception is rethrown (i.e. exception
-  // continuation is _rethrow_exception).
-  //
-  // Note: At this point the bci is still the bci of the instruction
-  // which caused the exception, and the expression stack is
-  // empty. Thus, for any VM calls at this point, GC will find a legal
-  // oop map (with empty expression stack).
-
-  // In current activation
-  // tos: exception
-  // bcp: exception bcp
-
-  // --------------------------------------------------------------------------
-  // JVMTI PopFrame support
-
-  Interpreter::_remove_activation_preserving_args_entry = __ pc();
-  {
-    // Set the popframe_processing bit in popframe_condition indicating that we are
-    // currently handling popframe, so that call_VMs that may happen later do not
-    // trigger new popframe handling cycles.
-    __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
-    __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
-    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
-
-    // Empty the expression stack, as in normal exception handling.
-    __ empty_expression_stack();
-    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
-
-    // Check to see whether we are returning to a deoptimized frame.
-    // (The PopFrame call ensures that the caller of the popped frame is
-    // either interpreted or compiled and deoptimizes it if compiled.)
-    // Note that we don't compare the return PC against the
-    // deoptimization blob's unpack entry because of the presence of
-    // adapter frames in C2.
-    Label Lcaller_not_deoptimized;
-    Register return_pc = R3_ARG1;
-    __ ld(return_pc, 0, R1_SP);
-    __ ld(return_pc, _abi(lr), return_pc);
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
-    __ cmpdi(CCR0, R3_RET, 0);
-    __ bne(CCR0, Lcaller_not_deoptimized);
+// Support abs and sqrt like the compiler does.
+// For others we can use a normal (native) entry.
 
-    // The deoptimized case.
-    // In this case, we can't call dispatch_next() after the frame is
-    // popped, but instead must save the incoming arguments and restore
-    // them after deoptimization has occurred.
-    __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
-    __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
-    __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
-    __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
-    __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
-    // Save these arguments.
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);
-
-    // Inform deoptimization that it is responsible for restoring these arguments.
-    __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
-    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
-
-    // Return from the current method into the deoptimization blob. We will eventually
-    // end up in the deopt interpreter entry; deoptimization has prepared everything so
-    // that we will re-execute the call that called us.
-    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
-    __ mtlr(return_pc);
-    __ blr();
-
-    // The non-deoptimized case.
-    __ bind(Lcaller_not_deoptimized);
-
-    // Clear the popframe condition flag.
-    __ li(R0, 0);
-    __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
-
-    // Get out of the current method and re-execute the call that called us.
-    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
-    __ restore_interpreter_state(R11_scratch1);
-    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
-    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
-    if (ProfileInterpreter) {
-      __ set_method_data_pointer_for_bcp();
-      __ ld(R11_scratch1, 0, R1_SP);
-      __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
-    }
-#if INCLUDE_JVMTI
-    Label L_done;
-
-    __ lbz(R11_scratch1, 0, R14_bcp);
-    __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
-    __ bne(CCR0, L_done);
+bool TemplateInterpreter::math_entry_available(AbstractInterpreter::MethodKind kind) {
+  if (!InlineIntrinsics) return false;
 
-    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
-    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
-    __ ld(R4_ARG2, 0, R18_locals);
-    __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
-    __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
-    __ cmpdi(CCR0, R4_ARG2, 0);
-    __ beq(CCR0, L_done);
-    __ std(R4_ARG2, wordSize, R15_esp);
-    __ bind(L_done);
-#endif // INCLUDE_JVMTI
-    __ dispatch_next(vtos);
-  }
-  // end of JVMTI PopFrame support
-
-  // --------------------------------------------------------------------------
-  // Remove activation exception entry.
-  // This is jumped to if an interpreted method can't handle an exception itself
-  // (we come from the throw/rethrow exception entry above). We're going to call
-  // into the VM to find the exception handler in the caller, pop the current
-  // frame and return the handler we calculated.
-  Interpreter::_remove_activation_entry = __ pc();
-  {
-    __ pop_ptr(Rexception);
-    __ verify_thread();
-    __ verify_oop(Rexception);
-    __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);
-
-    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true);
-    __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);
-
-    __ get_vm_result(Rexception);
-
-    // We are done with this activation frame; find out where to go next.
-    // The continuation point will be an exception handler, which expects
-    // the following registers set up:
-    //
-    // RET:  exception oop
-    // ARG2: Issuing PC (see generate_exception_blob()), only used if the caller is compiled.
-
-    Register return_pc = R31; // Needs to survive the runtime call.
-    __ ld(return_pc, 0, R1_SP);
-    __ ld(return_pc, _abi(lr), return_pc);
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);
-
-    // Remove the current activation.
-    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
-
-    __ mr(R4_ARG2, return_pc);
-    __ mtlr(R3_RET);
-    __ mr(R3_RET, Rexception);
-    __ blr();
-  }
+  return ((kind == Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
+          (kind == Interpreter::java_lang_math_abs));
 }
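A hypothetical caller sketch (illustrative only, not part of this changeset): entry selection simply falls back to the normal native entry whenever math_entry_available() reports that no specialized entry applies. The helper name and signature below are assumptions.

// Hypothetical selection helper, not HotSpot code: pick the specialized
// math entry only when available, else fall back to the native entry.
static address select_math_entry(AbstractInterpreter::MethodKind kind,
                                 address math_entry, address native_entry) {
  return TemplateInterpreter::math_entry_available(kind) ? math_entry
                                                         : native_entry;
}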
 
-// JVMTI ForceEarlyReturn support.
-// Returns "in the middle" of a method with a "fake" return value.
-address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
 
-  Register Rscratch1 = R11_scratch1,
-           Rscratch2 = R12_scratch2;
-
-  address entry = __ pc();
-  __ empty_expression_stack();
-
-  __ load_earlyret_value(state, Rscratch1);
-
-  __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
-  // Clear the earlyret state.
-  __ li(R0, 0);
-  __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);
-
-  __ remove_activation(state, false, false);
-  // Copied from TemplateTable::_return.
-  // Restoration of lr done by remove_activation.
-  switch (state) {
-    case ltos:
-    case btos:
-    case ctos:
-    case stos:
-    case atos:
-    case itos: __ mr(R3_RET, R17_tos); break;
-    case ftos:
-    case dtos: __ fmr(F1_RET, F15_ftos); break;
-    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
-               // to become visible before the reference to the object gets stored anywhere.
-               __ membar(Assembler::StoreStore); break;
-    default  : ShouldNotReachHere();
-  }
-  __ blr();
-
-  return entry;
-} // end of ForceEarlyReturn support
-
-//-----------------------------------------------------------------------------
-// Helper for vtos entry point generation
-
-void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
-                                                         address& bep,
-                                                         address& cep,
-                                                         address& sep,
-                                                         address& aep,
-                                                         address& iep,
-                                                         address& lep,
-                                                         address& fep,
-                                                         address& dep,
-                                                         address& vep) {
-  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
-  Label L;
-
-  aep = __ pc();  __ push_ptr();  __ b(L);
-  fep = __ pc();  __ push_f();    __ b(L);
-  dep = __ pc();  __ push_d();    __ b(L);
-  lep = __ pc();  __ push_l();    __ b(L);
-  __ align(32, 12, 24); // align L
-  bep = cep = sep =
-  iep = __ pc();  __ push_i();
-  vep = __ pc();
-  __ bind(L);
-  generate_and_dispatch(t);
-}
-
-//-----------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
-  : TemplateInterpreterGenerator(code) {
-  generate_all(); // Down here so it can be "virtual".
-}
-
-//-----------------------------------------------------------------------------
-
-// Non-product code
-#ifndef PRODUCT
-address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
-  //__ flush_bundle();
-  address entry = __ pc();
-
-  const char *bname = NULL;
-  uint tsize = 0;
-  switch(state) {
-  case ftos:
-    bname = "trace_code_ftos {";
-    tsize = 2;
-    break;
-  case btos:
-    bname = "trace_code_btos {";
-    tsize = 2;
-    break;
-  case ctos:
-    bname = "trace_code_ctos {";
-    tsize = 2;
-    break;
-  case stos:
-    bname = "trace_code_stos {";
-    tsize = 2;
-    break;
-  case itos:
-    bname = "trace_code_itos {";
-    tsize = 2;
-    break;
-  case ltos:
-    bname = "trace_code_ltos {";
-    tsize = 3;
-    break;
-  case atos:
-    bname = "trace_code_atos {";
-    tsize = 2;
-    break;
-  case vtos:
-    // Note: In case of vtos, the topmost stack value could be an int or double.
-    // In case of a double (2 slots) we won't see the 2nd stack value.
-    // Maybe we should simply print the topmost 3 stack slots to cope with the problem.
-    bname = "trace_code_vtos {";
-    tsize = 2;
-
-    break;
-  case dtos:
-    bname = "trace_code_dtos {";
-    tsize = 3;
-    break;
-  default:
-    ShouldNotReachHere();
-  }
-  BLOCK_COMMENT(bname);
-
-  // Support short-cut for TraceBytecodesAt.
-  // Don't call into the VM if we don't want to trace, to speed things up.
-  Label Lskip_vm_call;
-  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
-    int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
-    int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
-    __ ld(R11_scratch1, offs1, R11_scratch1);
-    __ lwa(R12_scratch2, offs2, R12_scratch2);
-    __ cmpd(CCR0, R12_scratch2, R11_scratch1);
-    __ blt(CCR0, Lskip_vm_call);
-  }
-
-  __ push(state);
-  // Load 2 topmost expression stack values.
-  __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
-  __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
-  __ mflr(R31);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
-  __ mtlr(R31);
-  __ pop(state);
-
-  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
-    __ bind(Lskip_vm_call);
-  }
-  __ blr();
-  BLOCK_COMMENT("} trace_code");
-  return entry;
-}
-
-void TemplateInterpreterGenerator::count_bytecode() {
-  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
-  __ lwz(R12_scratch2, offs, R11_scratch1);
-  __ addi(R12_scratch2, R12_scratch2, 1);
-  __ stw(R12_scratch2, offs, R11_scratch1);
-}
-
-void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
-  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
-  __ lwz(R12_scratch2, offs, R11_scratch1);
-  __ addi(R12_scratch2, R12_scratch2, 1);
-  __ stw(R12_scratch2, offs, R11_scratch1);
-}
-
-void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
-  const Register addr = R11_scratch1,
-                 tmp  = R12_scratch2;
-  // Get index, shift out old bytecode, bring in new bytecode, and store it.
-  // _index = (_index >> log2_number_of_codes) |
-  //          (bytecode << log2_number_of_codes);
-  int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
-  __ lwz(tmp, offs1, addr);
-  __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
-  __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
-  __ stw(tmp, offs1, addr);
-
-  // Bump bucket contents.
-  // _counters[_index] ++;
-  int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
-  __ sldi(tmp, tmp, LogBytesPerInt);
-  __ add(addr, tmp, addr);
-  __ lwz(tmp, offs2, addr);
-  __ addi(tmp, tmp, 1);
-  __ stw(tmp, offs2, addr);
-}
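The index/counter scheme above is compact enough to model standalone. The sketch below (with hypothetical constants, not the VM's real BytecodePairHistogram) shows the rolling two-bytecode index the assembly computes before bumping the bucket.

#include <cstdint>

// Hypothetical stand-ins for BytecodePairHistogram's internals.
const int log2_number_of_codes = 8;                    // 256 bytecode values
static uint32_t counters[1 << (2 * log2_number_of_codes)];
static uint32_t index_ = 0;

// Shift the previous bytecode down, OR the new one in above it, bump bucket.
void record_pair(uint8_t bytecode) {
  index_ = (index_ >> log2_number_of_codes) |
           ((uint32_t)bytecode << log2_number_of_codes);
  counters[index_]++;
}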
-
-void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
-  // Call a little run-time stub to avoid blow-up for each bytecode.
-  // The run-time stub saves the right registers, depending on
-  // the tosca in-state for the given template.
-
-  assert(Interpreter::trace_code(t->tos_in()) != NULL,
-         "entry must have been generated");
-
-  // Note: we destroy LR here.
-  __ bl(Interpreter::trace_code(t->tos_in()));
-}
-
-void TemplateInterpreterGenerator::stop_interpreter_at() {
-  Label L;
-  int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
-  int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
-  __ ld(R11_scratch1, offs1, R11_scratch1);
-  __ lwa(R12_scratch2, offs2, R12_scratch2);
-  __ cmpd(CCR0, R12_scratch2, R11_scratch1);
-  __ bne(CCR0, L);
-  __ illtrap();
-  __ bind(L);
-}
-
-#endif // !PRODUCT
-#endif // !CC_INTERP
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, 2015 SAP AG. All rights reserved.
+ * Copyright (c) 2013, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,14 +28,17 @@
 
  protected:
 
-  // Size of interpreter code.  Increase if too small.  Interpreter will
+  // Size of interpreter code. Increase if too small.  Interpreter will
   // fail with a guarantee ("not enough space for interpreter generation")
   // if too small.
   // Run with +PrintInterpreter to get the VM to print out the size.
   // Max size with JVMTI
-
   const static int InterpreterCodeSize = 230*K;
 
+ public:
+  // Support abs and sqrt like the compiler does.
+  // For others we can use a normal (native) entry.
+  static bool math_entry_available(AbstractInterpreter::MethodKind kind);
 #endif // CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
 
 
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -62,30 +61,6 @@
 
 //----------------------------------------------------------------------------------------------------
 
-
-
-
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
-  return i;
-}
-
-
 #ifndef _LP64
 address AbstractInterpreterGenerator::generate_slow_signature_handler() {
   address entry = __ pc();
@@ -254,28 +229,3 @@
   return entry;
 
 }
-
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  // No special entry points that preclude compilation
-  return true;
-}
-
-void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
-
-  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
-  // the days we had adapter frames. When we deoptimize a situation where a
-  // compiled caller calls a compiled caller will have registers it expects
-  // to survive the call to the callee. If we deoptimize the callee the only
-  // way we can restore these registers is to have the oldest interpreter
-  // frame that we create restore these values. That is what this routine
-  // will accomplish.
-
-  // At the moment we have modified c2 to not have any callee save registers
-  // so this problem does not exist and this routine is just a place holder.
-
-  assert(f->is_interpreted_frame(), "must be interpreted");
-}
-
-
-//----------------------------------------------------------------------------------------------------
-// Exceptions
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -360,10 +360,10 @@
 #ifdef ASSERT
 // a hook for debugging
 static Thread* reinitialize_thread() {
-  return ThreadLocalStorage::thread();
+  return Thread::current();
 }
 #else
-#define reinitialize_thread ThreadLocalStorage::thread
+#define reinitialize_thread Thread::current
 #endif
 
 #ifdef ASSERT
@@ -393,7 +393,7 @@
 }
 
 static Thread* verify_thread_subroutine(Thread* gthread_value) {
-  Thread* correct_value = ThreadLocalStorage::thread();
+  Thread* correct_value = Thread::current();
   guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
   return correct_value;
 }
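These ThreadLocalStorage::thread() to Thread::current() rewrites run through the whole changeset: the current-thread lookup now rides on compiler-level TLS (hence the new TLS compiler flags in the makefiles). A minimal sketch of the pattern, using a hypothetical Thread type rather than HotSpot's real one:

// Sketch only: compiler-level TLS gives each OS thread its own slot, so
// Thread::current() is a plain load instead of a library TLS lookup.
class Thread {
  static __thread Thread* _thr_current;   // one slot per OS thread (GCC/Clang)
 public:
  static Thread* current()         { return _thr_current; }  // assumes attached
  static Thread* current_or_null() { return _thr_current; }  // may be NULL early
  static void attach(Thread* t)    { _thr_current = t; }
};
__thread Thread* Thread::_thr_current = nullptr;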
--- a/hotspot/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -52,7 +52,7 @@
 
 inline void fill_subword(void* start, void* end, int value) {
   STATIC_ASSERT(BytesPerWord == 8);
-  assert(pointer_delta(end, start, 1) < BytesPerWord, "precondition");
+  assert(pointer_delta(end, start, 1) < (size_t)BytesPerWord, "precondition");
   // Dispatch on (end - start).
   void* pc;
   __asm__ volatile(
@@ -73,10 +73,10 @@
     " stb %[value], [%[end]-3]\n\t"
     " stb %[value], [%[end]-2]\n\t"
     " stb %[value], [%[end]-1]\n\t" // end[-1] = value
-    : /* no outputs */
-      [pc] "&=r" (pc)               // temp
-    : [offset] "&+r" (start),
-      [end] "r" (end),
+    : /* only temporaries/overwritten outputs */
+      [pc] "=&r" (pc),               // temp
+      [offset] "+&r" (start)
+    : [end] "r" (end),
       [value] "r" (value)
     : "memory");
 }
@@ -84,7 +84,7 @@
 void memset_with_concurrent_readers(void* to, int value, size_t size) {
   Prefetch::write(to, 0);
   void* end = static_cast<char*>(to) + size;
-  if (size >= BytesPerWord) {
+  if (size >= (size_t)BytesPerWord) {
     // Fill any partial word prefix.
     uintx* aligned_to = static_cast<uintx*>(align_ptr_up(to, BytesPerWord));
     fill_subword(to, aligned_to, value);
@@ -144,10 +144,10 @@
       " stx %[xvalue], [%[aend]-24]\n\t"
       " stx %[xvalue], [%[aend]-16]\n\t"
       " stx %[xvalue], [%[aend]-8]\n\t"  // aligned_end[-1] = xvalue
-      : /* no outputs */
-        [temp] "&=r" (temp)
-      : [ato] "&+r" (aligned_to),
-        [aend] "r" (aligned_end),
+      : /* only temporaries/overwritten outputs */
+        [temp] "=&r" (temp),
+        [ato] "+&r" (aligned_to)
+      : [aend] "r" (aligned_end),
         [xvalue] "r" (xvalue)
       : "cc", "memory");
     to = aligned_end;           // setup for suffix
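The constraint rewrites in this hunk fix two classic extended-asm mistakes: the modifier must lead the constraint string ('=' or '+' first, early-clobber '&' after, so "=&r", never "&=r"), and an operand the template writes must sit in the output list, not the input list. A tiny x86-flavored illustration of the correct form (an assumed example, unrelated to the SPARC fill loops above):

#include <cstdint>

uint64_t add_via_asm(uint64_t a, uint64_t b) {
  uint64_t tmp;
  __asm__ volatile(
    "mov %[a], %[tmp]\n\t"          // tmp is written before b is consumed,
    "add %[b], %[tmp]\n\t"          // so it needs the early-clobber '&'
    : [tmp] "=&r" (tmp)             // output, early-clobber: "=&r"
    : [a] "r" (a), [b] "r" (b)
    : "cc");
  return tmp;
}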
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -36,7 +36,7 @@
   address _flush_reg_windows();   // in .s file.
   // Flush registers to stack. In case of error we will need to stack walk.
   address bootstrap_flush_windows(void) {
-    Thread* thread = ThreadLocalStorage::get_thread_slow();
+    Thread* thread = Thread::current_or_null();
     // Very early in process there is no thread.
     if (thread != NULL) {
       guarantee(thread->is_Java_thread(), "Not a Java thread.");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,1832 @@
+/*
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateTable.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+#ifndef CC_INTERP
+#ifndef FAST_DISPATCH
+#define FAST_DISPATCH 1
+#endif
+#undef FAST_DISPATCH
+
+
+// Generation of Interpreter
+//
+// The InterpreterGenerator generates the interpreter into Interpreter::_code.
+
+
+#define __ _masm->
+
+
+//----------------------------------------------------------------------------------------------------
+
+
+void InterpreterGenerator::save_native_result(void) {
+  // result potentially in O0/O1: save it across calls
+  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
+
+  // result potentially in F0/F1: save it across calls
+  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
+
+  // save and restore any potential method result value around the unlocking operation
+  __ stf(FloatRegisterImpl::D, F0, d_tmp);
+#ifdef _LP64
+  __ stx(O0, l_tmp);
+#else
+  __ std(O0, l_tmp);
+#endif
+}
+
+void InterpreterGenerator::restore_native_result(void) {
+  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
+  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
+
+  // Restore any method result value
+  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
+#ifdef _LP64
+  __ ldx(l_tmp, O0);
+#else
+  __ ldd(l_tmp, O0);
+#endif
+}
+
+address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
+  assert(!pass_oop || message == NULL, "either oop or message but not both");
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an exception happened
+  __ empty_expression_stack();
+  // load exception object
+  __ set((intptr_t)name, G3_scratch);
+  if (pass_oop) {
+    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
+  } else {
+    __ set((intptr_t)message, G4_scratch);
+    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
+  }
+  // throw exception
+  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
+  AddressLiteral thrower(Interpreter::throw_exception_entry());
+  __ jump_to(thrower, G3_scratch);
+  __ delayed()->nop();
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an exception
+  // happened
+  __ empty_expression_stack();
+  // load exception object
+  __ call_VM(Oexception,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::throw_ClassCastException),
+             Otos_i);
+  __ should_not_reach_here();
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an exception happened
+  __ empty_expression_stack();
+  // convention: expect aberrant index in register G3_scratch, then shuffle the
+  // index to G4_scratch for the VM call
+  __ mov(G3_scratch, G4_scratch);
+  __ set((intptr_t)name, G3_scratch);
+  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
+  __ should_not_reach_here();
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an exception happened
+  __ empty_expression_stack();
+  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
+  __ should_not_reach_here();
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+  address entry = __ pc();
+
+  if (state == atos) {
+    __ profile_return_type(O0, G3_scratch, G1_scratch);
+  }
+
+#if !defined(_LP64) && defined(COMPILER2)
+  // All return values are where we want them, except for Longs.  C2 returns
+  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
+  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
+  // build even if we are returning from interpreted code, we just do a little
+  // shuffling.
+  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
+  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
+  // first which would move G1 -> O0/O1 and destroy the exception we were throwing.
+
+  if (state == ltos) {
+    __ srl (G1,  0, O1);
+    __ srlx(G1, 32, O0);
+  }
+#endif // !_LP64 && COMPILER2
+
+  // The callee returns with the stack possibly adjusted by the adapter transition.
+  // We remove that possible adjustment here.
+  // All interpreter local registers are untouched. Any result is passed back
+  // in the O0/O1 or float registers. Before continuing, the arguments must be
+  // popped from the java expression stack; i.e., Lesp must be adjusted.
+
+  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
+
+  const Register cache = G3_scratch;
+  const Register index  = G1_scratch;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
+  const Register parameter_size = flags;
+  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
+  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
+  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments
+  __ dispatch_next(state, step);
+
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
+  address entry = __ pc();
+  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
+#if INCLUDE_JVMCI
+  // Check if we need to take lock at entry of synchronized method.
+  if (UseJVMCICompiler) {
+    Label L;
+    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
+    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
+    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
+    // Clear flag.
+    __ stbool(G0, pending_monitor_enter_addr);
+    // Take lock.
+    lock_method();
+    __ bind(L);
+  }
+#endif
+  { Label L;
+    Address exception_addr(G2_thread, Thread::pending_exception_offset());
+    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
+    __ br_null_short(Gtemp, Assembler::pt, L);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+  __ dispatch_next(state, step);
+  return entry;
+}
+
+// A result handler converts/unboxes a native call result into
+// a java interpreter/compiler result. The current frame is an
+// interpreter frame. The activation frame unwind code must be
+// consistent with that of TemplateTable::_return(...). In the
+// case of native methods, the caller's SP was not modified.
+address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
+  address entry = __ pc();
+  Register Itos_i  = Otos_i ->after_save();
+  Register Itos_l  = Otos_l ->after_save();
+  Register Itos_l1 = Otos_l1->after_save();
+  Register Itos_l2 = Otos_l2->after_save();
+  switch (type) {
+    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
+    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
+    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
+    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
+    case T_LONG   :
+#ifndef _LP64
+                    __ mov(O1, Itos_l2);  // move other half of long
+#endif              // ifdef or no ifdef, fall through to the T_INT case
+    case T_INT    : __ mov(O0, Itos_i);                         break;
+    case T_VOID   : /* nothing to do */                         break;
+    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
+    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
+    case T_OBJECT :
+      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
+      __ verify_oop(Itos_i);
+      break;
+    default       : ShouldNotReachHere();
+  }
+  __ ret();                           // return from interpreter activation
+  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
+  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
+  return entry;
+}
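The shift pairs in the switch above are sign/zero extension in disguise. A standalone model of the conversions (illustrative only, not VM code):

#include <cstdint>

int32_t as_boolean(int32_t x) { return x != 0; }       // subcc/addc: !0 => 1
int32_t as_char(int32_t x)    { return (uint16_t)x; }  // sll 16 / srl 16
int32_t as_byte(int32_t x)    { return (int8_t)x;  }   // sll 24 / sra 24
int32_t as_short(int32_t x)   { return (int16_t)x; }   // sll 16 / sra 16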
+
+address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
+  address entry = __ pc();
+  __ push(state);
+  __ call_VM(noreg, runtime_entry);
+  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
+  address entry = __ pc();
+  __ dispatch_next(state);
+  return entry;
+}
+
+//
+// Helpers for commoning out cases in the various type of method entries.
+//
+
+// increment invocation count & check for overflow
+//
+// Note: checking for negative value instead of overflow
+//       so we have a 'sticky' overflow test
+//
+// Lmethod: method
+// ??: invocation counter
+//
+void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
+  // Note: In tiered we increment either counters in MethodCounters* or in
+  // MDO depending if we're profiling or not.
+  const Register G3_method_counters = G3_scratch;
+  Label done;
+
+  if (TieredCompilation) {
+    const int increment = InvocationCounter::count_increment;
+    Label no_mdo;
+    if (ProfileInterpreter) {
+      // If no method data exists, go to profile_continue.
+      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
+      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
+      // Increment counter
+      Address mdo_invocation_counter(G4_scratch,
+                                     in_bytes(MethodData::invocation_counter_offset()) +
+                                     in_bytes(InvocationCounter::counter_offset()));
+      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
+                                 G3_scratch, Lscratch,
+                                 Assembler::zero, overflow);
+      __ ba_short(done);
+    }
+
+    // Increment counter in MethodCounters*
+    __ bind(no_mdo);
+    Address invocation_counter(G3_method_counters,
+            in_bytes(MethodCounters::invocation_counter_offset()) +
+            in_bytes(InvocationCounter::counter_offset()));
+    __ get_method_counters(Lmethod, G3_method_counters, done);
+    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask,
+                               G4_scratch, Lscratch,
+                               Assembler::zero, overflow);
+    __ bind(done);
+  } else { // not TieredCompilation
+    // Update standard invocation counters
+    __ get_method_counters(Lmethod, G3_method_counters, done);
+    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
+    if (ProfileInterpreter) {
+      Address interpreter_invocation_counter(G3_method_counters,
+            in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
+      __ ld(interpreter_invocation_counter, G4_scratch);
+      __ inc(G4_scratch);
+      __ st(G4_scratch, interpreter_invocation_counter);
+    }
+
+    if (ProfileInterpreter && profile_method != NULL) {
+      // Test to see if we should create a method data oop
+      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
+      __ ld(profile_limit, G1_scratch);
+      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
+
+      // if no method data exists, go to profile_method
+      __ test_method_data_pointer(*profile_method);
+    }
+
+    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
+    __ ld(invocation_limit, G3_scratch);
+    __ cmp(O0, G3_scratch);
+    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
+    __ delayed()->nop();
+    __ bind(done);
+  }
+
+}
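The 'sticky' test mentioned above is worth spelling out: a processor overflow flag is only set on the one increment that crosses the boundary, whereas re-comparing the counter against a limit on every entry keeps firing until somebody resets the counter. A plain C++ model (illustrative, not the VM's InvocationCounter):

#include <cstdint>

static uint32_t invocation_counter = 0;

// Returns true once the limit is reached -- and keeps returning true
// ('sticky'), because the comparison is redone on every invocation.
bool count_and_check_overflow(uint32_t invocation_limit) {
  ++invocation_counter;
  return invocation_counter >= invocation_limit;
}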
+
+// Allocate monitor and lock method (asm interpreter)
+// ebx - Method*
+//
+void TemplateInterpreterGenerator::lock_method() {
+  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.
+
+#ifdef ASSERT
+ { Label ok;
+   __ btst(JVM_ACC_SYNCHRONIZED, O0);
+   __ br( Assembler::notZero, false, Assembler::pt, ok);
+   __ delayed()->nop();
+   __ stop("method doesn't need synchronization");
+   __ bind(ok);
+  }
+#endif // ASSERT
+
+  // get synchronization object to O0
+  { Label done;
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    __ btst(JVM_ACC_STATIC, O0);
+    __ br( Assembler::zero, true, Assembler::pt, done);
+    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
+
+    __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
+    __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
+    __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);
+
+    // lock the mirror, not the Klass*
+    __ ld_ptr( O0, mirror_offset, O0);
+
+#ifdef ASSERT
+    __ tst(O0);
+    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
+#endif // ASSERT
+
+    __ bind(done);
+  }
+
+  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
+  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
+  // __ untested("lock_object from method entry");
+  __ lock_object(Lmonitors, O0);
+}
+
+
+void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
+                                                         Register Rscratch,
+                                                         Register Rscratch2) {
+  const int page_size = os::vm_page_size();
+  Label after_frame_check;
+
+  assert_different_registers(Rframe_size, Rscratch, Rscratch2);
+
+  __ set(page_size, Rscratch);
+  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);
+
+  // get the stack base, and in debug, verify it is non-zero
+  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
+#ifdef ASSERT
+  Label base_not_zero;
+  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
+  __ stop("stack base is zero in generate_stack_overflow_check");
+  __ bind(base_not_zero);
+#endif
+
+  // get the stack size, and in debug, verify it is non-zero
+  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
+  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
+#ifdef ASSERT
+  Label size_not_zero;
+  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
+  __ stop("stack size is zero in generate_stack_overflow_check");
+  __ bind(size_not_zero);
+#endif
+
+  // compute the beginning of the protected zone minus the requested frame size
+  __ sub( Rscratch, Rscratch2,   Rscratch );
+  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
+  __ add( Rscratch, Rscratch2,   Rscratch );
+
+  // Add in the size of the frame (which is the same as subtracting it from the
+  // SP, which would take another register).
+  __ add( Rscratch, Rframe_size, Rscratch );
+
+  // the frame is greater than one page in size, so check against
+  // the bottom of the stack
+  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);
+
+  // the stack will overflow, throw an exception
+
+  // Note that SP is restored to sender's sp (in the delay slot). This
+  // is necessary if the sender's frame is an extended compiled frame
+  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
+  // adaptations.
+
+  // Note also that the restored frame is not necessarily interpreted.
+  // Use the shared runtime version of the StackOverflowError.
+  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
+  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
+  __ jump_to(stub, Rscratch);
+  __ delayed()->mov(O5_savedSP, SP);
+
+  // if you get to here, then there is enough stack space
+  __ bind( after_frame_check );
+}
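In plain arithmetic, the check above computes the highest address the new frame may not cross and compares it against SP. A standalone model (an assumed helper; stacks grow down):

#include <cstdint>
#include <cstddef>

// Frame fits iff SP stays above the guard (red + yellow) zone after the
// frame is carved out: SP > (base - size) + guard + frame_size.
bool frame_fits(uintptr_t sp, uintptr_t stack_base, size_t stack_size,
                size_t guard_zone_size, size_t frame_size) {
  uintptr_t limit = stack_base - stack_size + guard_zone_size + frame_size;
  return sp > limit;
}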
+
+
+//
+// Generate a fixed interpreter frame. This is identical setup for interpreted
+// methods and for native methods hence the shared code.
+
+
+//----------------------------------------------------------------------------------------------------
+// Stack frame layout
+//
+// When control flow reaches any of the entry types for the interpreter
+// the following holds ->
+//
+// C2 Calling Conventions:
+//
+// The entry code below assumes that the following registers are set
+// when coming in:
+//    G5_method: holds the Method* of the method to call
+//    Lesp:    points to the TOS of the callers expression stack
+//             after having pushed all the parameters
+//
+// The entry code does the following to setup an interpreter frame
+//   pop parameters from the callers stack by adjusting Lesp
+//   set O0 to Lesp
+//   compute X = (max_locals - num_parameters)
+//   bump SP up by X to accommodate the extra locals
+//   compute X = max_expression_stack
+//               + vm_local_words
+//               + 16 words of register save area
+//   save frame doing a save sp, -X, sp growing towards lower addresses
+//   set Lbcp, Lmethod, LcpoolCache
+//   set Llocals to i0
+//   set Lmonitors to FP - rounded_vm_local_words
+//   set Lesp to Lmonitors - 4
+//
+//  The frame has now been setup to do the rest of the entry code
+
+// Try this optimization:  Most method entries could live in a
+// "one size fits all" stack frame without all the dynamic size
+// calculations.  It might be profitable to do all this calculation
+// statically and approximately for "small enough" methods.
+
+//-----------------------------------------------------------------------------------------------
+
+// C1 Calling conventions
+//
+// Upon method entry, the following registers are setup:
+//
+// g2 G2_thread: current thread
+// g5 G5_method: method to activate
+// g4 Gargs  : pointer to last argument
+//
+//
+// Stack:
+//
+// +---------------+ <--- sp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- sp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- sp + 0x5c
+// |               |
+// :     free      :
+// |               |
+// +---------------+ <--- Gargs
+// |               |
+// :   arguments   :
+// |               |
+// +---------------+
+// |               |
+//
+//
+//
+// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
+//
+// +---------------+ <--- sp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- sp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- sp + 0x5c
+// |               |
+// :               :
+// |               | <--- Lesp
+// +---------------+ <--- Lmonitors (fp - 0x18)
+// |   VM locals   |
+// +---------------+ <--- fp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- fp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- fp + 0x5c
+// |               |
+// :     free      :
+// |               |
+// +---------------+
+// |               |
+// : nonarg locals :
+// |               |
+// +---------------+
+// |               |
+// :   arguments   :
+// |               | <--- Llocals
+// +---------------+ <--- Gargs
+// |               |
+
+void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
+  //
+  //
+  // The entry code sets up a new interpreter frame in 4 steps:
+  //
+  // 1) Increase caller's SP for the extra local space needed:
+  //    (check for overflow)
+  //    Efficient implementation of xload/xstore bytecodes requires
+  //    that arguments and non-argument locals are in a contiguously
+  //    addressable memory block => non-argument locals must be
+  //    allocated in the caller's frame.
+  //
+  // 2) Create a new stack frame and register window:
+  //    The new stack frame must provide space for the standard
+  //    register save area, the maximum java expression stack size,
+  //    the monitor slots (0 slots initially), and some frame local
+  //    scratch locations.
+  //
+  // 3) The following interpreter activation registers must be setup:
+  //    Lesp       : expression stack pointer
+  //    Lbcp       : bytecode pointer
+  //    Lmethod    : method
+  //    Llocals    : locals pointer
+  //    Lmonitors  : monitor pointer
+  //    LcpoolCache: constant pool cache
+  //
+  // 4) Initialize the non-argument locals if necessary:
+  //    Non-argument locals may need to be initialized to NULL
+  //    for GC to work. If the oop-map information is accurate
+  //    (in the absence of the JSR problem), no initialization
+  //    is necessary.
+  //
+  // (gri - 2/25/2000)
+
+
+  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
+
+  const int extra_space =
+    rounded_vm_local_words +                   // frame local scratch space
+    Method::extra_stack_entries() +            // extra stack for jsr 292
+    frame::memory_parameter_word_sp_offset +   // register save area
+    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
+
+  const Register Glocals_size = G3;
+  const Register RconstMethod = Glocals_size;
+  const Register Otmp1 = O3;
+  const Register Otmp2 = O4;
+  // Lscratch can't be used as a temporary because the call_stub uses
+  // it to assert that the stack frame was set up correctly.
+  const Address constMethod       (G5_method, Method::const_offset());
+  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
+
+  __ ld_ptr( constMethod, RconstMethod );
+  __ lduh( size_of_parameters, Glocals_size);
+
+  // Gargs points to first local + BytesPerWord
+  // Set the saved SP after the register window save
+  //
+  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
+  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
+  __ add(Gargs, Otmp1, Gargs);
+
+  if (native_call) {
+    __ calc_mem_param_words( Glocals_size, Gframe_size );
+    __ add( Gframe_size,  extra_space, Gframe_size);
+    __ round_to( Gframe_size, WordsPerLong );
+    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
+  } else {
+
+    //
+    // Compute number of locals in method apart from incoming parameters
+    //
+    const Address size_of_locals    (Otmp1, ConstMethod::size_of_locals_offset());
+    __ ld_ptr( constMethod, Otmp1 );
+    __ lduh( size_of_locals, Otmp1 );
+    __ sub( Otmp1, Glocals_size, Glocals_size );
+    __ round_to( Glocals_size, WordsPerLong );
+    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );
+
+    // see if the frame is greater than one page in size. If so,
+    // then we need to verify there is enough stack space remaining
+    // Frame_size = (max_stack + extra_space) * BytesPerWord;
+    __ ld_ptr( constMethod, Gframe_size );
+    __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
+    __ add( Gframe_size, extra_space, Gframe_size );
+    __ round_to( Gframe_size, WordsPerLong );
+    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
+
+    // Add in java locals size for stack overflow check only
+    __ add( Gframe_size, Glocals_size, Gframe_size );
+
+    const Register Otmp2 = O4;
+    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
+    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);
+
+    __ sub( Gframe_size, Glocals_size, Gframe_size);
+
+    //
+    // bump SP to accommodate the extra locals
+    //
+    __ sub( SP, Glocals_size, SP );
+  }
+
+  //
+  // now set up a stack frame with the size computed above
+  //
+  __ neg( Gframe_size );
+  __ save( SP, Gframe_size, SP );
+
+  //
+  // now set up all the local cache registers
+  //
+  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
+  // that all present references to Lbyte_code initialize the register
+  // immediately before use
+  if (native_call) {
+    __ mov(G0, Lbcp);
+  } else {
+    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
+    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
+  }
+  __ mov( G5_method, Lmethod);                 // set Lmethod
+  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
+  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
+#ifdef _LP64
+  __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
+#endif
+  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp
+
+  // setup interpreter activation registers
+  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals
+
+  if (ProfileInterpreter) {
+#ifdef FAST_DISPATCH
+    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
+    // they both use I2.
+    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
+#endif // FAST_DISPATCH
+    __ set_method_data_pointer();
+  }
+
+}
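The sizing logic reduces to a handful of rounded additions. The sketch below models it with hypothetical word counts (the real terms come from the frame:: offsets and the method's max_stack):

#include <cstddef>

// Round a word count up to a multiple of 'align' words (align: power of two),
// mirroring the round_to(..., WordsPerLong) calls above.
size_t round_to_words(size_t words, size_t align) {
  return (words + align - 1) & ~(align - 1);
}

size_t interpreter_frame_words(size_t vm_local_words, size_t max_stack,
                               size_t extra_stack, size_t reg_save_words,
                               bool native_call, size_t outgoing_arg_words) {
  return round_to_words(vm_local_words, 2)       // frame local scratch space
       + max_stack + extra_stack                 // expression stack + jsr292
       + reg_save_words                          // register save area
       + (native_call ? outgoing_arg_words : 0); // native outgoing args
}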
+
+// Method entry for java.lang.ref.Reference.get.
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#if INCLUDE_ALL_GCS
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. The "intrinsified" code for G1 (or any SATB based GC),
+  //    2. The slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:-
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
+  // This code is based on generate_accessor_entry.
+
+  address entry = __ pc();
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  if (UseG1GC) {
+    Label slow_path;
+
+    // In the G1 code we don't check if we need to reach a safepoint. We
+    // continue and the thread will safepoint at the next bytecode dispatch.
+
+    // Check if local 0 != NULL
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
+    // check if local 0 == NULL and go to the slow path
+    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);
+
+
+    // Load the value of the referent field.
+    if (Assembler::is_simm13(referent_offset)) {
+      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
+    } else {
+      __ set(referent_offset, G3_scratch);
+      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
+    }
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer. Note with
+    // these parameters the pre-barrier does not generate
+    // the load of the previous value
+
+    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
+                            Otos_i /* pre_val */,
+                            G3_scratch /* tmp */,
+                            true /* preserve_o_regs */);
+
+    // _areturn
+    __ retl();                      // return from leaf routine
+    __ delayed()->mov(O5_savedSP, SP);
+
+    // Generate regular method entry
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
+    return entry;
+  }
+#endif // INCLUDE_ALL_GCS
+
+  // If G1 is not enabled then attempt to go through the accessor entry point
+  // Reference.get is an accessor
+  return NULL;
+}
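The pre-barrier's job here is simply to log the freshly loaded referent so snapshot-at-the-beginning marking still traces it. A sketch of that shape with hypothetical structures (the real logic lives in g1_write_barrier_pre and the SATB queues):

#include <vector>

struct SATBQueueSketch {
  bool marking_active = false;
  std::vector<const void*> buffer;
};

// Log the value read from Reference.referent while marking is active.
void satb_pre_barrier(SATBQueueSketch& q, const void* pre_val) {
  if (q.marking_active && pre_val != nullptr) {
    q.buffer.push_back(pre_val);   // marker will revisit this oop
  }
}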
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    Label L_slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
+    __ set(SafepointSynchronize::_not_synchronized, O3);
+    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
+
+    // Load parameters
+    const Register crc   = O0; // initial crc
+    const Register val   = O1; // byte to update with
+    const Register table = O2; // address of 256-entry lookup table
+
+    __ ldub(Gargs, 3, val);
+    __ lduw(Gargs, 8, crc);
+
+    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
+
+    __ not1(crc); // ~crc
+    __ clruwu(crc);
+    __ update_byte_crc32(crc, val, table);
+    __ not1(crc); // ~crc
+
+    // result in O0
+    __ retl();
+    __ delayed()->nop();
+
+    // generate a vanilla native entry as the slow path
+    __ bind(L_slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
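The stub's ~crc / table-lookup / ~crc sequence is the standard reflected CRC-32 byte step. A self-contained model with a locally generated table standing in for StubRoutines::crc_table_addr() (illustrative only):

#include <cstdint>

uint32_t crc_table[256];

void init_crc_table() {
  for (uint32_t i = 0; i < 256; i++) {
    uint32_t c = i;
    for (int k = 0; k < 8; k++)
      c = (c & 1) ? (0xEDB88320u ^ (c >> 1)) : (c >> 1);
    crc_table[i] = c;
  }
}

// Equivalent of CRC32.update(crc, b): not1 / update_byte_crc32 / not1.
uint32_t crc32_update(uint32_t crc, uint8_t b) {
  crc = ~crc;
  crc = crc_table[(crc ^ b) & 0xFF] ^ (crc >> 8);
  return ~crc;
}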
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    Label L_slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
+    __ set(SafepointSynchronize::_not_synchronized, O3);
+    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
+
+    // Load parameters from the stack
+    const Register crc    = O0; // initial crc
+    const Register buf    = O1; // source java byte array address
+    const Register len    = O2; // len
+    const Register offset = O3; // offset
+
+    // Arguments are reversed on java expression stack
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+      __ lduw(Gargs, 0,  len);
+      __ lduw(Gargs, 8,  offset);
+      __ ldx( Gargs, 16, buf);
+      __ lduw(Gargs, 32, crc);
+      __ add(buf, offset, buf);
+    } else {
+      __ lduw(Gargs, 0,  len);
+      __ lduw(Gargs, 8,  offset);
+      __ ldx( Gargs, 16, buf);
+      __ lduw(Gargs, 24, crc);
+      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
+      __ add(buf, offset, buf);
+    }
+
+    // Call the crc32 kernel
+    __ MacroAssembler::save_thread(L7_thread_cache);
+    __ kernel_crc32(crc, buf, len, O3);
+    __ MacroAssembler::restore_thread(L7_thread_cache);
+
+    // result in O0
+    __ retl();
+    __ delayed()->nop();
+
+    // generate a vanilla native entry as the slow path
+    __ bind(L_slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
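The buffer variants only add address arithmetic (raw address plus offset for updateByteBuffer; array header plus offset for updateBytes) and a loop around the same byte kernel. A sketch reusing the table from the previous example:

#include <cstdint>
#include <cstddef>

extern uint32_t crc_table[256];   // from the sketch above

uint32_t crc32_update_bytes(uint32_t crc, const uint8_t* buf,
                            size_t off, size_t len) {
  const uint8_t* p = buf + off;   // caller has already added any header size
  crc = ~crc;
  for (size_t i = 0; i < len; i++)
    crc = crc_table[(crc ^ p[i]) & 0xFF] ^ (crc >> 8);
  return ~crc;
}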
+
+//
+// Interpreter stub for calling a native method. (asm interpreter)
+// This sets up a somewhat different looking stack for calling the native method
+// than the typical interpreter frame setup.
+//
+
+address InterpreterGenerator::generate_native_entry(bool synchronized) {
+  address entry = __ pc();
+
+  // the following temporary registers are used during frame creation
+  const Register Gtmp1 = G3_scratch ;
+  const Register Gtmp2 = G1_scratch;
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // make sure registers are different!
+  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
+
+  const Address Laccess_flags(Lmethod, Method::access_flags_offset());
+
+  const Register Glocals_size = G3;
+  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
+
+  // make sure method is native & not abstract
+  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
+#ifdef ASSERT
+  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
+  {
+    Label L;
+    __ btst(JVM_ACC_NATIVE, Gtmp1);
+    __ br(Assembler::notZero, false, Assembler::pt, L);
+    __ delayed()->nop();
+    __ stop("tried to execute non-native method as native");
+    __ bind(L);
+  }
+  { Label L;
+    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
+    __ br(Assembler::zero, false, Assembler::pt, L);
+    __ delayed()->nop();
+    __ stop("tried to execute abstract method as non-abstract");
+    __ bind(L);
+  }
+#endif // ASSERT
+
+ // generate the code to allocate the interpreter stack frame
+  generate_fixed_frame(true);
+
+  //
+  // No locals to initialize for native method
+  //
+
+  // this slot will be set later, we initialize it to null here just in
+  // case we get a GC before the actual value is stored later
+  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);
+
+  const Address do_not_unlock_if_synchronized(G2_thread,
+    JavaThread::do_not_unlock_if_synchronized_offset());
+  // Since at this point in the method invocation the exception handler
+  // would try to exit the monitor of a synchronized method that hasn't
+  // been entered yet, we set the thread-local variable
+  // _do_not_unlock_if_synchronized to true. If any exception is thrown by
+  // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
+  // will check this thread-local flag.
+  // One effect of this flag is to force an unwind in the topmost
+  // interpreter frame without performing an unlock while doing so.
+
+  __ movbool(true, G3_scratch);
+  __ stbool(G3_scratch, do_not_unlock_if_synchronized);
+
+  // increment invocation counter and check for overflow
+  //
+  // Note: checking for negative value instead of overflow
+  //       so we have a 'sticky' overflow test (may be of
+  //       importance as soon as we have true MT/MP)
+  Label invocation_counter_overflow;
+  Label Lcontinue;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+
+  }
+  __ bind(Lcontinue);
+
+  bang_stack_shadow_pages(true);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  __ stbool(G0, do_not_unlock_if_synchronized);
+
+  // check for synchronized methods
+  // Must happen AFTER invocation_counter check and stack overflow check,
+  // so method is not locked if overflows.
+
+  if (synchronized) {
+    lock_method();
+  } else {
+#ifdef ASSERT
+    { Label ok;
+      __ ld(Laccess_flags, O0);
+      __ btst(JVM_ACC_SYNCHRONIZED, O0);
+      __ br( Assembler::zero, false, Assembler::pt, ok);
+      __ delayed()->nop();
+      __ stop("method needs synchronization");
+      __ bind(ok);
+    }
+#endif // ASSERT
+  }
+
+
+  // start execution
+  __ verify_thread();
+
+  // JVMTI support
+  __ notify_method_entry();
+
+  // native call
+
+  // (note that O0 is never an oop--at most it is a handle)
+  // It is important not to smash any handles created by this call,
+  // until any oop handle in O0 is dereferenced.
+
+  // (note that the space for outgoing params is preallocated)
+
+  // get signature handler
+  { Label L;
+    Address signature_handler(Lmethod, Method::signature_handler_offset());
+    __ ld_ptr(signature_handler, G3_scratch);
+    __ br_notnull_short(G3_scratch, Assembler::pt, L);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
+    __ ld_ptr(signature_handler, G3_scratch);
+    __ bind(L);
+  }
+
+  // Push a new frame so that the args will really be stored in
+  // Copy a few locals across so the new frame has the variables
+  // we need but these values will be dead at the jni call and
+  // therefore not gc volatile like the values in the current
+  // frame (Lmethod in particular)
+
+  // Flush the method pointer to the register save area
+  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
+  __ mov(Llocals, O1);
+
+  // calculate where the mirror handle body is allocated in the interpreter frame:
+  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
+
+  // Calculate current frame size
+  __ sub(SP, FP, O3);         // Calculate negative of current frame size
+  __ save(SP, O3, SP);        // Allocate an identical sized frame
+
+  // Note I7 has leftover trash. Slow signature handler will fill it in
+  // should we get there. Normal jni call will set reasonable last_Java_pc
+  // below (and fix I7 so the stack trace doesn't have a meaningless frame
+  // in it).
+
+  // Load interpreter frame's Lmethod into same register here
+
+  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
+
+  __ mov(I1, Llocals);
+  __ mov(I2, Lscratch2);     // save the address of the mirror
+
+
+  // ONLY Lmethod and Llocals are valid here!
+
+  // Call the signature handler. It will move the args properly since Llocals
+  // in the current frame matches that in the outer frame
+
+  __ callr(G3_scratch, 0);
+  __ delayed()->nop();
+
+  // Result handler is in Lscratch
+
+  // Reload interpreter frame's Lmethod since slow signature handler may block
+  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
+
+  { Label not_static;
+
+    __ ld(Laccess_flags, O0);
+    __ btst(JVM_ACC_STATIC, O0);
+    __ br( Assembler::zero, false, Assembler::pt, not_static);
+    // get native function entry point (O0 is a good temp until the very end)
+    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
+    // for static methods insert the mirror argument
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+
+    __ ld_ptr(Lmethod, Method::const_offset(), O1);
+    __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
+    __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
+    __ ld_ptr(O1, mirror_offset, O1);
+#ifdef ASSERT
+    if (!PrintSignatureHandlers)  // do not dirty the output with this
+    { Label L;
+      __ br_notnull_short(O1, Assembler::pt, L);
+      __ stop("mirror is missing");
+      __ bind(L);
+    }
+#endif // ASSERT
+    __ st_ptr(O1, Lscratch2, 0);
+    __ mov(Lscratch2, O1);
+    __ bind(not_static);
+  }
+
+  // At this point, arguments have been copied off of stack into
+  // their JNI positions, which are O1..O5 and SP[68..].
+  // Oops are boxed in-place on the stack, with handles copied to arguments.
+  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.
+
+#ifdef ASSERT
+  { Label L;
+    __ br_notnull_short(O0, Assembler::pt, L);
+    __ stop("native entry point is missing");
+    __ bind(L);
+  }
+#endif // ASSERT
+
+  //
+  // setup the frame anchor
+  //
+  // The scavenge function only needs to know that the PC of this frame is
+  // in the interpreter method entry code, it doesn't need to know the exact
+  // PC and hence we can use O7 which points to the return address from the
+  // previous call in the code stream (signature handler function)
+  //
+  // The other trick is we set last_Java_sp to FP instead of the usual SP because
+  // we have pushed the extra frame in order to protect the volatile register(s)
+  // in that frame when we return from the jni call
+  //
+
+  __ set_last_Java_frame(FP, O7);
+  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
+                   // not meaningless information that would only confuse a stack walker.
+
+  // flush the windows now. We don't care about the current (protection) frame
+  // only the outer frames
+
+  __ flushw();
+
+  // mark windows as flushed
+  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
+  __ set(JavaFrameAnchor::flushed, G3_scratch);
+  __ st(G3_scratch, flags);
+
+  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
+
+  Address thread_state(G2_thread, JavaThread::thread_state_offset());
+#ifdef ASSERT
+  { Label L;
+    __ ld(thread_state, G3_scratch);
+    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
+    __ stop("Wrong thread state in native stub");
+    __ bind(L);
+  }
+#endif // ASSERT
+  __ set(_thread_in_native, G3_scratch);
+  __ st(G3_scratch, thread_state);
+
+  // Call the jni method, using the delay slot to set the JNIEnv* argument.
+  __ save_thread(L7_thread_cache); // save Gthread
+  __ callr(O0, 0);
+  __ delayed()->
+     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
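+  // In effect (a sketch): result = (*native_entry)(&thread->_jni_environment, args...);
+  // The delayed add materializes the JNIEnv* in O0 just before control
+  // reaches the native function.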
+
+  // Back from jni method. Lmethod in this frame is DEAD, DEAD, DEAD
+
+  __ restore_thread(L7_thread_cache); // restore G2_thread
+  __ reinit_heapbase();
+
+  // must we block?
+
+  // Block, if necessary, before resuming in _thread_in_Java state.
+  // In order for GC to work, don't clear the last_Java_sp until after blocking.
+  { Label no_block;
+    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
+
+    // Switch thread to "native transition" state before reading the synchronization state.
+    // This additional state is necessary because reading and testing the synchronization
+    // state is not atomic w.r.t. GC, as this scenario demonstrates:
+    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
+    //     VM thread changes sync state to synchronizing and suspends threads for GC.
+    //     Thread A is resumed to finish this native method, but doesn't block here since it
+    //     didn't see any synchronization in progress, and escapes.
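+    //
+    // As a sketch, the transition protocol generated below is roughly:
+    //
+    //   thread->set_thread_state(_thread_in_native_trans);
+    //   <StoreLoad membar or serialization-page write>
+    //   if (SafepointSynchronize::_state != _not_synchronized ||
+    //       thread->_suspend_flags != 0) {
+    //     JavaThread::check_special_condition_for_native_trans(thread);
+    //   }
+    //   thread->set_thread_state(_thread_in_Java);   // done further below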
+    __ set(_thread_in_native_trans, G3_scratch);
+    __ st(G3_scratch, thread_state);
+    if (os::is_MP()) {
+      if (UseMembar) {
+        // Force this write out before the read below
+        __ membar(Assembler::StoreLoad);
+      } else {
+        // Write serialization page so VM thread can do a pseudo remote membar.
+        // We use the current thread pointer to calculate a thread specific
+        // offset to write to within the page. This minimizes bus traffic
+        // due to cache line collision.
+        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
+      }
+    }
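+    // The serialization-page alternative is roughly (a sketch; the real helper
+    // is os::write_memory_serialize_page(thread), and the offset math here is
+    // illustrative):
+    //
+    //   volatile int* page = (volatile int*)os::get_memory_serialize_page();
+    //   page[offset_for(thread)] = 1;   // the VM thread mprotects this page to
+    //                                   // trap and thereby serialize all mutators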
+    __ load_contents(sync_state, G3_scratch);
+    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
+
+    Label L;
+    __ br(Assembler::notEqual, false, Assembler::pn, L);
+    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
+    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
+    __ bind(L);
+
+    // Block.  Save any potential method result value before the operation and
+    // use a leaf call to leave the last_Java_frame setup undisturbed.
+    save_native_result();
+    __ call_VM_leaf(L7_thread_cache,
+                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+                    G2_thread);
+
+    // Restore any method result value
+    restore_native_result();
+    __ bind(no_block);
+  }
+
+  // Clear the frame anchor now
+
+  __ reset_last_Java_frame();
+
+  // Move the result handler address
+  __ mov(Lscratch, G3_scratch);
+  // return possible result to the outer frame
+#ifndef _LP64
+  __ mov(O0, I0);
+  __ restore(O1, G0, O1);
+#else
+  __ restore(O0, G0, O0);
+#endif /* _LP64 */
+
+  // Move result handler to expected register
+  __ mov(G3_scratch, Lscratch);
+
+  // Back in the normal (native) interpreter frame. State is thread_in_native_trans;
+  // switch to thread_in_Java.
+
+  __ set(_thread_in_Java, G3_scratch);
+  __ st(G3_scratch, thread_state);
+
+  // reset handle block
+  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
+  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
+
+  // If we have an oop result, store it where it will be safe for any further
+  // GC until we return, now that we've released the handle it might have been protected by.
+
+  {
+    Label no_oop, store_result;
+
+    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
+    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
+    __ addcc(G0, O0, O0);
+    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
+    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
+    __ mov(G0, O0);
+
+    __ bind(store_result);
+    // Store it where gc will look for it and result handler expects it.
+    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
+
+    __ bind(no_oop);
+
+  }
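+
+  // The unboxing above is, in effect (a sketch; the annulled delay slot
+  // performs the load only when the branch is taken):
+  //
+  //   oop result = (O0 == NULL) ? NULL : *(oop*)O0;  // JNI returned a handle
+  //   interpreter_frame_oop_temp = result;           // where GC will look for it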
+
+
+  // handle exceptions (exception handling will handle unlocking!)
+  { Label L;
+    Address exception_addr(G2_thread, Thread::pending_exception_offset());
+    __ ld_ptr(exception_addr, Gtemp);
+    __ br_null_short(Gtemp, Assembler::pt, L);
+    // Note: This could be handled more efficiently since we know that the native
+    //       method doesn't have an exception handler. We could directly return
+    //       to the exception handler for the caller.
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+
+  // JVMTI support (preserves thread register)
+  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
+
+  if (synchronized) {
+    // save and restore any potential method result value around the unlocking operation
+    save_native_result();
+
+    __ add( __ top_most_monitor(), O1);
+    __ unlock_object(O1);
+
+    restore_native_result();
+  }
+
+#if defined(COMPILER2) && !defined(_LP64)
+
+  // C2 expects long results in G1. We can't tell if we're returning to interpreted
+  // or compiled code, so just be safe.
+
+  __ sllx(O0, 32, G1);          // Shift bits into high G1
+  __ srl (O1, 0, O1);           // Zero extend O1
+  __ or3 (O1, G1, G1);          // OR 64 bits into G1
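+
+  // In C terms the three instructions above amount to (a sketch):
+  //
+  //   G1 = ((uint64_t)O0 << 32) | (uint32_t)O1;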
+
+#endif /* COMPILER2 && !_LP64 */
+
+  // dispose of return address and remove activation
+#ifdef ASSERT
+  {
+    Label ok;
+    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
+    __ stop("bad I5_savedSP value");
+    __ should_not_reach_here();
+    __ bind(ok);
+  }
+#endif
+  if (TraceJumps) {
+    // Move target to register that is recordable
+    __ mov(Lscratch, G3_scratch);
+    __ JMP(G3_scratch, 0);
+  } else {
+    __ jmp(Lscratch, 0);
+  }
+  __ delayed()->nop();
+
+
+  if (inc_counter) {
+    // handle invocation counter overflow
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(Lcontinue);
+  }
+
+
+
+  return entry;
+}
+
+
+// Generic method entry to (asm) interpreter
+address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+  address entry = __ pc();
+
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // the following temporary registers are used during frame creation
+  const Register Gtmp1 = G3_scratch ;
+  const Register Gtmp2 = G1_scratch;
+
+  // make sure registers are different!
+  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
+
+  const Address constMethod       (G5_method, Method::const_offset());
+  // Seems like G5_method is live at the point this is used. So we could make this look consistent
+  // and use it in the asserts.
+  const Address access_flags      (Lmethod,   Method::access_flags_offset());
+
+  const Register Glocals_size = G3;
+  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
+
+  // make sure method is not native & not abstract
+  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
+#ifdef ASSERT
+  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
+  {
+    Label L;
+    __ btst(JVM_ACC_NATIVE, Gtmp1);
+    __ br(Assembler::zero, false, Assembler::pt, L);
+    __ delayed()->nop();
+    __ stop("tried to execute native method as non-native");
+    __ bind(L);
+  }
+  { Label L;
+    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
+    __ br(Assembler::zero, false, Assembler::pt, L);
+    __ delayed()->nop();
+    __ stop("tried to execute abstract method as non-abstract");
+    __ bind(L);
+  }
+#endif // ASSERT
+
+  // generate the code to allocate the interpreter stack frame
+
+  generate_fixed_frame(false);
+
+#ifdef FAST_DISPATCH
+  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
+                                          // set bytecode dispatch table base
+#endif
+
+  //
+  // Code to initialize the extra (i.e. non-parm) locals
+  //
+  Register init_value = noreg;    // will be G0 if we must clear locals
+  // The way the code was set up before, zerolocals was always true for vanilla java entries.
+  // It could only be false for the specialized entries like accessor or empty which have
+  // no extra locals, so the testing was a waste of time and the extra locals were always
+  // initialized. We removed this extra complication from already over-complicated code.
+
+  init_value = G0;
+  Label clear_loop;
+
+  const Register RconstMethod = O1;
+  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
+  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());
+
+  // NOTE: If you change the frame layout, this code will need to
+  // be updated!
+  __ ld_ptr( constMethod, RconstMethod );
+  __ lduh( size_of_locals, O2 );
+  __ lduh( size_of_parameters, O1 );
+  __ sll( O2, Interpreter::logStackElementSize, O2);
+  __ sll( O1, Interpreter::logStackElementSize, O1 );
+  __ sub( Llocals, O2, O2 );
+  __ sub( Llocals, O1, O1 );
+
+  __ bind( clear_loop );
+  __ inc( O2, wordSize );
+
+  __ cmp( O2, O1 );
+  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
+  __ delayed()->st_ptr( init_value, O2, 0 );
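+
+  // The clear loop above is roughly equivalent C (a sketch; sizes in words,
+  // and locals grow toward lower addresses):
+  //
+  //   for (intptr_t* p = Llocals - size_of_locals + 1; p <= Llocals - size_of_parameters; p++)
+  //     *p = 0;   // zap each non-parameter local so GC never sees a stale oop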
+
+  const Address do_not_unlock_if_synchronized(G2_thread,
+    JavaThread::do_not_unlock_if_synchronized_offset());
+  // Since at this point in the method invocation the exception handler
+  // would try to exit the monitor of a synchronized method which has not
+  // been entered yet, we set the thread-local variable
+  // _do_not_unlock_if_synchronized to true. If any exception is thrown by
+  // the runtime, exception handling (i.e. unlock_if_synchronized_method)
+  // will check this thread-local flag.
+  __ movbool(true, G3_scratch);
+  __ stbool(G3_scratch, do_not_unlock_if_synchronized);
+
+  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
+  // increment invocation counter and check for overflow
+  //
+  // Note: checking for negative value instead of overflow
+  //       so we have a 'sticky' overflow test (may be of
+  //       importance as soon as we have true MT/MP)
+  Label invocation_counter_overflow;
+  Label profile_method;
+  Label profile_method_continue;
+  Label Lcontinue;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
+    if (ProfileInterpreter) {
+      __ bind(profile_method_continue);
+    }
+  }
+  __ bind(Lcontinue);
+
+  bang_stack_shadow_pages(false);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  __ stbool(G0, do_not_unlock_if_synchronized);
+
+  // check for synchronized methods
+  // Must happen AFTER invocation_counter check and stack overflow check,
+  // so the method is not locked if the counter overflows.
+
+  if (synchronized) {
+    lock_method();
+  } else {
+#ifdef ASSERT
+    { Label ok;
+      __ ld(access_flags, O0);
+      __ btst(JVM_ACC_SYNCHRONIZED, O0);
+      __ br( Assembler::zero, false, Assembler::pt, ok);
+      __ delayed()->nop();
+      __ stop("method needs synchronization");
+      __ bind(ok);
+    }
+#endif // ASSERT
+  }
+
+  // start execution
+
+  __ verify_thread();
+
+  // jvmti support
+  __ notify_method_entry();
+
+  // start executing instructions
+  __ dispatch_next(vtos);
+
+
+  if (inc_counter) {
+    if (ProfileInterpreter) {
+      // We have decided to profile this method in the interpreter
+      __ bind(profile_method);
+
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
+      __ ba_short(profile_method_continue);
+    }
+
+    // handle invocation counter overflow
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(Lcontinue);
+  }
+
+
+  return entry;
+}
+
+//----------------------------------------------------------------------------------------------------
+// Exceptions
+void TemplateInterpreterGenerator::generate_throw_exception() {
+
+  // Entry point in previous activation (i.e., if the caller was interpreted)
+  Interpreter::_rethrow_exception_entry = __ pc();
+  // O0: exception
+
+  // entry point for exceptions thrown within interpreter code
+  Interpreter::_throw_exception_entry = __ pc();
+  __ verify_thread();
+  // expression stack is undefined here
+  // O0: exception, i.e. Oexception
+  // Lbcp: exception bcp
+  __ verify_oop(Oexception);
+
+
+  // expression stack must be empty before entering the VM in case of an exception
+  __ empty_expression_stack();
+  // find exception handler address and preserve exception oop
+  // call C routine to find handler and jump to it
+  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
+  __ push_ptr(O1); // push exception for exception handler bytecodes
+
+  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
+  __ delayed()->nop();
+
+
+  // if the exception is not handled in the current frame
+  // the frame is removed and the exception is rethrown
+  // (i.e. exception continuation is _rethrow_exception)
+  //
+  // Note: At this point the bci is still the bci of the instruction which caused
+  //       the exception and the expression stack is empty. Thus, for any VM calls
+  //       at this point, GC will find a legal oop map (with empty expression stack).
+
+  // in current activation
+  // tos: exception
+  // Lbcp: exception bcp
+
+  //
+  // JVMTI PopFrame support
+  //
+
+  Interpreter::_remove_activation_preserving_args_entry = __ pc();
+  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
+  // Set the popframe_processing bit in popframe_condition indicating that we are
+  // currently handling popframe, so that call_VMs that may happen later do not trigger new
+  // popframe handling cycles.
+
+  __ ld(popframe_condition_addr, G3_scratch);
+  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
+  __ stw(G3_scratch, popframe_condition_addr);
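+
+  // In effect (a sketch; the code above ORs the bit in directly):
+  //
+  //   thread->popframe_condition |= JavaThread::popframe_processing_bit;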
+
+  // Empty the expression stack, as in normal exception handling
+  __ empty_expression_stack();
+  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
+
+  {
+    // Check to see whether we are returning to a deoptimized frame.
+    // (The PopFrame call ensures that the caller of the popped frame is
+    // either interpreted or compiled and deoptimizes it if compiled.)
+    // In this case, we can't call dispatch_next() after the frame is
+    // popped, but instead must save the incoming arguments and restore
+    // them after deoptimization has occurred.
+    //
+    // Note that we don't compare the return PC against the
+    // deoptimization blob's unpack entry because of the presence of
+    // adapter frames in C2.
+    Label caller_not_deoptimized;
+    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
+    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);
+
+    const Register Gtmp1 = G3_scratch;
+    const Register Gtmp2 = G1_scratch;
+    const Register RconstMethod = Gtmp1;
+    const Address constMethod(Lmethod, Method::const_offset());
+    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
+
+    // Compute size of arguments for saving when returning to deoptimized caller
+    __ ld_ptr(constMethod, RconstMethod);
+    __ lduh(size_of_parameters, Gtmp1);
+    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
+    __ sub(Llocals, Gtmp1, Gtmp2);
+    __ add(Gtmp2, wordSize, Gtmp2);
+    // Save these arguments
+    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
+    // Inform deoptimization that it is responsible for restoring these arguments
+    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
+    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
+    __ st(Gtmp1, popframe_condition_addr);
+
+    // Return from the current method
+    // The caller's SP was adjusted upon method entry to accommodate
+    // the callee's non-argument locals. Undo that adjustment.
+    __ ret();
+    __ delayed()->restore(I5_savedSP, G0, SP);
+
+    __ bind(caller_not_deoptimized);
+  }
+
+  // Clear the popframe condition flag
+  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
+
+  // Get out of the current method (how this is done depends on the particular compiler calling
+  // convention that the interpreter currently follows)
+  // The caller's SP was adjusted upon method entry to accommodate
+  // the callee's non-argument locals. Undo that adjustment.
+  __ restore(I5_savedSP, G0, SP);
+  // The method data pointer was incremented already during
+  // call profiling. We have to restore the mdp for the current bcp.
+  if (ProfileInterpreter) {
+    __ set_method_data_pointer_for_bcp();
+  }
+
+#if INCLUDE_JVMTI
+  {
+    Label L_done;
+
+    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
+    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
+
+    __ br_null(G1_scratch, false, Assembler::pn, L_done);
+    __ delayed()->nop();
+
+    __ st_ptr(G1_scratch, Lesp, wordSize);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
+  // Resume bytecode interpretation at the current bcp
+  __ dispatch_next(vtos);
+  // end of JVMTI PopFrame support
+
+  Interpreter::_remove_activation_entry = __ pc();
+
+  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
+  __ pop_ptr(Oexception);                                  // get exception
+
+  // Intel has the following comment:
+  //// remove the activation (without doing throws on illegalMonitorExceptions)
+  // They remove the activation without checking for bad monitor state.
+  // %%% We should make sure this is the right semantics before implementing.
+
+  __ set_vm_result(Oexception);
+  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
+
+  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
+
+  __ get_vm_result(Oexception);
+  __ verify_oop(Oexception);
+
+  const int return_reg_adjustment = frame::pc_return_offset;
+  Address issuing_pc_addr(I7, return_reg_adjustment);
+
+  // We are done with this activation frame; find out where to go next.
+  // The continuation point will be an exception handler, which expects
+  // the following registers set up:
+  //
+  // Oexception: exception
+  // Oissuing_pc: the local call that threw the exception
+  // Other On: garbage
+  // In/Ln:  the contents of the caller's register window
+  //
+  // We do the required restore at the last possible moment, because we
+  // need to preserve some state across a runtime call.
+  // (Remember that the caller activation is unknown--it might not be
+  // interpreted, so things like Lscratch are useless in the caller.)
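+  //
+  // Sketch of the lookup performed below:
+  //
+  //   address handler =
+  //     SharedRuntime::exception_handler_for_return_address(thread, issuing_pc);
+  //   // then jump to handler with Oexception/Oissuing_pc set up as described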
+
+  // Although the Intel version uses call_C, we can use the more
+  // compact call_VM.  (The only real difference on SPARC is a
+  // harmlessly ignored [re]set_last_Java_frame, compared with
+  // the Intel code which lacks this.)
+  __ mov(Oexception,      Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
+  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
+  __ super_call_VM_leaf(L7_thread_cache,
+                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
+                        G2_thread, Oissuing_pc->after_save());
+
+  // The caller's SP was adjusted upon method entry to accommodate
+  // the callee's non-argument locals. Undo that adjustment.
+  __ JMP(O0, 0);                         // return exception handler in caller
+  __ delayed()->restore(I5_savedSP, G0, SP);
+
+  // (same old exception object is already in Oexception; see above)
+  // Note that an "issuing PC" is actually the next PC after the call
+}
+
+
+//
+// JVMTI ForceEarlyReturn support
+//
+
+address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
+  address entry = __ pc();
+
+  __ empty_expression_stack();
+  __ load_earlyret_value(state);
+
+  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
+  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
+
+  // Clear the earlyret state
+  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
+
+  __ remove_activation(state,
+                       /* throw_monitor_exception */ false,
+                       /* install_monitor_exception */ false);
+
+  // The caller's SP was adjusted upon method entry to accommodate
+  // the callee's non-argument locals. Undo that adjustment.
+  __ ret();                             // return to caller
+  __ delayed()->restore(I5_savedSP, G0, SP);
+
+  return entry;
+} // end of JVMTI ForceEarlyReturn support
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Helper for vtos entry point generation
+
+void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
+  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
+  Label L;
+  aep = __ pc(); __ push_ptr(); __ ba_short(L);
+  fep = __ pc(); __ push_f();   __ ba_short(L);
+  dep = __ pc(); __ push_d();   __ ba_short(L);
+  lep = __ pc(); __ push_l();   __ ba_short(L);
+  iep = __ pc(); __ push_i();
+  bep = cep = sep = iep;                        // there aren't any
+  vep = __ pc(); __ bind(L);                    // fall through
+  generate_and_dispatch(t);
+}
+
+// --------------------------------------------------------------------------------
+
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+ : TemplateInterpreterGenerator(code) {
+   generate_all(); // down here so it can be "virtual"
+}
+
+// --------------------------------------------------------------------------------
+
+// Non-product code
+#ifndef PRODUCT
+address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
+  address entry = __ pc();
+
+  __ push(state);
+  __ mov(O7, Lscratch); // protect return address within interpreter
+
+  // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
+  __ mov( Otos_l2, G3_scratch );
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
+  __ mov(Lscratch, O7); // restore return address
+  __ pop(state);
+  __ retl();
+  __ delayed()->nop();
+
+  return entry;
+}
+
+
+// helpers for generate_and_dispatch
+
+void TemplateInterpreterGenerator::count_bytecode() {
+  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
+}
+
+
+void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
+  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
+}
+
+
+void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
+  AddressLiteral index   (&BytecodePairHistogram::_index);
+  AddressLiteral counters((address) &BytecodePairHistogram::_counters);
+
+  // get index, shift out old bytecode, bring in new bytecode, and store it
+  // _index = (_index >> log2_number_of_codes) |
+  //          (bytecode << log2_number_of_codes);
+
+  __ load_contents(index, G4_scratch);
+  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
+  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes,  G3_scratch );
+  __ or3( G3_scratch,  G4_scratch, G4_scratch );
+  __ store_contents(G4_scratch, index, G3_scratch);
+
+  // bump bucket contents
+  // _counters[_index] ++;
+
+  __ set(counters, G3_scratch);                       // loads into G3_scratch
+  __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // Index is word address
+  __ add (G3_scratch, G4_scratch, G3_scratch);        // Add in index
+  __ ld (G3_scratch, 0, G4_scratch);
+  __ inc (G4_scratch);
+  __ st (G4_scratch, 0, G3_scratch);
+}
+
+
+void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
+  // Call a little run-time stub to avoid blow-up for each bytecode.
+  // The run-time stub saves the right registers, depending on
+  // the tosca in-state for the given template.
+  address entry = Interpreter::trace_code(t->tos_in());
+  guarantee(entry != NULL, "entry must have been generated");
+  __ call(entry, relocInfo::none);
+  __ delayed()->nop();
+}
+
+
+void TemplateInterpreterGenerator::stop_interpreter_at() {
+  AddressLiteral counter(&BytecodeCounter::_counter_value);
+  __ load_contents(counter, G3_scratch);
+  AddressLiteral stop_at(&StopInterpreterAt);
+  __ load_ptr_contents(stop_at, G4_scratch);
+  __ cmp(G3_scratch, G4_scratch);
+  __ breakpoint_trap(Assembler::equal, Assembler::icc);
+}
+#endif // not PRODUCT
+#endif // !CC_INTERP
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -23,1483 +23,39 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
+#include "oops/constMethod.hpp"
 #include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
-#ifndef CC_INTERP
-#ifndef FAST_DISPATCH
-#define FAST_DISPATCH 1
-#endif
-#undef FAST_DISPATCH
-
-
-// Generation of Interpreter
-//
-// The InterpreterGenerator generates the interpreter into Interpreter::_code.
-
-
-#define __ _masm->
-
-
-//----------------------------------------------------------------------------------------------------
-
-
-void InterpreterGenerator::save_native_result(void) {
-  // result potentially in O0/O1: save it across calls
-  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
-
-  // result potentially in F0/F1: save it across calls
-  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
-
-  // save and restore any potential method result value around the unlocking operation
-  __ stf(FloatRegisterImpl::D, F0, d_tmp);
-#ifdef _LP64
-  __ stx(O0, l_tmp);
-#else
-  __ std(O0, l_tmp);
-#endif
-}
-
-void InterpreterGenerator::restore_native_result(void) {
-  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
-  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
-
-  // Restore any method result value
-  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
-#ifdef _LP64
-  __ ldx(l_tmp, O0);
-#else
-  __ ldd(l_tmp, O0);
-#endif
-}
-
-address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
-  assert(!pass_oop || message == NULL, "either oop or message but not both");
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception happened
-  __ empty_expression_stack();
-  // load exception object
-  __ set((intptr_t)name, G3_scratch);
-  if (pass_oop) {
-    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
-  } else {
-    __ set((intptr_t)message, G4_scratch);
-    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
-  }
-  // throw exception
-  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
-  AddressLiteral thrower(Interpreter::throw_exception_entry());
-  __ jump_to(thrower, G3_scratch);
-  __ delayed()->nop();
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception
-  // happened
-  __ empty_expression_stack();
-  // load exception object
-  __ call_VM(Oexception,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_ClassCastException),
-             Otos_i);
-  __ should_not_reach_here();
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception happened
-  __ empty_expression_stack();
-  // convention: expect aberrant index in register G3_scratch, then shuffle the
-  // index to G4_scratch for the VM call
-  __ mov(G3_scratch, G4_scratch);
-  __ set((intptr_t)name, G3_scratch);
-  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
-  __ should_not_reach_here();
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception happened
-  __ empty_expression_stack();
-  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
-  __ should_not_reach_here();
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
-  address entry = __ pc();
-
-  if (state == atos) {
-    __ profile_return_type(O0, G3_scratch, G1_scratch);
-  }
-
-#if !defined(_LP64) && defined(COMPILER2)
-  // All return values are where we want them, except for Longs.  C2 returns
-  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
-  // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
-  // build even if we are returning from interpreted we just do a little
-  // stupid shuffing.
-  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
-  // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
-  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
-
-  if (state == ltos) {
-    __ srl (G1,  0, O1);
-    __ srlx(G1, 32, O0);
-  }
-#endif // !_LP64 && COMPILER2
-
-  // The callee returns with the stack possibly adjusted by adapter transition
-  // We remove that possible adjustment here.
-  // All interpreter local registers are untouched. Any result is passed back
-  // in the O0/O1 or float registers. Before continuing, the arguments must be
-  // popped from the java expression stack; i.e., Lesp must be adjusted.
-
-  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
-
-  const Register cache = G3_scratch;
-  const Register index  = G1_scratch;
-  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
-
-  const Register flags = cache;
-  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
-  const Register parameter_size = flags;
-  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
-  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
-  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments
-  __ dispatch_next(state, step);
-
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
-  address entry = __ pc();
-  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
-#if INCLUDE_JVMCI
-  // Check if we need to take lock at entry of synchronized method.
-  if (UseJVMCICompiler) {
-    Label L;
-    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
-    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
-    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
-    // Clear flag.
-    __ stbool(G0, pending_monitor_enter_addr);
-    // Take lock.
-    lock_method();
-    __ bind(L);
-  }
-#endif
-  { Label L;
-    Address exception_addr(G2_thread, Thread::pending_exception_offset());
-    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
-    __ br_null_short(Gtemp, Assembler::pt, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-  __ dispatch_next(state, step);
-  return entry;
-}
-
-// A result handler converts/unboxes a native call result into
-// a java interpreter/compiler result. The current frame is an
-// interpreter frame. The activation frame unwind code must be
-// consistent with that of TemplateTable::_return(...). In the
-// case of native methods, the caller's SP was not modified.
-address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
-  address entry = __ pc();
-  Register Itos_i  = Otos_i ->after_save();
-  Register Itos_l  = Otos_l ->after_save();
-  Register Itos_l1 = Otos_l1->after_save();
-  Register Itos_l2 = Otos_l2->after_save();
-  switch (type) {
-    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
-    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
-    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
-    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
-    case T_LONG   :
-#ifndef _LP64
-                    __ mov(O1, Itos_l2);  // move other half of long
-#endif              // ifdef or no ifdef, fall through to the T_INT case
-    case T_INT    : __ mov(O0, Itos_i);                         break;
-    case T_VOID   : /* nothing to do */                         break;
-    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
-    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
-    case T_OBJECT :
-      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
-      __ verify_oop(Itos_i);
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ ret();                           // return from interpreter activation
-  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
-  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
-  address entry = __ pc();
-  __ push(state);
-  __ call_VM(noreg, runtime_entry);
-  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
-  return entry;
-}
-
 
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  __ dispatch_next(state);
-  return entry;
-}
-
-//
-// Helpers for commoning out cases in the various type of method entries.
-//
-
-// increment invocation count & check for overflow
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test
-//
-// Lmethod: method
-// ??: invocation counter
-//
-void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-  // Note: In tiered we increment either counters in MethodCounters* or in
-  // MDO depending if we're profiling or not.
-  const Register G3_method_counters = G3_scratch;
-  Label done;
-
-  if (TieredCompilation) {
-    const int increment = InvocationCounter::count_increment;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      // If no method data exists, go to profile_continue.
-      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
-      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
-      // Increment counter
-      Address mdo_invocation_counter(G4_scratch,
-                                     in_bytes(MethodData::invocation_counter_offset()) +
-                                     in_bytes(InvocationCounter::counter_offset()));
-      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
-                                 G3_scratch, Lscratch,
-                                 Assembler::zero, overflow);
-      __ ba_short(done);
-    }
-
-    // Increment counter in MethodCounters*
-    __ bind(no_mdo);
-    Address invocation_counter(G3_method_counters,
-            in_bytes(MethodCounters::invocation_counter_offset()) +
-            in_bytes(InvocationCounter::counter_offset()));
-    __ get_method_counters(Lmethod, G3_method_counters, done);
-    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
-    __ increment_mask_and_jump(invocation_counter, increment, mask,
-                               G4_scratch, Lscratch,
-                               Assembler::zero, overflow);
-    __ bind(done);
-  } else { // not TieredCompilation
-    // Update standard invocation counters
-    __ get_method_counters(Lmethod, G3_method_counters, done);
-    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
-    if (ProfileInterpreter) {
-      Address interpreter_invocation_counter(G3_method_counters,
-            in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
-      __ ld(interpreter_invocation_counter, G4_scratch);
-      __ inc(G4_scratch);
-      __ st(G4_scratch, interpreter_invocation_counter);
-    }
-
-    if (ProfileInterpreter && profile_method != NULL) {
-      // Test to see if we should create a method data oop
-      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
-      __ ld(profile_limit, G1_scratch);
-      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
-
-      // if no method data exists, go to profile_method
-      __ test_method_data_pointer(*profile_method);
-    }
-
-    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
-    __ ld(invocation_limit, G3_scratch);
-    __ cmp(O0, G3_scratch);
-    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
-    __ delayed()->nop();
-    __ bind(done);
-  }
-
-}
-
-// Allocate monitor and lock method (asm interpreter)
-// ebx - Method*
-//
-void TemplateInterpreterGenerator::lock_method() {
-  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.
-
-#ifdef ASSERT
- { Label ok;
-   __ btst(JVM_ACC_SYNCHRONIZED, O0);
-   __ br( Assembler::notZero, false, Assembler::pt, ok);
-   __ delayed()->nop();
-   __ stop("method doesn't need synchronization");
-   __ bind(ok);
-  }
-#endif // ASSERT
-
-  // get synchronization object to O0
-  { Label done;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ btst(JVM_ACC_STATIC, O0);
-    __ br( Assembler::zero, true, Assembler::pt, done);
-    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
-
-    __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
-    __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
-    __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);
-
-    // lock the mirror, not the Klass*
-    __ ld_ptr( O0, mirror_offset, O0);
-
-#ifdef ASSERT
-    __ tst(O0);
-    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
-#endif // ASSERT
-
-    __ bind(done);
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
   }
-
-  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
-  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
-  // __ untested("lock_object from method entry");
-  __ lock_object(Lmonitors, O0);
-}
-
-
-void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
-                                                         Register Rscratch,
-                                                         Register Rscratch2) {
-  const int page_size = os::vm_page_size();
-  Label after_frame_check;
-
-  assert_different_registers(Rframe_size, Rscratch, Rscratch2);
-
-  __ set(page_size, Rscratch);
-  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);
-
-  // get the stack base, and in debug, verify it is non-zero
-  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
-#ifdef ASSERT
-  Label base_not_zero;
-  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
-  __ stop("stack base is zero in generate_stack_overflow_check");
-  __ bind(base_not_zero);
-#endif
-
-  // get the stack size, and in debug, verify it is non-zero
-  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
-  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
-#ifdef ASSERT
-  Label size_not_zero;
-  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
-  __ stop("stack size is zero in generate_stack_overflow_check");
-  __ bind(size_not_zero);
-#endif
-
-  // compute the beginning of the protected zone minus the requested frame size
-  __ sub( Rscratch, Rscratch2,   Rscratch );
-  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
-  __ add( Rscratch, Rscratch2,   Rscratch );
-
-  // Add in the size of the frame (which is the same as subtracting it from the
-  // SP, which would take another register
-  __ add( Rscratch, Rframe_size, Rscratch );
-
-  // the frame is greater than one page in size, so check against
-  // the bottom of the stack
-  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);
-
-  // the stack will overflow, throw an exception
-
-  // Note that SP is restored to sender's sp (in the delay slot). This
-  // is necessary if the sender's frame is an extended compiled frame
-  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
-  // adaptations.
-
-  // Note also that the restored frame is not necessarily interpreted.
-  // Use the shared runtime version of the StackOverflowError.
-  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
-  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
-  __ jump_to(stub, Rscratch);
-  __ delayed()->mov(O5_savedSP, SP);
-
-  // if you get to here, then there is enough stack space
-  __ bind( after_frame_check );
-}
-
-
-//
-// Generate a fixed interpreter frame. This is identical setup for interpreted
-// methods and for native methods hence the shared code.
-
-
-//----------------------------------------------------------------------------------------------------
-// Stack frame layout
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// C2 Calling Conventions:
-//
-// The entry code below assumes that the following registers are set
-// when coming in:
-//    G5_method: holds the Method* of the method to call
-//    Lesp:    points to the TOS of the callers expression stack
-//             after having pushed all the parameters
-//
-// The entry code does the following to setup an interpreter frame
-//   pop parameters from the callers stack by adjusting Lesp
-//   set O0 to Lesp
-//   compute X = (max_locals - num_parameters)
-//   bump SP up by X to accomadate the extra locals
-//   compute X = max_expression_stack
-//               + vm_local_words
-//               + 16 words of register save area
-//   save frame doing a save sp, -X, sp growing towards lower addresses
-//   set Lbcp, Lmethod, LcpoolCache
-//   set Llocals to i0
-//   set Lmonitors to FP - rounded_vm_local_words
-//   set Lesp to Lmonitors - 4
-//
-//  The frame has now been setup to do the rest of the entry code
-
-// Try this optimization:  Most method entries could live in a
-// "one size fits all" stack frame without all the dynamic size
-// calculations.  It might be profitable to do all this calculation
-// statically and approximately for "small enough" methods.
-
-//-----------------------------------------------------------------------------------------------
-
-// C1 Calling conventions
-//
-// Upon method entry, the following registers are setup:
-//
-// g2 G2_thread: current thread
-// g5 G5_method: method to activate
-// g4 Gargs  : pointer to last argument
-//
-//
-// Stack:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// :     free      :
-// |               |
-// +---------------+ <--- Gargs
-// |               |
-// :   arguments   :
-// |               |
-// +---------------+
-// |               |
-//
-//
-//
-// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// :               :
-// |               | <--- Lesp
-// +---------------+ <--- Lmonitors (fp - 0x18)
-// |   VM locals   |
-// +---------------+ <--- fp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- fp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- fp + 0x5c
-// |               |
-// :     free      :
-// |               |
-// +---------------+
-// |               |
-// : nonarg locals :
-// |               |
-// +---------------+
-// |               |
-// :   arguments   :
-// |               | <--- Llocals
-// +---------------+ <--- Gargs
-// |               |
-
-void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
-  //
-  //
-  // The entry code sets up a new interpreter frame in 4 steps:
-  //
-  // 1) Increase caller's SP by for the extra local space needed:
-  //    (check for overflow)
-  //    Efficient implementation of xload/xstore bytecodes requires
-  //    that arguments and non-argument locals are in a contigously
-  //    addressable memory block => non-argument locals must be
-  //    allocated in the caller's frame.
-  //
-  // 2) Create a new stack frame and register window:
-  //    The new stack frame must provide space for the standard
-  //    register save area, the maximum java expression stack size,
-  //    the monitor slots (0 slots initially), and some frame local
-  //    scratch locations.
-  //
-  // 3) The following interpreter activation registers must be setup:
-  //    Lesp       : expression stack pointer
-  //    Lbcp       : bytecode pointer
-  //    Lmethod    : method
-  //    Llocals    : locals pointer
-  //    Lmonitors  : monitor pointer
-  //    LcpoolCache: constant pool cache
-  //
-  // 4) Initialize the non-argument locals if necessary:
-  //    Non-argument locals may need to be initialized to NULL
-  //    for GC to work. If the oop-map information is accurate
-  //    (in the absence of the JSR problem), no initialization
-  //    is necessary.
-  //
-  // (gri - 2/25/2000)
-
-
-  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
-
-  const int extra_space =
-    rounded_vm_local_words +                   // frame local scratch space
-    Method::extra_stack_entries() +            // extra stack for jsr 292
-    frame::memory_parameter_word_sp_offset +   // register save area
-    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
-
-  const Register Glocals_size = G3;
-  const Register RconstMethod = Glocals_size;
-  const Register Otmp1 = O3;
-  const Register Otmp2 = O4;
-  // Lscratch can't be used as a temporary because the call_stub uses
-  // it to assert that the stack frame was setup correctly.
-  const Address constMethod       (G5_method, Method::const_offset());
-  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
-
-  __ ld_ptr( constMethod, RconstMethod );
-  __ lduh( size_of_parameters, Glocals_size);
-
-  // Gargs points to first local + BytesPerWord
-  // Set the saved SP after the register window save
-  //
-  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
-  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
-  __ add(Gargs, Otmp1, Gargs);
-
-  if (native_call) {
-    __ calc_mem_param_words( Glocals_size, Gframe_size );
-    __ add( Gframe_size,  extra_space, Gframe_size);
-    __ round_to( Gframe_size, WordsPerLong );
-    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
-  } else {
-
-    //
-    // Compute number of locals in method apart from incoming parameters
-    //
-    const Address size_of_locals    (Otmp1, ConstMethod::size_of_locals_offset());
-    __ ld_ptr( constMethod, Otmp1 );
-    __ lduh( size_of_locals, Otmp1 );
-    __ sub( Otmp1, Glocals_size, Glocals_size );
-    __ round_to( Glocals_size, WordsPerLong );
-    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );
-
-    // see if the frame is greater than one page in size. If so,
-    // then we need to verify there is enough stack space remaining
-    // Frame_size = (max_stack + extra_space) * BytesPerWord;
-    __ ld_ptr( constMethod, Gframe_size );
-    __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
-    __ add( Gframe_size, extra_space, Gframe_size );
-    __ round_to( Gframe_size, WordsPerLong );
-    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
-
-    // Add in java locals size for stack overflow check only
-    __ add( Gframe_size, Glocals_size, Gframe_size );
-
-    const Register Otmp2 = O4;
-    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
-    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);
-
-    __ sub( Gframe_size, Glocals_size, Gframe_size);
-
-    //
-    // bump SP to accomodate the extra locals
-    //
-    __ sub( SP, Glocals_size, SP );
-  }
-
-  //
-  // now set up a stack frame with the size computed above
-  //
-  __ neg( Gframe_size );
-  __ save( SP, Gframe_size, SP );
-
-  //
-  // now set up all the local cache registers
-  //
-  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
-  // that all present references to Lbyte_code initialize the register
-  // immediately before use
-  if (native_call) {
-    __ mov(G0, Lbcp);
-  } else {
-    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
-    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
-  }
-  __ mov( G5_method, Lmethod);                 // set Lmethod
-  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
-  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
-#ifdef _LP64
-  __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
-#endif
-  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp
-
-  // setup interpreter activation registers
-  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals
-
-  if (ProfileInterpreter) {
-#ifdef FAST_DISPATCH
-    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
-    // they both use I2.
-    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
-#endif // FAST_DISPATCH
-    __ set_method_data_pointer();
-  }
-
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
 }
 
-// Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  // Code: _aload_0, _getfield, _areturn
-  // parameter size = 1
-  //
-  // The code that gets generated by this routine is split into 2 parts:
-  //    1. The "intrinsified" code for G1 (or any SATB based GC),
-  //    2. The slow path - which is an expansion of the regular method entry.
-  //
-  // Notes:-
-  // * In the G1 code we do not check whether we need to block for
-  //   a safepoint. If G1 is enabled then we must execute the specialized
-  //   code for Reference.get (except when the Reference object is null)
-  //   so that we can log the value in the referent field with an SATB
-  //   update buffer.
-  //   If the code for the getfield template is modified so that the
-  //   G1 pre-barrier code is executed when the current method is
-  //   Reference.get() then going through the normal method entry
-  //   will be fine.
-  // * The G1 code can, however, check the receiver object (the instance
-  //   of java.lang.Reference) and jump to the slow path if null. If the
-  //   Reference object is null then we obviously cannot fetch the referent
-  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
-  //   regular method entry code to generate the NPE.
-  //
-  // This code is based on generate_accessor_entry.
-
-  address entry = __ pc();
-
-  const int referent_offset = java_lang_ref_Reference::referent_offset;
-  guarantee(referent_offset > 0, "referent offset not initialized");
-
-  if (UseG1GC) {
-    Label slow_path;
-
-    // In the G1 code we don't check if we need to reach a safepoint. We
-    // continue and the thread will safepoint at the next bytecode dispatch.
-
-    // Check if local 0 != NULL
-    // If the receiver is null then it is OK to jump to the slow path.
-    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    // check if local 0 == NULL and go to the slow path
-    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);
-
-
-    // Load the value of the referent field.
-    if (Assembler::is_simm13(referent_offset)) {
-      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
-    } else {
-      __ set(referent_offset, G3_scratch);
-      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
-    }
-
-    // Generate the G1 pre-barrier code to log the value of
-    // the referent field in an SATB buffer. Note with
-    // these parameters the pre-barrier does not generate
-    // the load of the previous value
-
-    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
-                            Otos_i /* pre_val */,
-                            G3_scratch /* tmp */,
-                            true /* preserve_o_regs */);
-
-    // _areturn
-    __ retl();                      // return from leaf routine
-    __ delayed()->mov(O5_savedSP, SP);
-
-    // Generate regular method entry
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
-    return entry;
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the accessor entry point
-  // Reference.get is an accessor
-  return NULL;
-}
-
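The intrinsic removed above is easier to see in Java-like pseudocode; a sketch (SATB.enqueue stands in for the g1_write_barrier_pre call and is not a real API):

    Object get() {
      if (this == null) goto slow_path;  // regular entry generates the NPE
      Object referent = this.referent;   // load_heap_oop at referent_offset
      SATB.enqueue(referent);            // g1_write_barrier_pre logs the pre-value
      return referent;                   // leaf return via retl
    }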
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.update(int crc, int b)
- */
-address InterpreterGenerator::generate_CRC32_update_entry() {
-
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    Label L_slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
-    __ set(SafepointSynchronize::_not_synchronized, O3);
-    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
-
-    // Load parameters
-    const Register crc   = O0; // initial crc
-    const Register val   = O1; // byte to update with
-    const Register table = O2; // address of 256-entry lookup table
-
-    __ ldub(Gargs, 3, val);
-    __ lduw(Gargs, 8, crc);
-
-    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
-
-    __ not1(crc); // ~crc
-    __ clruwu(crc);
-    __ update_byte_crc32(crc, val, table);
-    __ not1(crc); // ~crc
-
-    // result in O0
-    __ retl();
-    __ delayed()->nop();
-
-    // generate a vanilla native entry as the slow path
-    __ bind(L_slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
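The not1/update_byte_crc32/not1 sequence above is the standard table-driven, reflected CRC-32 byte step; a minimal C++ sketch under that assumption (table being the 256-entry array behind StubRoutines::crc_table_addr()):

    static inline uint32_t crc32_update_byte(uint32_t crc, uint8_t b,
                                             const uint32_t* table) {
      // The caller complements crc before the first update and after the
      // last one, matching the surrounding not1 instructions.
      return (crc >> 8) ^ table[(crc ^ b) & 0xff];
    }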
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
- *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
- */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    Label L_slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
-    __ set(SafepointSynchronize::_not_synchronized, O3);
-    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
-
-    // Load parameters from the stack
-    const Register crc    = O0; // initial crc
-    const Register buf    = O1; // source java byte array address
-    const Register len    = O2; // len
-    const Register offset = O3; // offset
-
-    // Arguments are reversed on java expression stack
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
-      __ lduw(Gargs, 0,  len);
-      __ lduw(Gargs, 8,  offset);
-      __ ldx( Gargs, 16, buf);
-      __ lduw(Gargs, 32, crc);
-      __ add(buf, offset, buf);
-    } else {
-      __ lduw(Gargs, 0,  len);
-      __ lduw(Gargs, 8,  offset);
-      __ ldx( Gargs, 16, buf);
-      __ lduw(Gargs, 24, crc);
-      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
-      __ add(buf, offset, buf);
-    }
-
-    // Call the crc32 kernel
-    __ MacroAssembler::save_thread(L7_thread_cache);
-    __ kernel_crc32(crc, buf, len, O3);
-    __ MacroAssembler::restore_thread(L7_thread_cache);
-
-    // result in O0
-    __ retl();
-    __ delayed()->nop();
-
-    // generate a vanilla native entry as the slow path
-    __ bind(L_slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
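The loads above pick the arguments off the Java expression stack, where they sit in reverse order; a sketch of the layout being assumed (8-byte stack slots; crc lands at Gargs+32 in the ByteBuffer case because the long buf takes two slots):

    updateBytes(crc, b, off, len)      updateByteBuffer(crc, buf, off, len)
      Gargs + 0  : len                   Gargs + 0  : len
      Gargs + 8  : off                   Gargs + 8  : off
      Gargs + 16 : byte[] b              Gargs + 16 : long buf (two slots)
      Gargs + 24 : crc                   Gargs + 32 : crc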
-//
-// Interpreter stub for calling a native method. (asm interpreter)
-// This sets up a somewhat different looking stack for calling the native method
-// than the typical interpreter frame setup.
-//
-
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
-  address entry = __ pc();
-
-  // the following temporary registers are used during frame creation
-  const Register Gtmp1 = G3_scratch ;
-  const Register Gtmp2 = G1_scratch;
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // make sure registers are different!
-  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
-
-  const Address Laccess_flags(Lmethod, Method::access_flags_offset());
-
-  const Register Glocals_size = G3;
-  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
-
-  // make sure method is native & not abstract
-  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
-#ifdef ASSERT
-  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
-  {
-    Label L;
-    __ btst(JVM_ACC_NATIVE, Gtmp1);
-    __ br(Assembler::notZero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("tried to execute non-native method as native");
-    __ bind(L);
-  }
-  { Label L;
-    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
-    __ br(Assembler::zero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("tried to execute abstract method as non-abstract");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  // generate the code to allocate the interpreter stack frame
-  generate_fixed_frame(true);
-
-  //
-  // No locals to initialize for native method
-  //
-
-  // this slot will be set later, we initialize it to null here just in
-  // case we get a GC before the actual value is stored later
-  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);
-
-  const Address do_not_unlock_if_synchronized(G2_thread,
-    JavaThread::do_not_unlock_if_synchronized_offset());
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of synchronized methods which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
-  // runtime, exception handling i.e. unlock_if_synchronized_method will
-  // check this thread local flag.
-  // This flag has two effects: it forces an unwind in the topmost
-  // interpreter frame, and it suppresses the unlock while doing so.
-
-  __ movbool(true, G3_scratch);
-  __ stbool(G3_scratch, do_not_unlock_if_synchronized);
-
-  // increment invocation counter and check for overflow
-  //
-  // Note: checking for negative value instead of overflow
-  //       so we have a 'sticky' overflow test (may be of
-  //       importance as soon as we have true MT/MP)
-  Label invocation_counter_overflow;
-  Label Lcontinue;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-
-  }
-  __ bind(Lcontinue);
-
-  bang_stack_shadow_pages(true);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ stbool(G0, do_not_unlock_if_synchronized);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so method is not locked if overflows.
-
-  if (synchronized) {
-    lock_method();
-  } else {
-#ifdef ASSERT
-    { Label ok;
-      __ ld(Laccess_flags, O0);
-      __ btst(JVM_ACC_SYNCHRONIZED, O0);
-      __ br( Assembler::zero, false, Assembler::pt, ok);
-      __ delayed()->nop();
-      __ stop("method needs synchronization");
-      __ bind(ok);
-    }
-#endif // ASSERT
-  }
-
-
-  // start execution
-  __ verify_thread();
-
-  // JVMTI support
-  __ notify_method_entry();
-
-  // native call
-
-  // (note that O0 is never an oop--at most it is a handle)
-  // It is important not to smash any handles created by this call,
-  // until any oop handle in O0 is dereferenced.
-
-  // (note that the space for outgoing params is preallocated)
-
-  // get signature handler
-  { Label L;
-    Address signature_handler(Lmethod, Method::signature_handler_offset());
-    __ ld_ptr(signature_handler, G3_scratch);
-    __ br_notnull_short(G3_scratch, Assembler::pt, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
-    __ ld_ptr(signature_handler, G3_scratch);
-    __ bind(L);
-  }
-
-  // Push a new frame so that the args will really be stored in it.
-  // Copy a few locals across so the new frame has the variables
-  // we need, but these values will be dead at the jni call and
-  // therefore not gc volatile like the values in the current
-  // frame (Lmethod in particular).
-
-  // Flush the method pointer to the register save area
-  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
-  __ mov(Llocals, O1);
-
-  // calculate where the mirror handle body is allocated in the interpreter frame:
-  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
-
-  // Calculate current frame size
-  __ sub(SP, FP, O3);         // Calculate negative of current frame size
-  __ save(SP, O3, SP);        // Allocate an identical sized frame
-
-  // Note I7 has leftover trash. Slow signature handler will fill it in
-  // should we get there. Normal jni call will set reasonable last_Java_pc
-  // below (and fix I7 so the stack trace doesn't have a meaningless frame
-  // in it).
-
-  // Load interpreter frame's Lmethod into same register here
-
-  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
-
-  __ mov(I1, Llocals);
-  __ mov(I2, Lscratch2);     // save the address of the mirror
-
-
-  // ONLY Lmethod and Llocals are valid here!
-
-  // call signature handler. It will move the args properly since Llocals
-  // in the current frame matches that in the outer frame.
-
-  __ callr(G3_scratch, 0);
-  __ delayed()->nop();
-
-  // Result handler is in Lscratch
-
-  // Reload interpreter frame's Lmethod since slow signature handler may block
-  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
-
-  { Label not_static;
-
-    __ ld(Laccess_flags, O0);
-    __ btst(JVM_ACC_STATIC, O0);
-    __ br( Assembler::zero, false, Assembler::pt, not_static);
-    // get native function entry point (O0 is a good temp until the very end)
-    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
-    // for static methods insert the mirror argument
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-
-    __ ld_ptr(Lmethod, Method::const_offset(), O1);
-    __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
-    __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
-    __ ld_ptr(O1, mirror_offset, O1);
-#ifdef ASSERT
-    if (!PrintSignatureHandlers)  // do not dirty the output with this
-    { Label L;
-      __ br_notnull_short(O1, Assembler::pt, L);
-      __ stop("mirror is missing");
-      __ bind(L);
-    }
-#endif // ASSERT
-    __ st_ptr(O1, Lscratch2, 0);
-    __ mov(Lscratch2, O1);
-    __ bind(not_static);
-  }
-
-  // At this point, arguments have been copied off of stack into
-  // their JNI positions, which are O1..O5 and SP[68..].
-  // Oops are boxed in-place on the stack, with handles copied to arguments.
-  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.
-
-#ifdef ASSERT
-  { Label L;
-    __ br_notnull_short(O0, Assembler::pt, L);
-    __ stop("native entry point is missing");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  //
-  // setup the frame anchor
-  //
-  // The scavenge function only needs to know that the PC of this frame is
-  // in the interpreter method entry code; it doesn't need to know the exact
-  // PC, and hence we can use O7, which points to the return address from the
-  // previous call in the code stream (the signature handler function)
-  //
-  // The other trick is we set last_Java_sp to FP instead of the usual SP because
-  // we have pushed the extra frame in order to protect the volatile register(s)
-  // in that frame when we return from the jni call
-  //
-
-  __ set_last_Java_frame(FP, O7);
-  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
-                   // not meaningless information that'll confuse me.
-
-  // flush the windows now. We don't care about the current (protection) frame
-  // only the outer frames
-
-  __ flushw();
-
-  // mark windows as flushed
-  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
-  __ set(JavaFrameAnchor::flushed, G3_scratch);
-  __ st(G3_scratch, flags);
-
-  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
-
-  Address thread_state(G2_thread, JavaThread::thread_state_offset());
-#ifdef ASSERT
-  { Label L;
-    __ ld(thread_state, G3_scratch);
-    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
-    __ stop("Wrong thread state in native stub");
-    __ bind(L);
-  }
-#endif // ASSERT
-  __ set(_thread_in_native, G3_scratch);
-  __ st(G3_scratch, thread_state);
-
-  // Call the jni method, using the delay slot to set the JNIEnv* argument.
-  __ save_thread(L7_thread_cache); // save Gthread
-  __ callr(O0, 0);
-  __ delayed()->
-     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
-
-  // Back from jni method. Lmethod in this frame is DEAD, DEAD, DEAD.
-
-  __ restore_thread(L7_thread_cache); // restore G2_thread
-  __ reinit_heapbase();
-
-  // must we block?
-
-  // Block, if necessary, before resuming in _thread_in_Java state.
-  // In order for GC to work, don't clear the last_Java_sp until after blocking.
-  { Label no_block;
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-
-    // Switch thread to "native transition" state before reading the synchronization state.
-    // This additional state is necessary because reading and testing the synchronization
-    // state is not atomic w.r.t. GC, as this scenario demonstrates:
-    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
-    //     VM thread changes sync state to synchronizing and suspends threads for GC.
-    //     Thread A is resumed to finish this native method, but doesn't block here since it
-    //     didn't see any synchronization in progress, and escapes.
-    __ set(_thread_in_native_trans, G3_scratch);
-    __ st(G3_scratch, thread_state);
-    if(os::is_MP()) {
-      if (UseMembar) {
-        // Force this write out before the read below
-        __ membar(Assembler::StoreLoad);
-      } else {
-        // Write serialization page so VM thread can do a pseudo remote membar.
-        // We use the current thread pointer to calculate a thread specific
-        // offset to write to within the page. This minimizes bus traffic
-        // due to cache line collision.
-        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
-      }
-    }
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-
-    Label L;
-    __ br(Assembler::notEqual, false, Assembler::pn, L);
-    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
-    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
-    __ bind(L);
-
-    // Block.  Save any potential method result value before the operation and
-    // use a leaf call to leave the last_Java_frame setup undisturbed.
-    save_native_result();
-    __ call_VM_leaf(L7_thread_cache,
-                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
-                    G2_thread);
-
-    // Restore any method result value
-    restore_native_result();
-    __ bind(no_block);
-  }
-
-  // Clear the frame anchor now
-
-  __ reset_last_Java_frame();
-
-  // Move the result handler address
-  __ mov(Lscratch, G3_scratch);
-  // return possible result to the outer frame
-#ifndef _LP64
-  __ mov(O0, I0);
-  __ restore(O1, G0, O1);
-#else
-  __ restore(O0, G0, O0);
-#endif /* _LP64 */
-
-  // Move result handler to expected register
-  __ mov(G3_scratch, Lscratch);
-
-  // Back in normal (native) interpreter frame. State is thread_in_native_trans
-  // switch to thread_in_Java.
-
-  __ set(_thread_in_Java, G3_scratch);
-  __ st(G3_scratch, thread_state);
-
-  // reset handle block
-  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
-  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
-
-  // If we have an oop result, store it where it will be safe for any further gc
-  // until we return, now that we've released the handle it might be protected by
-
-  {
-    Label no_oop, store_result;
-
-    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
-    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
-    __ addcc(G0, O0, O0);
-    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
-    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
-    __ mov(G0, O0);
-
-    __ bind(store_result);
-    // Store it where gc will look for it and result handler expects it.
-    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
-
-    __ bind(no_oop);
-
-  }
-
-
-  // handle exceptions (exception handling will handle unlocking!)
-  { Label L;
-    Address exception_addr(G2_thread, Thread::pending_exception_offset());
-    __ ld_ptr(exception_addr, Gtemp);
-    __ br_null_short(Gtemp, Assembler::pt, L);
-    // Note: This could be handled more efficiently since we know that the native
-    //       method doesn't have an exception handler. We could directly return
-    //       to the exception handler for the caller.
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-
-  // JVMTI support (preserves thread register)
-  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
-
-  if (synchronized) {
-    // save and restore any potential method result value around the unlocking operation
-    save_native_result();
-
-    __ add( __ top_most_monitor(), O1);
-    __ unlock_object(O1);
-
-    restore_native_result();
-  }
-
-#if defined(COMPILER2) && !defined(_LP64)
-
-  // C2 expects long results in G1. We can't tell if we're returning to
-  // interpreted or compiled, so just be safe.
-
-  __ sllx(O0, 32, G1);          // Shift bits into high G1
-  __ srl (O1, 0, O1);           // Zero extend O1
-  __ or3 (O1, G1, G1);          // OR 64 bits into G1
-
-#endif /* COMPILER2 && !_LP64 */
-
-  // dispose of return address and remove activation
-#ifdef ASSERT
-  {
-    Label ok;
-    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
-    __ stop("bad I5_savedSP value");
-    __ should_not_reach_here();
-    __ bind(ok);
-  }
-#endif
-  if (TraceJumps) {
-    // Move target to register that is recordable
-    __ mov(Lscratch, G3_scratch);
-    __ JMP(G3_scratch, 0);
-  } else {
-    __ jmp(Lscratch, 0);
-  }
-  __ delayed()->nop();
-
-
-  if (inc_counter) {
-    // handle invocation counter overflow
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(Lcontinue);
-  }
-
-
-
-  return entry;
-}
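The native entry above walks the thread through the usual state protocol; a sketch of the transitions, using the state names from the code:

    _thread_in_Java
      -> _thread_in_native        // stored just before the callr to the stub
      -> _thread_in_native_trans  // stored once the call returns
         // membar (or serialization page write), re-read the safepoint
         // state, and block in check_special_condition_for_native_trans
         // if a safepoint or suspend is pending
      -> _thread_in_Java          // once it is safe to resume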
-
-
-// Generic method entry to (asm) interpreter
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
-  address entry = __ pc();
-
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // the following temporary registers are used during frame creation
-  const Register Gtmp1 = G3_scratch ;
-  const Register Gtmp2 = G1_scratch;
-
-  // make sure registers are different!
-  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
-
-  const Address constMethod       (G5_method, Method::const_offset());
-  // G5_method seems to be live at the point this is used, so we could make
-  // this look consistent and use it in the asserts.
-  const Address access_flags      (Lmethod,   Method::access_flags_offset());
-
-  const Register Glocals_size = G3;
-  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
-
-  // make sure method is not native & not abstract
-  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
-#ifdef ASSERT
-  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
-  {
-    Label L;
-    __ btst(JVM_ACC_NATIVE, Gtmp1);
-    __ br(Assembler::zero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("tried to execute native method as non-native");
-    __ bind(L);
-  }
-  { Label L;
-    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
-    __ br(Assembler::zero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("tried to execute abstract method as non-abstract");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  // generate the code to allocate the interpreter stack frame
-
-  generate_fixed_frame(false);
-
-#ifdef FAST_DISPATCH
-  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
-                                          // set bytecode dispatch table base
-#endif
-
-  //
-  // Code to initialize the extra (i.e. non-parm) locals
-  //
-  Register init_value = noreg;    // will be G0 if we must clear locals
-  // The way the code was set up before, zerolocals was always true for vanilla java entries.
-  // It could only be false for the specialized entries like accessor or empty, which have
-  // no extra locals, so the testing was a waste of time and the extra locals were always
-  // initialized. We removed this extra complication from already over-complicated code.
-
-  init_value = G0;
-  Label clear_loop;
-
-  const Register RconstMethod = O1;
-  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
-  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());
-
-  // NOTE: If you change the frame layout, this code will need to
-  // be updated!
-  __ ld_ptr( constMethod, RconstMethod );
-  __ lduh( size_of_locals, O2 );
-  __ lduh( size_of_parameters, O1 );
-  __ sll( O2, Interpreter::logStackElementSize, O2);
-  __ sll( O1, Interpreter::logStackElementSize, O1 );
-  __ sub( Llocals, O2, O2 );
-  __ sub( Llocals, O1, O1 );
-
-  __ bind( clear_loop );
-  __ inc( O2, wordSize );
-
-  __ cmp( O2, O1 );
-  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
-  __ delayed()->st_ptr( init_value, O2, 0 );
-
-  const Address do_not_unlock_if_synchronized(G2_thread,
-    JavaThread::do_not_unlock_if_synchronized_offset());
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of synchronized methods which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
-  // runtime, exception handling i.e. unlock_if_synchronized_method will
-  // check this thread local flag.
-  __ movbool(true, G3_scratch);
-  __ stbool(G3_scratch, do_not_unlock_if_synchronized);
-
-  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
-  // increment invocation counter and check for overflow
-  //
-  // Note: checking for negative value instead of overflow
-  //       so we have a 'sticky' overflow test (may be of
-  //       importance as soon as we have true MT/MP)
-  Label invocation_counter_overflow;
-  Label profile_method;
-  Label profile_method_continue;
-  Label Lcontinue;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
-    if (ProfileInterpreter) {
-      __ bind(profile_method_continue);
-    }
-  }
-  __ bind(Lcontinue);
-
-  bang_stack_shadow_pages(false);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ stbool(G0, do_not_unlock_if_synchronized);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so method is not locked if overflows.
-
-  if (synchronized) {
-    lock_method();
-  } else {
-#ifdef ASSERT
-    { Label ok;
-      __ ld(access_flags, O0);
-      __ btst(JVM_ACC_SYNCHRONIZED, O0);
-      __ br( Assembler::zero, false, Assembler::pt, ok);
-      __ delayed()->nop();
-      __ stop("method needs synchronization");
-      __ bind(ok);
-    }
-#endif // ASSERT
-  }
-
-  // start execution
-
-  __ verify_thread();
-
-  // jvmti support
-  __ notify_method_entry();
-
-  // start executing instructions
-  __ dispatch_next(vtos);
-
-
-  if (inc_counter) {
-    if (ProfileInterpreter) {
-      // We have decided to profile this method in the interpreter
-      __ bind(profile_method);
-
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-      __ set_method_data_pointer_for_bcp();
-      __ ba_short(profile_method_continue);
-    }
-
-    // handle invocation counter overflow
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(Lcontinue);
-  }
-
-
-  return entry;
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  // No special entry points that preclude compilation
+  return true;
 }
 
 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
@@ -1747,332 +303,3 @@
   assert(lo <= esp && esp < monitors, "esp in bounds");
 #endif // ASSERT
 }
-
-//----------------------------------------------------------------------------------------------------
-// Exceptions
-void TemplateInterpreterGenerator::generate_throw_exception() {
-
-  // Entry point in previous activation (i.e., if the caller was interpreted)
-  Interpreter::_rethrow_exception_entry = __ pc();
-  // O0: exception
-
-  // entry point for exceptions thrown within interpreter code
-  Interpreter::_throw_exception_entry = __ pc();
-  __ verify_thread();
-  // expression stack is undefined here
-  // O0: exception, i.e. Oexception
-  // Lbcp: exception bcp
-  __ verify_oop(Oexception);
-
-
-  // expression stack must be empty before entering the VM in case of an exception
-  __ empty_expression_stack();
-  // find exception handler address and preserve exception oop
-  // call C routine to find handler and jump to it
-  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
-  __ push_ptr(O1); // push exception for exception handler bytecodes
-
-  __ JMP(O0, 0); // jump to exception handler (may be remove activation entry!)
-  __ delayed()->nop();
-
-
-  // if the exception is not handled in the current frame
-  // the frame is removed and the exception is rethrown
-  // (i.e. exception continuation is _rethrow_exception)
-  //
-  // Note: At this point the bci is still the bci for the instruction which caused
-  //       the exception and the expression stack is empty. Thus, for any VM calls
-  //       at this point, GC will find a legal oop map (with empty expression stack).
-
-  // in current activation
-  // tos: exception
-  // Lbcp: exception bcp
-
-  //
-  // JVMTI PopFrame support
-  //
-
-  Interpreter::_remove_activation_preserving_args_entry = __ pc();
-  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
-  // Set the popframe_processing bit in popframe_condition indicating that we are
-  // currently handling popframe, so that call_VMs that may happen later do not trigger new
-  // popframe handling cycles.
-
-  __ ld(popframe_condition_addr, G3_scratch);
-  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
-  __ stw(G3_scratch, popframe_condition_addr);
-
-  // Empty the expression stack, as in normal exception handling
-  __ empty_expression_stack();
-  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
-
-  {
-    // Check to see whether we are returning to a deoptimized frame.
-    // (The PopFrame call ensures that the caller of the popped frame is
-    // either interpreted or compiled and deoptimizes it if compiled.)
-    // In this case, we can't call dispatch_next() after the frame is
-    // popped, but instead must save the incoming arguments and restore
-    // them after deoptimization has occurred.
-    //
-    // Note that we don't compare the return PC against the
-    // deoptimization blob's unpack entry because of the presence of
-    // adapter frames in C2.
-    Label caller_not_deoptimized;
-    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
-    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);
-
-    const Register Gtmp1 = G3_scratch;
-    const Register Gtmp2 = G1_scratch;
-    const Register RconstMethod = Gtmp1;
-    const Address constMethod(Lmethod, Method::const_offset());
-    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
-
-    // Compute size of arguments for saving when returning to deoptimized caller
-    __ ld_ptr(constMethod, RconstMethod);
-    __ lduh(size_of_parameters, Gtmp1);
-    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
-    __ sub(Llocals, Gtmp1, Gtmp2);
-    __ add(Gtmp2, wordSize, Gtmp2);
-    // Save these arguments
-    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
-    // Inform deoptimization that it is responsible for restoring these arguments
-    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
-    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
-    __ st(Gtmp1, popframe_condition_addr);
-
-    // Return from the current method
-    // The caller's SP was adjusted upon method entry to accommodate
-    // the callee's non-argument locals. Undo that adjustment.
-    __ ret();
-    __ delayed()->restore(I5_savedSP, G0, SP);
-
-    __ bind(caller_not_deoptimized);
-  }
-
-  // Clear the popframe condition flag
-  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
-
-  // Get out of the current method (how this is done depends on the particular compiler calling
-  // convention that the interpreter currently follows)
-  // The caller's SP was adjusted upon method entry to accommodate
-  // the callee's non-argument locals. Undo that adjustment.
-  __ restore(I5_savedSP, G0, SP);
-  // The method data pointer was incremented already during
-  // call profiling. We have to restore the mdp for the current bcp.
-  if (ProfileInterpreter) {
-    __ set_method_data_pointer_for_bcp();
-  }
-
-#if INCLUDE_JVMTI
-  {
-    Label L_done;
-
-    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
-    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
-
-    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
-    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
-
-    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
-
-    __ br_null(G1_scratch, false, Assembler::pn, L_done);
-    __ delayed()->nop();
-
-    __ st_ptr(G1_scratch, Lesp, wordSize);
-    __ bind(L_done);
-  }
-#endif // INCLUDE_JVMTI
-
-  // Resume bytecode interpretation at the current bcp
-  __ dispatch_next(vtos);
-  // end of JVMTI PopFrame support
-
-  Interpreter::_remove_activation_entry = __ pc();
-
-  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
-  __ pop_ptr(Oexception);                                  // get exception
-
-  // Intel has the following comment:
-  //// remove the activation (without doing throws on illegalMonitorExceptions)
-  // They remove the activation without checking for bad monitor state.
-  // %%% We should make sure this is the right semantics before implementing.
-
-  __ set_vm_result(Oexception);
-  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
-
-  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
-
-  __ get_vm_result(Oexception);
-  __ verify_oop(Oexception);
-
-  const int return_reg_adjustment = frame::pc_return_offset;
-  Address issuing_pc_addr(I7, return_reg_adjustment);
-
-  // We are done with this activation frame; find out where to go next.
-  // The continuation point will be an exception handler, which expects
-  // the following registers set up:
-  //
-  // Oexception: exception
-  // Oissuing_pc: the local call that threw exception
-  // Other On: garbage
-  // In/Ln:  the contents of the caller's register window
-  //
-  // We do the required restore at the last possible moment, because we
-  // need to preserve some state across a runtime call.
-  // (Remember that the caller activation is unknown--it might not be
-  // interpreted, so things like Lscratch are useless in the caller.)
-
-  // Although the Intel version uses call_C, we can use the more
-  // compact call_VM.  (The only real difference on SPARC is a
-  // harmlessly ignored [re]set_last_Java_frame, compared with
-  // the Intel code which lacks this.)
-  __ mov(Oexception,      Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
-  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
-  __ super_call_VM_leaf(L7_thread_cache,
-                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
-                        G2_thread, Oissuing_pc->after_save());
-
-  // The caller's SP was adjusted upon method entry to accommodate
-  // the callee's non-argument locals. Undo that adjustment.
-  __ JMP(O0, 0);                         // return exception handler in caller
-  __ delayed()->restore(I5_savedSP, G0, SP);
-
-  // (same old exception object is already in Oexception; see above)
-  // Note that an "issuing PC" is actually the next PC after the call
-}
-
-
-//
-// JVMTI ForceEarlyReturn support
-//
-
-address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
-  address entry = __ pc();
-
-  __ empty_expression_stack();
-  __ load_earlyret_value(state);
-
-  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
-  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
-
-  // Clear the earlyret state
-  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
-
-  __ remove_activation(state,
-                       /* throw_monitor_exception */ false,
-                       /* install_monitor_exception */ false);
-
-  // The caller's SP was adjusted upon method entry to accommodate
-  // the callee's non-argument locals. Undo that adjustment.
-  __ ret();                             // return to caller
-  __ delayed()->restore(I5_savedSP, G0, SP);
-
-  return entry;
-} // end of JVMTI ForceEarlyReturn support
-
-
-//------------------------------------------------------------------------------------------------------------------------
-// Helper for vtos entry point generation
-
-void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
-  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
-  Label L;
-  aep = __ pc(); __ push_ptr(); __ ba_short(L);
-  fep = __ pc(); __ push_f();   __ ba_short(L);
-  dep = __ pc(); __ push_d();   __ ba_short(L);
-  lep = __ pc(); __ push_l();   __ ba_short(L);
-  iep = __ pc(); __ push_i();
-  bep = cep = sep = iep;                        // there aren't any
-  vep = __ pc(); __ bind(L);                    // fall through
-  generate_and_dispatch(t);
-}
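The layout this helper emits, as a sketch: each typed entry spills its tos-cached value and branches to the shared vtos point, with iep falling straight through (the subword int states reuse iep):

    aep: push_ptr; ba L        lep: push_l; ba L
    fep: push_f;   ba L        iep: push_i;          // falls through
    dep: push_d;   ba L        bep = cep = sep = iep
    vep: L: generate_and_dispatch(t)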
-
-// --------------------------------------------------------------------------------
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
- : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-// --------------------------------------------------------------------------------
-
-// Non-product code
-#ifndef PRODUCT
-address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
-  address entry = __ pc();
-
-  __ push(state);
-  __ mov(O7, Lscratch); // protect return address within interpreter
-
-  // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
-  __ mov( Otos_l2, G3_scratch );
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
-  __ mov(Lscratch, O7); // restore return address
-  __ pop(state);
-  __ retl();
-  __ delayed()->nop();
-
-  return entry;
-}
-
-
-// helpers for generate_and_dispatch
-
-void TemplateInterpreterGenerator::count_bytecode() {
-  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
-}
-
-
-void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
-  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
-}
-
-
-void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
-  AddressLiteral index   (&BytecodePairHistogram::_index);
-  AddressLiteral counters((address) &BytecodePairHistogram::_counters);
-
-  // get index, shift out old bytecode, bring in new bytecode, and store it
-  // _index = (_index >> log2_number_of_codes) |
-  //          (bytecode << log2_number_of_codes);
-
-  __ load_contents(index, G4_scratch);
-  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
-  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes,  G3_scratch );
-  __ or3( G3_scratch,  G4_scratch, G4_scratch );
-  __ store_contents(G4_scratch, index, G3_scratch);
-
-  // bump bucket contents
-  // _counters[_index] ++;
-
-  __ set(counters, G3_scratch);                       // loads into G3_scratch
-  __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // Index is word address
-  __ add (G3_scratch, G4_scratch, G3_scratch);        // Add in index
-  __ ld (G3_scratch, 0, G4_scratch);
-  __ inc (G4_scratch);
-  __ st (G4_scratch, 0, G3_scratch);
-}
-
-
-void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
-  // Call a little run-time stub to avoid blow-up for each bytecode.
-  // The run-time stub saves the right registers, depending on
-  // the tosca in-state for the given template.
-  address entry = Interpreter::trace_code(t->tos_in());
-  guarantee(entry != NULL, "entry must have been generated");
-  __ call(entry, relocInfo::none);
-  __ delayed()->nop();
-}
-
-
-void TemplateInterpreterGenerator::stop_interpreter_at() {
-  AddressLiteral counter(&BytecodeCounter::_counter_value);
-  __ load_contents(counter, G3_scratch);
-  AddressLiteral stop_at(&StopInterpreterAt);
-  __ load_ptr_contents(stop_at, G4_scratch);
-  __ cmp(G3_scratch, G4_scratch);
-  __ breakpoint_trap(Assembler::equal, Assembler::icc);
-}
-#endif // not PRODUCT
-#endif // !CC_INTERP
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -175,6 +175,7 @@
     movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
     // NULL last_sp until next java call
     movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+    NOT_LP64(empty_FPU_stack());
   }
 
   // Helpers for swap and dup
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -184,20 +183,3 @@
 
   return entry_point;
 }
-
-
-void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
-
-  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
-  // the days we had adapter frames. When we deoptimize a situation where a
-  // compiled caller calls a compiled callee, the caller will have registers it
-  // expects to survive the call to the callee. If we deoptimize the callee, the only
-  // way we can restore these registers is to have the oldest interpreter
-  // frame that we create restore these values. That is what this routine
-  // will accomplish.
-
-  // At the moment we have modified c2 to not have any callee save registers
-  // so this problem does not exist and this routine is just a place holder.
-
-  assert(f->is_interpreted_frame(), "must be interpreted");
-}
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -298,19 +297,3 @@
 
   return entry_point;
 }
-
-void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
-
-  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
-  // the days we had adapter frames. When we deoptimize a situation where a
-  // compiled caller calls a compiled callee, the caller will have registers it
-  // expects to survive the call to the callee. If we deoptimize the callee, the only
-  // way we can restore these registers is to have the oldest interpreter
-  // frame that we create restore these values. That is what this routine
-  // will accomplish.
-
-  // At the moment we have modified c2 to not have any callee save registers
-  // so this problem does not exist and this routine is just a place holder.
-
-  assert(f->is_interpreted_frame(), "must be interpreted");
-}
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -39,6 +39,7 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "runtime/thread.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1CollectedHeap.inline.hpp"
@@ -10834,3 +10835,43 @@
 SkipIfEqual::~SkipIfEqual() {
   _masm->bind(_label);
 }
+
+// 32-bit Windows has its own fast-path implementation
+// of get_thread
+#if !defined(WIN32) || defined(_LP64)
+
+// This is simply a call to Thread::current()
+void MacroAssembler::get_thread(Register thread) {
+  if (thread != rax) {
+    push(rax);
+  }
+  LP64_ONLY(push(rdi);)
+  LP64_ONLY(push(rsi);)
+  push(rdx);
+  push(rcx);
+#ifdef _LP64
+  push(r8);
+  push(r9);
+  push(r10);
+  push(r11);
+#endif
+
+  MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);
+
+#ifdef _LP64
+  pop(r11);
+  pop(r10);
+  pop(r9);
+  pop(r8);
+#endif
+  pop(rcx);
+  pop(rdx);
+  LP64_ONLY(pop(rsi);)
+  LP64_ONLY(pop(rdi);)
+  if (thread != rax) {
+    mov(thread, rax);
+    pop(rax);
+  }
+}
+
+#endif
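A note on the get_thread added above: Thread::current() is an ordinary C++ call, so every caller-saved register that might hold a live value is spilled around it, and rax is special-cased because it carries the return value. A hypothetical call site, as a sketch:

    __ get_thread(rbx);   // rbx := Thread::current(); rax and the other
                          // pushed registers come back unchanged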
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,1874 @@
+/*
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateTable.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+#define __ _masm->
+
+#ifndef CC_INTERP
+
+// Global Register Names
+static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
+static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
+
+const int method_offset = frame::interpreter_frame_method_offset * wordSize;
+const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
+const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
+
+//-----------------------------------------------------------------------------
+
+address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
+  address entry = __ pc();
+
+#ifdef ASSERT
+  {
+    Label L;
+    __ lea(rax, Address(rbp,
+                        frame::interpreter_frame_monitor_block_top_offset *
+                        wordSize));
+    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
+                         // grows negative)
+    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
+    __ stop ("interpreter frame not set up");
+    __ bind(L);
+  }
+#endif // ASSERT
+  // Restore bcp under the assumption that the current frame is still
+  // interpreted
+  __ restore_bcp();
+
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // throw exception
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::throw_StackOverflowError));
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
+        const char* name) {
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // setup parameters
+  // ??? convention: expect aberrant index in register ebx
+  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+  __ lea(rarg, ExternalAddress((address)name));
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::
+                              throw_ArrayIndexOutOfBoundsException),
+             rarg, rbx);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
+  address entry = __ pc();
+
+  // object is at TOS
+  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+  __ pop(rarg);
+
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::
+                              throw_ClassCastException),
+             rarg);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_exception_handler_common(
+        const char* name, const char* message, bool pass_oop) {
+  assert(!pass_oop || message == NULL, "either oop or message but not both");
+  address entry = __ pc();
+
+  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);
+
+  if (pass_oop) {
+    // object is at TOS
+    __ pop(rarg2);
+  }
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // setup parameters
+  __ lea(rarg, ExternalAddress((address)name));
+  if (pass_oop) {
+    __ call_VM(rax, CAST_FROM_FN_PTR(address,
+                                     InterpreterRuntime::
+                                     create_klass_exception),
+               rarg, rarg2);
+  } else {
+    // It's kind of lame that ExternalAddress can't take NULL, because
+    // external_word_Relocation will assert.
+    if (message != NULL) {
+      __ lea(rarg2, ExternalAddress((address)message));
+    } else {
+      __ movptr(rarg2, NULL_WORD);
+    }
+    __ call_VM(rax,
+               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
+               rarg, rarg2);
+  }
+  // throw exception
+  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
+  address entry = __ pc();
+  // NULL last_sp until next java call
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  __ dispatch_next(state);
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+  address entry = __ pc();
+
+#ifndef _LP64
+#ifdef COMPILER2
+  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
+    for (int i = 1; i < 8; i++) {
+        __ ffree(i);
+    }
+  } else if (UseSSE < 2) {
+    __ empty_FPU_stack();
+  }
+#endif // COMPILER2
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
+    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
+  } else {
+    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
+  }
+
+  if (state == ftos) {
+    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
+  } else if (state == dtos) {
+    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
+  }
+#endif // _LP64
+
+  // Restore stack bottom in case i2c adjusted stack
+  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // and NULL it as marker that esp is now tos until next java call
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+
+  __ restore_bcp();
+  __ restore_locals();
+
+  if (state == atos) {
+    Register mdp = rbx;
+    Register tmp = rcx;
+    __ profile_return_type(mdp, rax, tmp);
+  }
+
+  const Register cache = rbx;
+  const Register index = rcx;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
+  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
+  __ dispatch_next(state, step);
+
+  return entry;
+}
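The closing lea above pops the callee's arguments; a sketch of the computation, assuming the parameter size sits in the low bits of the cache-entry flags (which is what parameter_size_mask extracts):

    parameter_size = flags & ConstantPoolCacheEntry::parameter_size_mask;
    rsp += parameter_size * Interpreter::stackElementScale();  // drop arguments
    dispatch_next(state, step);  // resume at the bytecode after the call site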
+
+
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
+  address entry = __ pc();
+
+#ifndef _LP64
+  if (state == ftos) {
+    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
+  } else if (state == dtos) {
+    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
+  }
+#endif // _LP64
+
+  // NULL last_sp until next java call
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  __ restore_bcp();
+  __ restore_locals();
+  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(thread));
+#if INCLUDE_JVMCI
+  // Check if we need to take lock at entry of synchronized method.
+  if (UseJVMCICompiler) {
+    Label L;
+    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
+    __ jcc(Assembler::zero, L);
+    // Clear flag.
+    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
+    // Satisfy calling convention for lock_method().
+    __ get_method(rbx);
+    // Take lock.
+    lock_method();
+    __ bind(L);
+  }
+#endif
+  // handle exceptions
+  {
+    Label L;
+    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
+    __ jcc(Assembler::zero, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+  __ dispatch_next(state, step);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_result_handler_for(
+        BasicType type) {
+  address entry = __ pc();
+  switch (type) {
+  case T_BOOLEAN: __ c2bool(rax);            break;
+#ifndef _LP64
+  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
+#else
+  case T_CHAR   : __ movzwl(rax, rax);       break;
+#endif // _LP64
+  case T_BYTE   : __ sign_extend_byte(rax);  break;
+  case T_SHORT  : __ sign_extend_short(rax); break;
+  case T_INT    : /* nothing to do */        break;
+  case T_LONG   : /* nothing to do */        break;
+  case T_VOID   : /* nothing to do */        break;
+#ifndef _LP64
+  case T_DOUBLE :
+  case T_FLOAT  :
+    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
+      __ pop(t);                            // remove return address first
+      // Must return a result for interpreter or compiler. In SSE
+      // mode, results are returned in xmm0 and the FPU stack must
+      // be empty.
+      if (type == T_FLOAT && UseSSE >= 1) {
+        // Load ST0
+        __ fld_d(Address(rsp, 0));
+        // Store as float and empty fpu stack
+        __ fstp_s(Address(rsp, 0));
+        // and reload
+        __ movflt(xmm0, Address(rsp, 0));
+      } else if (type == T_DOUBLE && UseSSE >= 2 ) {
+        __ movdbl(xmm0, Address(rsp, 0));
+      } else {
+        // restore ST0
+        __ fld_d(Address(rsp, 0));
+      }
+      // and pop the temp
+      __ addptr(rsp, 2 * wordSize);
+      __ push(t);                           // restore return address
+    }
+    break;
+#else
+  case T_FLOAT  : /* nothing to do */        break;
+  case T_DOUBLE : /* nothing to do */        break;
+#endif // _LP64
+
+  case T_OBJECT :
+    // retrieve result from frame
+    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
+    // and verify it
+    __ verify_oop(rax);
+    break;
+  default       : ShouldNotReachHere();
+  }
+  __ ret(0);                                   // return from result handler
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_safept_entry_for(
+        TosState state,
+        address runtime_entry) {
+  address entry = __ pc();
+  __ push(state);
+  __ call_VM(noreg, runtime_entry);
+  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
+  return entry;
+}
+
+
+
+// Helpers for commoning out cases in the various types of method entries.
+//
+
+
+// increment invocation count & check for overflow
+//
+// Note: checking for negative value instead of overflow
+//       so we have a 'sticky' overflow test
+//
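+// In effect (editorial sketch, not generated code):
+//   counter += InvocationCounter::count_increment;
+//   if (<masked counter crosses the threshold>) goto overflow;
+// Because the test is on the (masked) counter value itself rather than on a
+// one-shot carry flag, it keeps firing once the threshold has been reached.
+//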
+// rbx: method
+// rcx: invocation counter
+//
+void InterpreterGenerator::generate_counter_incr(
+        Label* overflow,
+        Label* profile_method,
+        Label* profile_method_continue) {
+  Label done;
+  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
+  if (TieredCompilation) {
+    int increment = InvocationCounter::count_increment;
+    Label no_mdo;
+    if (ProfileInterpreter) {
+      // Are we profiling?
+      __ movptr(rax, Address(rbx, Method::method_data_offset()));
+      __ testptr(rax, rax);
+      __ jccb(Assembler::zero, no_mdo);
+      // Increment counter in the MDO
+      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
+                                                in_bytes(InvocationCounter::counter_offset()));
+      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
+      __ jmp(done);
+    }
+    __ bind(no_mdo);
+    // Increment counter in MethodCounters
+    const Address invocation_counter(rax,
+                  MethodCounters::invocation_counter_offset() +
+                  InvocationCounter::counter_offset());
+    __ get_method_counters(rbx, rax, done);
+    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
+                               false, Assembler::zero, overflow);
+    __ bind(done);
+  } else { // not TieredCompilation
+    const Address backedge_counter(rax,
+                  MethodCounters::backedge_counter_offset() +
+                  InvocationCounter::counter_offset());
+    const Address invocation_counter(rax,
+                  MethodCounters::invocation_counter_offset() +
+                  InvocationCounter::counter_offset());
+
+    __ get_method_counters(rbx, rax, done);
+
+    if (ProfileInterpreter) {
+      __ incrementl(Address(rax,
+              MethodCounters::interpreter_invocation_counter_offset()));
+    }
+    // Update standard invocation counters
+    __ movl(rcx, invocation_counter);
+    __ incrementl(rcx, InvocationCounter::count_increment);
+    __ movl(invocation_counter, rcx); // save invocation count
+
+    __ movl(rax, backedge_counter);   // load backedge counter
+    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
+
+    __ addl(rcx, rax);                // add both counters
+
+    // profile_method is non-NULL only for interpreted methods, so
+    // (profile_method != NULL) is equivalent to !native_call
+
+    if (ProfileInterpreter && profile_method != NULL) {
+      // Test to see if we should create a method data oop
+      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
+      __ jcc(Assembler::less, *profile_method_continue);
+
+      // if no method data exists, go to profile_method
+      __ test_method_data_pointer(rax, *profile_method);
+    }
+
+    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
+    __ jcc(Assembler::aboveEqual, *overflow);
+    __ bind(done);
+  }
+}
+
+void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
+
+  // Asm interpreter on entry
+  // r14/rdi - locals
+  // r13/rsi - bcp
+  // rbx - method
+  // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
+  // rbp - interpreter frame
+
+  // On return (i.e. jump to entry_point, back to the invocation of the
+  // interpreter) everything is as it was on entry, except that
+  // rdx is not restored (it does not appear to actually be set).
+
+  // InterpreterRuntime::frequency_counter_overflow takes two
+  // arguments, the first (thread) is passed by call_VM, the second
+  // indicates if the counter overflow occurs at a backwards branch
+  // (NULL bcp).  We pass zero for it.  The call returns the address
+  // of the verified entry point for the method or NULL if the
+  // compilation did not complete (either went background or bailed
+  // out).
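+  //
+  // Call shape (editorial sketch of the call_VM below):
+  //   InterpreterRuntime::frequency_counter_overflow(thread, NULL /* bcp */)
+  // which returns the verified entry point of the compiled method, or NULL.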
+  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+  __ movl(rarg, 0);
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::frequency_counter_overflow),
+             rarg);
+
+  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
+  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
+  // and jump to the interpreted entry.
+  __ jmp(*do_continue, relocInfo::none);
+}
+
+// See if we've got enough room on the stack for locals plus overhead.
+// The expression stack grows down incrementally, so the normal guard
+// page mechanism will work for that.
+//
+// NOTE: The additional locals are also always pushed (this wasn't
+// obvious in generate_fixed_frame), so the guard works for them
+// too.
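+//
+// In effect (editorial sketch of the checks below):
+//   if (additional locals fit within one page)              ok -- guard page
+//   else if (rsp > stack_bottom + guard_area + frame_size)  ok
+//   else                                                    throw StackOverflowError
+// where stack_bottom = stack_base - stack_size and guard_area covers
+// max_pages pages.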
+//
+// Args:
+//      rdx: number of additional locals this frame needs (what we must check)
+//      rbx: Method*
+//
+// Kills:
+//      rax
+void InterpreterGenerator::generate_stack_overflow_check(void) {
+
+  // monitor entry size: see picture of stack in frame_x86.hpp
+  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+
+  // total overhead size: entry_size + (saved rbp through expr stack
+  // bottom).  be sure to change this if you add/subtract anything
+  // to/from the overhead area
+  const int overhead_size =
+    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
+
+  const int page_size = os::vm_page_size();
+
+  Label after_frame_check;
+
+  // see if the frame is greater than one page in size. If so,
+  // then we need to verify there is enough stack space remaining
+  // for the additional locals.
+  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
+  __ jcc(Assembler::belowEqual, after_frame_check);
+
+  // compute rsp as if this were going to be the last frame on
+  // the stack before the red zone
+
+  Label after_frame_check_pop;
+  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
+#ifndef _LP64
+  __ push(thread);
+  __ get_thread(thread);
+#endif
+
+  const Address stack_base(thread, Thread::stack_base_offset());
+  const Address stack_size(thread, Thread::stack_size_offset());
+
+  // locals + overhead, in bytes
+  __ mov(rax, rdx);
+  __ shlptr(rax, Interpreter::logStackElementSize);  // 2 slots per parameter.
+  __ addptr(rax, overhead_size);
+
+#ifdef ASSERT
+  Label stack_base_okay, stack_size_okay;
+  // verify that thread stack base is non-zero
+  __ cmpptr(stack_base, (int32_t)NULL_WORD);
+  __ jcc(Assembler::notEqual, stack_base_okay);
+  __ stop("stack base is zero");
+  __ bind(stack_base_okay);
+  // verify that thread stack size is non-zero
+  __ cmpptr(stack_size, 0);
+  __ jcc(Assembler::notEqual, stack_size_okay);
+  __ stop("stack size is zero");
+  __ bind(stack_size_okay);
+#endif
+
+  // Add stack base to locals and subtract stack size
+  __ addptr(rax, stack_base);
+  __ subptr(rax, stack_size);
+
+  // Use the maximum number of pages we might bang.
+  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
+                                                                              (StackRedPages+StackYellowPages);
+
+  // add in the red and yellow zone sizes
+  __ addptr(rax, max_pages * page_size);
+
+  // check against the current stack bottom
+  __ cmpptr(rsp, rax);
+
+  __ jcc(Assembler::above, after_frame_check_pop);
+  NOT_LP64(__ pop(rsi));  // get saved bcp
+
+  // Restore sender's sp as SP. This is necessary if the sender's
+  // frame is an extended compiled frame (see gen_c2i_adapter())
+  // and safer anyway in case of JSR292 adaptations.
+
+  __ pop(rax); // return address must be moved if SP is changed
+  __ mov(rsp, rbcp);
+  __ push(rax);
+
+  // Note: the restored frame is not necessarily interpreted.
+  // Use the shared runtime version of the StackOverflowError.
+  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
+  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
+  // all done with frame size check
+  __ bind(after_frame_check_pop);
+  NOT_LP64(__ pop(rsi));
+
+  // all done with frame size check
+  __ bind(after_frame_check);
+}
+
+// Allocate monitor and lock method (asm interpreter)
+//
+// Args:
+//      rbx: Method*
+//      r14/rdi: locals
+//
+// Kills:
+//      rax
+//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
+//      rscratch1, rscratch2 (scratch regs)
+void TemplateInterpreterGenerator::lock_method() {
+  // synchronize method
+  const Address access_flags(rbx, Method::access_flags_offset());
+  const Address monitor_block_top(
+        rbp,
+        frame::interpreter_frame_monitor_block_top_offset * wordSize);
+  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+
+#ifdef ASSERT
+  {
+    Label L;
+    __ movl(rax, access_flags);
+    __ testl(rax, JVM_ACC_SYNCHRONIZED);
+    __ jcc(Assembler::notZero, L);
+    __ stop("method doesn't need synchronization");
+    __ bind(L);
+  }
+#endif // ASSERT
+
+  // get synchronization object
+  {
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    Label done;
+    __ movl(rax, access_flags);
+    __ testl(rax, JVM_ACC_STATIC);
+    // get receiver (assume this is frequent case)
+    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
+    __ jcc(Assembler::zero, done);
+    __ movptr(rax, Address(rbx, Method::const_offset()));
+    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
+    __ movptr(rax, Address(rax,
+                           ConstantPool::pool_holder_offset_in_bytes()));
+    __ movptr(rax, Address(rax, mirror_offset));
+
+#ifdef ASSERT
+    {
+      Label L;
+      __ testptr(rax, rax);
+      __ jcc(Assembler::notZero, L);
+      __ stop("synchronization object is NULL");
+      __ bind(L);
+    }
+#endif // ASSERT
+
+    __ bind(done);
+  }
+
+  // add space for monitor & lock
+  __ subptr(rsp, entry_size); // add space for a monitor entry
+  __ movptr(monitor_block_top, rsp);  // set new monitor block top
+  // store object
+  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
+  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+  __ movptr(lockreg, rsp); // object address
+  __ lock_object(lockreg);
+}
+
+// Generate a fixed interpreter frame. This is identical setup for
+// interpreted methods and for native methods hence the shared code.
+//
+// Args:
+//      rax: return address
+//      rbx: Method*
+//      r14/rdi: pointer to locals
+//      r13/rsi: sender sp
+//      rdx: cp cache
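+//
+// Resulting frame layout (editorial sketch; the authoritative offsets are
+// in frame_x86.hpp), from high to low addresses:
+//      return address, saved rbp (<- rbp), sender sp, last_sp (NULL),
+//      Method*, mdp (or 0), cp cache, locals, bcp (or 0),
+//      expression stack bottom (<- rsp)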
+void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
+  // initialize fixed part of activation frame
+  __ push(rax);        // save return address
+  __ enter();          // save old & set new rbp
+  __ push(rbcp);        // set sender sp
+  __ push((int)NULL_WORD); // leave last_sp as null
+  __ movptr(rbcp, Address(rbx, Method::const_offset()));      // get ConstMethod*
+  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
+  __ push(rbx);        // save Method*
+  if (ProfileInterpreter) {
+    Label method_data_continue;
+    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
+    __ testptr(rdx, rdx);
+    __ jcc(Assembler::zero, method_data_continue);
+    __ addptr(rdx, in_bytes(MethodData::data_offset()));
+    __ bind(method_data_continue);
+    __ push(rdx);      // set the mdp (method data pointer)
+  } else {
+    __ push(0);
+  }
+
+  __ movptr(rdx, Address(rbx, Method::const_offset()));
+  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
+  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
+  __ push(rdx); // set constant pool cache
+  __ push(rlocals); // set locals pointer
+  if (native_call) {
+    __ push(0); // no bcp
+  } else {
+    __ push(rbcp); // set bcp
+  }
+  __ push(0); // reserve word for pointer to expression stack bottom
+  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
+}
+
+// End of helpers
+
+// Method entry for java.lang.ref.Reference.get.
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#if INCLUDE_ALL_GCS
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. The "intrinsified" code for G1 (or any SATB based GC),
+  //    2. The slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:-
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
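+  // Java-level effect being intrinsified (editorial sketch):
+  //
+  //   Object referent = this.referent;    // load the referent field
+  //   <G1 SATB pre-barrier logs referent>
+  //   return referent;
+  //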
+  // rbx: Method*
+
+  // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
+
+  address entry = __ pc();
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  if (UseG1GC) {
+    Label slow_path;
+    // rbx: method
+
+    // Check if local 0 != NULL
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ movptr(rax, Address(rsp, wordSize));
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, slow_path);
+
+    // rax: local 0
+    // rbx: method (but can be used as scratch now)
+    // rdx: scratch
+    // rdi: scratch
+
+    // Preserve the sender sp in case the pre-barrier
+    // calls the runtime
+    NOT_LP64(__ push(rsi));
+
+    // Load the value of the referent field.
+    const Address field_address(rax, referent_offset);
+    __ load_heap_oop(rax, field_address);
+
+    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
+    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+    NOT_LP64(__ get_thread(thread));
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer.
+    __ g1_write_barrier_pre(noreg /* obj */,
+                            rax /* pre_val */,
+                            thread /* thread */,
+                            rbx /* tmp */,
+                            true /* tosca_live */,
+                            true /* expand_call */);
+
+    // _areturn
+    NOT_LP64(__ pop(rsi));      // get sender sp
+    __ pop(rdi);                // get return address
+    __ mov(rsp, sender_sp);     // set sp to sender sp
+    __ jmp(rdi);
+    __ ret(0);
+
+    // generate a vanilla interpreter entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
+    return entry;
+  }
+#endif // INCLUDE_ALL_GCS
+
+  // If G1 is not enabled then attempt to go through the accessor entry point;
+  // Reference.get is an accessor.
+  return NULL;
+}
+
+// Interpreter stub for calling a native method. (asm interpreter)
+// This sets up a somewhat different looking stack for calling the
+// native method than the typical interpreter frame setup.
+address InterpreterGenerator::generate_native_entry(bool synchronized) {
+  // determine code generation flags
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // rbx: Method*
+  // rbcp: sender sp
+
+  address entry_point = __ pc();
+
+  const Address constMethod       (rbx, Method::const_offset());
+  const Address access_flags      (rbx, Method::access_flags_offset());
+  const Address size_of_parameters(rcx, ConstMethod::
+                                        size_of_parameters_offset());
+
+
+  // get parameter size (always needed)
+  __ movptr(rcx, constMethod);
+  __ load_unsigned_short(rcx, size_of_parameters);
+
+  // native calls don't need the stack size check since they have no
+  // expression stack, the arguments are already on the stack, and we
+  // only add a handful of words to it
+
+  // rbx: Method*
+  // rcx: size of parameters
+  // rbcp: sender sp
+  __ pop(rax);                                       // get return address
+
+  // for natives the size of locals is zero
+
+  // compute beginning of parameters
+  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
+
+  // add 2 zero-initialized slots for native calls
+  // initialize result_handler slot
+  __ push((int) NULL_WORD);
+  // slot for oop temp
+  // (static native method holder mirror/jni oop result)
+  __ push((int) NULL_WORD);
+
+  // initialize fixed part of activation frame
+  generate_fixed_frame(true);
+
+  // make sure method is native & not abstract
+#ifdef ASSERT
+  __ movl(rax, access_flags);
+  {
+    Label L;
+    __ testl(rax, JVM_ACC_NATIVE);
+    __ jcc(Assembler::notZero, L);
+    __ stop("tried to execute non-native method as native");
+    __ bind(L);
+  }
+  {
+    Label L;
+    __ testl(rax, JVM_ACC_ABSTRACT);
+    __ jcc(Assembler::zero, L);
+    __ stop("tried to execute abstract method in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // Since at this point in the method invocation the exception handler
+  // would try to exit the monitor of synchronized methods which hasn't
+  // been entered yet, we set the thread local variable
+  // _do_not_unlock_if_synchronized to true. The remove_activation will
+  // check this flag.
+
+  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(thread1));
+  const Address do_not_unlock_if_synchronized(thread1,
+        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
+  __ movbool(do_not_unlock_if_synchronized, true);
+
+  // increment invocation count & check for overflow
+  Label invocation_counter_overflow;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+  }
+
+  Label continue_after_compile;
+  __ bind(continue_after_compile);
+
+  bang_stack_shadow_pages(true);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  NOT_LP64(__ get_thread(thread1));
+  __ movbool(do_not_unlock_if_synchronized, false);
+
+  // check for synchronized methods
+  // This must happen AFTER the invocation_counter check and stack overflow check,
+  // so the method is not locked if the counter overflows.
+  if (synchronized) {
+    lock_method();
+  } else {
+    // no synchronization necessary
+#ifdef ASSERT
+    {
+      Label L;
+      __ movl(rax, access_flags);
+      __ testl(rax, JVM_ACC_SYNCHRONIZED);
+      __ jcc(Assembler::zero, L);
+      __ stop("method needs synchronization");
+      __ bind(L);
+    }
+#endif
+  }
+
+  // start execution
+#ifdef ASSERT
+  {
+    Label L;
+    const Address monitor_block_top(rbp,
+                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
+    __ movptr(rax, monitor_block_top);
+    __ cmpptr(rax, rsp);
+    __ jcc(Assembler::equal, L);
+    __ stop("broken stack frame setup in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // jvmti support
+  __ notify_method_entry();
+
+  // work registers
+  const Register method = rbx;
+  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);
+
+  // allocate space for parameters
+  __ get_method(method);
+  __ movptr(t, Address(method, Method::const_offset()));
+  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
+
+#ifndef _LP64
+  __ shlptr(t, Interpreter::logStackElementSize);
+  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
+  __ subptr(rsp, t);
+  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
+#else
+  __ shll(t, Interpreter::logStackElementSize);
+
+  __ subptr(rsp, t);
+  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
+#endif // _LP64
+
+  // get signature handler
+  {
+    Label L;
+    __ movptr(t, Address(method, Method::signature_handler_offset()));
+    __ testptr(t, t);
+    __ jcc(Assembler::notZero, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::prepare_native_call),
+               method);
+    __ get_method(method);
+    __ movptr(t, Address(method, Method::signature_handler_offset()));
+    __ bind(L);
+  }
+
+  // call signature handler
+  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
+         "adjust this code");
+  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
+         "adjust this code");
+  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
+         "adjust this code");
+
+  // The generated handlers do not touch RBX (the method oop).
+  // However, large signatures cannot be cached and are generated
+  // each time here.  The slow-path generator can do a GC on return,
+  // so we must reload it after the call.
+  __ call(t);
+  __ get_method(method);        // slow path can do a GC, reload RBX
+
+
+  // result handler is in rax
+  // set result handler
+  __ movptr(Address(rbp,
+                    (frame::interpreter_frame_result_handler_offset) * wordSize),
+            rax);
+
+  // pass mirror handle if static call
+  {
+    Label L;
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    __ movl(t, Address(method, Method::access_flags_offset()));
+    __ testl(t, JVM_ACC_STATIC);
+    __ jcc(Assembler::zero, L);
+    // get mirror
+    __ movptr(t, Address(method, Method::const_offset()));
+    __ movptr(t, Address(t, ConstMethod::constants_offset()));
+    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
+    __ movptr(t, Address(t, mirror_offset));
+    // copy mirror into activation frame
+    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
+            t);
+    // pass handle to mirror
+#ifndef _LP64
+    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
+    __ movptr(Address(rsp, wordSize), t);
+#else
+    __ lea(c_rarg1,
+           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
+#endif // _LP64
+    __ bind(L);
+  }
+
+  // get native function entry point
+  {
+    Label L;
+    __ movptr(rax, Address(method, Method::native_function_offset()));
+    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
+    __ cmpptr(rax, unsatisfied.addr());
+    __ jcc(Assembler::notEqual, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::prepare_native_call),
+               method);
+    __ get_method(method);
+    __ movptr(rax, Address(method, Method::native_function_offset()));
+    __ bind(L);
+  }
+
+  // pass JNIEnv
+#ifndef _LP64
+   __ get_thread(thread);
+   __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
+   __ movptr(Address(rsp, 0), t);
+
+   // set_last_Java_frame_before_call
+   // It is enough that the pc() points into the right code segment.
+   // It does not have to be the correct return pc.
+   __ set_last_Java_frame(thread, noreg, rbp, __ pc());
+#else
+   __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
+
+   // It is enough that the pc() points into the right code
+   // segment. It does not have to be the correct return pc.
+   __ set_last_Java_frame(rsp, rbp, (address) __ pc());
+#endif // _LP64
+
+  // change thread state
+#ifdef ASSERT
+  {
+    Label L;
+    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
+    __ cmpl(t, _thread_in_Java);
+    __ jcc(Assembler::equal, L);
+    __ stop("Wrong thread state in native stub");
+    __ bind(L);
+  }
+#endif
+
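+  // Thread-state protocol around the native call (editorial sketch):
+  //   _thread_in_Java -> _thread_in_native          (stored just below)
+  //   -> _thread_in_native_trans                    (after the call returns)
+  //   -> _thread_in_Java                            (once safepoint/suspend
+  //                                                  checks are done)
+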
+  // Change state to native
+
+  __ movl(Address(thread, JavaThread::thread_state_offset()),
+          _thread_in_native);
+
+  // Call the native method.
+  __ call(rax);
+  // 32: result potentially in rdx:rax or ST0
+  // 64: result potentially in rax or xmm0
+
+  // Verify or restore cpu control state after JNI call
+  __ restore_cpu_control_state_after_jni();
+
+  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
+  // in order to extract the result of a method call. If the order of these
+  // pushes change or anything else is added to the stack then the code in
+  // interpreter_frame_result must also change.
+
+#ifndef _LP64
+  // save potential result in ST(0) & rdx:rax
+  // (if the result handler is the T_FLOAT or T_DOUBLE handler, the result must be in ST0 -
+  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
+  // It is safe to do this push because state is _thread_in_native and the return address will be found
+  // via _last_native_pc and not via _last_java_sp
+
+  // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
+  // If the order changes or anything else is added to the stack the code in
+  // interpreter_frame_result will have to be changed.
+
+  { Label L;
+    Label push_double;
+    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
+    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
+    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
+              float_handler.addr());
+    __ jcc(Assembler::equal, push_double);
+    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
+              double_handler.addr());
+    __ jcc(Assembler::notEqual, L);
+    __ bind(push_double);
+    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
+    __ bind(L);
+  }
+#else
+  __ push(dtos);
+#endif // _LP64
+
+  __ push(ltos);
+
+  // change thread state
+  NOT_LP64(__ get_thread(thread));
+  __ movl(Address(thread, JavaThread::thread_state_offset()),
+          _thread_in_native_trans);
+
+  if (os::is_MP()) {
+    if (UseMembar) {
+      // Force this write out before the read below
+      __ membar(Assembler::Membar_mask_bits(
+           Assembler::LoadLoad | Assembler::LoadStore |
+           Assembler::StoreLoad | Assembler::StoreStore));
+    } else {
+      // Write serialization page so VM thread can do a pseudo remote membar.
+      // We use the current thread pointer to calculate a thread specific
+      // offset to write to within the page. This minimizes bus traffic
+      // due to cache line collision.
+      __ serialize_memory(thread, rcx);
+    }
+  }
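+
+  // Both branches above have the same goal (editorial sketch): order the
+  // _thread_in_native_trans store before the safepoint-state read, roughly
+  //   thread->state = _thread_in_native_trans; <full fence>; read safepoint state;
+  // so the VM thread cannot miss this thread while synchronizing a safepoint.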
+
+#ifndef _LP64
+  if (AlwaysRestoreFPU) {
+    //  Make sure the control word is correct.
+    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+  }
+#endif // _LP64
+
+  // check for safepoint operation in progress and/or pending suspend requests
+  {
+    Label Continue;
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+
+    Label L;
+    __ jcc(Assembler::notEqual, L);
+    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
+    __ jcc(Assembler::equal, Continue);
+    __ bind(L);
+
+    // Don't use call_VM: it will see the possible pending exception,
+    // forward it, and never return here, which would prevent us from
+    // clearing _last_native_pc down below.  We can't use call_VM_leaf
+    // either, as it will check whether r13 & r14 are preserved and
+    // correspond to the bcp/locals pointers.  So we do the runtime call
+    // by hand.
+    //
+#ifndef _LP64
+    __ push(thread);
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
+                                            JavaThread::check_special_condition_for_native_trans)));
+    __ increment(rsp, wordSize);
+    __ get_thread(thread);
+#else
+    __ mov(c_rarg0, r15_thread);
+    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
+    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+    __ andptr(rsp, -16); // align stack as required by ABI
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
+    __ mov(rsp, r12); // restore sp
+    __ reinit_heapbase();
+#endif // _LP64
+    __ bind(Continue);
+  }
+
+  // change thread state
+  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
+
+  // reset_last_Java_frame
+  __ reset_last_Java_frame(thread, true, true);
+
+  // reset handle block
+  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
+  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+
+  // If result is an oop unbox and store it in frame where gc will see it
+  // and result handler will pick it up
+
+  {
+    Label no_oop, store_result;
+    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
+    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
+    __ jcc(Assembler::notEqual, no_oop);
+    // retrieve result
+    __ pop(ltos);
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, store_result);
+    __ movptr(rax, Address(rax, 0));
+    __ bind(store_result);
+    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
+    // keep the stack depth as expected by pushing the oop, which will eventually be discarded
+    __ push(ltos);
+    __ bind(no_oop);
+  }
+
+
+  {
+    Label no_reguard;
+    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
+            JavaThread::stack_guard_yellow_disabled);
+    __ jcc(Assembler::notEqual, no_reguard);
+
+    __ pusha(); // XXX only save smashed registers
+#ifndef _LP64
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
+    __ popa();
+#else
+    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
+    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+    __ andptr(rsp, -16); // align stack as required by ABI
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
+    __ mov(rsp, r12); // restore sp
+    __ popa(); // XXX only restore smashed registers
+    __ reinit_heapbase();
+#endif // _LP64
+
+    __ bind(no_reguard);
+  }
+
+
+  // The method register is junk from after the thread_in_native transition
+  // until here.  Also, we can't call_VM until the bcp has been
+  // restored.  We need the bcp for throwing the exception below, so restore it now.
+  __ get_method(method);
+
+  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
+  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
+  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));    // get codebase
+
+  // handle exceptions (exception handling will handle unlocking!)
+  {
+    Label L;
+    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
+    __ jcc(Assembler::zero, L);
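+  // Pop the invoke's arguments off the caller's expression stack:
+  // extract the parameter size from the cpCache flags word and step rsp
+  // over that many stack elements before dispatching.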
+    // Note: At some point we may want to unify this with the code
+    // used in call_VM_base(); i.e., we should use the
+    // StubRoutines::forward_exception code. For now this doesn't work
+    // here because the rsp is not correctly set at this point.
+    __ MacroAssembler::call_VM(noreg,
+                               CAST_FROM_FN_PTR(address,
+                               InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+
+  // do unlocking if necessary
+  {
+    Label L;
+    __ movl(t, Address(method, Method::access_flags_offset()));
+    __ testl(t, JVM_ACC_SYNCHRONIZED);
+    __ jcc(Assembler::zero, L);
+    // the code below should be shared with interpreter macro
+    // assembler implementation
+    {
+      Label unlock;
+      // BasicObjectLock will be first in list, since this is a
+      // synchronized method. However, need to check that the object
+      // has not been unlocked by an explicit monitorexit bytecode.
+      const Address monitor(rbp,
+                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
+                                       wordSize - (int)sizeof(BasicObjectLock)));
+
+      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+
+      // the monitor is expected in c_rarg1 for the slow unlock path
+      __ lea(regmon, monitor); // address of first monitor
+
+      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
+      __ testptr(t, t);
+      __ jcc(Assembler::notZero, unlock);
+
+      // Entry already unlocked, need to throw exception
+      __ MacroAssembler::call_VM(noreg,
+                                 CAST_FROM_FN_PTR(address,
+                   InterpreterRuntime::throw_illegal_monitor_state_exception));
+      __ should_not_reach_here();
+
+      __ bind(unlock);
+      __ unlock_object(regmon);
+    }
+    __ bind(L);
+  }
+
+  // jvmti support
+  // Note: This must happen _after_ handling/throwing any exceptions since
+  //       the exception handler code notifies the runtime of method exits
+  //       too. If this happens before, method entry/exit notifications are
+  //       not properly paired (was bug - gri 11/22/99).
+  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
+
+  // restore potential result in edx:eax, call result handler to
+  // restore potential result in ST0 & handle result
+
+  __ pop(ltos);
+  LP64_ONLY( __ pop(dtos));
+
+  __ movptr(t, Address(rbp,
+                       (frame::interpreter_frame_result_handler_offset) * wordSize));
+  __ call(t);
+
+  // remove activation
+  __ movptr(t, Address(rbp,
+                       frame::interpreter_frame_sender_sp_offset *
+                       wordSize)); // get sender sp
+  __ leave();                                // remove frame anchor
+  __ pop(rdi);                               // get return address
+  __ mov(rsp, t);                            // set sp to sender sp
+  __ jmp(rdi);
+
+  if (inc_counter) {
+    // Handle overflow of counter and compile method
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(&continue_after_compile);
+  }
+
+  return entry_point;
+}
+
+//
+// Generic interpreted method entry to (asm) interpreter
+//
+address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+  // determine code generation flags
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // ebx: Method*
+  // rbcp: sender sp
+  address entry_point = __ pc();
+
+  const Address constMethod(rbx, Method::const_offset());
+  const Address access_flags(rbx, Method::access_flags_offset());
+  const Address size_of_parameters(rdx,
+                                   ConstMethod::size_of_parameters_offset());
+  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
+
+
+  // get parameter size (always needed)
+  __ movptr(rdx, constMethod);
+  __ load_unsigned_short(rcx, size_of_parameters);
+
+  // rbx: Method*
+  // rcx: size of parameters
+  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)
+
+  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
+  __ subl(rdx, rcx); // rdx = no. of additional locals
+
+  // YYY
+//   __ incrementl(rdx);
+//   __ andl(rdx, -2);
+
+  // see if we've got enough room on the stack for locals plus overhead.
+  generate_stack_overflow_check();
+
+  // get return address
+  __ pop(rax);
+
+  // compute beginning of parameters
+  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
+
+  // rdx - # of additional locals
+  // allocate space for locals
+  // explicitly initialize locals
+  {
+    Label exit, loop;
+    __ testl(rdx, rdx);
+    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
+    __ bind(loop);
+    __ push((int) NULL_WORD); // initialize local variables
+    __ decrementl(rdx); // until everything initialized
+    __ jcc(Assembler::greater, loop);
+    __ bind(exit);
+  }
+
+  // initialize fixed part of activation frame
+  generate_fixed_frame(false);
+
+  // make sure method is not native & not abstract
+#ifdef ASSERT
+  __ movl(rax, access_flags);
+  {
+    Label L;
+    __ testl(rax, JVM_ACC_NATIVE);
+    __ jcc(Assembler::zero, L);
+    __ stop("tried to execute native method as non-native");
+    __ bind(L);
+  }
+  {
+    Label L;
+    __ testl(rax, JVM_ACC_ABSTRACT);
+    __ jcc(Assembler::zero, L);
+    __ stop("tried to execute abstract method in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // Since at this point in the method invocation the exception
+  // handler would try to exit the monitor of synchronized methods
+  // which hasn't been entered yet, we set the thread local variable
+  // _do_not_unlock_if_synchronized to true. The remove_activation
+  // will check this flag.
+
+  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(thread));
+  const Address do_not_unlock_if_synchronized(thread,
+        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
+  __ movbool(do_not_unlock_if_synchronized, true);
+
+  __ profile_parameters_type(rax, rcx, rdx);
+  // increment invocation count & check for overflow
+  Label invocation_counter_overflow;
+  Label profile_method;
+  Label profile_method_continue;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow,
+                          &profile_method,
+                          &profile_method_continue);
+    if (ProfileInterpreter) {
+      __ bind(profile_method_continue);
+    }
+  }
+
+  Label continue_after_compile;
+  __ bind(continue_after_compile);
+
+  // bang the stack shadow pages for interpreted methods
+  bang_stack_shadow_pages(false);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  NOT_LP64(__ get_thread(thread));
+  __ movbool(do_not_unlock_if_synchronized, false);
+
+  // check for synchronized methods
+  // This must happen AFTER the invocation_counter check and stack overflow check,
+  // so the method is not locked if the counter overflows.
+  if (synchronized) {
+    // Allocate monitor and lock method
+    lock_method();
+  } else {
+    // no synchronization necessary
+#ifdef ASSERT
+    {
+      Label L;
+      __ movl(rax, access_flags);
+      __ testl(rax, JVM_ACC_SYNCHRONIZED);
+      __ jcc(Assembler::zero, L);
+      __ stop("method needs synchronization");
+      __ bind(L);
+    }
+#endif
+  }
+
+  // start execution
+#ifdef ASSERT
+  {
+    Label L;
+     const Address monitor_block_top (rbp,
+                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
+    __ movptr(rax, monitor_block_top);
+    __ cmpptr(rax, rsp);
+    __ jcc(Assembler::equal, L);
+    __ stop("broken stack frame setup in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // jvmti support
+  __ notify_method_entry();
+
+  __ dispatch_next(vtos);
+
+  // invocation counter overflow
+  if (inc_counter) {
+    if (ProfileInterpreter) {
+      // We have decided to profile this method in the interpreter
+      __ bind(profile_method);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
+      __ get_method(rbx);
+      __ jmp(profile_method_continue);
+    }
+    // Handle overflow of counter and compile method
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(&continue_after_compile);
+  }
+
+  return entry_point;
+}
+
+//-----------------------------------------------------------------------------
+// Exceptions
+
+void TemplateInterpreterGenerator::generate_throw_exception() {
+  // Entry point in previous activation (i.e., if the caller was
+  // interpreted)
+  Interpreter::_rethrow_exception_entry = __ pc();
+  // Restore sp to interpreter_frame_last_sp even though we are going
+  // to empty the expression stack for the exception processing.
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  // rax: exception
+  // rdx: return address/pc that threw exception
+  __ restore_bcp();    // r13/rsi points to call/send
+  __ restore_locals();
+  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
+  // Entry point for exceptions thrown within interpreter code
+  Interpreter::_throw_exception_entry = __ pc();
+  // expression stack is undefined here
+  // rax: exception
+  // r13/rsi: exception bcp
+  __ verify_oop(rax);
+  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+  LP64_ONLY(__ mov(c_rarg1, rax));
+
+  // expression stack must be empty before entering the VM in case of
+  // an exception
+  __ empty_expression_stack();
+  // find exception handler address and preserve exception oop
+  __ call_VM(rdx,
+             CAST_FROM_FN_PTR(address,
+                          InterpreterRuntime::exception_handler_for_exception),
+             rarg);
+  // rax: exception handler entry point
+  // rdx: preserved exception oop
+  // r13/rsi: bcp for exception handler
+  __ push_ptr(rdx); // push exception which is now the only value on the stack
+  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
+
+  // If the exception is not handled in the current frame the frame is
+  // removed and the exception is rethrown (i.e. exception
+  // continuation is _rethrow_exception).
+  //
+  // Note: At this point the bci still refers to the instruction
+  // which caused the exception and the expression stack is
+  // empty. Thus, for any VM calls at this point, GC will find a legal
+  // oop map (with empty expression stack).
+
+  // In current activation
+  // tos: exception
+  // esi: exception bcp
+
+  //
+  // JVMTI PopFrame support
+  //
+
+  Interpreter::_remove_activation_preserving_args_entry = __ pc();
+  __ empty_expression_stack();
+  // Set the popframe_processing bit in pending_popframe_condition
+  // indicating that we are currently handling popframe, so that
+  // call_VMs that may happen later do not trigger new popframe
+  // handling cycles.
+  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(thread));
+  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
+  __ orl(rdx, JavaThread::popframe_processing_bit);
+  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
+
+  {
+    // Check to see whether we are returning to a deoptimized frame.
+    // (The PopFrame call ensures that the caller of the popped frame is
+    // either interpreted or compiled and deoptimizes it if compiled.)
+    // In this case, we can't call dispatch_next() after the frame is
+    // popped, but instead must save the incoming arguments and restore
+    // them after deoptimization has occurred.
+    //
+    // Note that we don't compare the return PC against the
+    // deoptimization blob's unpack entry because of the presence of
+    // adapter frames in C2.
+    Label caller_not_deoptimized;
+    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                               InterpreterRuntime::interpreter_contains), rarg);
+    __ testl(rax, rax);
+    __ jcc(Assembler::notZero, caller_not_deoptimized);
+
+    // Compute size of arguments for saving when returning to
+    // deoptimized caller
+    __ get_method(rax);
+    __ movptr(rax, Address(rax, Method::const_offset()));
+    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
+                                                size_of_parameters_offset())));
+    __ shll(rax, Interpreter::logStackElementSize);
+    __ restore_locals();
+    __ subptr(rlocals, rax);
+    __ addptr(rlocals, wordSize);
+    // Save these arguments
+    NOT_LP64(__ get_thread(thread));
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                                           Deoptimization::
+                                           popframe_preserve_args),
+                          thread, rax, rlocals);
+
+    __ remove_activation(vtos, rdx,
+                         /* throw_monitor_exception */ false,
+                         /* install_monitor_exception */ false,
+                         /* notify_jvmdi */ false);
+
+    // Inform deoptimization that it is responsible for restoring
+    // these arguments
+    NOT_LP64(__ get_thread(thread));
+    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
+            JavaThread::popframe_force_deopt_reexecution_bit);
+
+    // Continue in deoptimization handler
+    __ jmp(rdx);
+
+    __ bind(caller_not_deoptimized);
+  }
+
+  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
+                       /* throw_monitor_exception */ false,
+                       /* install_monitor_exception */ false,
+                       /* notify_jvmdi */ false);
+
+  // Finish with popframe handling
+  // A previous I2C followed by a deoptimization might have moved the
+  // outgoing arguments further up the stack. PopFrame expects the
+  // mutations to those outgoing arguments to be preserved and other
+  // constraints basically require this frame to look exactly as
+  // though it had previously invoked an interpreted activation with
+  // no space between the top of the expression stack (current
+  // last_sp) and the top of stack. Rather than force deopt to
+  // maintain this kind of invariant all the time we call a small
+  // fixup routine to move the mutated arguments onto the top of our
+  // expression stack if necessary.
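+  //
+  // Call shape of the fixup routine used below (editorial sketch):
+  //   InterpreterRuntime::popframe_move_outgoing_args(thread,
+  //       src /* current rsp */, dest /* saved last_sp */)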
+#ifndef _LP64
+  __ mov(rax, rsp);
+  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ get_thread(thread);
+  // PC must point into interpreter here
+  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
+  __ get_thread(thread);
+#else
+  __ mov(c_rarg1, rsp);
+  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // PC must point into interpreter here
+  __ set_last_Java_frame(noreg, rbp, __ pc());
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
+#endif
+  __ reset_last_Java_frame(thread, true, true);
+
+  // Restore the last_sp and null it out
+  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+
+  __ restore_bcp();
+  __ restore_locals();
+  // The method data pointer was incremented already during
+  // call profiling. We have to restore the mdp for the current bcp.
+  if (ProfileInterpreter) {
+    __ set_method_data_pointer_for_bcp();
+  }
+
+  // Clear the popframe condition flag
+  NOT_LP64(__ get_thread(thread));
+  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
+          JavaThread::popframe_inactive);
+
+#if INCLUDE_JVMTI
+  {
+    Label L_done;
+    const Register local0 = rlocals;
+
+    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
+    __ jcc(Assembler::notEqual, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ get_method(rdx);
+    __ movptr(rax, Address(local0, 0));
+    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, L_done);
+
+    __ movptr(Address(rbx, 0), rax);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
+  __ dispatch_next(vtos);
+  // end of PopFrame support
+
+  Interpreter::_remove_activation_entry = __ pc();
+
+  // preserve exception over this code sequence
+  __ pop_ptr(rax);
+  NOT_LP64(__ get_thread(thread));
+  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
+  // remove the activation (without doing throws on illegalMonitorExceptions)
+  __ remove_activation(vtos, rdx, false, true, false);
+  // restore exception
+  NOT_LP64(__ get_thread(thread));
+  __ get_vm_result(rax, thread);
+
+  // In between activations - previous activation type unknown yet
+  // compute continuation point - the continuation point expects the
+  // following registers set up:
+  //
+  // rax: exception
+  // rdx: return address/pc that threw exception
+  // rsp: expression stack of caller
+  // rbp: ebp of caller
+  __ push(rax);                                  // save exception
+  __ push(rdx);                                  // save return address
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                          SharedRuntime::exception_handler_for_return_address),
+                        thread, rdx);
+  __ mov(rbx, rax);                              // save exception handler
+  __ pop(rdx);                                   // restore return address
+  __ pop(rax);                                   // restore exception
+  // Note that an "issuing PC" is actually the next PC after the call
+  __ jmp(rbx);                                   // jump to exception
+                                                 // handler of caller
+}
+
+
+//
+// JVMTI ForceEarlyReturn support
+//
+address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
+  address entry = __ pc();
+
+  __ restore_bcp();
+  __ restore_locals();
+  __ empty_expression_stack();
+  __ load_earlyret_value(state);  // 32-bit returns the value in rdx, so don't reuse it
+
+  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(thread));
+  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
+  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
+
+  // Clear the earlyret state
+  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
+
+  __ remove_activation(state, rsi,
+                       false, /* throw_monitor_exception */
+                       false, /* install_monitor_exception */
+                       true); /* notify_jvmdi */
+  __ jmp(rsi);
+
+  return entry;
+} // end of ForceEarlyReturn support
+
+
+//-----------------------------------------------------------------------------
+// Helper for vtos entry point generation
+
+void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
+                                                         address& bep,
+                                                         address& cep,
+                                                         address& sep,
+                                                         address& aep,
+                                                         address& iep,
+                                                         address& lep,
+                                                         address& fep,
+                                                         address& dep,
+                                                         address& vep) {
+  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
+  Label L;
+  aep = __ pc();  __ push_ptr();   __ jmp(L);
+#ifndef _LP64
+  fep = __ pc(); __ push(ftos); __ jmp(L);
+  dep = __ pc(); __ push(dtos); __ jmp(L);
+#else
+  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
+  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
+#endif // _LP64
+  lep = __ pc();  __ push_l();     __ jmp(L);
+  bep = cep = sep =
+  iep = __ pc();  __ push_i();
+  vep = __ pc();
+  __ bind(L);
+  generate_and_dispatch(t);
+}
+
+
+//-----------------------------------------------------------------------------
+// Generation of individual instructions
+
+// helpers for generate_and_dispatch
+
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+  : TemplateInterpreterGenerator(code) {
+   generate_all(); // down here so it can be "virtual"
+}
+
+//-----------------------------------------------------------------------------
+
+// Non-product code
+#ifndef PRODUCT
+
+address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
+  address entry = __ pc();
+
+#ifndef _LP64
+  // prepare expression stack
+  __ pop(rcx);          // pop return address so expression stack is 'pure'
+  __ push(state);       // save tosca
+
+  // pass tosca registers as arguments & call tracer
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
+  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
+  __ pop(state);        // restore tosca
+
+  // return
+  __ jmp(rcx);
+#else
+  __ push(state);
+  __ push(c_rarg0);
+  __ push(c_rarg1);
+  __ push(c_rarg2);
+  __ push(c_rarg3);
+  __ mov(c_rarg2, rax);  // Pass itos
+#ifdef _WIN64
+  __ movflt(xmm3, xmm0); // Pass ftos
+#endif
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
+             c_rarg1, c_rarg2, c_rarg3);
+  __ pop(c_rarg3);
+  __ pop(c_rarg2);
+  __ pop(c_rarg1);
+  __ pop(c_rarg0);
+  __ pop(state);
+  __ ret(0);                                   // return to the caller of the trace stub
+#endif // _LP64
+
+  return entry;
+}
+
+void TemplateInterpreterGenerator::count_bytecode() {
+  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
+}
+
+void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
+  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
+}
+
+void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
+  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
+  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
+  __ orl(rbx,
+         ((int) t->bytecode()) <<
+         BytecodePairHistogram::log2_number_of_codes);
+  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
+  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
+  __ incrementl(Address(rscratch1, rbx, Address::times_4));
+}
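The index update above keeps a rolling two-opcode window: the previous code in the low bits, the new code in the high bits. A self-contained C++ sketch of the same computation (the 8-bit field width is an assumption for illustration, standing in for BytecodePairHistogram::log2_number_of_codes):

    #include <cstdint>
    #include <cstdio>

    static const int      kLog2Codes = 8;                  // assumed field width
    static const uint32_t kPairs     = 1u << (2 * kLog2Codes);

    static uint32_t pair_index = 0;
    static uint32_t pair_counters[kPairs];

    // Mirrors the stub: the shift drops the oldest opcode and moves the
    // previous one into the low bits; the new opcode fills the high bits.
    static void count_pair(uint8_t bytecode) {
      pair_index = (pair_index >> kLog2Codes) |
                   ((uint32_t)bytecode << kLog2Codes);
      pair_counters[pair_index]++;
    }

    int main() {
      const uint8_t trace[] = {0x1a, 0x1b, 0x60, 0xac};    // iload_0 iload_1 iadd ireturn
      for (uint8_t bc : trace) count_pair(bc);
      std::printf("last pair index: %u\n", (unsigned)pair_index);
      return 0;
    }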
+
+
+void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
+  // Call a little run-time stub to avoid blow-up for each bytecode.
+  // The run-time stub saves the right registers, depending on
+  // the tosca in-state for the given template.
+
+  assert(Interpreter::trace_code(t->tos_in()) != NULL,
+         "entry must have been generated");
+#ifndef _LP64
+  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
+#else
+  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
+  __ andptr(rsp, -16); // align stack as required by ABI
+  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
+  __ mov(rsp, r12); // restore sp
+  __ reinit_heapbase();
+#endif // _LP64
+}
+
+
+void TemplateInterpreterGenerator::stop_interpreter_at() {
+  Label L;
+  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
+           StopInterpreterAt);
+  __ jcc(Assembler::notEqual, L);
+  __ int3();
+  __ bind(L);
+}
+#endif // !PRODUCT
+#endif // ! CC_INTERP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_32.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "runtime/arguments.hpp"
+
+#define __ _masm->
+
+
+#ifndef CC_INTERP
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx: Method*
+    // rsi: senderSP must be preserved for the slow path; set SP to it on the fast path
+    // rdx: scratch
+    // rdi: scratch
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack, because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register val = rdx;  // source java byte value
+    const Register tbl = rdi;  // scratch
+
+    // Arguments are reversed on the Java expression stack
+    __ movl(val, Address(rsp,   wordSize)); // byte value
+    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
+
+    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
+    __ notl(crc); // ~crc
+    __ update_byte_crc32(crc, val, tbl);
+    __ notl(crc); // ~crc
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, rsi);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
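The fast path above is the classic table-driven CRC-32 byte step; the two notl(crc) instructions are the usual pre- and post-inversion. For reference, a self-contained C++ sketch of the same step (reflected polynomial 0xEDB88320, as used by java.util.zip.CRC32):

    #include <cstdint>
    #include <cstdio>

    // Standard reflected CRC-32 table (polynomial 0xEDB88320).
    static uint32_t crc_table[256];

    static void init_table() {
      for (uint32_t n = 0; n < 256; n++) {
        uint32_t c = n;
        for (int k = 0; k < 8; k++)
          c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : (c >> 1);
        crc_table[n] = c;
      }
    }

    // One-byte update, mirroring the stub: invert, table step, invert back.
    // Chaining invert/step/invert per byte equals one inversion around the
    // whole stream, since the inversions cancel between bytes.
    static int update(int crc, int b) {
      uint32_t c = ~(uint32_t)crc;                           // notl(crc)
      c = crc_table[(c ^ (uint32_t)b) & 0xFF] ^ (c >> 8);    // update_byte_crc32
      return (int)~c;                                        // notl(crc)
    }

    int main() {
      init_table();
      int crc = 0;
      const unsigned char msg[] = {'a', 'b', 'c'};
      for (unsigned char ch : msg) crc = update(crc, ch);
      std::printf("%08x\n", (unsigned)crc);  // 352441c2, CRC-32 of "abc"
      return 0;
    }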
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx,: Method*
+    // rsi: senderSP must be preserved for the slow path; set SP to it on the fast path
+    // rdx: scratch
+    // rdi: scratch
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack, because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register buf = rdx;  // source java byte array address
+    const Register len = rdi;  // length
+
+    // value              x86_32
+    // interp. arg ptr    ESP + 4
+    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+    //                                         3           2      1        0
+    // int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+    //                                              4         2,3      1        0
+
+    // Arguments are reversed on the Java expression stack
+    __ movl(len,   Address(rsp,   4 + 0)); // Length
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long buf
+      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
+      __ movl(crc,   Address(rsp, 4 + 4 * wordSize)); // Initial CRC
+    } else {
+      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
+      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
+      __ movl(crc,   Address(rsp, 4 + 3 * wordSize)); // Initial CRC
+    }
+
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, rsi);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
+
+/**
+* Method entry for static native methods:
+*   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
+*   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
+*/
+address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32CIntrinsics) {
+    address entry = __ pc();
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register buf = rcx;  // source java byte array address
+    const Register len = rdx;  // length
+    const Register end = len;
+
+    // value              x86_32
+    // interp. arg ptr    ESP + 4
+    // int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
+    //                                          3           2      1        0
+    // int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
+    //                                               4         2,3          1        0
+
+    // Arguments are reversed on the Java expression stack
+    __ movl(end, Address(rsp, 4 + 0)); // end
+    __ subl(len, Address(rsp, 4 + 1 * wordSize));  // end - offset == length
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
+      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long address
+      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
+      __ movl(crc, Address(rsp, 4 + 4 * wordSize)); // Initial CRC
+    } else {
+      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
+      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
+      __ movl(crc, Address(rsp, 4 + 3 * wordSize)); // Initial CRC
+    }
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
+    // result in rax
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, rsi);           // set sp to sender sp
+    __ jmp(rdi);
+
+    return entry;
+  }
+  return NULL;
+}
+
+/**
+ * Method entry for static native method:
+ *    java.lang.Float.intBitsToFloat(int bits)
+ */
+address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
+  if (UseSSE >= 1) {
+    address entry = __ pc();
+
+    // rsi: the sender's SP
+
+    // Skip safepoint check (compiler intrinsic versions of this method
+    // do not perform safepoint checks either).
+
+    // Load 'bits' into xmm0 (interpreter returns results in xmm0)
+    __ movflt(xmm0, Address(rsp, wordSize));
+
+    // Return
+    __ pop(rdi); // get return address
+    __ mov(rsp, rsi); // set rsp to the sender's SP
+    __ jmp(rdi);
+    return entry;
+  }
+
+  return NULL;
+}
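The entry performs no numeric conversion: it simply re-types the 32-bit pattern already on the operand stack as the float result in xmm0. A portable C++ sketch of the same reinterpretation:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Portable equivalent of Float.intBitsToFloat: reuse the bit pattern,
    // perform no numeric conversion.
    static float int_bits_to_float(int32_t bits) {
      float f;
      std::memcpy(&f, &bits, sizeof f);
      return f;
    }

    int main() {
      std::printf("%g\n", int_bits_to_float(0x3f800000));  // prints 1 (IEEE-754 1.0f)
      return 0;
    }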
+
+/**
+ * Method entry for static native method:
+ *    java.lang.Float.floatToRawIntBits(float value)
+ */
+address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
+  if (UseSSE >= 1) {
+    address entry = __ pc();
+
+    // rsi: the sender's SP
+
+    // Skip safepoint check (compiler intrinsic versions of this method
+    // do not perform safepoint checks either).
+
+    // Load the parameter (a floating-point value) into rax.
+    __ movl(rax, Address(rsp, wordSize));
+
+    // Return
+    __ pop(rdi); // get return address
+    __ mov(rsp, rsi); // set rsp to the sender's SP
+    __ jmp(rdi);
+    return entry;
+  }
+
+  return NULL;
+}
+
+
+/**
+ * Method entry for static native method:
+ *    java.lang.Double.longBitsToDouble(long bits)
+ */
+address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
+   if (UseSSE >= 2) {
+     address entry = __ pc();
+
+     // rsi: the sender's SP
+
+     // Skip safepoint check (compiler intrinsic versions of this method
+     // do not perform safepoint checks either).
+
+     // Load 'bits' into xmm0 (interpreter returns results in xmm0)
+     __ movdbl(xmm0, Address(rsp, wordSize));
+
+     // Return
+     __ pop(rdi); // get return address
+     __ mov(rsp, rsi); // set rsp to the sender's SP
+     __ jmp(rdi);
+     return entry;
+   }
+
+   return NULL;
+}
+
+/**
+ * Method entry for static native method:
+ *    java.lang.Double.doubleToRawLongBits(double value)
+ */
+address InterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
+  if (UseSSE >= 2) {
+    address entry = __ pc();
+
+    // rsi: the sender's SP
+
+    // Skip safepoint check (compiler intrinsic versions of this method
+    // do not perform safepoint checks either).
+
+    // Load the parameter (a double) into the rdx:rax pair.
+    __ movl(rdx, Address(rsp, 2*wordSize));
+    __ movl(rax, Address(rsp, wordSize));
+
+    // Return
+    __ pop(rdi); // get return address
+    __ mov(rsp, rsi); // set rsp to the sender's SP
+    __ jmp(rdi);
+    return entry;
+  }
+
+  return NULL;
+}
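On 32-bit x86 a Java long is returned in the edx:eax pair, which is why this entry loads the high word into rdx and the low word into rax. A small C++ sketch of the same split (little-endian layout, as on x86):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double value = 1.0;
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);   // Double.doubleToRawLongBits
      uint32_t lo = (uint32_t)bits;              // what the stub puts in rax
      uint32_t hi = (uint32_t)(bits >> 32);      // what the stub puts in rdx
      std::printf("hi=%08x lo=%08x\n", hi, lo);  // 3ff00000 00000000 for 1.0
      return 0;
    }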
+#endif // CC_INTERP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86_64.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "runtime/arguments.hpp"
+
+#define __ _masm->
+
+#ifndef CC_INTERP
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx,: Method*
+    // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
+    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
+    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack, because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register val = c_rarg0;  // source java byte value
+    const Register tbl = c_rarg1;  // scratch
+
+    // Arguments are reversed on the Java expression stack
+    __ movl(val, Address(rsp,   wordSize)); // byte value
+    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
+
+    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
+    __ notl(crc); // ~crc
+    __ update_byte_crc32(crc, val, tbl);
+    __ notl(crc); // ~crc
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, r13);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx,: Method*
+    // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack, because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = c_rarg0;  // crc
+    const Register buf = c_rarg1;  // source java byte array address
+    const Register len = c_rarg2;  // length
+    const Register off = len;      // offset (reuses 'len'; their live ranges never overlap)
+
+    // Arguments are reversed on the Java expression stack
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
+      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
+      __ addq(buf, off); // + offset
+      __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
+    } else {
+      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
+      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
+      __ addq(buf, off); // + offset
+      __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
+    }
+    // Can now load 'len' since we're finished with 'off'
+    __ movl(len, Address(rsp, wordSize)); // Length
+
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, r13);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
+
+/**
+* Method entry for static native methods:
+*   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
+*   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
+*/
+address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32CIntrinsics) {
+    address entry = __ pc();
+    // Load parameters
+    const Register crc = c_rarg0;  // crc
+    const Register buf = c_rarg1;  // source java byte array address
+    const Register len = c_rarg2;  // length
+    const Register off = c_rarg3;  // offset
+    const Register end = len;
+
+    // Arguments are reversed on the Java expression stack
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
+      __ movptr(buf, Address(rsp, 3 * wordSize)); // long buf
+      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
+      __ addq(buf, off); // + offset
+      __ movl(crc, Address(rsp, 5 * wordSize)); // Initial CRC
+      // Note on 5 * wordSize vs. 4 * wordSize:
+      // *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
+      //                                                   4         2,3          1        0
+      // end starts at SP + 8
+      // The Java(R) Virtual Machine Specification Java SE 7 Edition
+      // 4.10.2.3. Values of Types long and double
+      //    "When calculating operand stack length, values of type long and double have length two."
+    } else {
+      __ movptr(buf, Address(rsp, 3 * wordSize)); // byte[] array
+      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
+      __ addq(buf, off); // + offset
+      __ movl(crc, Address(rsp, 4 * wordSize)); // Initial CRC
+    }
+    __ movl(end, Address(rsp, wordSize)); // end
+    __ subl(end, off); // end - off
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
+    // result in rax
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, r13);           // set sp to sender sp
+    __ jmp(rdi);
+
+    return entry;
+  }
+
+  return NULL;
+}
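The 5 * wordSize versus 4 * wordSize CRC offsets fall out of JVMS operand-slot counting (a long occupies two slots) plus the return address sitting in the first word above rsp, so slot s lives at rsp + (1 + s) * wordSize. A small C++ illustration of that arithmetic (layout values assumed for illustration only):

    #include <cstdio>

    // Slot sizes per JVMS 4.10.2.3: long/double occupy two operand slots.
    // In this entry, slot s (counting from the top of the stack) is at
    // rsp + (1 + s) * wordSize, because the return address takes the first word.
    int main() {
      const int wordSize = 8;  // x86_64
      struct { const char* name; int slot; } args[] = {
        {"end (int)",  0},
        {"off (int)",  1},
        {"buf (long)", 2},     // also occupies slot 3
        {"crc (int)",  4},
      };
      for (auto& a : args)
        std::printf("%-11s -> Address(rsp, %d*wordSize) = rsp + %d\n",
                    a.name, 1 + a.slot, (1 + a.slot) * wordSize);
      return 0;
    }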
+#endif // ! CC_INTERP
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -116,4 +116,87 @@
     method->constants()->cache();
 }
 
+#ifndef _LP64
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : // fall through
+    case T_LONG   : // fall through
+    case T_VOID   : i = 4; break;
+    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
+    case T_DOUBLE : i = 6; break;
+    case T_OBJECT : // fall through
+    case T_ARRAY  : i = 7; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
+}
+#else
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
+         "index out of bounds");
+  return i;
+}
+#endif // _LP64
+
+// These methods should never be compiled; if they were, the interpreter
+// would prefer the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  switch (method_kind(m)) {
+    case Interpreter::java_lang_math_sin     : // fall thru
+    case Interpreter::java_lang_math_cos     : // fall thru
+    case Interpreter::java_lang_math_tan     : // fall thru
+    case Interpreter::java_lang_math_abs     : // fall thru
+    case Interpreter::java_lang_math_log     : // fall thru
+    case Interpreter::java_lang_math_log10   : // fall thru
+    case Interpreter::java_lang_math_sqrt    : // fall thru
+    case Interpreter::java_lang_math_pow     : // fall thru
+    case Interpreter::java_lang_math_exp     :
+      return false;
+    default:
+      return true;
+  }
+}
+
+// How much stack a method activation needs in words.
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
+  const int entry_size = frame::interpreter_frame_monitor_size();
+
+  // Total overhead size: entry_size + (saved rbp through expression stack
+  // bottom). Be sure to change this if you add/subtract anything
+  // to/from the overhead area.
+  const int overhead_size =
+    -(frame::interpreter_frame_initial_sp_offset) + entry_size;
+
+#ifndef _LP64
+  const int stub_code = 4;  // see generate_call_stub
+#else
+  const int stub_code = frame::entry_frame_after_call_words;
+#endif
+
+  const int method_stack = (method->max_locals() + method->max_stack()) *
+                           Interpreter::stackElementWords;
+  return (overhead_size + method_stack + stub_code);
+}
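As a worked instance of the formula, with stand-in numbers only (the real constants come from the platform's frame:: offsets; stackElementWords is assumed to be 1 here):

    #include <cstdio>

    int main() {
      // Illustrative stand-ins; the real values come from frame:: constants.
      const int entry_size          = 2;   // assumed monitor entry size, in words
      const int initial_sp_offset   = -9;  // assumed interpreter_frame_initial_sp_offset
      const int stub_code           = 4;   // the 32-bit value used above
      const int stack_element_words = 1;   // assumed Interpreter::stackElementWords

      const int max_locals = 3, max_stack = 5;  // hypothetical method
      const int overhead_size = -initial_sp_offset + entry_size;
      const int method_stack  = (max_locals + max_stack) * stack_element_words;
      std::printf("activation size = %d words\n",
                  overhead_size + method_stack + stub_code);
      return 0;
    }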
+
 #endif // CC_INTERP
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1916 +0,0 @@
-/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-
-#define __ _masm->
-
-
-#ifndef CC_INTERP
-const int method_offset = frame::interpreter_frame_method_offset * wordSize;
-const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
-const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
-
-//------------------------------------------------------------------------------------------------------------------------
-
-address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
-  address entry = __ pc();
-
-  // Note: There should be a minimal interpreter frame set up when stack
-  // overflow occurs since we check explicitly for it now.
-  //
-#ifdef ASSERT
-  { Label L;
-    __ lea(rax, Address(rbp,
-                frame::interpreter_frame_monitor_block_top_offset * wordSize));
-    __ cmpptr(rax, rsp);  // rax, = maximal rsp for current rbp,
-                        //  (stack grows negative)
-    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
-    __ stop ("interpreter frame not set up");
-    __ bind(L);
-  }
-#endif // ASSERT
-  // Restore bcp under the assumption that the current frame is still
-  // interpreted
-  __ restore_bcp();
-
-  // expression stack must be empty before entering the VM if an exception
-  // happened
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  // throw exception
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception happened
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  // setup parameters
-  // ??? convention: expect aberrant index in register rbx,
-  __ lea(rax, ExternalAddress((address)name));
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), rax, rbx);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
-  address entry = __ pc();
-  // object is at TOS
-  __ pop(rax);
-  // expression stack must be empty before entering the VM if an exception
-  // happened
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_ClassCastException),
-             rax);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
-  assert(!pass_oop || message == NULL, "either oop or message but not both");
-  address entry = __ pc();
-  if (pass_oop) {
-    // object is at TOS
-    __ pop(rbx);
-  }
-  // expression stack must be empty before entering the VM if an exception happened
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  // setup parameters
-  __ lea(rax, ExternalAddress((address)name));
-  if (pass_oop) {
-    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
-  } else {
-    if (message != NULL) {
-      __ lea(rbx, ExternalAddress((address)message));
-    } else {
-      __ movptr(rbx, NULL_WORD);
-    }
-    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
-  }
-  // throw exception
-  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  // NULL last_sp until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
-  __ dispatch_next(state);
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
-  address entry = __ pc();
-
-#ifdef COMPILER2
-  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
-  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
-    for (int i = 1; i < 8; i++) {
-        __ ffree(i);
-    }
-  } else if (UseSSE < 2) {
-    __ empty_FPU_stack();
-  }
-#endif
-  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
-    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
-  } else {
-    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
-  }
-
-  if (state == ftos) {
-    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
-  } else if (state == dtos) {
-    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
-  }
-
-  // Restore stack bottom in case i2c adjusted stack
-  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // and NULL it as marker that rsp is now tos until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
-
-  __ restore_bcp();
-  __ restore_locals();
-
-  if (state == atos) {
-    Register mdp = rbx;
-    Register tmp = rcx;
-    __ profile_return_type(mdp, rax, tmp);
-  }
-
-  const Register cache = rbx;
-  const Register index = rcx;
-  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
-
-  const Register flags = cache;
-  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
-  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
-  __ dispatch_next(state, step);
-
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
-  address entry = __ pc();
-
-  if (state == ftos) {
-    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
-  } else if (state == dtos) {
-    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
-  }
-
-  // The stack is not extended by deopt but we must NULL last_sp as this
-  // entry is like a "return".
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
-  __ restore_bcp();
-  __ restore_locals();
-  // handle exceptions
-  { Label L;
-    const Register thread = rcx;
-    __ get_thread(thread);
-    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
-    __ jcc(Assembler::zero, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-  __ dispatch_next(state, step);
-  return entry;
-}
-
-
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : // fall through
-    case T_LONG   : // fall through
-    case T_VOID   : i = 4; break;
-    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
-    case T_DOUBLE : i = 6; break;
-    case T_OBJECT : // fall through
-    case T_ARRAY  : i = 7; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
-  return i;
-}
-
-
-address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
-  address entry = __ pc();
-  switch (type) {
-    case T_BOOLEAN: __ c2bool(rax);            break;
-    case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
-    case T_BYTE   : __ sign_extend_byte (rax); break;
-    case T_SHORT  : __ sign_extend_short(rax); break;
-    case T_INT    : /* nothing to do */        break;
-    case T_LONG   : /* nothing to do */        break;
-    case T_VOID   : /* nothing to do */        break;
-    case T_DOUBLE :
-    case T_FLOAT  :
-      { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
-        __ pop(t);                            // remove return address first
-        // Must return a result for interpreter or compiler. In SSE
-        // mode, results are returned in xmm0 and the FPU stack must
-        // be empty.
-        if (type == T_FLOAT && UseSSE >= 1) {
-          // Load ST0
-          __ fld_d(Address(rsp, 0));
-          // Store as float and empty fpu stack
-          __ fstp_s(Address(rsp, 0));
-          // and reload
-          __ movflt(xmm0, Address(rsp, 0));
-        } else if (type == T_DOUBLE && UseSSE >= 2 ) {
-          __ movdbl(xmm0, Address(rsp, 0));
-        } else {
-          // restore ST0
-          __ fld_d(Address(rsp, 0));
-        }
-        // and pop the temp
-        __ addptr(rsp, 2 * wordSize);
-        __ push(t);                           // restore return address
-      }
-      break;
-    case T_OBJECT :
-      // retrieve result from frame
-      __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
-      // and verify it
-      __ verify_oop(rax);
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ ret(0);                                   // return from result handler
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
-  address entry = __ pc();
-  __ push(state);
-  __ call_VM(noreg, runtime_entry);
-  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
-  return entry;
-}
-
-
-// Helpers for commoning out cases in the various type of method entries.
-//
-
-// increment invocation count & check for overflow
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test
-//
-// rbx,: method
-// rcx: invocation counter
-//
-void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-  Label done;
-  // Note: In tiered we increment either counters in MethodCounters* or in MDO
-  // depending on whether we're profiling or not.
-  if (TieredCompilation) {
-    int increment = InvocationCounter::count_increment;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      // Are we profiling?
-      __ movptr(rax, Address(rbx, Method::method_data_offset()));
-      __ testptr(rax, rax);
-      __ jccb(Assembler::zero, no_mdo);
-      // Increment counter in the MDO
-      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
-                                                in_bytes(InvocationCounter::counter_offset()));
-      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
-      __ jmp(done);
-    }
-    __ bind(no_mdo);
-    // Increment counter in MethodCounters
-    const Address invocation_counter(rax,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-
-    __ get_method_counters(rbx, rax, done);
-    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
-    __ increment_mask_and_jump(invocation_counter, increment, mask,
-                               rcx, false, Assembler::zero, overflow);
-    __ bind(done);
-  } else { // not TieredCompilation
-    const Address backedge_counter(rax,
-                  MethodCounters::backedge_counter_offset() +
-                  InvocationCounter::counter_offset());
-    const Address invocation_counter(rax,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-
-    __ get_method_counters(rbx, rax, done);
-
-    if (ProfileInterpreter) {
-      __ incrementl(Address(rax,
-              MethodCounters::interpreter_invocation_counter_offset()));
-    }
-
-    // Update standard invocation counters
-    __ movl(rcx, invocation_counter);
-    __ incrementl(rcx, InvocationCounter::count_increment);
-    __ movl(invocation_counter, rcx);             // save invocation count
-
-    __ movl(rax, backedge_counter);               // load backedge counter
-    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
-
-    __ addl(rcx, rax);                            // add both counters
-
-    // profile_method is non-NULL only for interpreted methods, so
-    // profile_method != NULL implies !native_call.
-    // BytecodeInterpreter only calls this for natives, so that code is elided.
-
-    if (ProfileInterpreter && profile_method != NULL) {
-      // Test to see if we should create a method data oop
-      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
-      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
-      __ jcc(Assembler::less, *profile_method_continue);
-
-      // if no method data exists, go to profile_method
-      __ test_method_data_pointer(rax, *profile_method);
-    }
-
-    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
-    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
-    __ jcc(Assembler::aboveEqual, *overflow);
-    __ bind(done);
-  }
-}
-
-void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
-
-  // Asm interpreter on entry
-  // rdi - locals
-  // rsi - bcp
-  // rbx, - method
-  // rdx - cpool
-  // rbp, - interpreter frame
-
-  // C++ interpreter on entry
-  // rsi - new interpreter state pointer
-  // rbp - interpreter frame pointer
-  // rbx - method
-
-  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
-  // rbx, - method
-  // rcx - rcvr (assuming there is one)
-  // top of stack return address of interpreter caller
-  // rsp - sender_sp
-
-  // C++ interpreter only
-  // rsi - previous interpreter state pointer
-
-  // InterpreterRuntime::frequency_counter_overflow takes one argument
-  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
-  // The call returns the address of the verified entry point for the method or NULL
-  // if the compilation did not complete (either went background or bailed out).
-  __ movptr(rax, (intptr_t)false);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
-
-  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
-
-  // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
-  // and jump to the interpreted entry.
-  __ jmp(*do_continue, relocInfo::none);
-
-}
-
-void InterpreterGenerator::generate_stack_overflow_check(void) {
-  // see if we've got enough room on the stack for locals plus overhead.
-  // the expression stack grows down incrementally, so the normal guard
-  // page mechanism will work for that.
-  //
-  // Registers live on entry:
-  //
-  // Asm interpreter
-  // rdx: number of additional locals this frame needs (what we must check)
-  // rbx,: Method*
-
-  // destroyed on exit
-  // rax,
-
-  // NOTE: the additional locals are also always pushed (this wasn't obvious
-  // in generate_fixed_frame), so the guard should work for them too.
-  //
-
-  // monitor entry size: see picture of stack in frame_x86.hpp
-  const int entry_size    = frame::interpreter_frame_monitor_size() * wordSize;
-
-  // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
-  // be sure to change this if you add/subtract anything to/from the overhead area
-  const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) + entry_size;
-
-  const int page_size = os::vm_page_size();
-
-  Label after_frame_check;
-
-  // see if the frame is greater than one page in size. If so,
-  // then we need to verify there is enough stack space remaining
-  // for the additional locals.
-  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
-  __ jcc(Assembler::belowEqual, after_frame_check);
-
-  // compute rsp as if this were going to be the last frame on
-  // the stack before the red zone
-
-  Label after_frame_check_pop;
-
-  __ push(rsi);
-
-  const Register thread = rsi;
-
-  __ get_thread(thread);
-
-  const Address stack_base(thread, Thread::stack_base_offset());
-  const Address stack_size(thread, Thread::stack_size_offset());
-
-  // locals + overhead, in bytes
-  __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));
-
-#ifdef ASSERT
-  Label stack_base_okay, stack_size_okay;
-  // verify that thread stack base is non-zero
-  __ cmpptr(stack_base, (int32_t)NULL_WORD);
-  __ jcc(Assembler::notEqual, stack_base_okay);
-  __ stop("stack base is zero");
-  __ bind(stack_base_okay);
-  // verify that thread stack size is non-zero
-  __ cmpptr(stack_size, 0);
-  __ jcc(Assembler::notEqual, stack_size_okay);
-  __ stop("stack size is zero");
-  __ bind(stack_size_okay);
-#endif
-
-  // Add stack base to locals and subtract stack size
-  __ addptr(rax, stack_base);
-  __ subptr(rax, stack_size);
-
-  // Use the maximum number of pages we might bang.
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
-                                                                              (StackRedPages+StackYellowPages);
-  __ addptr(rax, max_pages * page_size);
-
-  // check against the current stack bottom
-  __ cmpptr(rsp, rax);
-  __ jcc(Assembler::above, after_frame_check_pop);
-
-  __ pop(rsi);  // get saved bcp / (c++ prev state ).
-
-  // Restore sender's sp as SP. This is necessary if the sender's
-  // frame is an extended compiled frame (see gen_c2i_adapter())
-  // and safer anyway in case of JSR292 adaptations.
-
-  __ pop(rax); // return address must be moved if SP is changed
-  __ mov(rsp, rsi);
-  __ push(rax);
-
-  // Note: the restored frame is not necessarily interpreted.
-  // Use the shared runtime version of the StackOverflowError.
-  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
-  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
-  // all done with frame size check
-  __ bind(after_frame_check_pop);
-  __ pop(rsi);
-
-  __ bind(after_frame_check);
-}
-
-// Allocate monitor and lock method (asm interpreter)
-// rbx, - Method*
-//
-void TemplateInterpreterGenerator::lock_method() {
-  // synchronize method
-  const Address access_flags      (rbx, Method::access_flags_offset());
-  const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
-  const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
-
-  #ifdef ASSERT
-    { Label L;
-      __ movl(rax, access_flags);
-      __ testl(rax, JVM_ACC_SYNCHRONIZED);
-      __ jcc(Assembler::notZero, L);
-      __ stop("method doesn't need synchronization");
-      __ bind(L);
-    }
-  #endif // ASSERT
-  // get synchronization object
-  { Label done;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ movl(rax, access_flags);
-    __ testl(rax, JVM_ACC_STATIC);
-    __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
-    __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, Method::const_offset()));
-    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
-    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(rax, Address(rax, mirror_offset));
-    __ bind(done);
-  }
-  // add space for monitor & lock
-  __ subptr(rsp, entry_size);                                           // add space for a monitor entry
-  __ movptr(monitor_block_top, rsp);                                    // set new monitor block top
-  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
-  __ mov(rdx, rsp);                                                    // object address
-  __ lock_object(rdx);
-}
-
-//
-// Generate a fixed interpreter frame. The setup is identical for interpreted
-// and native methods, hence the shared code.
-
-void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
-  // initialize fixed part of activation frame
-  __ push(rax);                                       // save return address
-  __ enter();                                         // save old & set new rbp,
-
-
-  __ push(rsi);                                       // set sender sp
-  __ push((int32_t)NULL_WORD);                        // leave last_sp as null
-  __ movptr(rsi, Address(rbx,Method::const_offset())); // get ConstMethod*
-  __ lea(rsi, Address(rsi,ConstMethod::codes_offset())); // get codebase
-  __ push(rbx);                                      // save Method*
-  if (ProfileInterpreter) {
-    Label method_data_continue;
-    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
-    __ testptr(rdx, rdx);
-    __ jcc(Assembler::zero, method_data_continue);
-    __ addptr(rdx, in_bytes(MethodData::data_offset()));
-    __ bind(method_data_continue);
-    __ push(rdx);                                       // set the mdp (method data pointer)
-  } else {
-    __ push(0);
-  }
-
-  __ movptr(rdx, Address(rbx, Method::const_offset()));
-  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
-  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
-  __ push(rdx);                                       // set constant pool cache
-  __ push(rdi);                                       // set locals pointer
-  if (native_call) {
-    __ push(0);                                       // no bcp
-  } else {
-    __ push(rsi);                                     // set bcp
-    }
-  __ push(0);                                         // reserve word for pointer to expression stack bottom
-  __ movptr(Address(rsp, 0), rsp);                    // set expression stack bottom
-}
-
-
-// Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  // Code: _aload_0, _getfield, _areturn
-  // parameter size = 1
-  //
-  // The code that gets generated by this routine is split into 2 parts:
-  //    1. The "intrinsified" code for G1 (or any SATB based GC),
-  //    2. The slow path - which is an expansion of the regular method entry.
-  //
-  // Notes:-
-  // * In the G1 code we do not check whether we need to block for
-  //   a safepoint. If G1 is enabled then we must execute the specialized
-  //   code for Reference.get (except when the Reference object is null)
-  //   so that we can log the value in the referent field with an SATB
-  //   update buffer.
-  //   If the code for the getfield template is modified so that the
-  //   G1 pre-barrier code is executed when the current method is
-  //   Reference.get() then going through the normal method entry
-  //   will be fine.
-  // * The G1 code below can, however, check the receiver object (the instance
-  //   of java.lang.Reference) and jump to the slow path if null. If the
-  //   Reference object is null then we obviously cannot fetch the referent
-  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
-  //   regular method entry code to generate the NPE.
-  //
-  // This code is based on generate_accessor_entry.
-
-  // rbx,: Method*
-  // rcx: receiver (preserve for slow entry into asm interpreter)
-
-  // rsi: senderSP must be preserved for the slow path; set SP to it on the fast path
-
-  address entry = __ pc();
-
-  const int referent_offset = java_lang_ref_Reference::referent_offset;
-  guarantee(referent_offset > 0, "referent offset not initialized");
-
-  if (UseG1GC) {
-    Label slow_path;
-
-    // Check if local 0 != NULL
-    // If the receiver is null then it is OK to jump to the slow path.
-    __ movptr(rax, Address(rsp, wordSize));
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, slow_path);
-
-    // rax: local 0 (must be preserved across the G1 barrier call)
-    //
-    // rbx: method (at this point it's scratch)
-    // rcx: receiver (at this point it's scratch)
-    // rdx: scratch
-    // rdi: scratch
-    //
-    // rsi: sender sp
-
-    // Preserve the sender sp in case the pre-barrier
-    // calls the runtime
-    __ push(rsi);
-
-    // Load the value of the referent field.
-    const Address field_address(rax, referent_offset);
-    __ movptr(rax, field_address);
-
-    // Generate the G1 pre-barrier code to log the value of
-    // the referent field in an SATB buffer.
-    __ get_thread(rcx);
-    __ g1_write_barrier_pre(noreg /* obj */,
-                            rax /* pre_val */,
-                            rcx /* thread */,
-                            rbx /* tmp */,
-                            true /* tosca_save */,
-                            true /* expand_call */);
-
-    // _areturn
-    __ pop(rsi);                // get sender sp
-    __ pop(rdi);                // get return address
-    __ mov(rsp, rsi);           // set sp to sender sp
-    __ jmp(rdi);
-
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
-    return entry;
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the accessor entry point
-  // Reference.get is an accessor
-  return NULL;
-}
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.update(int crc, int b)
- */
-address InterpreterGenerator::generate_CRC32_update_entry() {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rbx: Method*
-    // rsi: senderSP must be preserved for the slow path; set SP to it on the fast path
-    // rdx: scratch
-    // rdi: scratch
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // We don't generate a local frame and don't align the stack, because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = rax;  // crc
-    const Register val = rdx;  // source java byte value
-    const Register tbl = rdi;  // scratch
-
-    // Arguments are reversed on the Java expression stack
-    __ movl(val, Address(rsp,   wordSize)); // byte value
-    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
-
-    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
-    __ notl(crc); // ~crc
-    __ update_byte_crc32(crc, val, tbl);
-    __ notl(crc); // ~crc
-    // result in rax
-
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, rsi);           // set sp to sender sp
-    __ jmp(rdi);
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
- *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
- */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rbx,: Method*
-    // rsi: senderSP must be preserved for the slow path; set SP to it on the fast path
-    // rdx: scratch
-    // rdi: scratch
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // We don't generate a local frame and don't align the stack, because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = rax;  // crc
-    const Register buf = rdx;  // source java byte array address
-    const Register len = rdi;  // length
-
-    // value              x86_32
-    // interp. arg ptr    ESP + 4
-    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
-    //                                         3           2      1        0
-    // int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
-    //                                              4         2,3      1        0
-
-    // Arguments are reversed on the Java expression stack
-    __ movl(len,   Address(rsp,   4 + 0)); // Length
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
-      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long buf
-      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
-      __ movl(crc,   Address(rsp, 4 + 4 * wordSize)); // Initial CRC
-    } else {
-      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
-      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
-      __ movl(crc,   Address(rsp, 4 + 3 * wordSize)); // Initial CRC
-    }
-
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
-    // result in rax
-
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, rsi);           // set sp to sender sp
-    __ jmp(rdi);
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
- *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
- */
-address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32CIntrinsics) {
-    address entry = __ pc();
-    // Load parameters
-    const Register crc = rax;  // crc
-    const Register buf = rcx;  // source java byte array address
-    const Register len = rdx;  // length
-    const Register end = len;
-
-    // value              x86_32
-    // interp. arg ptr    ESP + 4
-    // int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
-    //                                          3           2      1        0
-    // int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
-    //                                               4         2,3          1        0
-
-    // Arguments are reversed on java expression stack
-    __ movl(end, Address(rsp, 4 + 0)); // end
-    __ subl(len, Address(rsp, 4 + 1 * wordSize));  // end - offset == length
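-    // Unlike CRC32.updateBytes, the CRC32C entries receive 'end' rather
-    // than a length, so the length is reconstructed as len = end - off
-    // before calling the stub.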
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
-      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long address
-      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
-      __ movl(crc, Address(rsp, 4 + 4 * wordSize)); // Initial CRC
-    } else {
-      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
-      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
-      __ movl(crc, Address(rsp, 4 + 3 * wordSize)); // Initial CRC
-    }
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
-    // result in rax
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, rsi);           // set sp to sender sp
-    __ jmp(rdi);
-
-    return entry;
-  }
-  return NULL;
-}
-
-/**
- * Method entry for static native method:
- *    java.lang.Float.intBitsToFloat(int bits)
- */
-address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
-  if (UseSSE >= 1) {
-    address entry = __ pc();
-
-    // rsi: the sender's SP
-
-    // Skip safepoint check (compiler intrinsic versions of this method
-    // do not perform safepoint checks either).
-
-    // Load 'bits' into xmm0 (interpreter returns results in xmm0)
-    __ movflt(xmm0, Address(rsp, wordSize));
-
-    // Return
-    __ pop(rdi); // get return address
-    __ mov(rsp, rsi); // set rsp to the sender's SP
-    __ jmp(rdi);
-    return entry;
-  }
-
-  return NULL;
-}
-
-/**
- * Method entry for static native method:
- *    java.lang.Float.floatToRawIntBits(float value)
- */
-address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
-  if (UseSSE >= 1) {
-    address entry = __ pc();
-
-    // rsi: the sender's SP
-
-    // Skip safepoint check (compiler intrinsic versions of this method
-    // do not perform safepoint checks either).
-
-    // Load the parameter (a floating-point value) into rax.
-    __ movl(rax, Address(rsp, wordSize));
-
-    // Return
-    __ pop(rdi); // get return address
-    __ mov(rsp, rsi); // set rsp to the sender's SP
-    __ jmp(rdi);
-    return entry;
-  }
-
-  return NULL;
-}
-
-
-/**
- * Method entry for static native method:
- *    java.lang.Double.longBitsToDouble(long bits)
- */
-address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
-   if (UseSSE >= 2) {
-     address entry = __ pc();
-
-     // rsi: the sender's SP
-
-     // Skip safepoint check (compiler intrinsic versions of this method
-     // do not perform safepoint checks either).
-
-     // Load 'bits' into xmm0 (interpreter returns results in xmm0)
-     __ movdbl(xmm0, Address(rsp, wordSize));
-
-     // Return
-     __ pop(rdi); // get return address
-     __ mov(rsp, rsi); // set rsp to the sender's SP
-     __ jmp(rdi);
-     return entry;
-   }
-
-   return NULL;
-}
-
-/**
- * Method entry for static native method:
- *    java.lang.Double.doubleToRawLongBits(double value)
- */
-address InterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
-  if (UseSSE >= 2) {
-    address entry = __ pc();
-
-    // rsi: the sender's SP
-
-    // Skip safepoint check (compiler intrinsic versions of this method
-    // do not perform safepoint checks either).
-
-    // Load the parameter (a floating-point value) into rdx:rax.
-    __ movl(rdx, Address(rsp, 2*wordSize));
-    __ movl(rax, Address(rsp, wordSize));
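-    // x86_32 returns jlong values in the edx:eax pair, so the raw bits of
-    // the double argument (low word at rsp+4, high word at rsp+8) are
-    // simply moved into the result registers; no FPU round-trip occurs.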
-
-    // Return
-    __ pop(rdi); // get return address
-    __ mov(rsp, rsi); // set rsp to the sender's SP
-    __ jmp(rdi);
-    return entry;
-  }
-
-  return NULL;
-}
-
-//
-// Interpreter stub for calling a native method. (asm interpreter)
-// This sets up a somewhat different-looking stack for calling the native
-// method than the typical interpreter frame setup.
-//
-
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // rbx,: Method*
-  // rsi: sender sp
-  // rsi: previous interpreter state (C++ interpreter) must be preserved
-  address entry_point = __ pc();
-
-  const Address constMethod       (rbx, Method::const_offset());
-  const Address access_flags      (rbx, Method::access_flags_offset());
-  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
-
-  // get parameter size (always needed)
-  __ movptr(rcx, constMethod);
-  __ load_unsigned_short(rcx, size_of_parameters);
-
-  // Native calls don't need the stack size check, since they have no
-  // expression stack, the arguments are already on the stack, and we
-  // only add a handful of words to the stack.
-
-  // rbx,: Method*
-  // rcx: size of parameters
-  // rsi: sender sp
-
-  __ pop(rax);                                       // get return address
-  // for natives the size of locals is zero
-
-  // compute beginning of parameters (rdi)
-  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
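-  // rdi = rsp + size_of_parameters*stackElementScale - wordSize, i.e. the
-  // address of the first (deepest) parameter slot; for natives this is
-  // also the start of the locals, since there are no additional locals.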
-
-
-  // add 2 zero-initialized slots for native calls
-  // NULL result handler
-  __ push((int32_t)NULL_WORD);
-  // NULL oop temp (mirror or jni oop result)
-  __ push((int32_t)NULL_WORD);
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(true);
-
-  // make sure method is native & not abstract
-#ifdef ASSERT
-  __ movl(rax, access_flags);
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_NATIVE);
-    __ jcc(Assembler::notZero, L);
-    __ stop("tried to execute non-native method as native");
-    __ bind(L);
-  }
-  { Label L;
-    __ testl(rax, JVM_ACC_ABSTRACT);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of a synchronized method which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation will
-  // check this flag.
-
-  __ get_thread(rax);
-  const Address do_not_unlock_if_synchronized(rax,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ movbool(do_not_unlock_if_synchronized, true);
-
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-  }
-
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(true);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ get_thread(rax);
-  __ movbool(do_not_unlock_if_synchronized, false);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so the method is not locked if the counter overflows.
-  //
-  if (synchronized) {
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-      { Label L;
-        __ movl(rax, access_flags);
-        __ testl(rax, JVM_ACC_SYNCHRONIZED);
-        __ jcc(Assembler::zero, L);
-        __ stop("method needs synchronization");
-        __ bind(L);
-      }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  { Label L;
-    const Address monitor_block_top (rbp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ movptr(rax, monitor_block_top);
-    __ cmpptr(rax, rsp);
-    __ jcc(Assembler::equal, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti/dtrace support
-  __ notify_method_entry();
-
-  // work registers
-  const Register method = rbx;
-  const Register thread = rdi;
-  const Register t      = rcx;
-
-  // allocate space for parameters
-  __ get_method(method);
-  __ movptr(t, Address(method, Method::const_offset()));
-  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
-
-  __ shlptr(t, Interpreter::logStackElementSize);
-  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
-  __ subptr(rsp, t);
-  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
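-  // e.g. with StackAlignmentInBytes == 16 this is rsp &= ~15; the native
-  // ABI (and any SSE code in the callee) expects a 16-byte aligned stack.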
-
-  // get signature handler
-  { Label L;
-    __ movptr(t, Address(method, Method::signature_handler_offset()));
-    __ testptr(t, t);
-    __ jcc(Assembler::notZero, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
-    __ get_method(method);
-    __ movptr(t, Address(method, Method::signature_handler_offset()));
-    __ bind(L);
-  }
-
-  // call signature handler
-  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::to  () == rsp, "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t  , "adjust this code");
-  // The generated handlers do not touch RBX (the method oop).
-  // However, large signatures cannot be cached and are generated
-  // each time here.  The slow-path generator may clobber RBX at
-  // some point, so we must reload it after the call.
-  __ call(t);
-  __ get_method(method);        // slow path call blows RBX on DevStudio 5.0
-
-  // result handler is in rax,
-  // set result handler
-  __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);
-
-  // pass mirror handle if static call
-  { Label L;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ movl(t, Address(method, Method::access_flags_offset()));
-    __ testl(t, JVM_ACC_STATIC);
-    __ jcc(Assembler::zero, L);
-    // get mirror
-    __ movptr(t, Address(method, Method::const_offset()));
-    __ movptr(t, Address(t, ConstMethod::constants_offset()));
-    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(t, Address(t, mirror_offset));
-    // copy mirror into activation frame
-    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
-    // pass handle to mirror
-    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
-    __ movptr(Address(rsp, wordSize), t);
-    __ bind(L);
-  }
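-  // For a static method the JNI convention passes the class mirror as the
-  // second argument (after JNIEnv*); illustratively, the native side sees
-  //   ... JNICALL Java_Pkg_Cls_name(JNIEnv* env, jclass clazz, ...)
-  // (hypothetical name). The handle passed is the address of the oop_temp
-  // slot filled in above.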
-
-  // get native function entry point
-  { Label L;
-    __ movptr(rax, Address(method, Method::native_function_offset()));
-    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
-    __ cmpptr(rax, unsatisfied.addr());
-    __ jcc(Assembler::notEqual, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
-    __ get_method(method);
-    __ movptr(rax, Address(method, Method::native_function_offset()));
-    __ bind(L);
-  }
-
-  // pass JNIEnv
-  __ get_thread(thread);
-  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
-  __ movptr(Address(rsp, 0), t);
-
-  // set_last_Java_frame_before_call
-  // It is enough that the pc()
-  // points into the right code segment. It does not have to be the correct return pc.
-  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
-
-  // change thread state
-#ifdef ASSERT
-  { Label L;
-    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
-    __ cmpl(t, _thread_in_Java);
-    __ jcc(Assembler::equal, L);
-    __ stop("Wrong thread state in native stub");
-    __ bind(L);
-  }
-#endif
-
-  // Change state to native
-  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
-  __ call(rax);
-
-  // result potentially in rdx:rax or ST0
-
-  // Verify or restore cpu control state after JNI call
-  __ restore_cpu_control_state_after_jni();
-
-  // save potential result in ST(0) & rdx:rax
-  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
-  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
-  // It is safe to do this push because state is _thread_in_native and return address will be found
-  // via _last_native_pc and not via _last_java_sp
-
-  // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
-  // If the order changes or anything else is added to the stack the code in
-  // interpreter_frame_result will have to be changed.
-
-  { Label L;
-    Label push_double;
-    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
-    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
-    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
-              float_handler.addr());
-    __ jcc(Assembler::equal, push_double);
-    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
-              double_handler.addr());
-    __ jcc(Assembler::notEqual, L);
-    __ bind(push_double);
-    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
-    __ bind(L);
-  }
-  __ push(ltos);
-
-  // change thread state
-  __ get_thread(thread);
-  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
-  if (os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ membar(Assembler::Membar_mask_bits(
-           Assembler::LoadLoad | Assembler::LoadStore |
-           Assembler::StoreLoad | Assembler::StoreStore));
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(thread, rcx);
-    }
-  }
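-  // Either path above makes the _thread_in_native_trans store visible
-  // before the safepoint-state read that follows: a full fence with
-  // UseMembar, or a write to this thread's slot of the serialization page,
-  // which the VM thread protects to synchronize with trans-state threads
-  // while bringing a safepoint to a start.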
-
-  if (AlwaysRestoreFPU) {
-    //  Make sure the control word is correct.
-    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
-  }
-
-  // check for safepoint operation in progress and/or pending suspend requests
-  { Label Continue;
-
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-
-    Label L;
-    __ jcc(Assembler::notEqual, L);
-    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
-    __ jcc(Assembler::equal, Continue);
-    __ bind(L);
-
-    // Don't use call_VM as it will see a possible pending exception and forward it
-    // and never return here preventing us from clearing _last_native_pc down below.
-    // Also can't use call_VM_leaf, as it will check to see if rsi & rdi are
-    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
-    // by hand.
-    //
-    __ push(thread);
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
-                                            JavaThread::check_special_condition_for_native_trans)));
-    __ increment(rsp, wordSize);
-    __ get_thread(thread);
-
-    __ bind(Continue);
-  }
-
-  // change thread state
-  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
-
-  __ reset_last_Java_frame(thread, true, true);
-
-  // reset handle block
-  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
-  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
-
-  // If result was an oop then unbox and save it in the frame
-  { Label L;
-    Label no_oop, store_result;
-    ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT));
-    __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
-              handler.addr());
-    __ jcc(Assembler::notEqual, no_oop);
-    __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
-    __ pop(ltos);
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, store_result);
-    // unbox
-    __ movptr(rax, Address(rax, 0));
-    __ bind(store_result);
-    __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
-    // keep stack depth as expected by pushing oop which will eventually be discarded
-    __ push(ltos);
-    __ bind(no_oop);
-  }
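-  // JNI object results are returned as handles; the 'unbox' above is a
-  // single dereference (result = *(oop*)handle) guarded against NULL, and
-  // the oop is parked in oop_temp so GC can find and update it.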
-
-  {
-     Label no_reguard;
-     __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
-     __ jcc(Assembler::notEqual, no_reguard);
-
-     __ pusha();
-     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
-     __ popa();
-
-     __ bind(no_reguard);
-   }
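-  // A stack overflow in the native code disables the yellow guard zone;
-  // the block above reguards it before more Java code runs. pusha/popa
-  // keep the integer register state intact around the runtime call.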
-
-  // restore rsi to have a legal interpreter frame,
-  // i.e., bci == 0 <=> rsi == code_base()
-  // Can't call_VM until bcp is within a reasonable range.
-  __ get_method(method);      // method is junk from thread_in_native to now.
-  __ movptr(rsi, Address(method,Method::const_offset()));   // get ConstMethod*
-  __ lea(rsi, Address(rsi,ConstMethod::codes_offset()));    // get codebase
-
-  // handle exceptions (exception handling will handle unlocking!)
-  { Label L;
-    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
-    __ jcc(Assembler::zero, L);
-    // Note: At some point we may want to unify this with the code used in call_VM_base();
-    //       i.e., we should use the StubRoutines::forward_exception code. For now this
-    //       doesn't work here because the rsp is not correctly set at this point.
-    __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-
-  // do unlocking if necessary
-  { Label L;
-    __ movl(t, Address(method, Method::access_flags_offset()));
-    __ testl(t, JVM_ACC_SYNCHRONIZED);
-    __ jcc(Assembler::zero, L);
-    // the code below should be shared with interpreter macro assembler implementation
-    { Label unlock;
-      // BasicObjectLock will be first in the list, since this is a synchronized method. However, we need
-      // to check that the object has not been unlocked by an explicit monitorexit bytecode.
-      const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
-
-      __ lea(rdx, monitor);                   // address of first monitor
-
-      __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
-      __ testptr(t, t);
-      __ jcc(Assembler::notZero, unlock);
-
-      // Entry already unlocked, need to throw exception
-      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
-      __ should_not_reach_here();
-
-      __ bind(unlock);
-      __ unlock_object(rdx);
-    }
-    __ bind(L);
-  }
-
-  // jvmti/dtrace support
-  // Note: This must happen _after_ handling/throwing any exceptions since
-  //       the exception handler code notifies the runtime of method exits
-  //       too. If this happens before, method entry/exit notifications are
-  //       not properly paired (was bug - gri 11/22/99).
-  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
-
-  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
-  __ pop(ltos);
-  __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
-  __ call(t);
-
-  // remove activation
-  __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
-  __ leave();                                // remove frame anchor
-  __ pop(rdi);                               // get return address
-  __ mov(rsp, t);                            // set sp to sender sp
-  __ jmp(rdi);
-
-  if (inc_counter) {
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
-
-//
-// Generic interpreted method entry to (asm) interpreter
-//
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // rbx,: Method*
-  // rsi: sender sp
-  address entry_point = __ pc();
-
-  const Address constMethod       (rbx, Method::const_offset());
-  const Address access_flags      (rbx, Method::access_flags_offset());
-  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
-  const Address size_of_locals    (rdx, ConstMethod::size_of_locals_offset());
-
-  // get parameter size (always needed)
-  __ movptr(rdx, constMethod);
-  __ load_unsigned_short(rcx, size_of_parameters);
-
-  // rbx,: Method*
-  // rcx: size of parameters
-
-  // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i)
-
-  __ load_unsigned_short(rdx, size_of_locals);       // get size of locals in words
-  __ subl(rdx, rcx);                                // rdx = no. of additional locals
-
-  // see if we've got enough room on the stack for locals plus overhead.
-  generate_stack_overflow_check();
-
-  // get return address
-  __ pop(rax);
-
-  // compute beginning of parameters (rdi)
-  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
-
-  // rdx - # of additional locals
-  // allocate space for locals
-  // explicitly initialize locals
-  {
-    Label exit, loop;
-    __ testl(rdx, rdx);
-    __ jcc(Assembler::lessEqual, exit);               // do nothing if rdx <= 0
-    __ bind(loop);
-    __ push((int32_t)NULL_WORD);                      // initialize local variables
-    __ decrement(rdx);                                // until everything initialized
-    __ jcc(Assembler::greater, loop);
-    __ bind(exit);
-  }
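-  // The loop above is simply (illustrative):
-  //   for (int i = 0; i < additional_locals; i++) push(NULL_WORD);
-  // locals must start out zeroed so GC never sees stale bits where an
-  // oop local will live.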
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(false);
-
-  // make sure method is not native & not abstract
-#ifdef ASSERT
-  __ movl(rax, access_flags);
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_NATIVE);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute native method as non-native");
-    __ bind(L);
-  }
-  { Label L;
-    __ testl(rax, JVM_ACC_ABSTRACT);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of a synchronized method which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation will
-  // check this flag.
-
-  __ get_thread(rax);
-  const Address do_not_unlock_if_synchronized(rax,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ movbool(do_not_unlock_if_synchronized, true);
-
-  __ profile_parameters_type(rax, rcx, rdx);
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  Label profile_method;
-  Label profile_method_continue;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
-    if (ProfileInterpreter) {
-      __ bind(profile_method_continue);
-    }
-  }
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(false);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ get_thread(rax);
-  __ movbool(do_not_unlock_if_synchronized, false);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so the method is not locked if the counter overflows.
-  //
-  if (synchronized) {
-    // Allocate monitor and lock method
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-      { Label L;
-        __ movl(rax, access_flags);
-        __ testl(rax, JVM_ACC_SYNCHRONIZED);
-        __ jcc(Assembler::zero, L);
-        __ stop("method needs synchronization");
-        __ bind(L);
-      }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  { Label L;
-     const Address monitor_block_top (rbp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ movptr(rax, monitor_block_top);
-    __ cmpptr(rax, rsp);
-    __ jcc(Assembler::equal, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti support
-  __ notify_method_entry();
-
-  __ dispatch_next(vtos);
-
-  // invocation counter overflow
-  if (inc_counter) {
-    if (ProfileInterpreter) {
-      // We have decided to profile this method in the interpreter
-      __ bind(profile_method);
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-      __ set_method_data_pointer_for_bcp();
-      __ get_method(rbx);
-      __ jmp(profile_method_continue);
-    }
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
-
-
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  switch (method_kind(m)) {
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     :
-      return false;
-    default:
-      return true;
-  }
-}
-
-// How much stack a method activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-
-  const int stub_code = 4;  // see generate_call_stub
-  // Save space for one monitor to get into the interpreted method in case
-  // the method is synchronized
-  int monitor_size    = method->is_synchronized() ?
-                                1*frame::interpreter_frame_monitor_size() : 0;
-
-  // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
-  // be sure to change this if you add/subtract anything to/from the overhead area
-  const int overhead_size = -frame::interpreter_frame_initial_sp_offset;
-
-  const int method_stack = (method->max_locals() + method->max_stack()) *
-                           Interpreter::stackElementWords;
-  return overhead_size + method_stack + stub_code;
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-// Exceptions
-
-void TemplateInterpreterGenerator::generate_throw_exception() {
-  // Entry point in previous activation (i.e., if the caller was interpreted)
-  Interpreter::_rethrow_exception_entry = __ pc();
-  const Register thread = rcx;
-
-  // Restore sp to interpreter_frame_last_sp even though we are going
-  // to empty the expression stack for the exception processing.
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
-  // rax,: exception
-  // rdx: return address/pc that threw exception
-  __ restore_bcp();                              // rsi points to call/send
-  __ restore_locals();
-
-  // Entry point for exceptions thrown within interpreter code
-  Interpreter::_throw_exception_entry = __ pc();
-  // expression stack is undefined here
-  // rax,: exception
-  // rsi: exception bcp
-  __ verify_oop(rax);
-
-  // expression stack must be empty before entering the VM in case of an exception
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  // find exception handler address and preserve exception oop
-  __ call_VM(rdx, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), rax);
-  // rax,: exception handler entry point
-  // rdx: preserved exception oop
-  // rsi: bcp for exception handler
-  __ push_ptr(rdx);                              // push exception which is now the only value on the stack
-  __ jmp(rax);                                   // jump to exception handler (may be _remove_activation_entry!)
-
-  // If the exception is not handled in the current frame the frame is removed and
-  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
-  //
-  // Note: At this point the bci is still that of the instruction which caused
-  //       the exception and the expression stack is empty. Thus, for any VM calls
-  //       at this point, GC will find a legal oop map (with empty expression stack).
-
-  // In current activation
-  // tos: exception
-  // rsi: exception bcp
-
-  //
-  // JVMTI PopFrame support
-  //
-
-  Interpreter::_remove_activation_preserving_args_entry = __ pc();
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  // Set the popframe_processing bit in pending_popframe_condition indicating that we are
-  // currently handling popframe, so that call_VMs that may happen later do not trigger new
-  // popframe handling cycles.
-  __ get_thread(thread);
-  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
-  __ orl(rdx, JavaThread::popframe_processing_bit);
-  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
-
-  {
-    // Check to see whether we are returning to a deoptimized frame.
-    // (The PopFrame call ensures that the caller of the popped frame is
-    // either interpreted or compiled and deoptimizes it if compiled.)
-    // In this case, we can't call dispatch_next() after the frame is
-    // popped, but instead must save the incoming arguments and restore
-    // them after deoptimization has occurred.
-    //
-    // Note that we don't compare the return PC against the
-    // deoptimization blob's unpack entry because of the presence of
-    // adapter frames in C2.
-    Label caller_not_deoptimized;
-    __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
-    __ testl(rax, rax);
-    __ jcc(Assembler::notZero, caller_not_deoptimized);
-
-    // Compute size of arguments for saving when returning to deoptimized caller
-    __ get_method(rax);
-    __ movptr(rax, Address(rax, Method::const_offset()));
-    __ load_unsigned_short(rax, Address(rax, ConstMethod::size_of_parameters_offset()));
-    __ shlptr(rax, Interpreter::logStackElementSize);
-    __ restore_locals();
-    __ subptr(rdi, rax);
-    __ addptr(rdi, wordSize);
-    // Save these arguments
-    __ get_thread(thread);
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);
-
-    __ remove_activation(vtos, rdx,
-                         /* throw_monitor_exception */ false,
-                         /* install_monitor_exception */ false,
-                         /* notify_jvmdi */ false);
-
-    // Inform deoptimization that it is responsible for restoring these arguments
-    __ get_thread(thread);
-    __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
-
-    // Continue in deoptimization handler
-    __ jmp(rdx);
-
-    __ bind(caller_not_deoptimized);
-  }
-
-  __ remove_activation(vtos, rdx,
-                       /* throw_monitor_exception */ false,
-                       /* install_monitor_exception */ false,
-                       /* notify_jvmdi */ false);
-
-  // Finish with popframe handling
-  // A previous I2C followed by a deoptimization might have moved the
-  // outgoing arguments further up the stack. PopFrame expects the
-  // mutations to those outgoing arguments to be preserved and other
-  // constraints basically require this frame to look exactly as
-  // though it had previously invoked an interpreted activation with
-  // no space between the top of the expression stack (current
-  // last_sp) and the top of stack. Rather than force deopt to
-  // maintain this kind of invariant all the time we call a small
-  // fixup routine to move the mutated arguments onto the top of our
-  // expression stack if necessary.
-  __ mov(rax, rsp);
-  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ get_thread(thread);
-  // PC must point into interpreter here
-  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
-  __ get_thread(thread);
-  __ reset_last_Java_frame(thread, true, true);
-  // Restore the last_sp and null it out
-  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
-
-  __ restore_bcp();
-  __ restore_locals();
-  // The method data pointer was incremented already during
-  // call profiling. We have to restore the mdp for the current bcp.
-  if (ProfileInterpreter) {
-    __ set_method_data_pointer_for_bcp();
-  }
-
-  // Clear the popframe condition flag
-  __ get_thread(thread);
-  __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
-
-#if INCLUDE_JVMTI
-  {
-    Label L_done;
-    const Register local0 = rdi;
-
-    __ cmpb(Address(rsi, 0), Bytecodes::_invokestatic);
-    __ jcc(Assembler::notEqual, L_done);
-
-    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
-    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
-
-    __ get_method(rdx);
-    __ movptr(rax, Address(local0, 0));
-    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rsi);
-
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, L_done);
-
-    __ movptr(Address(rbx, 0), rax);
-    __ bind(L_done);
-  }
-#endif // INCLUDE_JVMTI
-
-  __ dispatch_next(vtos);
-  // end of PopFrame support
-
-  Interpreter::_remove_activation_entry = __ pc();
-
-  // preserve exception over this code sequence
-  __ pop_ptr(rax);
-  __ get_thread(thread);
-  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
-  // remove the activation (without doing throws on illegalMonitorExceptions)
-  __ remove_activation(vtos, rdx, false, true, false);
-  // restore exception
-  __ get_thread(thread);
-  __ get_vm_result(rax, thread);
-
-  // In between activations - previous activation type unknown yet
-  // compute continuation point - the continuation point expects
-  // the following registers set up:
-  //
-  // rax: exception
-  // rdx: return address/pc that threw exception
-  // rsp: expression stack of caller
-  // rbp: rbp, of caller
-  __ push(rax);                                  // save exception
-  __ push(rdx);                                  // save return address
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
-  __ mov(rbx, rax);                              // save exception handler
-  __ pop(rdx);                                   // restore return address
-  __ pop(rax);                                   // restore exception
-  // Note that an "issuing PC" is actually the next PC after the call
-  __ jmp(rbx);                                   // jump to exception handler of caller
-}
-
-
-//
-// JVMTI ForceEarlyReturn support
-//
-address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
-  address entry = __ pc();
-  const Register thread = rcx;
-
-  __ restore_bcp();
-  __ restore_locals();
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  __ load_earlyret_value(state);
-
-  __ get_thread(thread);
-  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
-  const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
-
-  // Clear the earlyret state
-  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
-
-  __ remove_activation(state, rsi,
-                       false, /* throw_monitor_exception */
-                       false, /* install_monitor_exception */
-                       true); /* notify_jvmdi */
-  __ jmp(rsi);
-  return entry;
-} // end of ForceEarlyReturn support
-
-
-//------------------------------------------------------------------------------------------------------------------------
-// Helper for vtos entry point generation
-
-void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
-  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
-  Label L;
-  fep = __ pc(); __ push(ftos); __ jmp(L);
-  dep = __ pc(); __ push(dtos); __ jmp(L);
-  lep = __ pc(); __ push(ltos); __ jmp(L);
-  aep = __ pc(); __ push(atos); __ jmp(L);
-  bep = cep = sep =             // fall through
-  iep = __ pc(); __ push(itos); // fall through
-  vep = __ pc(); __ bind(L);    // fall through
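-  // bep/cep/sep alias iep because byte, char and short values are held
-  // in rax as ints; each non-void state pushes its TOS register so the
-  // vtos template finds all operands on the expression stack at L.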
-  generate_and_dispatch(t);
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
- : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-
-// Non-product code
-#ifndef PRODUCT
-address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
-  address entry = __ pc();
-
-  // prepare expression stack
-  __ pop(rcx);          // pop return address so expression stack is 'pure'
-  __ push(state);       // save tosca
-
-  // pass tosca registers as arguments & call tracer
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
-  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
-  __ pop(state);        // restore tosca
-
-  // return
-  __ jmp(rcx);
-
-  return entry;
-}
-
-
-void TemplateInterpreterGenerator::count_bytecode() {
-  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
-}
-
-
-void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
-  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
-}
-
-
-void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
-  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
-  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
-  __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
-  ExternalAddress table((address) BytecodePairHistogram::_counters);
-  Address index(noreg, rbx, Address::times_4);
-  __ incrementl(ArrayAddress(table, index));
-}
-
-
-void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
-  // Call a little run-time stub to avoid blow-up for each bytecode.
-  // The run-time stub saves the right registers, depending on
-  // the tosca in-state for the given template.
-  assert(Interpreter::trace_code(t->tos_in()) != NULL,
-         "entry must have been generated");
-  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
-}
-
-
-void TemplateInterpreterGenerator::stop_interpreter_at() {
-  Label L;
-  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
-           StopInterpreterAt);
-  __ jcc(Assembler::notEqual, L);
-  __ int3();
-  __ bind(L);
-}
-#endif // !PRODUCT
-#endif // CC_INTERP
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1866 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-
-#define __ _masm->
-
-#ifndef CC_INTERP
-
-const int method_offset = frame::interpreter_frame_method_offset * wordSize;
-const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
-const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
-
-//-----------------------------------------------------------------------------
-
-address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
-  address entry = __ pc();
-
-#ifdef ASSERT
-  {
-    Label L;
-    __ lea(rax, Address(rbp,
-                        frame::interpreter_frame_monitor_block_top_offset *
-                        wordSize));
-    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (the
-                         // stack grows downward)
-    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
-    __ stop ("interpreter frame not set up");
-    __ bind(L);
-  }
-#endif // ASSERT
-  // Restore bcp under the assumption that the current frame is still
-  // interpreted
-  __ restore_bcp();
-
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // throw exception
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_StackOverflowError));
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
-        const char* name) {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // setup parameters
-  // ??? convention: expect aberrant index in register ebx
-  __ lea(c_rarg1, ExternalAddress((address)name));
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_ArrayIndexOutOfBoundsException),
-             c_rarg1, rbx);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
-  address entry = __ pc();
-
-  // object is at TOS
-  __ pop(c_rarg1);
-
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_ClassCastException),
-             c_rarg1);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_exception_handler_common(
-        const char* name, const char* message, bool pass_oop) {
-  assert(!pass_oop || message == NULL, "either oop or message but not both");
-  address entry = __ pc();
-  if (pass_oop) {
-    // object is at TOS
-    __ pop(c_rarg2);
-  }
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // setup parameters
-  __ lea(c_rarg1, ExternalAddress((address)name));
-  if (pass_oop) {
-    __ call_VM(rax, CAST_FROM_FN_PTR(address,
-                                     InterpreterRuntime::
-                                     create_klass_exception),
-               c_rarg1, c_rarg2);
-  } else {
-    // Kind of lame: ExternalAddress can't take NULL because
-    // external_word_Relocation will assert.
-    if (message != NULL) {
-      __ lea(c_rarg2, ExternalAddress((address)message));
-    } else {
-      __ movptr(c_rarg2, NULL_WORD);
-    }
-    __ call_VM(rax,
-               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
-               c_rarg1, c_rarg2);
-  }
-  // throw exception
-  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  // NULL last_sp until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
-  __ dispatch_next(state);
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
-  address entry = __ pc();
-
-  // Restore stack bottom in case i2c adjusted stack
-  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // and NULL it as marker that esp is now tos until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
-
-  __ restore_bcp();
-  __ restore_locals();
-
-  if (state == atos) {
-    Register mdp = rbx;
-    Register tmp = rcx;
-    __ profile_return_type(mdp, rax, tmp);
-  }
-
-  const Register cache = rbx;
-  const Register index = rcx;
-  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
-
-  const Register flags = cache;
-  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
-  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
-  __ dispatch_next(state, step);
-
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
-  address entry = __ pc();
-  // NULL last_sp until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
-  __ restore_bcp();
-  __ restore_locals();
-#if INCLUDE_JVMCI
-  // Check if we need to take lock at entry of synchronized method.
-  if (UseJVMCICompiler) {
-    Label L;
-    __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
-    __ jcc(Assembler::zero, L);
-    // Clear flag.
-    __ movb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
-    // Satisfy calling convention for lock_method().
-    __ get_method(rbx);
-    // Take lock.
-    lock_method();
-    __ bind(L);
-  }
-#endif
-  // handle exceptions
-  {
-    Label L;
-    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
-    __ jcc(Assembler::zero, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-  __ dispatch_next(state, step);
-  return entry;
-}
-
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
-         "index out of bounds");
-  return i;
-}
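-// The index selects the per-type native result handler, e.g. (illustrative):
-//   address h = AbstractInterpreter::result_handler(T_OBJECT);
-// T_OBJECT and T_ARRAY deliberately share one handler slot.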
-
-
-address TemplateInterpreterGenerator::generate_result_handler_for(
-        BasicType type) {
-  address entry = __ pc();
-  switch (type) {
-  case T_BOOLEAN: __ c2bool(rax);            break;
-  case T_CHAR   : __ movzwl(rax, rax);       break;
-  case T_BYTE   : __ sign_extend_byte(rax);  break;
-  case T_SHORT  : __ sign_extend_short(rax); break;
-  case T_INT    : /* nothing to do */        break;
-  case T_LONG   : /* nothing to do */        break;
-  case T_VOID   : /* nothing to do */        break;
-  case T_FLOAT  : /* nothing to do */        break;
-  case T_DOUBLE : /* nothing to do */        break;
-  case T_OBJECT :
-    // retrieve result from frame
-    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
-    // and verify it
-    __ verify_oop(rax);
-    break;
-  default       : ShouldNotReachHere();
-  }
-  __ ret(0);                                   // return from result handler
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_safept_entry_for(
-        TosState state,
-        address runtime_entry) {
-  address entry = __ pc();
-  __ push(state);
-  __ call_VM(noreg, runtime_entry);
-  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
-  return entry;
-}
-
-
-
-// Helpers for commoning out cases in the various type of method entries.
-//
-
-
-// increment invocation count & check for overflow
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test
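-//       ('sticky': once the limit is crossed the test keeps succeeding
-//        on later calls, so a missed overflow event is simply retried)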
-//
-// rbx: method
-// ecx: invocation counter
-//
-void InterpreterGenerator::generate_counter_incr(
-        Label* overflow,
-        Label* profile_method,
-        Label* profile_method_continue) {
-  Label done;
-  // Note: In tiered we increment counters in either the Method* or the MDO, depending on whether we're profiling or not.
-  if (TieredCompilation) {
-    int increment = InvocationCounter::count_increment;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      // Are we profiling?
-      __ movptr(rax, Address(rbx, Method::method_data_offset()));
-      __ testptr(rax, rax);
-      __ jccb(Assembler::zero, no_mdo);
-      // Increment counter in the MDO
-      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
-                                                in_bytes(InvocationCounter::counter_offset()));
-      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
-      __ jmp(done);
-    }
-    __ bind(no_mdo);
-    // Increment counter in MethodCounters
-    const Address invocation_counter(rax,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-    __ get_method_counters(rbx, rax, done);
-    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
-    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
-                               false, Assembler::zero, overflow);
-    __ bind(done);
-  } else { // not TieredCompilation
-    const Address backedge_counter(rax,
-                  MethodCounters::backedge_counter_offset() +
-                  InvocationCounter::counter_offset());
-    const Address invocation_counter(rax,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-
-    __ get_method_counters(rbx, rax, done);
-
-    if (ProfileInterpreter) {
-      __ incrementl(Address(rax,
-              MethodCounters::interpreter_invocation_counter_offset()));
-    }
-    // Update standard invocation counters
-    __ movl(rcx, invocation_counter);
-    __ incrementl(rcx, InvocationCounter::count_increment);
-    __ movl(invocation_counter, rcx); // save invocation count
-
-    __ movl(rax, backedge_counter);   // load backedge counter
-    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
-
-    __ addl(rcx, rax);                // add both counters
-
-    // profile_method is non-NULL only for interpreted methods, so
-    // (profile_method != NULL) == !native_call
-
-    if (ProfileInterpreter && profile_method != NULL) {
-      // Test to see if we should create a method data oop
-      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
-      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
-      __ jcc(Assembler::less, *profile_method_continue);
-
-      // if no method data exists, go to profile_method
-      __ test_method_data_pointer(rax, *profile_method);
-    }
-
-    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
-    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
-    __ jcc(Assembler::aboveEqual, *overflow);
-    __ bind(done);
-  }
-}
-
-void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
-
-  // Asm interpreter on entry
-  // r14 - locals
-  // r13 - bcp
-  // rbx - method
-  // edx - cpool --- DOES NOT APPEAR TO BE TRUE
-  // rbp - interpreter frame
-
-  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
-  // Everything as it was on entry
-  // rdx is not restored. Doesn't appear to really be set.
-
-  // InterpreterRuntime::frequency_counter_overflow takes two
-  // arguments, the first (thread) is passed by call_VM, the second
-  // indicates if the counter overflow occurs at a backwards branch
-  // (NULL bcp).  We pass zero for it.  The call returns the address
-  // of the verified entry point for the method or NULL if the
-  // compilation did not complete (either went background or bailed
-  // out).
-  __ movl(c_rarg1, 0);
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::frequency_counter_overflow),
-             c_rarg1);
-
-  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
-  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
-  // and jump to the interpreted entry.
-  __ jmp(*do_continue, relocInfo::none);
-}
-
-// See if we've got enough room on the stack for locals plus overhead.
-// The expression stack grows down incrementally, so the normal guard
-// page mechanism will work for that.
-//
-// NOTE: The additional locals are also always pushed (this wasn't
-// obvious in generate_fixed_frame), so the guard should work for
-// them too.
-//
-// Args:
-//      rdx: number of additional locals this frame needs (what we must check)
-//      rbx: Method*
-//
-// Kills:
-//      rax
-void InterpreterGenerator::generate_stack_overflow_check(void) {
-
-  // monitor entry size: see picture of stack in frame_x86.hpp
-  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
-
-  // total overhead size: entry_size + (saved rbp through expr stack
-  // bottom).  be sure to change this if you add/subtract anything
-  // to/from the overhead area
-  const int overhead_size =
-    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
-
-  const int page_size = os::vm_page_size();
-
-  Label after_frame_check;
-
-  // see if the frame is greater than one page in size. If so,
-  // then we need to verify there is enough stack space remaining
-  // for the additional locals.
-  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
-  __ jcc(Assembler::belowEqual, after_frame_check);
-
-  // compute rsp as if this were going to be the last frame on
-  // the stack before the red zone
-
-  const Address stack_base(r15_thread, Thread::stack_base_offset());
-  const Address stack_size(r15_thread, Thread::stack_size_offset());
-
-  // locals + overhead, in bytes
-  __ mov(rax, rdx);
-  __ shlptr(rax, Interpreter::logStackElementSize);  // 2 slots per parameter.
-  __ addptr(rax, overhead_size);
-
-#ifdef ASSERT
-  Label stack_base_okay, stack_size_okay;
-  // verify that thread stack base is non-zero
-  __ cmpptr(stack_base, (int32_t)NULL_WORD);
-  __ jcc(Assembler::notEqual, stack_base_okay);
-  __ stop("stack base is zero");
-  __ bind(stack_base_okay);
-  // verify that thread stack size is non-zero
-  __ cmpptr(stack_size, 0);
-  __ jcc(Assembler::notEqual, stack_size_okay);
-  __ stop("stack size is zero");
-  __ bind(stack_size_okay);
-#endif
-
-  // Add stack base to locals and subtract stack size
-  __ addptr(rax, stack_base);
-  __ subptr(rax, stack_size);
-
-  // Use the maximum number of pages we might bang.
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
-                                                                              (StackRedPages+StackYellowPages);
-
-  // add in the red and yellow zone sizes
-  __ addptr(rax, max_pages * page_size);
-
-  // check against the current stack bottom
-  __ cmpptr(rsp, rax);
-  __ jcc(Assembler::above, after_frame_check);
-
-  // Restore sender's sp as SP. This is necessary if the sender's
-  // frame is an extended compiled frame (see gen_c2i_adapter())
-  // and safer anyway in case of JSR292 adaptations.
-
-  __ pop(rax); // return address must be moved if SP is changed
-  __ mov(rsp, r13);
-  __ push(rax);
-
-  // Note: the restored frame is not necessarily interpreted.
-  // Use the shared runtime version of the StackOverflowError.
-  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
-  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
-
-  // all done with frame size check
-  __ bind(after_frame_check);
-}
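
For reference, the limit computation performed by the generated check is
roughly the following C++ (a sketch; stack_base/stack_size model the Thread
fields read above and guard_bytes models max_pages * page_size):

    #include <cstddef>
    #include <cstdint>

    // Sketch only, not HotSpot source.
    static bool frame_fits(uintptr_t sp, uintptr_t stack_base, size_t stack_size,
                           size_t locals_bytes, size_t overhead_bytes,
                           size_t guard_bytes, size_t page_size) {
      // Frames within one page are covered by the normal guard-page mechanism.
      if (locals_bytes + overhead_bytes <= page_size)
        return true;
      // Lowest address the new frame may reach: stack bottom plus the
      // guard/shadow zones plus the space this frame itself needs.
      uintptr_t limit = (stack_base - stack_size) + guard_bytes
                        + locals_bytes + overhead_bytes;
      return sp > limit;               // cmpptr(rsp, rax); jcc(above, ...)
    }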
-
-// Allocate monitor and lock method (asm interpreter)
-//
-// Args:
-//      rbx: Method*
-//      r14: locals
-//
-// Kills:
-//      rax
-//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
-//      rscratch1, rscratch2 (scratch regs)
-void TemplateInterpreterGenerator::lock_method() {
-  // synchronize method
-  const Address access_flags(rbx, Method::access_flags_offset());
-  const Address monitor_block_top(
-        rbp,
-        frame::interpreter_frame_monitor_block_top_offset * wordSize);
-  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
-
-#ifdef ASSERT
-  {
-    Label L;
-    __ movl(rax, access_flags);
-    __ testl(rax, JVM_ACC_SYNCHRONIZED);
-    __ jcc(Assembler::notZero, L);
-    __ stop("method doesn't need synchronization");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  // get synchronization object
-  {
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    Label done;
-    __ movl(rax, access_flags);
-    __ testl(rax, JVM_ACC_STATIC);
-    // get receiver (assume this is frequent case)
-    __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
-    __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, Method::const_offset()));
-    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
-    __ movptr(rax, Address(rax,
-                           ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(rax, Address(rax, mirror_offset));
-
-#ifdef ASSERT
-    {
-      Label L;
-      __ testptr(rax, rax);
-      __ jcc(Assembler::notZero, L);
-      __ stop("synchronization object is NULL");
-      __ bind(L);
-    }
-#endif // ASSERT
-
-    __ bind(done);
-  }
-
-  // add space for monitor & lock
-  __ subptr(rsp, entry_size); // add space for a monitor entry
-  __ movptr(monitor_block_top, rsp);  // set new monitor block top
-  // store object
-  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
-  __ movptr(c_rarg1, rsp); // object address
-  __ lock_object(c_rarg1);
-}
-
-// Generate a fixed interpreter frame. This is identical setup for
-// interpreted methods and for native methods hence the shared code.
-//
-// Args:
-//      rax: return address
-//      rbx: Method*
-//      r14: pointer to locals
-//      r13: sender sp
-//      rdx: cp cache
-void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
-  // initialize fixed part of activation frame
-  __ push(rax);        // save return address
-  __ enter();          // save old & set new rbp
-  __ push(r13);        // set sender sp
-  __ push((int)NULL_WORD); // leave last_sp as null
-  __ movptr(r13, Address(rbx, Method::const_offset()));      // get ConstMethod*
-  __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase
-  __ push(rbx);        // save Method*
-  if (ProfileInterpreter) {
-    Label method_data_continue;
-    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
-    __ testptr(rdx, rdx);
-    __ jcc(Assembler::zero, method_data_continue);
-    __ addptr(rdx, in_bytes(MethodData::data_offset()));
-    __ bind(method_data_continue);
-    __ push(rdx);      // set the mdp (method data pointer)
-  } else {
-    __ push(0);
-  }
-
-  __ movptr(rdx, Address(rbx, Method::const_offset()));
-  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
-  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
-  __ push(rdx); // set constant pool cache
-  __ push(r14); // set locals pointer
-  if (native_call) {
-    __ push(0); // no bcp
-  } else {
-    __ push(r13); // set bcp
-  }
-  __ push(0); // reserve word for pointer to expression stack bottom
-  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
-}
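
Taken together, the pushes above lay out the fixed frame as follows (a sketch
inferred from this code; offsets are in words relative to the rbp established
by enter()):

    // [ return address            ]  rbp + 1
    // [ saved rbp                 ]  rbp + 0   <- rbp
    // [ sender sp (r13)           ]  rbp - 1
    // [ last_sp (NULL)            ]  rbp - 2
    // [ Method* (rbx)             ]  rbp - 3
    // [ mdp or 0                  ]  rbp - 4
    // [ constant pool cache       ]  rbp - 5
    // [ locals pointer (r14)      ]  rbp - 6
    // [ bcp, or 0 for natives     ]  rbp - 7
    // [ expression stack bottom   ]  rbp - 8   <- rsp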
-
-// End of helpers
-
-// Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  // Code: _aload_0, _getfield, _areturn
-  // parameter size = 1
-  //
-  // The code that gets generated by this routine is split into 2 parts:
-  //    1. The "intrinsified" code for G1 (or any SATB based GC),
-  //    2. The slow path - which is an expansion of the regular method entry.
-  //
-  // Notes:-
-  // * In the G1 code we do not check whether we need to block for
-  //   a safepoint. If G1 is enabled then we must execute the specialized
-  //   code for Reference.get (except when the Reference object is null)
-  //   so that we can log the value in the referent field with an SATB
-  //   update buffer.
-  //   If the code for the getfield template is modified so that the
-  //   G1 pre-barrier code is executed when the current method is
-  //   Reference.get() then going through the normal method entry
-  //   will be fine.
-  // * The G1 code can, however, check the receiver object (the instance
-  //   of java.lang.Reference) and jump to the slow path if null. If the
-  //   Reference object is null then we obviously cannot fetch the referent
-  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
-  //   regular method entry code to generate the NPE.
-  //
-  // rbx: Method*
-
-  // r13: senderSP must be preserved for slow path, set SP to it on fast path
-
-  address entry = __ pc();
-
-  const int referent_offset = java_lang_ref_Reference::referent_offset;
-  guarantee(referent_offset > 0, "referent offset not initialized");
-
-  if (UseG1GC) {
-    Label slow_path;
-    // rbx: method
-
-    // Check if local 0 != NULL
-    // If the receiver is null then it is OK to jump to the slow path.
-    __ movptr(rax, Address(rsp, wordSize));
-
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, slow_path);
-
-    // rax: local 0
-    // rbx: method (but can be used as scratch now)
-    // rdx: scratch
-    // rdi: scratch
-
-    // Load the value of the referent field.
-    const Address field_address(rax, referent_offset);
-    __ load_heap_oop(rax, field_address);
-
-    // Generate the G1 pre-barrier code to log the value of
-    // the referent field in an SATB buffer.
-    __ g1_write_barrier_pre(noreg /* obj */,
-                            rax /* pre_val */,
-                            r15_thread /* thread */,
-                            rbx /* tmp */,
-                            true /* tosca_live */,
-                            true /* expand_call */);
-
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, r13);           // set sp to sender sp
-    __ jmp(rdi);
-    __ ret(0);
-
-    // generate a vanilla interpreter entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
-    return entry;
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the accessor entry point
-  // Reference.get is an accessor
-  return NULL;
-}
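
Conceptually, the G1 fast path above reduces to this C++ sketch (illustrative
only; load_referent and satb_enqueue stand in for the load_heap_oop of the
referent field and for g1_write_barrier_pre):

    #include <cstddef>

    struct Object;                       // stand-in for a Java heap object
    typedef Object* oop;

    extern oop  load_referent(oop receiver);  // load_heap_oop at referent_offset
    extern void satb_enqueue(oop pre_value);  // G1 SATB pre-barrier logging

    // Sketch of the generated fast path, not HotSpot source.
    oop reference_get_fast_path(oop receiver) {
      if (receiver == NULL)
        return NULL;   // real code branches to the vanilla slow-path entry here
      oop referent = load_referent(receiver);
      satb_enqueue(referent);  // log the value so SATB marking cannot lose it
      return referent;         // _areturn: pop return address, restore sender sp
    }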
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.update(int crc, int b)
- */
-address InterpreterGenerator::generate_CRC32_update_entry() {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rbx,: Method*
-    // r13: senderSP must be preserved for slow path, set SP to it on fast path
-    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
-    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // We don't generate local frame and don't align stack because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = rax;  // crc
-    const Register val = c_rarg0;  // source java byte value
-    const Register tbl = c_rarg1;  // scratch
-
-    // Arguments are reversed on java expression stack
-    __ movl(val, Address(rsp,   wordSize)); // byte value
-    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
-
-    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
-    __ notl(crc); // ~crc
-    __ update_byte_crc32(crc, val, tbl);
-    __ notl(crc); // ~crc
-    // result in rax
-
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, r13);           // set sp to sender sp
-    __ jmp(rdi);
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
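
The notl / update_byte_crc32 / notl sequence is the standard table-driven
CRC-32 byte step. An equivalent C++ sketch, assuming a precomputed zlib-style
table like the one StubRoutines::crc_table_addr() points at:

    #include <cstdint>

    extern const uint32_t crc_table[256];   // assumed: standard CRC-32 table

    // Equivalent of: notl(crc); update_byte_crc32(crc, val, tbl); notl(crc)
    static uint32_t crc32_update(uint32_t crc, uint8_t b) {
      crc = ~crc;                                       // notl(crc)
      crc = crc_table[(crc ^ b) & 0xff] ^ (crc >> 8);   // one table-driven step
      return ~crc;                                      // notl(crc)
    }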
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
- *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
- */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rbx,: Method*
-    // r13: senderSP must be preserved for slow path, set SP to it on fast path
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // We don't generate local frame and don't align stack because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = c_rarg0;  // crc
-    const Register buf = c_rarg1;  // source java byte array address
-    const Register len = c_rarg2;  // length
-    const Register off = len;      // offset (never overlaps with 'len')
-
-    // Arguments are reversed on java expression stack
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
-      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
-      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
-      __ addq(buf, off); // + offset
-      __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
-    } else {
-      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
-      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
-      __ addq(buf, off); // + offset
-      __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
-    }
-    // Can now load 'len' since we're finished with 'off'
-    __ movl(len, Address(rsp, wordSize)); // Length
-
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
-    // result in rax
-
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, r13);           // set sp to sender sp
-    __ jmp(rdi);
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
- *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
- */
-address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32CIntrinsics) {
-    address entry = __ pc();
-    // Load parameters
-    const Register crc = c_rarg0;  // crc
-    const Register buf = c_rarg1;  // source java byte array address
-    const Register len = c_rarg2;
-    const Register off = c_rarg3;  // offset
-    const Register end = len;
-
-    // Arguments are reversed on java expression stack
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
-      __ movptr(buf, Address(rsp, 3 * wordSize)); // long buf
-      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
-      __ addq(buf, off); // + offset
-      __ movl(crc, Address(rsp, 5 * wordSize)); // Initial CRC
-      // Note on 5 * wordSize vs. 4 * wordSize:
-      // *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
-      //                                                   4         2,3          1        0
-      // end starts at SP + 8
-      // The Java(R) Virtual Machine Specification Java SE 7 Edition
-      // 4.10.2.3. Values of Types long and double
-      //    "When calculating operand stack length, values of type long and double have length two."
-    } else {
-      __ movptr(buf, Address(rsp, 3 * wordSize)); // byte[] array
-      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
-      __ addq(buf, off); // + offset
-      __ movl(crc, Address(rsp, 4 * wordSize)); // Initial CRC
-    }
-    __ movl(end, Address(rsp, wordSize)); // end
-    __ subl(end, off); // end - off
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
-    // result in rax
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, r13);           // set sp to sender sp
-    __ jmp(rdi);
-
-    return entry;
-  }
-
-  return NULL;
-}
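
The 5 * wordSize vs. 4 * wordSize distinction follows from long occupying two
operand-stack slots. Schematically, the slot layout read above is (slot 0 at
the top of the expression stack, one wordSize per slot, rsp + 0 holding the
return address):

    // updateDirectByteBuffer(int crc, long address, int off, int end):
    //   slot 0   : end        rsp + 1*wordSize
    //   slot 1   : off        rsp + 2*wordSize
    //   slots 2,3: address    rsp + 3*wordSize   (long: two slots)
    //   slot 4   : crc        rsp + 5*wordSize
    // updateBytes(int crc, byte[] b, int off, int end):
    //   slot 0   : end        rsp + 1*wordSize
    //   slot 1   : off        rsp + 2*wordSize
    //   slot 2   : b          rsp + 3*wordSize
    //   slot 3   : crc        rsp + 4*wordSize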
-
-// Interpreter stub for calling a native method. (asm interpreter)
-// This sets up a somewhat different looking stack for calling the
-// native method than the typical interpreter frame setup.
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // rbx: Method*
-  // r13: sender sp
-
-  address entry_point = __ pc();
-
-  const Address constMethod       (rbx, Method::const_offset());
-  const Address access_flags      (rbx, Method::access_flags_offset());
-  const Address size_of_parameters(rcx, ConstMethod::
-                                        size_of_parameters_offset());
-
-
-  // get parameter size (always needed)
-  __ movptr(rcx, constMethod);
-  __ load_unsigned_short(rcx, size_of_parameters);
-
-  // native calls don't need the stack size check since they have no
-  // expression stack, the arguments are already on the stack, and
-  // we only add a handful of words to the stack
-
-  // rbx: Method*
-  // rcx: size of parameters
-  // r13: sender sp
-  __ pop(rax);                                       // get return address
-
-  // for natives the size of locals is zero
-
-  // compute beginning of parameters (r14)
-  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
-
-  // add 2 zero-initialized slots for native calls
-  // initialize result_handler slot
-  __ push((int) NULL_WORD);
-  // slot for oop temp
-  // (static native method holder mirror/jni oop result)
-  __ push((int) NULL_WORD);
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(true);
-
-  // make sure method is native & not abstract
-#ifdef ASSERT
-  __ movl(rax, access_flags);
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_NATIVE);
-    __ jcc(Assembler::notZero, L);
-    __ stop("tried to execute non-native method as native");
-    __ bind(L);
-  }
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_ABSTRACT);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of synchronized methods which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation will
-  // check this flag.
-
-  const Address do_not_unlock_if_synchronized(r15_thread,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ movbool(do_not_unlock_if_synchronized, true);
-
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-  }
-
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(true);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ movbool(do_not_unlock_if_synchronized, false);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so method is not locked if overflows.
-  if (synchronized) {
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-    {
-      Label L;
-      __ movl(rax, access_flags);
-      __ testl(rax, JVM_ACC_SYNCHRONIZED);
-      __ jcc(Assembler::zero, L);
-      __ stop("method needs synchronization");
-      __ bind(L);
-    }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  {
-    Label L;
-    const Address monitor_block_top(rbp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ movptr(rax, monitor_block_top);
-    __ cmpptr(rax, rsp);
-    __ jcc(Assembler::equal, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti support
-  __ notify_method_entry();
-
-  // work registers
-  const Register method = rbx;
-  const Register t      = r11;
-
-  // allocate space for parameters
-  __ get_method(method);
-  __ movptr(t, Address(method, Method::const_offset()));
-  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
-  __ shll(t, Interpreter::logStackElementSize);
-
-  __ subptr(rsp, t);
-  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
-
-  // get signature handler
-  {
-    Label L;
-    __ movptr(t, Address(method, Method::signature_handler_offset()));
-    __ testptr(t, t);
-    __ jcc(Assembler::notZero, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::prepare_native_call),
-               method);
-    __ get_method(method);
-    __ movptr(t, Address(method, Method::signature_handler_offset()));
-    __ bind(L);
-  }
-
-  // call signature handler
-  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14,
-         "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
-         "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
-          "adjust this code");
-
-  // The generated handlers do not touch RBX (the method oop).
-  // However, large signatures cannot be cached and are generated
-  // each time here.  The slow-path generator can do a GC on return,
-  // so we must reload it after the call.
-  __ call(t);
-  __ get_method(method);        // slow path can do a GC, reload RBX
-
-
-  // result handler is in rax
-  // set result handler
-  __ movptr(Address(rbp,
-                    (frame::interpreter_frame_result_handler_offset) * wordSize),
-            rax);
-
-  // pass mirror handle if static call
-  {
-    Label L;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ movl(t, Address(method, Method::access_flags_offset()));
-    __ testl(t, JVM_ACC_STATIC);
-    __ jcc(Assembler::zero, L);
-    // get mirror
-    __ movptr(t, Address(method, Method::const_offset()));
-    __ movptr(t, Address(t, ConstMethod::constants_offset()));
-    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(t, Address(t, mirror_offset));
-    // copy mirror into activation frame
-    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
-            t);
-    // pass handle to mirror
-    __ lea(c_rarg1,
-           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
-    __ bind(L);
-  }
-
-  // get native function entry point
-  {
-    Label L;
-    __ movptr(rax, Address(method, Method::native_function_offset()));
-    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
-    __ movptr(rscratch2, unsatisfied.addr());
-    __ cmpptr(rax, rscratch2);
-    __ jcc(Assembler::notEqual, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::prepare_native_call),
-               method);
-    __ get_method(method);
-    __ movptr(rax, Address(method, Method::native_function_offset()));
-    __ bind(L);
-  }
-
-  // pass JNIEnv
-  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
-
-  // It is enough that the pc() points into the right code
-  // segment. It does not have to be the correct return pc.
-  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
-
-  // change thread state
-#ifdef ASSERT
-  {
-    Label L;
-    __ movl(t, Address(r15_thread, JavaThread::thread_state_offset()));
-    __ cmpl(t, _thread_in_Java);
-    __ jcc(Assembler::equal, L);
-    __ stop("Wrong thread state in native stub");
-    __ bind(L);
-  }
-#endif
-
-  // Change state to native
-
-  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
-          _thread_in_native);
-
-  // Call the native method.
-  __ call(rax);
-  // result potentially in rax or xmm0
-
-  // Verify or restore cpu control state after JNI call
-  __ restore_cpu_control_state_after_jni();
-
-  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
-  // in order to extract the result of a method call. If the order of these
-  // pushes changes or anything else is added to the stack then the code in
-  // interpreter_frame_result must also change.
-
-  __ push(dtos);
-  __ push(ltos);
-
-  // change thread state
-  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
-          _thread_in_native_trans);
-
-  if (os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ membar(Assembler::Membar_mask_bits(
-           Assembler::LoadLoad | Assembler::LoadStore |
-           Assembler::StoreLoad | Assembler::StoreStore));
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(r15_thread, rscratch2);
-    }
-  }
-
-  // check for safepoint operation in progress and/or pending suspend requests
-  {
-    Label Continue;
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-
-    Label L;
-    __ jcc(Assembler::notEqual, L);
-    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
-    __ jcc(Assembler::equal, Continue);
-    __ bind(L);
-
-    // Don't use call_VM as it will see a possible pending exception
-    // and forward it and never return here, preventing us from
-    // clearing _last_native_pc down below.  Also can't use
-    // call_VM_leaf either as it will check to see if r13 & r14 are
-    // preserved and correspond to the bcp/locals pointers. So we do a
-    // runtime call by hand.
-    //
-    __ mov(c_rarg0, r15_thread);
-    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
-    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-    __ andptr(rsp, -16); // align stack as required by ABI
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
-    __ mov(rsp, r12); // restore sp
-    __ reinit_heapbase();
-    __ bind(Continue);
-  }
-
-  // change thread state
-  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
-
-  // reset_last_Java_frame
-  __ reset_last_Java_frame(true, true);
-
-  // reset handle block
-  __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
-  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
-
-  // If result is an oop unbox and store it in frame where gc will see it
-  // and result handler will pick it up
-
-  {
-    Label no_oop, store_result;
-    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
-    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
-    __ jcc(Assembler::notEqual, no_oop);
-    // retrieve result
-    __ pop(ltos);
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, store_result);
-    __ movptr(rax, Address(rax, 0));
-    __ bind(store_result);
-    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
-    // keep stack depth as expected by pushing an oop which will eventually be discarded
-    __ push(ltos);
-    __ bind(no_oop);
-  }
-
-
-  {
-    Label no_reguard;
-    __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
-            JavaThread::stack_guard_yellow_disabled);
-    __ jcc(Assembler::notEqual, no_reguard);
-
-    __ pusha(); // XXX only save smashed registers
-    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
-    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-    __ andptr(rsp, -16); // align stack as required by ABI
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
-    __ mov(rsp, r12); // restore sp
-    __ popa(); // XXX only restore smashed registers
-    __ reinit_heapbase();
-
-    __ bind(no_reguard);
-  }
-
-
-  // The method register is junk from after the thread_in_native transition
-  // until here.  Also can't call_VM until the bcp has been
-  // restored.  Need bcp for throwing exception below so get it now.
-  __ get_method(method);
-
-  // restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
-  // r13 == code_base()
-  __ movptr(r13, Address(method, Method::const_offset()));   // get ConstMethod*
-  __ lea(r13, Address(r13, ConstMethod::codes_offset()));    // get codebase
-  // handle exceptions (exception handling will handle unlocking!)
-  {
-    Label L;
-    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
-    __ jcc(Assembler::zero, L);
-    // Note: At some point we may want to unify this with the code
-    // used in call_VM_base(); i.e., we should use the
-    // StubRoutines::forward_exception code. For now this doesn't work
-    // here because the rsp is not correctly set at this point.
-    __ MacroAssembler::call_VM(noreg,
-                               CAST_FROM_FN_PTR(address,
-                               InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-
-  // do unlocking if necessary
-  {
-    Label L;
-    __ movl(t, Address(method, Method::access_flags_offset()));
-    __ testl(t, JVM_ACC_SYNCHRONIZED);
-    __ jcc(Assembler::zero, L);
-    // the code below should be shared with interpreter macro
-    // assembler implementation
-    {
-      Label unlock;
-      // BasicObjectLock will be first in list, since this is a
-      // synchronized method. However, need to check that the object
-      // has not been unlocked by an explicit monitorexit bytecode.
-      const Address monitor(rbp,
-                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
-                                       wordSize - sizeof(BasicObjectLock)));
-
-      // monitor expected in c_rarg1 for slow unlock path
-      __ lea(c_rarg1, monitor); // address of first monitor
-
-      __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
-      __ testptr(t, t);
-      __ jcc(Assembler::notZero, unlock);
-
-      // Entry already unlocked, need to throw exception
-      __ MacroAssembler::call_VM(noreg,
-                                 CAST_FROM_FN_PTR(address,
-                   InterpreterRuntime::throw_illegal_monitor_state_exception));
-      __ should_not_reach_here();
-
-      __ bind(unlock);
-      __ unlock_object(c_rarg1);
-    }
-    __ bind(L);
-  }
-
-  // jvmti support
-  // Note: This must happen _after_ handling/throwing any exceptions since
-  //       the exception handler code notifies the runtime of method exits
-  //       too. If this happens before, method entry/exit notifications are
-  //       not properly paired (was bug - gri 11/22/99).
-  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
-
-  // restore potential result in edx:eax, call result handler to
-  // restore potential result in ST0 & handle result
-
-  __ pop(ltos);
-  __ pop(dtos);
-
-  __ movptr(t, Address(rbp,
-                       (frame::interpreter_frame_result_handler_offset) * wordSize));
-  __ call(t);
-
-  // remove activation
-  __ movptr(t, Address(rbp,
-                       frame::interpreter_frame_sender_sp_offset *
-                       wordSize)); // get sender sp
-  __ leave();                                // remove frame anchor
-  __ pop(rdi);                               // get return address
-  __ mov(rsp, t);                            // set sp to sender sp
-  __ jmp(rdi);
-
-  if (inc_counter) {
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
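
The thread-state choreography around the native call above follows this
protocol (a C++ sketch, not HotSpot source; the seq_cst fence models the
UseMembar branch, and the serialization page is an alternative to it):

    #include <atomic>

    enum ThreadState { _thread_in_Java, _thread_in_native, _thread_in_native_trans };

    extern std::atomic<int> thread_state;
    extern bool safepoint_or_suspend_pending();              // cmp32 + suspend_flags
    extern void check_special_condition_for_native_trans();  // runtime call above

    // Sketch of the transition protocol around a JNI call (illustrative only).
    void call_native(void (*native_entry)()) {
      thread_state.store(_thread_in_native);
      native_entry();                                  // the JNI call itself
      thread_state.store(_thread_in_native_trans);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // write before read
      if (safepoint_or_suspend_pending())
        check_special_condition_for_native_trans();    // may block at a safepoint
      thread_state.store(_thread_in_Java);
    }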
-
-//
-// Generic interpreted method entry to (asm) interpreter
-//
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // ebx: Method*
-  // r13: sender sp
-  address entry_point = __ pc();
-
-  const Address constMethod(rbx, Method::const_offset());
-  const Address access_flags(rbx, Method::access_flags_offset());
-  const Address size_of_parameters(rdx,
-                                   ConstMethod::size_of_parameters_offset());
-  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
-
-
-  // get parameter size (always needed)
-  __ movptr(rdx, constMethod);
-  __ load_unsigned_short(rcx, size_of_parameters);
-
-  // rbx: Method*
-  // rcx: size of parameters
-  // r13: sender_sp (could differ from sp+wordSize if we were called via c2i )
-
-  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
-  __ subl(rdx, rcx); // rdx = no. of additional locals
-
-  // YYY
-//   __ incrementl(rdx);
-//   __ andl(rdx, -2);
-
-  // see if we've got enough room on the stack for locals plus overhead.
-  generate_stack_overflow_check();
-
-  // get return address
-  __ pop(rax);
-
-  // compute beginning of parameters (r14)
-  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
-
-  // rdx - # of additional locals
-  // allocate space for locals
-  // explicitly initialize locals
-  {
-    Label exit, loop;
-    __ testl(rdx, rdx);
-    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
-    __ bind(loop);
-    __ push((int) NULL_WORD); // initialize local variables
-    __ decrementl(rdx); // until everything initialized
-    __ jcc(Assembler::greater, loop);
-    __ bind(exit);
-  }
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(false);
-
-  // make sure method is not native & not abstract
-#ifdef ASSERT
-  __ movl(rax, access_flags);
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_NATIVE);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute native method as non-native");
-    __ bind(L);
-  }
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_ABSTRACT);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception
-  // handler would try to exit the monitor of synchronized methods
-  // which hasn't been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation
-  // will check this flag.
-
-  const Address do_not_unlock_if_synchronized(r15_thread,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ movbool(do_not_unlock_if_synchronized, true);
-
-  __ profile_parameters_type(rax, rcx, rdx);
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  Label profile_method;
-  Label profile_method_continue;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow,
-                          &profile_method,
-                          &profile_method_continue);
-    if (ProfileInterpreter) {
-      __ bind(profile_method_continue);
-    }
-  }
-
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  // bang the stack shadow pages
-  bang_stack_shadow_pages(false);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ movbool(do_not_unlock_if_synchronized, false);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so method is not locked if overflows.
-  if (synchronized) {
-    // Allocate monitor and lock method
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-    {
-      Label L;
-      __ movl(rax, access_flags);
-      __ testl(rax, JVM_ACC_SYNCHRONIZED);
-      __ jcc(Assembler::zero, L);
-      __ stop("method needs synchronization");
-      __ bind(L);
-    }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  {
-    Label L;
-    const Address monitor_block_top(rbp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ movptr(rax, monitor_block_top);
-    __ cmpptr(rax, rsp);
-    __ jcc(Assembler::equal, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti support
-  __ notify_method_entry();
-
-  __ dispatch_next(vtos);
-
-  // invocation counter overflow
-  if (inc_counter) {
-    if (ProfileInterpreter) {
-      // We have decided to profile this method in the interpreter
-      __ bind(profile_method);
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-      __ set_method_data_pointer_for_bcp();
-      __ get_method(rbx);
-      __ jmp(profile_method_continue);
-    }
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
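
Stripped of register detail, the entry sequence above runs (a sketch of the
control flow only):

    // load parameter/local counts from ConstMethod
    // generate_stack_overflow_check()
    // pop return address; compute locals base (r14)
    // zero the additional (non-parameter) locals
    // generate_fixed_frame(false)
    // set _do_not_unlock_if_synchronized = true
    // increment invocation counter (may branch to overflow/profile)
    // bang the stack shadow pages; clear the flag
    // lock_method() if synchronized
    // notify_method_entry(); dispatch_next(vtos)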
-
-
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  switch (method_kind(m)) {
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     :
-      return false;
-    default:
-      return true;
-  }
-}
-
-// How much stack a method activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-  const int entry_size = frame::interpreter_frame_monitor_size();
-
-  // total overhead size: entry_size + (saved rbp through expr stack
-  // bottom).  be sure to change this if you add/subtract anything
-  // to/from the overhead area
-  const int overhead_size =
-    -(frame::interpreter_frame_initial_sp_offset) + entry_size;
-
-  const int stub_code = frame::entry_frame_after_call_words;
-  const int method_stack = (method->max_locals() + method->max_stack()) *
-                           Interpreter::stackElementWords;
-  return (overhead_size + method_stack + stub_code);
-}
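
With placeholder numbers, the computation works out as follows (a sketch; the
constants here are illustrative, not the real frame offsets):

    // Placeholder constants, for illustration only.
    const int kOverheadWords = 10;   // overhead_size
    const int kStubCodeWords = 20;   // frame::entry_frame_after_call_words
    const int kElementWords  = 1;    // Interpreter::stackElementWords (64-bit)

    static int size_top_activation_words(int max_locals, int max_stack) {
      return kOverheadWords
           + (max_locals + max_stack) * kElementWords
           + kStubCodeWords;
    }
    // e.g. max_locals = 3, max_stack = 5  =>  10 + (3 + 5) * 1 + 20 = 38 words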
-
-//-----------------------------------------------------------------------------
-// Exceptions
-
-void TemplateInterpreterGenerator::generate_throw_exception() {
-  // Entry point in previous activation (i.e., if the caller was
-  // interpreted)
-  Interpreter::_rethrow_exception_entry = __ pc();
-  // Restore sp to interpreter_frame_last_sp even though we are going
-  // to empty the expression stack for the exception processing.
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
-  // rax: exception
-  // rdx: return address/pc that threw exception
-  __ restore_bcp();    // r13 points to call/send
-  __ restore_locals();
-  __ reinit_heapbase();  // restore r12 as heapbase.
-  // Entry point for exceptions thrown within interpreter code
-  Interpreter::_throw_exception_entry = __ pc();
-  // expression stack is undefined here
-  // rax: exception
-  // r13: exception bcp
-  __ verify_oop(rax);
-  __ mov(c_rarg1, rax);
-
-  // expression stack must be empty before entering the VM in case of
-  // an exception
-  __ empty_expression_stack();
-  // find exception handler address and preserve exception oop
-  __ call_VM(rdx,
-             CAST_FROM_FN_PTR(address,
-                          InterpreterRuntime::exception_handler_for_exception),
-             c_rarg1);
-  // rax: exception handler entry point
-  // rdx: preserved exception oop
-  // r13: bcp for exception handler
-  __ push_ptr(rdx); // push exception which is now the only value on the stack
-  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
-
-  // If the exception is not handled in the current frame the frame is
-  // removed and the exception is rethrown (i.e. exception
-  // continuation is _rethrow_exception).
-  //
-  // Note: At this point the bci is still the bci for the instruction
-  // which caused the exception and the expression stack is
-  // empty. Thus, for any VM calls at this point, GC will find a legal
-  // oop map (with empty expression stack).
-
-  // In current activation
-  // tos: exception
-  // esi: exception bcp
-
-  //
-  // JVMTI PopFrame support
-  //
-
-  Interpreter::_remove_activation_preserving_args_entry = __ pc();
-  __ empty_expression_stack();
-  // Set the popframe_processing bit in pending_popframe_condition
-  // indicating that we are currently handling popframe, so that
-  // call_VMs that may happen later do not trigger new popframe
-  // handling cycles.
-  __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
-  __ orl(rdx, JavaThread::popframe_processing_bit);
-  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);
-
-  {
-    // Check to see whether we are returning to a deoptimized frame.
-    // (The PopFrame call ensures that the caller of the popped frame is
-    // either interpreted or compiled and deoptimizes it if compiled.)
-    // In this case, we can't call dispatch_next() after the frame is
-    // popped, but instead must save the incoming arguments and restore
-    // them after deoptimization has occurred.
-    //
-    // Note that we don't compare the return PC against the
-    // deoptimization blob's unpack entry because of the presence of
-    // adapter frames in C2.
-    Label caller_not_deoptimized;
-    __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                               InterpreterRuntime::interpreter_contains), c_rarg1);
-    __ testl(rax, rax);
-    __ jcc(Assembler::notZero, caller_not_deoptimized);
-
-    // Compute size of arguments for saving when returning to
-    // deoptimized caller
-    __ get_method(rax);
-    __ movptr(rax, Address(rax, Method::const_offset()));
-    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
-                                                size_of_parameters_offset())));
-    __ shll(rax, Interpreter::logStackElementSize);
-    __ restore_locals(); // XXX do we need this?
-    __ subptr(r14, rax);
-    __ addptr(r14, wordSize);
-    // Save these arguments
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                                           Deoptimization::
-                                           popframe_preserve_args),
-                          r15_thread, rax, r14);
-
-    __ remove_activation(vtos, rdx,
-                         /* throw_monitor_exception */ false,
-                         /* install_monitor_exception */ false,
-                         /* notify_jvmdi */ false);
-
-    // Inform deoptimization that it is responsible for restoring
-    // these arguments
-    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
-            JavaThread::popframe_force_deopt_reexecution_bit);
-
-    // Continue in deoptimization handler
-    __ jmp(rdx);
-
-    __ bind(caller_not_deoptimized);
-  }
-
-  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
-                       /* throw_monitor_exception */ false,
-                       /* install_monitor_exception */ false,
-                       /* notify_jvmdi */ false);
-
-  // Finish with popframe handling
-  // A previous I2C followed by a deoptimization might have moved the
-  // outgoing arguments further up the stack. PopFrame expects the
-  // mutations to those outgoing arguments to be preserved and other
-  // constraints basically require this frame to look exactly as
-  // though it had previously invoked an interpreted activation with
-  // no space between the top of the expression stack (current
-  // last_sp) and the top of stack. Rather than force deopt to
-  // maintain this kind of invariant all the time we call a small
-  // fixup routine to move the mutated arguments onto the top of our
-  // expression stack if necessary.
-  __ mov(c_rarg1, rsp);
-  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // PC must point into interpreter here
-  __ set_last_Java_frame(noreg, rbp, __ pc());
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
-  __ reset_last_Java_frame(true, true);
-  // Restore the last_sp and null it out
-  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
-
-  __ restore_bcp();  // XXX do we need this?
-  __ restore_locals(); // XXX do we need this?
-  // The method data pointer was incremented already during
-  // call profiling. We have to restore the mdp for the current bcp.
-  if (ProfileInterpreter) {
-    __ set_method_data_pointer_for_bcp();
-  }
-
-  // Clear the popframe condition flag
-  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
-          JavaThread::popframe_inactive);
-
-#if INCLUDE_JVMTI
-  {
-    Label L_done;
-    const Register local0 = r14;
-
-    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
-    __ jcc(Assembler::notEqual, L_done);
-
-    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
-    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
-
-    __ get_method(rdx);
-    __ movptr(rax, Address(local0, 0));
-    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);
-
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, L_done);
-
-    __ movptr(Address(rbx, 0), rax);
-    __ bind(L_done);
-  }
-#endif // INCLUDE_JVMTI
-
-  __ dispatch_next(vtos);
-  // end of PopFrame support
-
-  Interpreter::_remove_activation_entry = __ pc();
-
-  // preserve exception over this code sequence
-  __ pop_ptr(rax);
-  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
-  // remove the activation (without doing throws on illegalMonitorExceptions)
-  __ remove_activation(vtos, rdx, false, true, false);
-  // restore exception
-  __ get_vm_result(rax, r15_thread);
-
-  // In between activations - previous activation type unknown yet
-  // compute continuation point - the continuation point expects the
-  // following registers set up:
-  //
-  // rax: exception
-  // rdx: return address/pc that threw exception
-  // rsp: expression stack of caller
-  // rbp: ebp of caller
-  __ push(rax);                                  // save exception
-  __ push(rdx);                                  // save return address
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                          SharedRuntime::exception_handler_for_return_address),
-                        r15_thread, rdx);
-  __ mov(rbx, rax);                              // save exception handler
-  __ pop(rdx);                                   // restore return address
-  __ pop(rax);                                   // restore exception
-  // Note that an "issuing PC" is actually the next PC after the call
-  __ jmp(rbx);                                   // jump to exception
-                                                 // handler of caller
-}
-
-
-//
-// JVMTI ForceEarlyReturn support
-//
-address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
-  address entry = __ pc();
-
-  __ restore_bcp();
-  __ restore_locals();
-  __ empty_expression_stack();
-  __ load_earlyret_value(state);
-
-  __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
-  Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());
-
-  // Clear the earlyret state
-  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
-
-  __ remove_activation(state, rsi,
-                       false, /* throw_monitor_exception */
-                       false, /* install_monitor_exception */
-                       true); /* notify_jvmdi */
-  __ jmp(rsi);
-
-  return entry;
-} // end of ForceEarlyReturn support
-
-
-//-----------------------------------------------------------------------------
-// Helper for vtos entry point generation
-
-void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
-                                                         address& bep,
-                                                         address& cep,
-                                                         address& sep,
-                                                         address& aep,
-                                                         address& iep,
-                                                         address& lep,
-                                                         address& fep,
-                                                         address& dep,
-                                                         address& vep) {
-  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
-  Label L;
-  aep = __ pc();  __ push_ptr();   __ jmp(L);
-  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
-  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
-  lep = __ pc();  __ push_l();     __ jmp(L);
-  bep = cep = sep =
-  iep = __ pc();  __ push_i();
-  vep = __ pc();
-  __ bind(L);
-  generate_and_dispatch(t);
-}
-
-
-//-----------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
-  : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-//-----------------------------------------------------------------------------
-
-// Non-product code
-#ifndef PRODUCT
-address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
-  address entry = __ pc();
-
-  __ push(state);
-  __ push(c_rarg0);
-  __ push(c_rarg1);
-  __ push(c_rarg2);
-  __ push(c_rarg3);
-  __ mov(c_rarg2, rax);  // Pass itos
-#ifdef _WIN64
-  __ movflt(xmm3, xmm0); // Pass ftos
-#endif
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
-             c_rarg1, c_rarg2, c_rarg3);
-  __ pop(c_rarg3);
-  __ pop(c_rarg2);
-  __ pop(c_rarg1);
-  __ pop(c_rarg0);
-  __ pop(state);
-  __ ret(0);                                   // return from result handler
-
-  return entry;
-}
-
-void TemplateInterpreterGenerator::count_bytecode() {
-  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
-}
-
-void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
-  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
-}
-
-void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
-  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
-  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
-  __ orl(rbx,
-         ((int) t->bytecode()) <<
-         BytecodePairHistogram::log2_number_of_codes);
-  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
-  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
-  __ incrementl(Address(rscratch1, rbx, Address::times_4));
-}
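
The shrl/orl pair maintains a sliding window over the last two bytecodes; the
bookkeeping amounts to this C++ sketch (assuming log2_number_of_codes == 8):

    #include <cstdint>

    enum { LOG2_N = 8, N = 1 << LOG2_N };   // assumes bytecodes fit in 8 bits

    static uint32_t pair_index = 0;         // BytecodePairHistogram::_index
    static uint32_t pair_counters[N * N];   // BytecodePairHistogram::_counters

    // Record that 'bytecode' follows the previously recorded one.
    static void record_bytecode_pair(uint32_t bytecode) {
      pair_index >>= LOG2_N;               // drop the older bytecode     (shrl)
      pair_index |= bytecode << LOG2_N;    // new bytecode into high bits (orl)
      pair_counters[pair_index]++;         // bump this pair's counter
    }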
-
-
-void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
-  // Call a little run-time stub to avoid blow-up for each bytecode.
-  // The run-time stub saves the right registers, depending on
-  // the tosca in-state for the given template.
-
-  assert(Interpreter::trace_code(t->tos_in()) != NULL,
-         "entry must have been generated");
-  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
-  __ andptr(rsp, -16); // align stack as required by ABI
-  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
-  __ mov(rsp, r12); // restore sp
-  __ reinit_heapbase();
-}
-
-
-void TemplateInterpreterGenerator::stop_interpreter_at() {
-  Label L;
-  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
-           StopInterpreterAt);
-  __ jcc(Assembler::notEqual, L);
-  __ int3();
-  __ bind(L);
-}
-#endif // !PRODUCT
-#endif // ! CC_INTERP
--- a/hotspot/src/cpu/x86/vm/templateTable_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -43,8 +43,8 @@
 #define __ _masm->
 
 // Global Register Names
-Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
-Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
+static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
+static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
 
 // Platform-dependent initialization
 void TemplateTable::pd_initialize() {
--- a/hotspot/src/cpu/zero/vm/interpreter_zero.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/cpu/zero/vm/interpreter_zero.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -38,7 +38,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -74,7 +73,3 @@
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
   return true;
 }
-
-void Deoptimization::unwind_callee_save_values(frame* f,
-                                               vframeArray* vframe_array) {
-}
--- a/hotspot/src/os/aix/vm/globals_aix.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/globals_aix.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -29,37 +29,61 @@
 //
 // Defines Aix specific flags. They are not available on other platforms.
 //
+// (Please keep the switches sorted alphabetically.)
 #define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
                                                                                     \
+  /* Whether to allow the VM to run if EXTSHM=ON. EXTSHM is an environment */       \
+  /* variable used on AIX to activate certain hacks which allow more shm segments */\
+  /* for 32bit processes. For 64bit processes, it is pointless and may have */      \
+  /* harmful side effects (e.g. for some reason prevents allocation of 64k pages  */\
+  /* via shmctl). */                                                                \
+  /* Per default we quit with an error if that variable is found; for certain */    \
+  /* customer scenarios, we may want to be able to run despite that variable. */    \
+  product(bool, AllowExtshm, false,                                                 \
+          "Allow VM to run with EXTSHM=ON.")                                        \
+                                                                                    \
+  product(intx, AttachListenerTimeout, 1000,                                        \
+          "Timeout in ms the attach listener waits for a request")                  \
+          range(0, 2147483)                                                         \
+                                                                                    \
+  /*  Maximum expected size of the data segment. That correlates with the      */   \
+  /*  maximum C Heap consumption we expect.                                    */   \
+  /*  We need to know this because we need to leave "breathing space" for the  */   \
+  /*  data segment when placing the java heap. If that space is too small, we  */   \
+  /*  reduce our chance of getting a low heap address (needed for compressed   */   \
+  /*  Oops).                                                                   */   \
+  product(uintx, MaxExpectedDataSegmentSize, (SIZE_4G * 2),                         \
+          "Maximum expected Data Segment Size.")                                    \
+                                                                                    \
+  /* Use optimized addresses for the polling page.                             */   \
+  product(bool, OptimizePollingPageLocation, true,                                  \
+             "Optimize the location of the polling page used for Safepoints")       \
+                                                                                    \
   /* Use 64K pages for virtual memory (shmat). */                                   \
   product(bool, Use64KPages, true,                                                  \
           "Use 64K pages if available.")                                            \
                                                                                     \
-  /* If UseLargePages == true allow or deny usage of 16M pages. 16M pages are  */   \
-  /* a scarce resource and there may be situations where we do not want the VM */   \
-  /* to run with 16M pages. (Will fall back to 64K pages).                     */   \
-  product_pd(bool, Use16MPages,                                                     \
-             "Use 16M pages if available.")                                         \
+  /*  If VM uses 64K paged memory (shmat) for virtual memory: threshold below  */   \
+  /*  which virtual memory allocations are done with 4K memory (mmap). This is */   \
+  /*  mainly for test purposes.                                                */   \
+  develop(uintx, Use64KPagesThreshold, 0,                                           \
+          "4K/64K page allocation threshold.")                                      \
                                                                                     \
-  /*  use optimized addresses for the polling page, */                              \
-  /* e.g. map it to a special 32-bit address.       */                              \
-  product_pd(bool, OptimizePollingPageLocation,                                     \
-             "Optimize the location of the polling page used for Safepoints")       \
-                                                                                    \
-  product_pd(intx, AttachListenerTimeout,                                           \
-             "Timeout in ms the attach listener waits for a request")               \
-             range(0, 2147483)                                                      \
+  /* Normally AIX commits memory on touch, but sometimes it is helpful to have */   \
+  /* explicit commit behaviour. This flag, if true, causes the VM to touch     */   \
+  /* memory on os::commit_memory() (which normally is a noop).                 */   \
+  product(bool, UseExplicitCommit, false,                                           \
+          "Explicit commit for virtual memory.")                                    \
                                                                                     \
 
-// Per default, do not allow 16M pages. 16M pages have to be switched on specifically.
-define_pd_global(bool, Use16MPages, false);
-define_pd_global(bool, OptimizePollingPageLocation, true);
-define_pd_global(intx, AttachListenerTimeout, 1000);
 
 //
 // Defines Aix-specific default values. The flags are available on all
 // platforms, but they may have different default values on other platforms.
 //
+
+// UseLargePages means nothing, for now, on AIX.
+// Use Use64KPages or Use16MPages instead.
 define_pd_global(bool, UseLargePages, false);
 define_pd_global(bool, UseLargePagesIndividualAllocation, false);
 define_pd_global(bool, UseOSErrorReporting, false);
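
[Editor's note: the RUNTIME_OS_FLAGS table above is an X-macro: each product()/develop() entry is expanded by whatever macro set the includer supplies, so one table can generate variable definitions, the -XX flag registry, and documentation. A minimal sketch of the pattern, with hypothetical names and a simplified signature rather than HotSpot's actual macro plumbing:

  // xmacro_flags_sketch.cpp - illustrative only, not the HotSpot expansion.
  #include <cstdio>

  #define OS_FLAGS(product)                                                \
    product(bool, AllowExtshm, false, "Allow VM to run with EXTSHM=ON.")   \
    product(int,  AttachListenerTimeout, 1000,                             \
            "Timeout in ms the attach listener waits for a request")

  // Expansion 1: define one global variable per flag.
  #define DEFINE_FLAG(type, name, value, doc) type name = value;
  OS_FLAGS(DEFINE_FLAG)
  #undef DEFINE_FLAG

  // Expansion 2: print every flag with its documentation string.
  static void print_flags() {
  #define PRINT_FLAG(type, name, value, doc) std::printf("%-24s %s\n", #name, doc);
    OS_FLAGS(PRINT_FLAG)
  #undef PRINT_FLAG
  }

  int main() { print_flags(); return AllowExtshm ? 1 : 0; }

The real table also threads through develop_pd, diagnostic, range and constraint macros, which this sketch omits.]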
--- a/hotspot/src/os/aix/vm/jvm_aix.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/jvm_aix.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -109,92 +109,3 @@
   return JNI_TRUE;
 JVM_END
 
-/*
-  All the defined signal names for Linux.
-
-  NOTE that not all of these names are accepted by our Java implementation
-
-  Via an existing claim by the VM, sigaction restrictions, or
-  the "rules of Unix" some of these names will be rejected at runtime.
-  For example the VM sets up to handle USR1, sigaction returns EINVAL for
-  STOP, and Linux simply doesn't allow catching of KILL.
-
-  Here are the names currently accepted by a user of sun.misc.Signal with
-  1.4.1 (ignoring potential interaction with use of chaining, etc):
-
-    HUP, INT, TRAP, ABRT, IOT, BUS, USR2, PIPE, ALRM, TERM, STKFLT,
-    CLD, CHLD, CONT, TSTP, TTIN, TTOU, URG, XCPU, XFSZ, VTALRM, PROF,
-    WINCH, POLL, IO, PWR, SYS
-
-*/
-
-struct siglabel {
-  const char *name;
-  int   number;
-};
-
-struct siglabel siglabels[] = {
-  /* derived from /usr/include/bits/signum.h on RH7.2 */
-   "HUP",       SIGHUP,         /* Hangup (POSIX).  */
-  "INT",        SIGINT,         /* Interrupt (ANSI).  */
-  "QUIT",       SIGQUIT,        /* Quit (POSIX).  */
-  "ILL",        SIGILL,         /* Illegal instruction (ANSI).  */
-  "TRAP",       SIGTRAP,        /* Trace trap (POSIX).  */
-  "ABRT",       SIGABRT,        /* Abort (ANSI).  */
-  "IOT",        SIGIOT,         /* IOT trap (4.2 BSD).  */
-  "BUS",        SIGBUS,         /* BUS error (4.2 BSD).  */
-  "FPE",        SIGFPE,         /* Floating-point exception (ANSI).  */
-  "KILL",       SIGKILL,        /* Kill, unblockable (POSIX).  */
-  "USR1",       SIGUSR1,        /* User-defined signal 1 (POSIX).  */
-  "SEGV",       SIGSEGV,        /* Segmentation violation (ANSI).  */
-  "USR2",       SIGUSR2,        /* User-defined signal 2 (POSIX).  */
-  "PIPE",       SIGPIPE,        /* Broken pipe (POSIX).  */
-  "ALRM",       SIGALRM,        /* Alarm clock (POSIX).  */
-  "TERM",       SIGTERM,        /* Termination (ANSI).  */
-#ifdef SIGSTKFLT
-  "STKFLT",     SIGSTKFLT,      /* Stack fault.  */
-#endif
-  "CLD",        SIGCLD,         /* Same as SIGCHLD (System V).  */
-  "CHLD",       SIGCHLD,        /* Child status has changed (POSIX).  */
-  "CONT",       SIGCONT,        /* Continue (POSIX).  */
-  "STOP",       SIGSTOP,        /* Stop, unblockable (POSIX).  */
-  "TSTP",       SIGTSTP,        /* Keyboard stop (POSIX).  */
-  "TTIN",       SIGTTIN,        /* Background read from tty (POSIX).  */
-  "TTOU",       SIGTTOU,        /* Background write to tty (POSIX).  */
-  "URG",        SIGURG,         /* Urgent condition on socket (4.2 BSD).  */
-  "XCPU",       SIGXCPU,        /* CPU limit exceeded (4.2 BSD).  */
-  "XFSZ",       SIGXFSZ,        /* File size limit exceeded (4.2 BSD).  */
-  "DANGER",     SIGDANGER,      /* System crash imminent; free up some page space (AIX). */
-  "VTALRM",     SIGVTALRM,      /* Virtual alarm clock (4.2 BSD).  */
-  "PROF",       SIGPROF,        /* Profiling alarm clock (4.2 BSD).  */
-  "WINCH",      SIGWINCH,       /* Window size change (4.3 BSD, Sun).  */
-  "POLL",       SIGPOLL,        /* Pollable event occurred (System V).  */
-  "IO",         SIGIO,          /* I/O now possible (4.2 BSD).  */
-  "PWR",        SIGPWR,         /* Power failure restart (System V).  */
-#ifdef SIGSYS
-  "SYS",        SIGSYS          /* Bad system call. Only on some Linuxen! */
-#endif
-  };
-
-JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
-
-  /* find and return the named signal's number */
-
-  for(uint i=0; i<ARRAY_SIZE(siglabels); i++)
-    if(!strcmp(name, siglabels[i].name))
-      return siglabels[i].number;
-
-  return -1;
-
-JVM_END
-
-// used by os::exception_name()
-extern bool signal_name(int signo, char* buf, size_t len) {
-  for(uint i = 0; i < ARRAY_SIZE(siglabels); i++) {
-    if (signo == siglabels[i].number) {
-      jio_snprintf(buf, len, "SIG%s", siglabels[i].name);
-      return true;
-    }
-  }
-  return false;
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os/aix/vm/libo4.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// This is only a stub. Will flesh out later when/if we add further support
+// for PASE.
+
+#include "libo4.hpp"
+
+bool libo4::init() { return false; }
+void libo4::cleanup() {}
+bool libo4::get_memory_info (unsigned long long* p_virt_total, unsigned long long* p_real_total,
+  unsigned long long* p_real_free, unsigned long long* p_pgsp_total, unsigned long long* p_pgsp_free) {
+  return false;
+}
+bool libo4::get_load_avg (double* p_avg1, double* p_avg5, double* p_avg15) { return false; }
+bool libo4::realpath (const char* file_name, char* resolved_name, int resolved_name_len) { return false; }
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os/aix/vm/libo4.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// A C++ wrapper around the libo4 porting library. The libo4 porting library
+// is a set of bridge functions into native AS/400 functionality.
+
+#ifndef OS_AIX_VM_LIBO4_HPP
+#define OS_AIX_VM_LIBO4_HPP
+
+
+class libo4 {
+public:
+
+  // Initialize the libo4 porting library.
+  // Returns true on success, false on error.
+  static bool init();
+
+  // cleanup of the libo4 porting library.
+  static void cleanup();
+
+  // returns a number of memory statistics from the
+  // AS/400.
+  //
+  // Specify NULL for numbers you are not interested in.
+  //
+  // returns false if an error happened. Activate OsMisc trace for
+  // trace output.
+  //
+  static bool get_memory_info (unsigned long long* p_virt_total, unsigned long long* p_real_total,
+    unsigned long long* p_real_free, unsigned long long* p_pgsp_total, unsigned long long* p_pgsp_free);
+
+  // returns information about system load
+  // (similar to "loadavg()" under other Unices)
+  //
+  // Specify NULL for numbers you are not interested in.
+  //
+  // returns false if an error happened. Activate OsMisc trace for
+  // trace output.
+  //
+  static bool get_load_avg (double* p_avg1, double* p_avg5, double* p_avg15);
+
+  // this is a replacement for the "realpath()" API which does not really work
+  // on PASE
+  //
+  // returns false if an error happened. Activate OsMisc trace for
+  // trace output.
+  //
+  static bool realpath (const char* file_name,
+      char* resolved_name, int resolved_name_len);
+
+};
+
+#endif // OS_AIX_VM_LIBO4_HPP
+
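
[Editor's note: a usage sketch of the libo4 API declared above (illustrative caller, not code from this changeset; on plain AIX the stub init() returns false, so callers must tolerate failure):

  #include "libo4.hpp"

  // Illustrative caller: query PASE memory statistics if the porting
  // library is available.
  static void log_os400_memory() {
    if (!libo4::init()) {
      return; // not on PASE, or the porting library failed to initialize
    }
    unsigned long long virt_total = 0, real_total = 0, real_free = 0;
    // Pass NULL for the paging-space numbers we are not interested in.
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free,
                               NULL, NULL)) {
      // ... report virt_total, real_total, real_free ...
    }
    libo4::cleanup();
  }
]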
--- a/hotspot/src/os/aix/vm/libperfstat_aix.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/libperfstat_aix.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,49 +22,50 @@
  *
  */
 
-#include "runtime/arguments.hpp"
 #include "libperfstat_aix.hpp"
+#include "misc_aix.hpp"
 
-// For dlopen and friends
-#include <fcntl.h>
+#include <dlfcn.h>
+#include <sys/systemcfg.h>
 
-// handle to the libperfstat
+// Handle to the libperfstat.
 static void* g_libhandle = NULL;
 
-// whether initialization worked
-static bool g_initialized = false;
-
-
-typedef int (*fun_perfstat_cpu_total_t) (perfstat_id_t *name, perfstat_cpu_total_t* userbuff,
+typedef int (*fun_perfstat_cpu_total_t) (perfstat_id_t *name, PERFSTAT_CPU_TOTAL_T_LATEST* userbuff,
                                          int sizeof_userbuff, int desired_number);
 
 typedef int (*fun_perfstat_memory_total_t) (perfstat_id_t *name, perfstat_memory_total_t* userbuff,
                                             int sizeof_userbuff, int desired_number);
 
+typedef int (*fun_perfstat_partition_total_t) (perfstat_id_t *name,
+    PERFSTAT_PARTITON_TOTAL_T_LATEST* userbuff, int sizeof_userbuff,
+    int desired_number);
+
+typedef int (*fun_perfstat_wpar_total_t) (perfstat_id_wpar_t *name,
+    PERFSTAT_WPAR_TOTAL_T_LATEST* userbuff, int sizeof_userbuff,
+    int desired_number);
+
 typedef void (*fun_perfstat_reset_t) ();
 
+typedef cid_t (*fun_wpar_getcid_t) ();
+
 static fun_perfstat_cpu_total_t     g_fun_perfstat_cpu_total = NULL;
 static fun_perfstat_memory_total_t  g_fun_perfstat_memory_total = NULL;
+static fun_perfstat_partition_total_t g_fun_perfstat_partition_total = NULL;
+static fun_perfstat_wpar_total_t    g_fun_perfstat_wpar_total = NULL;
 static fun_perfstat_reset_t         g_fun_perfstat_reset = NULL;
+static fun_wpar_getcid_t            g_fun_wpar_getcid = NULL;
 
 bool libperfstat::init() {
 
-  if (g_initialized) {
-    return true;
-  }
-
-  g_initialized = false;
-
-  // dynamically load the libperfstat porting library.
+  // Dynamically load the libperfstat porting library.
   g_libhandle = dlopen("/usr/lib/libperfstat.a(shr_64.o)", RTLD_MEMBER | RTLD_NOW);
   if (!g_libhandle) {
-    if (Verbose) {
-      fprintf(stderr, "Cannot load libperfstat.a (dlerror: %s)", dlerror());
-    }
+    trcVerbose("Cannot load libperfstat.a (dlerror: %s)", dlerror());
     return false;
   }
 
-  // resolve function pointers
+  // Resolve function pointers
 
 #define RESOLVE_FUN_NO_ERROR(name) \
   g_fun_##name = (fun_##name##_t) dlsym(g_libhandle, #name);
@@ -72,26 +73,28 @@
 #define RESOLVE_FUN(name) \
   RESOLVE_FUN_NO_ERROR(name) \
   if (!g_fun_##name) { \
-    if (Verbose) { \
-      fprintf(stderr, "Cannot resolve " #name "() from libperfstat.a\n" \
+    trcVerbose("Cannot resolve " #name "() from libperfstat.a\n" \
                       "   (dlerror: %s)", dlerror()); \
-      } \
     return false; \
   }
 
+  // These functions may or may not be there depending on the OS release.
+  RESOLVE_FUN_NO_ERROR(perfstat_partition_total);
+  RESOLVE_FUN_NO_ERROR(perfstat_wpar_total);
+  RESOLVE_FUN_NO_ERROR(wpar_getcid);
+
+  // These functions are required for every release.
   RESOLVE_FUN(perfstat_cpu_total);
   RESOLVE_FUN(perfstat_memory_total);
   RESOLVE_FUN(perfstat_reset);
 
-  g_initialized = true;
+  trcVerbose("libperfstat loaded.");
 
   return true;
 }
 
 void libperfstat::cleanup() {
 
-  g_initialized = false;
-
   if (g_libhandle) {
     dlclose(g_libhandle);
     g_libhandle = NULL;
@@ -99,26 +102,250 @@
 
   g_fun_perfstat_cpu_total = NULL;
   g_fun_perfstat_memory_total = NULL;
+  g_fun_perfstat_partition_total = NULL;
+  g_fun_perfstat_wpar_total = NULL;
   g_fun_perfstat_reset = NULL;
+  g_fun_wpar_getcid = NULL;
+
 }
 
 int libperfstat::perfstat_memory_total(perfstat_id_t *name,
                                        perfstat_memory_total_t* userbuff,
                                        int sizeof_userbuff, int desired_number) {
-  assert(g_initialized, "libperfstat not initialized");
-  assert(g_fun_perfstat_memory_total, "");
+  if (g_fun_perfstat_memory_total == NULL) {
+    return -1;
+  }
   return g_fun_perfstat_memory_total(name, userbuff, sizeof_userbuff, desired_number);
 }
 
-int libperfstat::perfstat_cpu_total(perfstat_id_t *name, perfstat_cpu_total_t* userbuff,
+int libperfstat::perfstat_cpu_total(perfstat_id_t *name, PERFSTAT_CPU_TOTAL_T_LATEST* userbuff,
                                     int sizeof_userbuff, int desired_number) {
-  assert(g_initialized, "libperfstat not initialized");
-  assert(g_fun_perfstat_cpu_total, "");
+  if (g_fun_perfstat_cpu_total == NULL) {
+    return -1;
+  }
   return g_fun_perfstat_cpu_total(name, userbuff, sizeof_userbuff, desired_number);
 }
 
+int libperfstat::perfstat_partition_total(perfstat_id_t *name, PERFSTAT_PARTITON_TOTAL_T_LATEST* userbuff,
+                                          int sizeof_userbuff, int desired_number) {
+  if (g_fun_perfstat_partition_total == NULL) {
+    return -1;
+  }
+  return g_fun_perfstat_partition_total(name, userbuff, sizeof_userbuff, desired_number);
+}
+
+int libperfstat::perfstat_wpar_total(perfstat_id_wpar_t *name, PERFSTAT_WPAR_TOTAL_T_LATEST* userbuff,
+                                     int sizeof_userbuff, int desired_number) {
+  if (g_fun_perfstat_wpar_total == NULL) {
+    return -1;
+  }
+  return g_fun_perfstat_wpar_total(name, userbuff, sizeof_userbuff, desired_number);
+}
+
 void libperfstat::perfstat_reset() {
-  assert(g_initialized, "libperfstat not initialized");
-  assert(g_fun_perfstat_reset, "");
-  g_fun_perfstat_reset();
+  if (g_fun_perfstat_reset != NULL) {
+    g_fun_perfstat_reset();
+  }
+}
+
+cid_t libperfstat::wpar_getcid() {
+  if (g_fun_wpar_getcid == NULL) {
+    return (cid_t) -1;
+  }
+  return g_fun_wpar_getcid();
+}
+
+
+//////////////////// convenience functions, release-independent /////////////////////////////
+
+// Excerpts from systemcfg.h definitions newer than AIX 5.3 (our oldest build platform)
+
+#define PV_6 0x100000          /* Power PC 6 */
+#define PV_6_1 0x100001        /* Power PC 6 DD1.x */
+#define PV_7 0x200000          /* Power PC 7 */
+#define PV_5_Compat 0x0F8000   /* Power PC 5 */
+#define PV_6_Compat 0x108000   /* Power PC 6 */
+#define PV_7_Compat 0x208000   /* Power PC 7 */
+#define PV_8 0x300000          /* Power PC 8 */
+#define PV_8_Compat 0x308000   /* Power PC 8 */
+
+
+// Retrieve global cpu information.
+bool libperfstat::get_cpuinfo(cpuinfo_t* pci) {
+
+  assert(pci, "get_cpuinfo: invalid parameter");
+  memset(pci, 0, sizeof(cpuinfo_t));
+
+  PERFSTAT_CPU_TOTAL_T_LATEST psct;
+  memset (&psct, '\0', sizeof(psct));
+
+  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(PERFSTAT_CPU_TOTAL_T_LATEST), 1)) {
+    if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_61), 1)) {
+      if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_53), 1)) {
+          trcVerbose("perfstat_cpu_total() failed (errno=%d)", errno);
+          return false;
+      }
+    }
+  }
+
+  // Global cpu information.
+  strcpy (pci->description, psct.description);
+  pci->processorHZ = psct.processorHZ;
+  pci->ncpus = psct.ncpus;
+  for (int i = 0; i < 3; i++) {
+    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
+  }
+
+  pci->user_clock_ticks = psct.user;
+  pci->sys_clock_ticks  = psct.sys;
+  pci->idle_clock_ticks = psct.idle;
+  pci->wait_clock_ticks = psct.wait;
+
+  // Get the processor version from _system_configuration.
+  switch (_system_configuration.version) {
+  case PV_8:
+    strcpy(pci->version, "Power PC 8");
+    break;
+  case PV_7:
+    strcpy(pci->version, "Power PC 7");
+    break;
+  case PV_6_1:
+    strcpy(pci->version, "Power PC 6 DD1.x");
+    break;
+  case PV_6:
+    strcpy(pci->version, "Power PC 6");
+    break;
+  case PV_5:
+    strcpy(pci->version, "Power PC 5");
+    break;
+  case PV_5_2:
+    strcpy(pci->version, "Power PC 5_2");
+    break;
+  case PV_5_3:
+    strcpy(pci->version, "Power PC 5_3");
+    break;
+  case PV_5_Compat:
+    strcpy(pci->version, "PV_5_Compat");
+    break;
+  case PV_6_Compat:
+    strcpy(pci->version, "PV_6_Compat");
+    break;
+  case PV_7_Compat:
+    strcpy(pci->version, "PV_7_Compat");
+    break;
+  case PV_8_Compat:
+    strcpy(pci->version, "PV_8_Compat");
+    break;
+  default:
+    strcpy(pci->version, "unknown");
+  }
+
+  return true;
 }
+
+// Retrieve partition information.
+bool libperfstat::get_partitioninfo(partitioninfo_t* ppi) {
+
+  assert(ppi, "get_partitioninfo: invalid parameter");
+  memset(ppi, 0, sizeof(partitioninfo_t));
+
+  PERFSTAT_PARTITON_TOTAL_T_LATEST pspt;
+  memset(&pspt, '\0', sizeof(pspt));
+
+  bool ame_details = true;
+
+  if (-1 == libperfstat::perfstat_partition_total(NULL, &pspt, sizeof(PERFSTAT_PARTITON_TOTAL_T_LATEST), 1)) {
+    if (-1 == libperfstat::perfstat_partition_total(NULL, &pspt, sizeof(perfstat_partition_total_t_71), 1)) {
+      ame_details = false;
+      if (-1 == libperfstat::perfstat_partition_total(NULL, &pspt, sizeof(perfstat_partition_total_t_61), 1)) {
+        if (-1 == libperfstat::perfstat_partition_total(NULL, &pspt, sizeof(perfstat_partition_total_t_53), 1)) {
+          if (-1 == libperfstat::perfstat_partition_total(NULL, &pspt, sizeof(perfstat_partition_total_t_53_5), 1)) {
+            trcVerbose("perfstat_partition_total() failed (errno=%d)", errno);
+            return false;
+          }
+        }
+      }
+    }
+  }
+
+  // partition type info
+  ppi->shared_enabled = pspt.type.b.shared_enabled;
+  ppi->smt_capable = pspt.type.b.smt_capable;
+  ppi->smt_enabled = pspt.type.b.smt_enabled;
+  ppi->lpar_capable = pspt.type.b.lpar_capable;
+  ppi->lpar_enabled = pspt.type.b.lpar_enabled;
+  ppi->dlpar_capable = pspt.type.b.dlpar_capable;
+  ppi->capped = pspt.type.b.capped;
+  ppi->kernel_is_64 = pspt.type.b.kernel_is_64;
+  ppi->pool_util_authority = pspt.type.b.pool_util_authority;
+  ppi->donate_capable = pspt.type.b.donate_capable;
+  ppi->donate_enabled = pspt.type.b.donate_enabled;
+  ppi->ams_capable = pspt.type.b.ams_capable;
+  ppi->ams_enabled = pspt.type.b.ams_enabled;
+  ppi->power_save = pspt.type.b.power_save;
+  ppi->ame_enabled = pspt.type.b.ame_enabled;
+
+  // partition total info
+  ppi->online_cpus = pspt.online_cpus;
+  ppi->entitled_proc_capacity = pspt.entitled_proc_capacity;
+  ppi->var_proc_capacity_weight = pspt.var_proc_capacity_weight;
+  ppi->phys_cpus_pool = pspt.phys_cpus_pool;
+  ppi->pool_id = pspt.pool_id;
+  ppi->entitled_pool_capacity = pspt.entitled_pool_capacity;
+  strcpy(ppi->name, pspt.name);
+
+  // Added values to ppi that we need for later computation of cpu utilization
+  // ( pool authorization needed for pool_idle_time ??? )
+  ppi->timebase_last   = pspt.timebase_last;
+  ppi->pool_idle_time  = pspt.pool_idle_time;
+  ppi->pcpu_tics_user  = pspt.puser;
+  ppi->pcpu_tics_sys   = pspt.psys;
+  ppi->pcpu_tics_idle  = pspt.pidle;
+  ppi->pcpu_tics_wait  = pspt.pwait;
+
+  // Additional AME information.
+  if (ame_details) {
+    ppi->true_memory = pspt.true_memory * 4096;
+    ppi->expanded_memory = pspt.expanded_memory * 4096;
+    ppi->target_memexp_factr = pspt.target_memexp_factr;
+    ppi->current_memexp_factr = pspt.current_memexp_factr;
+    ppi->cmcs_total_time = pspt.cmcs_total_time;
+  }
+
+  return true;
+}
+
+// Retrieve wpar information.
+bool libperfstat::get_wparinfo(wparinfo_t* pwi) {
+
+  assert(pwi, "get_wparinfo: invalid parameter");
+  memset(pwi, 0, sizeof(wparinfo_t));
+
+  if (libperfstat::wpar_getcid() <= 0) {
+    return false;
+  }
+
+  PERFSTAT_WPAR_TOTAL_T_LATEST pswt;
+  memset (&pswt, '\0', sizeof(pswt));
+
+  if (-1 == libperfstat::perfstat_wpar_total(NULL, &pswt, sizeof(PERFSTAT_WPAR_TOTAL_T_LATEST), 1)) {
+    if (-1 == libperfstat::perfstat_wpar_total(NULL, &pswt, sizeof(perfstat_wpar_total_t_61), 1)) {
+      trcVerbose("perfstat_wpar_total() failed (errno=%d)", errno);
+      return false;
+    }
+  }
+
+  // WPAR type info.
+  pwi->app_wpar = pswt.type.b.app_wpar;
+  pwi->cpu_rset = pswt.type.b.cpu_rset;
+  pwi->cpu_xrset = pswt.type.b.cpu_xrset;
+  pwi->cpu_limits = pswt.type.b.cpu_limits;
+  pwi->mem_limits = pswt.type.b.mem_limits;
+  // WPAR total info.
+  strcpy(pwi->name, pswt.name);
+  pwi->wpar_id = pswt.wpar_id;
+  pwi->cpu_limit = pswt.cpu_limit;
+  pwi->mem_limit = pswt.mem_limit;
+
+  return true;
+}
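
[Editor's note: two techniques in this file generalize well: optional entry points are resolved with dlsym() so a NULL pointer simply disables a feature, and perfstat calls are retried with successively older (smaller) struct layouts so a binary built against the newest headers still runs on older AIX releases. A minimal self-contained sketch of both ideas, with hypothetical names, simplified from the code above:

  #include <dlfcn.h>
  #include <stddef.h>

  // An optional entry point: NULL just means "not available on this
  // release" (mirrors RESOLVE_FUN_NO_ERROR above).
  typedef int (*query_fun_t)(void* buf, int bufsize);

  static query_fun_t resolve_optional(void* handle, const char* sym) {
    return (query_fun_t) dlsym(handle, sym);
  }

  // Try the newest known struct size first, then fall back to older,
  // smaller layouts - the pattern get_cpuinfo() uses for perfstat_cpu_total().
  static int query_with_fallback(query_fun_t fun, void* buf,
                                 const size_t* sizes, int nsizes) {
    if (fun == NULL) return -1;          // feature absent on this release
    for (int i = 0; i < nsizes; i++) {
      if (fun(buf, (int) sizes[i]) != -1) {
        return 0;                        // the OS accepted this layout
      }
    }
    return -1;                           // no known layout matched
  }
]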
--- a/hotspot/src/os/aix/vm/libperfstat_aix.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/libperfstat_aix.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -22,7 +22,7 @@
  *
  */
 
-// encapsulates the libperfstat library.
+// Encapsulates the libperfstat library.
 //
 // The purpose of this code is to dynamically load the libperfstat library
 // instead of statically linking against it. The libperfstat library is an
@@ -32,7 +32,732 @@
 #ifndef OS_AIX_VM_LIBPERFSTAT_AIX_HPP
 #define OS_AIX_VM_LIBPERFSTAT_AIX_HPP
 
-#include <libperfstat.h>
+#include <sys/types.h>
+#include <stdlib.h>
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+// These are excerpts from the AIX 5.3, 6.1 and 7.1 libperfstat.h -
+// this is all we need from libperfstat.h, and we want to avoid having to include <libperfstat.h>.
+//
+// Note: We define all structures as if we were to include libperfstat.h on an AIX 5.2
+// build machine.
+//
+// The rationale behind this is that if we build on an AIX 5.2 build machine,
+// include libperfstat.h and hard-link against libperfstat.a, the program should
+// work without recompilation on all newer AIX versions.
+//
+
+#define IDENTIFIER_LENGTH 64    /* length of strings included in the structures */
+
+
+typedef struct { /* structure element identifier */
+  char name[IDENTIFIER_LENGTH]; /* name of the identifier */
+} perfstat_id_t;
+
+#define CEC_ID_LEN 40           /* CEC identifier length */
+#define MAXCORRALNAMELEN 25     /* length of the wpar name */
+#define FIRST_WPARNAME ""       /* pseudo-name for the first WPAR */
+#define FIRST_WPARID -1         /* pseudo-id for the first WPAR */
+
+typedef unsigned short cid_t;   /* workload partition identifier */
+
+typedef struct { /* Virtual memory utilization */
+  u_longlong_t virt_total;    /* total virtual memory (in 4KB pages) */
+  u_longlong_t real_total;    /* total real memory (in 4KB pages) */
+  u_longlong_t real_free;     /* free real memory (in 4KB pages) */
+  u_longlong_t real_pinned;   /* real memory which is pinned (in 4KB pages) */
+  u_longlong_t real_inuse;    /* real memory which is in use (in 4KB pages) */
+  u_longlong_t pgbad;         /* number of bad pages */
+  u_longlong_t pgexct;        /* number of page faults */
+  u_longlong_t pgins;         /* number of pages paged in */
+  u_longlong_t pgouts;        /* number of pages paged out */
+  u_longlong_t pgspins;       /* number of page ins from paging space */
+  u_longlong_t pgspouts;      /* number of page outs from paging space */
+  u_longlong_t scans;         /* number of page scans by clock */
+  u_longlong_t cycles;        /* number of page replacement cycles */
+  u_longlong_t pgsteals;      /* number of page steals */
+  u_longlong_t numperm;       /* number of frames used for files (in 4KB pages) */
+  u_longlong_t pgsp_total;    /* total paging space (in 4KB pages) */
+  u_longlong_t pgsp_free;     /* free paging space (in 4KB pages) */
+  u_longlong_t pgsp_rsvd;     /* reserved paging space (in 4KB pages) */
+  u_longlong_t real_system;   /* real memory used by system segments (in 4KB pages). This is the sum of all the used pages in segments marked for system usage.
+                               * Since segment classifications are not always guaranteed to be accurate, this number is only an approximation. */
+  u_longlong_t real_user;     /* real memory used by non-system segments (in 4KB pages). This is the sum of all pages used in segments not marked for system usage.
+                               * Since segment classifications are not always guaranteed to be accurate, this number is only an approximation. */
+  u_longlong_t real_process;  /* real memory used by process segments (in 4KB pages). This is real_total-real_free-numperm-real_system. Since real_system is an
+                               * approximation, this number is too. */
+  u_longlong_t virt_active;   /* Active virtual pages. Virtual pages are considered active if they have been accessed */
+
+} perfstat_memory_total_t;
+
+typedef struct { /* global cpu information AIX 5.3 < TL10 */
+  int ncpus;                            /* number of active logical processors */
+  int ncpus_cfg;                        /* number of configured processors */
+  char description[IDENTIFIER_LENGTH];  /* processor description (type/official name) */
+  u_longlong_t processorHZ;             /* processor speed in Hz */
+  u_longlong_t user;                    /* raw total number of clock ticks spent in user mode */
+  u_longlong_t sys;                     /* raw total number of clock ticks spent in system mode */
+  u_longlong_t idle;                    /* raw total number of clock ticks spent idle */
+  u_longlong_t wait;                    /* raw total number of clock ticks spent waiting for I/O */
+  u_longlong_t pswitch;                 /* number of process switches (change in currently running process) */
+  u_longlong_t syscall;                 /* number of system calls executed */
+  u_longlong_t sysread;                 /* number of read system calls executed */
+  u_longlong_t syswrite;                /* number of write system calls executed */
+  u_longlong_t sysfork;                 /* number of fork system calls executed */
+  u_longlong_t sysexec;                 /* number of exec system calls executed */
+  u_longlong_t readch;                  /* number of characters transferred with read system call */
+  u_longlong_t writech;                 /* number of characters transferred with write system call */
+  u_longlong_t devintrs;                /* number of device interrupts */
+  u_longlong_t softintrs;               /* number of software interrupts */
+  time_t lbolt;                         /* number of ticks since last reboot */
+  u_longlong_t loadavg[3];              /* (1<<SBITS) times the average number of runnable processes during the last 1, 5 and 15 minutes.
+                                               * To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>. */
+  u_longlong_t runque;                  /* length of the run queue (processes ready) */
+  u_longlong_t swpque;                  /* length of the swap queue (processes waiting to be paged in) */
+  u_longlong_t bread;                   /* number of blocks read */
+  u_longlong_t bwrite;                  /* number of blocks written */
+  u_longlong_t lread;                   /* number of logical read requests */
+  u_longlong_t lwrite;                  /* number of logical write requests */
+  u_longlong_t phread;                  /* number of physical reads (reads on raw devices) */
+  u_longlong_t phwrite;                 /* number of physical writes (writes on raw devices) */
+  u_longlong_t runocc;                  /* updated whenever runque is updated, i.e. the runqueue is occupied.
+                                               * This can be used to compute the simple average of ready processes  */
+  u_longlong_t swpocc;                  /* updated whenever swpque is updated, i.e. the swpqueue is occupied.
+                                               * This can be used to compute the simple average of processes waiting to be paged in */
+  u_longlong_t iget;                    /* number of inode lookups */
+  u_longlong_t namei;                   /* number of vnode lookup from a path name */
+  u_longlong_t dirblk;                  /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
+  u_longlong_t msg;                     /* number of IPC message operations */
+  u_longlong_t sema;                    /* number of IPC semaphore operations */
+  u_longlong_t rcvint;                  /* number of tty receive interrupts */
+  u_longlong_t xmtint;                  /* number of tty transmit interrupts */
+  u_longlong_t mdmint;                  /* number of modem interrupts */
+  u_longlong_t tty_rawinch;             /* number of raw input characters  */
+  u_longlong_t tty_caninch;             /* number of canonical input characters (always zero) */
+  u_longlong_t tty_rawoutch;            /* number of raw output characters */
+  u_longlong_t ksched;                  /* number of kernel processes created */
+  u_longlong_t koverf;                  /* kernel process creation attempts where:
+                                               * -the user has forked to their maximum limit
+                                               * -the configuration limit of processes has been reached */
+  u_longlong_t kexit;                   /* number of kernel processes that became zombies */
+  u_longlong_t rbread;                  /* number of remote read requests */
+  u_longlong_t rcread;                  /* number of cached remote reads */
+  u_longlong_t rbwrt;                   /* number of remote writes */
+  u_longlong_t rcwrt;                   /* number of cached remote writes */
+  u_longlong_t traps;                   /* number of traps */
+  int ncpus_high;                       /* index of highest processor online */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t decrintrs;               /* number of decrementer tics interrupts */
+  u_longlong_t mpcrintrs;               /* number of mpc's received interrupts */
+  u_longlong_t mpcsintrs;               /* number of mpc's sent interrupts */
+  u_longlong_t phantintrs;              /* number of phantom interrupts */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  short iowait;                         /* number of processes that are asleep waiting for buffered I/O */
+  short physio;                         /* number of processes waiting for raw I/O */
+  longlong_t twait;                     /* number of threads that are waiting for filesystem direct(cio) */
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds) */
+} perfstat_cpu_total_t_53;
+
+typedef struct { /* global cpu information AIX 6.1|5.3 > TL09 */
+  int ncpus;                            /* number of active logical processors */
+  int ncpus_cfg;                        /* number of configured processors */
+  char description[IDENTIFIER_LENGTH];  /* processor description (type/official name) */
+  u_longlong_t processorHZ;             /* processor speed in Hz */
+  u_longlong_t user;                    /* raw total number of clock ticks spent in user mode */
+  u_longlong_t sys;                     /* raw total number of clock ticks spent in system mode */
+  u_longlong_t idle;                    /* raw total number of clock ticks spent idle */
+  u_longlong_t wait;                    /* raw total number of clock ticks spent waiting for I/O */
+  u_longlong_t pswitch;                 /* number of process switches (change in currently running process) */
+  u_longlong_t syscall;                 /* number of system calls executed */
+  u_longlong_t sysread;                 /* number of read system calls executed */
+  u_longlong_t syswrite;                /* number of write system calls executed */
+  u_longlong_t sysfork;                 /* number of fork system calls executed */
+  u_longlong_t sysexec;                 /* number of exec system calls executed */
+  u_longlong_t readch;                  /* number of characters transferred with read system call */
+  u_longlong_t writech;                 /* number of characters transferred with write system call */
+  u_longlong_t devintrs;                /* number of device interrupts */
+  u_longlong_t softintrs;               /* number of software interrupts */
+  time_t lbolt;                         /* number of ticks since last reboot */
+  u_longlong_t loadavg[3];              /* (1<<SBITS) times the average number of runnable processes during the last 1, 5 and 15 minutes.
+                                               * To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>. */
+  u_longlong_t runque;                  /* length of the run queue (processes ready) */
+  u_longlong_t swpque;                  /* length of the swap queue (processes waiting to be paged in) */
+  u_longlong_t bread;                   /* number of blocks read */
+  u_longlong_t bwrite;                  /* number of blocks written */
+  u_longlong_t lread;                   /* number of logical read requests */
+  u_longlong_t lwrite;                  /* number of logical write requests */
+  u_longlong_t phread;                  /* number of physical reads (reads on raw devices) */
+  u_longlong_t phwrite;                 /* number of physical writes (writes on raw devices) */
+  u_longlong_t runocc;                  /* updated whenever runque is updated, i.e. the runqueue is occupied.
+                                               * This can be used to compute the simple average of ready processes  */
+  u_longlong_t swpocc;                  /* updated whenever swpque is updated, i.e. the swpqueue is occupied.
+                                               * This can be used to compute the simple average of processes waiting to be paged in */
+  u_longlong_t iget;                    /* number of inode lookups */
+  u_longlong_t namei;                   /* number of vnode lookup from a path name */
+  u_longlong_t dirblk;                  /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
+  u_longlong_t msg;                     /* number of IPC message operations */
+  u_longlong_t sema;                    /* number of IPC semaphore operations */
+  u_longlong_t rcvint;                  /* number of tty receive interrupts */
+  u_longlong_t xmtint;                  /* number of tty transmit interrupts */
+  u_longlong_t mdmint;                  /* number of modem interrupts */
+  u_longlong_t tty_rawinch;             /* number of raw input characters  */
+  u_longlong_t tty_caninch;             /* number of canonical input characters (always zero) */
+  u_longlong_t tty_rawoutch;            /* number of raw output characters */
+  u_longlong_t ksched;                  /* number of kernel processes created */
+  u_longlong_t koverf;                  /* kernel process creation attempts where:
+                                               * -the user has forked to their maximum limit
+                                               * -the configuration limit of processes has been reached */
+  u_longlong_t kexit;                   /* number of kernel processes that became zombies */
+  u_longlong_t rbread;                  /* number of remote read requests */
+  u_longlong_t rcread;                  /* number of cached remote reads */
+  u_longlong_t rbwrt;                   /* number of remote writes */
+  u_longlong_t rcwrt;                   /* number of cached remote writes */
+  u_longlong_t traps;                   /* number of traps */
+  int ncpus_high;                       /* index of highest processor online */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t decrintrs;               /* number of decrementer tics interrupts */
+  u_longlong_t mpcrintrs;               /* number of mpc's received interrupts */
+  u_longlong_t mpcsintrs;               /* number of mpc's sent interrupts */
+  u_longlong_t phantintrs;              /* number of phantom interrupts */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  short iowait;                         /* number of processes that are asleep waiting for buffered I/O */
+  short physio;                         /* number of processes waiting for raw I/O */
+  longlong_t twait;                     /* number of threads that are waiting for filesystem direct(cio) */
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds) */
+  u_longlong_t puser_spurr;             /* number of spurr cycles spent in user mode */
+  u_longlong_t psys_spurr;              /* number of spurr cycles spent in kernel mode */
+  u_longlong_t pidle_spurr;             /* number of spurr cycles spent in idle mode */
+  u_longlong_t pwait_spurr;             /* number of spurr cycles spent in wait mode */
+  int spurrflag;                        /* set if running in spurr mode */
+} perfstat_cpu_total_t_61;
+
+typedef struct { /* global cpu information AIX 7.1 */
+  int ncpus;                            /* number of active logical processors */
+  int ncpus_cfg;                        /* number of configured processors */
+  char description[IDENTIFIER_LENGTH];  /* processor description (type/official name) */
+  u_longlong_t processorHZ;             /* processor speed in Hz */
+  u_longlong_t user;                    /* raw total number of clock ticks spent in user mode */
+  u_longlong_t sys;                     /* raw total number of clock ticks spent in system mode */
+  u_longlong_t idle;                    /* raw total number of clock ticks spent idle */
+  u_longlong_t wait;                    /* raw total number of clock ticks spent waiting for I/O */
+  u_longlong_t pswitch;                 /* number of process switches (change in currently running process) */
+  u_longlong_t syscall;                 /* number of system calls executed */
+  u_longlong_t sysread;                 /* number of read system calls executed */
+  u_longlong_t syswrite;                /* number of write system calls executed */
+  u_longlong_t sysfork;                 /* number of fork system calls executed */
+  u_longlong_t sysexec;                 /* number of exec system calls executed */
+  u_longlong_t readch;                  /* number of characters transferred with read system call */
+  u_longlong_t writech;                 /* number of characters transferred with write system call */
+  u_longlong_t devintrs;                /* number of device interrupts */
+  u_longlong_t softintrs;               /* number of software interrupts */
+  time_t lbolt;                         /* number of ticks since last reboot */
+  u_longlong_t loadavg[3];              /* (1<<SBITS) times the average number of runnable processes during the last 1, 5 and 15 minutes.
+                                               * To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>. */
+  u_longlong_t runque;                  /* length of the run queue (processes ready) */
+  u_longlong_t swpque;                  /* length of the swap queue (processes waiting to be paged in) */
+  u_longlong_t bread;                   /* number of blocks read */
+  u_longlong_t bwrite;                  /* number of blocks written */
+  u_longlong_t lread;                   /* number of logical read requests */
+  u_longlong_t lwrite;                  /* number of logical write requests */
+  u_longlong_t phread;                  /* number of physical reads (reads on raw devices) */
+  u_longlong_t phwrite;                 /* number of physical writes (writes on raw devices) */
+  u_longlong_t runocc;                  /* updated whenever runque is updated, i.e. the runqueue is occupied.
+                                               * This can be used to compute the simple average of ready processes  */
+  u_longlong_t swpocc;                  /* updated whenever swpque is updated, i.e. the swpqueue is occupied.
+                                               * This can be used to compute the simple average of processes waiting to be paged in */
+  u_longlong_t iget;                    /* number of inode lookups */
+  u_longlong_t namei;                   /* number of vnode lookup from a path name */
+  u_longlong_t dirblk;                  /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
+  u_longlong_t msg;                     /* number of IPC message operations */
+  u_longlong_t sema;                    /* number of IPC semaphore operations */
+  u_longlong_t rcvint;                  /* number of tty receive interrupts */
+  u_longlong_t xmtint;                  /* number of tty transmit interrupts */
+  u_longlong_t mdmint;                  /* number of modem interrupts */
+  u_longlong_t tty_rawinch;             /* number of raw input characters  */
+  u_longlong_t tty_caninch;             /* number of canonical input characters (always zero) */
+  u_longlong_t tty_rawoutch;            /* number of raw output characters */
+  u_longlong_t ksched;                  /* number of kernel processes created */
+  u_longlong_t koverf;                  /* kernel process creation attempts where:
+                                               * -the user has forked to their maximum limit
+                                               * -the configuration limit of processes has been reached */
+  u_longlong_t kexit;                   /* number of kernel processes that became zombies */
+  u_longlong_t rbread;                  /* number of remote read requests */
+  u_longlong_t rcread;                  /* number of cached remote reads */
+  u_longlong_t rbwrt;                   /* number of remote writes */
+  u_longlong_t rcwrt;                   /* number of cached remote writes */
+  u_longlong_t traps;                   /* number of traps */
+  int ncpus_high;                       /* index of highest processor online */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t decrintrs;               /* number of decrementer tics interrupts */
+  u_longlong_t mpcrintrs;               /* number of mpc's received interrupts */
+  u_longlong_t mpcsintrs;               /* number of mpc's sent interrupts */
+  u_longlong_t phantintrs;              /* number of phantom interrupts */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  short iowait;                         /* number of processes that are asleep waiting for buffered I/O */
+  short physio;                         /* number of processes waiting for raw I/O */
+  longlong_t twait;                     /* number of threads that are waiting for filesystem direct(cio) */
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds) */
+  u_longlong_t puser_spurr;             /* number of spurr cycles spent in user mode */
+  u_longlong_t psys_spurr;              /* number of spurr cycles spent in kernel mode */
+  u_longlong_t pidle_spurr;             /* number of spurr cycles spent in idle mode */
+  u_longlong_t pwait_spurr;             /* number of spurr cycles spent in wait mode */
+  int spurrflag;                        /* set if running in spurr mode */
+  u_longlong_t  version;                /* version number (1, 2, etc.) */
+/*      >>>>> END OF STRUCTURE DEFINITION <<<<<         */
+#define CURR_VERSION_CPU_TOTAL 1              /* Incremented by one for every new release *
+                                               * of perfstat_cpu_total_t data structure   */
+} perfstat_cpu_total_t_71;
+
+typedef union {
+  uint    w;
+  struct {
+          unsigned smt_capable :1;          /* OS supports SMT mode */
+          unsigned smt_enabled :1;          /* SMT mode is on */
+          unsigned lpar_capable :1;         /* OS supports logical partitioning */
+          unsigned lpar_enabled :1;         /* logical partitioning is on */
+          unsigned shared_capable :1;       /* OS supports shared processor LPAR */
+          unsigned shared_enabled :1;       /* partition runs in shared mode */
+          unsigned dlpar_capable :1;        /* OS supports dynamic LPAR */
+          unsigned capped :1;               /* partition is capped */
+          unsigned kernel_is_64 :1;         /* kernel is 64 bit */
+          unsigned pool_util_authority :1;  /* pool utilization available */
+          unsigned donate_capable :1;       /* capable of donating cycles */
+          unsigned donate_enabled :1;       /* enabled for donating cycles */
+          unsigned ams_capable:1;           /* 1 = AMS(Active Memory Sharing) capable, 0 = Not AMS capable */
+          unsigned ams_enabled:1;           /* 1 = AMS(Active Memory Sharing) enabled, 0 = Not AMS enabled */
+          unsigned power_save:1;            /* 1 = Power saving mode is enabled */
+          unsigned ame_enabled:1;           /* Active Memory Expansion is enabled */
+          unsigned shared_extended :1;
+          unsigned spare :15;               /* reserved for future usage */
+  } b;
+} perfstat_partition_type_t;
+
+typedef struct { /* partition total information AIX 5.3 < TL6 */
+  char name[IDENTIFIER_LENGTH];         /* name of the logical partition */
+  perfstat_partition_type_t type;       /* set of bits describing the partition */
+  int lpar_id;                          /* logical partition identifier */
+  int group_id;                         /* identifier of the LPAR group this partition is a member of */
+  int pool_id;                          /* identifier of the shared pool of physical processors this partition is a member of */
+  int online_cpus;                      /* number of virtual CPUs currently online on the partition */
+  int max_cpus;                         /* maximum number of virtual CPUs this partition can ever have */
+  int min_cpus;                         /* minimum number of virtual CPUs this partition must have */
+  u_longlong_t online_memory;           /* amount of memory currently online */
+  u_longlong_t max_memory;              /* maximum amount of memory this partition can ever have */
+  u_longlong_t min_memory;              /* minimum amount of memory this partition must have */
+  int entitled_proc_capacity;           /* number of processor units this partition is entitled to receive */
+  int max_proc_capacity;                /* maximum number of processor units this partition can ever have */
+  int min_proc_capacity;                /* minimum number of processor units this partition must have */
+  int proc_capacity_increment;          /* increment value to the entitled capacity */
+  int unalloc_proc_capacity;            /* number of processor units currently unallocated in the shared processor pool this partition belongs to */
+  int var_proc_capacity_weight;         /* partition priority weight to receive extra capacity */
+  int unalloc_var_proc_capacity_weight; /* number of variable processor capacity weight units currently unallocated  in the shared processor pool this partition belongs to */
+  int online_phys_cpus_sys;             /* number of physical CPUs currently active in the system containing this partition */
+  int max_phys_cpus_sys;                /* maximum possible number of physical CPUs in the system containing this partition */
+  int phys_cpus_pool;                   /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t pool_idle_time;          /* number of clock tics a processor in the shared pool was idle */
+  u_longlong_t phantintrs;              /* number of phantom interrupts received by the partition */
+  u_longlong_t invol_virt_cswitch;      /* number of involuntary virtual CPU context switches */
+  u_longlong_t vol_virt_cswitch;        /* number of voluntary virtual CPU context switches */
+  u_longlong_t timebase_last;           /* most recent cpu time base */
+  u_longlong_t reserved_pages;          /* Current number of 16GB pages. Cannot participate in DR operations */
+  u_longlong_t reserved_pagesize;       /* Currently 16GB pagesize. Cannot participate in DR operations */
+} perfstat_partition_total_t_53_5;
+
+typedef struct { /* partition total information AIX 5.3 < TL10 */
+  char name[IDENTIFIER_LENGTH];         /* name of the logical partition */
+  perfstat_partition_type_t type;       /* set of bits describing the partition */
+  int lpar_id;                          /* logical partition identifier */
+  int group_id;                         /* identifier of the LPAR group this partition is a member of */
+  int pool_id;                          /* identifier of the shared pool of physical processors this partition is a member of */
+  int online_cpus;                      /* number of virtual CPUs currently online on the partition */
+  int max_cpus;                         /* maximum number of virtual CPUs this partition can ever have */
+  int min_cpus;                         /* minimum number of virtual CPUs this partition must have */
+  u_longlong_t online_memory;           /* amount of memory currently online */
+  u_longlong_t max_memory;              /* maximum amount of memory this partition can ever have */
+  u_longlong_t min_memory;              /* minimum amount of memory this partition must have */
+  int entitled_proc_capacity;           /* number of processor units this partition is entitled to receive */
+  int max_proc_capacity;                /* maximum number of processor units this partition can ever have */
+  int min_proc_capacity;                /* minimum number of processor units this partition must have */
+  int proc_capacity_increment;          /* increment value to the entitled capacity */
+  int unalloc_proc_capacity;            /* number of processor units currently unallocated in the shared processor pool this partition belongs to */
+  int var_proc_capacity_weight;         /* partition priority weight to receive extra capacity */
+  int unalloc_var_proc_capacity_weight; /* number of variable processor capacity weight units currently unallocated in the shared processor pool this partition belongs to */
+  int online_phys_cpus_sys;             /* number of physical CPUs currently active in the system containing this partition */
+  int max_phys_cpus_sys;                /* maximum possible number of physical CPUs in the system containing this partition */
+  int phys_cpus_pool;                   /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t pool_idle_time;          /* number of clock tics a processor in the shared pool was idle */
+  u_longlong_t phantintrs;              /* number of phantom interrupts received by the partition */
+  u_longlong_t invol_virt_cswitch;      /* number of involuntary virtual CPU context switches */
+  u_longlong_t vol_virt_cswitch;        /* number of voluntary virtual CPU context switches */
+  u_longlong_t timebase_last;           /* most recent cpu time base */
+  u_longlong_t reserved_pages;          /* Current number of 16GB pages. Cannot participate in DR operations */
+  u_longlong_t reserved_pagesize;       /* Currently 16GB pagesize. Cannot participate in DR operations */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t shcpus_in_sys;           /* Number of physical processors allocated for shared processor use */
+  u_longlong_t max_pool_capacity;       /* Maximum processor capacity of partitions pool */
+  u_longlong_t entitled_pool_capacity;  /* Entitled processor capacity of partitions pool */
+  u_longlong_t pool_max_time;           /* Summation of maximum time that could be consumed by the pool (nanoseconds) */
+  u_longlong_t pool_busy_time;          /* Summation of busy (non-idle) time accumulated across all partitions in the pool (nanoseconds) */
+  u_longlong_t pool_scaled_busy_time;   /* Scaled summation of busy (non-idle) time accumulated across all partitions in the pool (nanoseconds) */
+  u_longlong_t shcpu_tot_time;          /* Summation of total time across all physical processors allocated for shared processor use (nanoseconds) */
+  u_longlong_t shcpu_busy_time;         /* Summation of busy (non-idle) time accumulated across all shared processor partitions (nanoseconds) */
+  u_longlong_t shcpu_scaled_busy_time;  /* Scaled summation of busy time accumulated across all shared processor partitions (nanoseconds) */
+  int ams_pool_id;                      /* AMS pool id of the pool the LPAR belongs to */
+  int var_mem_weight;                   /* variable memory capacity weight */
+  u_longlong_t iome;                    /* I/O memory entitlement of the partition in bytes */
+  u_longlong_t pmem;                    /* Physical memory currently backing the partition's logical memory in bytes */
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds) */
+  u_longlong_t hypv_pagesize;           /* Hypervisor page size in KB */
+} perfstat_partition_total_t_53;
+
+typedef struct { /* partition total information AIX 6.1|5.3 > TL09 */
+  char name[IDENTIFIER_LENGTH];         /* name of the logical partition */
+  perfstat_partition_type_t type;       /* set of bits describing the partition */
+  int lpar_id;                          /* logical partition identifier */
+  int group_id;                         /* identifier of the LPAR group this partition is a member of */
+  int pool_id;                          /* identifier of the shared pool of physical processors this partition is a member of */
+  int online_cpus;                      /* number of virtual CPUs currently online on the partition */
+  int max_cpus;                         /* maximum number of virtual CPUs this partition can ever have */
+  int min_cpus;                         /* minimum number of virtual CPUs this partition must have */
+  u_longlong_t online_memory;           /* amount of memory currently online */
+  u_longlong_t max_memory;              /* maximum amount of memory this partition can ever have */
+  u_longlong_t min_memory;              /* minimum amount of memory this partition must have */
+  int entitled_proc_capacity;           /* number of processor units this partition is entitled to receive */
+  int max_proc_capacity;                /* maximum number of processor units this partition can ever have */
+  int min_proc_capacity;                /* minimum number of processor units this partition must have */
+  int proc_capacity_increment;          /* increment value to the entitled capacity */
+  int unalloc_proc_capacity;            /* number of processor units currently unallocated in the shared processor pool this partition belongs to */
+  int var_proc_capacity_weight;         /* partition priority weight to receive extra capacity */
+  int unalloc_var_proc_capacity_weight; /* number of variable processor capacity weight units currently unallocated in the shared processor pool this partition belongs to */
+  int online_phys_cpus_sys;             /* number of physical CPUs currently active in the system containing this partition */
+  int max_phys_cpus_sys;                /* maximum possible number of physical CPUs in the system containing this partition */
+  int phys_cpus_pool;                   /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t pool_idle_time;          /* number of clock tics a processor in the shared pool was idle */
+  u_longlong_t phantintrs;              /* number of phantom interrupts received by the partition */
+  u_longlong_t invol_virt_cswitch;      /* number of involuntary virtual CPU context switches */
+  u_longlong_t vol_virt_cswitch;        /* number of voluntary virtual CPU context switches */
+  u_longlong_t timebase_last;           /* most recent cpu time base */
+  u_longlong_t reserved_pages;          /* Current number of 16GB pages. Cannot participate in DR operations */
+  u_longlong_t reserved_pagesize;       /* Currently 16GB pagesize. Cannot participate in DR operations */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t shcpus_in_sys;           /* Number of physical processors allocated for shared processor use */
+  u_longlong_t max_pool_capacity;       /* Maximum processor capacity of partitions pool */
+  u_longlong_t entitled_pool_capacity;  /* Entitled processor capacity of partitions pool */
+  u_longlong_t pool_max_time;           /* Summation of maximum time that could be consumed by the pool (nanoseconds) */
+  u_longlong_t pool_busy_time;          /* Summation of busy (non-idle) time accumulated across all partitions in the pool (nanoseconds) */
+  u_longlong_t pool_scaled_busy_time;   /* Scaled summation of busy (non-idle) time accumulated across all partitions in the pool (nanoseconds) */
+  u_longlong_t shcpu_tot_time;          /* Summation of total time across all physical processors allocated for shared processor use (nanoseconds) */
+  u_longlong_t shcpu_busy_time;         /* Summation of busy (non-idle) time accumulated across all shared processor partitions (nanoseconds) */
+  u_longlong_t shcpu_scaled_busy_time;  /* Scaled summation of busy time accumulated across all shared processor partitions (nanoseconds) */
+  int ams_pool_id;                      /* AMS pool id of the pool the LPAR belongs to */
+  int var_mem_weight;                   /* variable memory capacity weight */
+  u_longlong_t iome;                    /* I/O memory entitlement of the partition in bytes */
+  u_longlong_t pmem;                    /* Physical memory currently backing the partition's logical memory in bytes */
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds) */
+  u_longlong_t hypv_pagesize;           /* Hypervisor page size in KB */
+  uint online_lcpus;                    /* number of online logical cpus */
+  uint smt_thrds;                       /* number of hardware threads that are running */
+  u_longlong_t puser_spurr;             /* number of spurr cycles spent in user mode */
+  u_longlong_t psys_spurr;              /* number of spurr cycles spent in kernel mode */
+  u_longlong_t pidle_spurr;             /* number of spurr cycles spent in idle mode */
+  u_longlong_t pwait_spurr;             /* number of spurr cycles spent in wait mode */
+  int spurrflag;                        /* set if running in spurr mode */
+} perfstat_partition_total_t_61;
+
+typedef struct { /* partition total information AIX 7.1 */
+  char name[IDENTIFIER_LENGTH];         /* name of the logical partition */
+  perfstat_partition_type_t type;       /* set of bits describing the partition */
+  int lpar_id;                          /* logical partition identifier */
+  int group_id;                         /* identifier of the LPAR group this partition is a member of */
+  int pool_id;                          /* identifier of the shared pool of physical processors this partition is a member of */
+  int online_cpus;                      /* number of virtual CPUs currently online on the partition */
+  int max_cpus;                         /* maximum number of virtual CPUs this partition can ever have */
+  int min_cpus;                         /* minimum number of virtual CPUs this partition must have */
+  u_longlong_t online_memory;           /* amount of memory currently online */
+  u_longlong_t max_memory;              /* maximum amount of memory this partition can ever have */
+  u_longlong_t min_memory;              /* minimum amount of memory this partition must have */
+  int entitled_proc_capacity;           /* number of processor units this partition is entitled to receive */
+  int max_proc_capacity;                /* maximum number of processor units this partition can ever have */
+  int min_proc_capacity;                /* minimum number of processor units this partition must have */
+  int proc_capacity_increment;          /* increment value to the entitled capacity */
+  int unalloc_proc_capacity;            /* number of processor units currently unallocated in the shared processor pool this partition belongs to */
+  int var_proc_capacity_weight;         /* partition priority weight to receive extra capacity */
+  int unalloc_var_proc_capacity_weight; /* number of variable processor capacity weight units currently unallocated in the shared processor pool this partition belongs to */
+  int online_phys_cpus_sys;             /* number of physical CPUs currently active in the system containing this partition */
+  int max_phys_cpus_sys;                /* maximum possible number of physical CPUs in the system containing this partition */
+  int phys_cpus_pool;                   /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t pool_idle_time;          /* number of clock tics a processor in the shared pool was idle */
+  u_longlong_t phantintrs;              /* number of phantom interrupts received by the partition */
+  u_longlong_t invol_virt_cswitch;      /* number of involuntary virtual CPU context switches */
+  u_longlong_t vol_virt_cswitch;        /* number of voluntary virtual CPU context switches */
+  u_longlong_t timebase_last;           /* most recent cpu time base */
+  u_longlong_t reserved_pages;          /* Current number of 16GB pages. Cannot participate in DR operations */
+  u_longlong_t reserved_pagesize;       /* Currently 16GB pagesize. Cannot participate in DR operations */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t shcpus_in_sys;           /* Number of physical processors allocated for shared processor use */
+  u_longlong_t max_pool_capacity;       /* Maximum processor capacity of partitions pool */
+  u_longlong_t entitled_pool_capacity;  /* Entitled processor capacity of partitions pool */
+  u_longlong_t pool_max_time;           /* Summation of maximum time that could be consumed by the pool (nanoseconds) */
+  u_longlong_t pool_busy_time;          /* Summation of busy (non-idle) time accumulated across all partitions in the pool (nanoseconds) */
+  u_longlong_t pool_scaled_busy_time;   /* Scaled summation of busy (non-idle) time accumulated across all partitions in the pool (nanoseconds) */
+  u_longlong_t shcpu_tot_time;          /* Summation of total time across all physical processors allocated for shared processor use (nanoseconds) */
+  u_longlong_t shcpu_busy_time;         /* Summation of busy (non-idle) time accumulated across all shared processor partitions (nanoseconds) */
+  u_longlong_t shcpu_scaled_busy_time;  /* Scaled summation of busy time accumulated across all shared processor partitions (nanoseconds) */
+  int ams_pool_id;                      /* AMS pool id of the pool the LPAR belongs to */
+  int var_mem_weight;                   /* variable memory capacity weight */
+  u_longlong_t iome;                    /* I/O memory entitlement of the partition in bytes */
+  u_longlong_t pmem;                    /* Physical memory currently backing the partition's logical memory in bytes */
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds) */
+  u_longlong_t hypv_pagesize;           /* Hypervisor page size in KB */
+  uint online_lcpus;                    /* number of online logical cpus */
+  uint smt_thrds;                       /* number of hardware threads that are running */
+  u_longlong_t puser_spurr;             /* number of spurr cycles spent in user mode */
+  u_longlong_t psys_spurr;              /* number of spurr cycles spent in kernel mode */
+  u_longlong_t pidle_spurr;             /* number of spurr cycles spent in idle mode */
+  u_longlong_t pwait_spurr;             /* number of spurr cycles spent in wait mode */
+  int spurrflag;                        /* set if running in spurr mode */
+  char hardwareid[CEC_ID_LEN];          /* CEC Identifier */
+  uint power_save_mode;                 /* Power save mode for the LPAR. Introduced through LI 53K PRF : Feature 728 292 */
+  ushort ame_version;                   /* AME Version */
+  u_longlong_t true_memory;             /* True Memory Size in 4KB pages */
+  u_longlong_t expanded_memory;         /* Expanded Memory Size in 4KB pages */
+  u_longlong_t target_memexp_factr;     /* Target Memory Expansion Factor scaled by 100 */
+  u_longlong_t current_memexp_factr;    /* Current Memory Expansion Factor scaled by 100 */
+  u_longlong_t target_cpool_size;       /* Target Compressed Pool Size in bytes */
+  u_longlong_t max_cpool_size;          /* Max Size of Compressed Pool in bytes */
+  u_longlong_t min_ucpool_size;         /* Min Size of Uncompressed Pool in bytes */
+  u_longlong_t ame_deficit_size;        /* Deficit memory size in bytes */
+  u_longlong_t version;                 /* version number (1, 2, etc.) */
+  u_longlong_t cmcs_total_time;         /* Total CPU time spent due to active memory expansion */
+} perfstat_partition_total_t_71;
+
+typedef struct { /* partition total information AIX 7.1 >= TL1*/
+        char name[IDENTIFIER_LENGTH];         /* name of the logical partition */
+        perfstat_partition_type_t type;       /* set of bits describing the partition */
+        int lpar_id;                          /* logical partition identifier */
+        int group_id;                         /* identifier of the LPAR group this partition is a member of */
+        int pool_id;                          /* identifier of the shared pool of physical processors this partition is a member of */
+        int online_cpus;                      /* number of virtual CPUs currently online on the partition */
+        int max_cpus;                         /* maximum number of virtual CPUs this partition can ever have */
+        int min_cpus;                         /* minimum number of virtual CPUs this partition must have */
+        u_longlong_t online_memory;           /* amount of memory currently online */
+        u_longlong_t max_memory;              /* maximum amount of memory this partition can ever have */
+        u_longlong_t min_memory;              /* minimum amount of memory this partition must have */
+        int entitled_proc_capacity;           /* number of processor units this partition is entitled to receive */
+        int max_proc_capacity;                /* maximum number of processor units this partition can ever have */
+        int min_proc_capacity;                /* minimum number of processor units this partition must have */
+        int proc_capacity_increment;          /* increment value to the entitled capacity */
+        int unalloc_proc_capacity;            /* number of processor units currently unallocated in the shared processor pool this partition belongs to */
+        int var_proc_capacity_weight;         /* partition priority weight to receive extra capacity */
+        int unalloc_var_proc_capacity_weight; /* number of variable processor capacity weight units currently unallocated in the shared processor pool this partition belongs to */
+        int online_phys_cpus_sys;             /* number of physical CPUs currently active in the system containing this partition */
+        int max_phys_cpus_sys;                /* maximum possible number of physical CPUs in the system containing this partition */
+        int phys_cpus_pool;                   /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+        u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+        u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+        u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+        u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+        u_longlong_t pool_idle_time;          /* number of clock tics a processor in the shared pool was idle */
+        u_longlong_t phantintrs;              /* number of phantom interrupts received by the partition */
+        u_longlong_t invol_virt_cswitch;      /* number of involuntary virtual CPU context switches */
+        u_longlong_t vol_virt_cswitch;        /* number of voluntary virtual CPU context switches */
+        u_longlong_t timebase_last;           /* most recent cpu time base */
+        u_longlong_t reserved_pages;          /* Current number of 16GB pages. Cannot participate in DR operations */
+        u_longlong_t reserved_pagesize;       /* Currently 16GB pagesize. Cannot participate in DR operations */
+        u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+        u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+        u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+        u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+        u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+        u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+        u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+        u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+        u_longlong_t shcpus_in_sys;           /* Number of physical processors allocated for shared processor use */
+        u_longlong_t max_pool_capacity;       /* Maximum processor capacity of partitions pool */
+        u_longlong_t entitled_pool_capacity;  /* Entitled processor capacity of partitions pool */
+        u_longlong_t pool_max_time;           /* Summation of maximum time that could be consumed by the pool (nanoseconds) */
+        u_longlong_t pool_busy_time;          /* Summation of busy (non-idle) time accumulated across all partitions in the pool (nanoseconds) */
+        u_longlong_t pool_scaled_busy_time;   /* Scaled summation of busy (non-idle) time accumulated across all partitions in the pool (nanoseconds) */
+        u_longlong_t shcpu_tot_time;          /* Summation of total time across all physical processors allocated for shared processor use (nanoseconds) */
+        u_longlong_t shcpu_busy_time;         /* Summation of busy (non-idle) time accumulated across all shared processor partitions (nanoseconds) */
+        u_longlong_t shcpu_scaled_busy_time;  /* Scaled summation of busy time accumulated across all shared processor partitions (nanoseconds) */
+        int ams_pool_id;                      /* AMS pool id of the pool the LPAR belongs to */
+        int var_mem_weight;                   /* variable memory capacity weight */
+        u_longlong_t iome;                    /* I/O memory entitlement of the partition in bytes */
+        u_longlong_t pmem;                    /* Physical memory currently backing the partition's logical memory in bytes */
+        u_longlong_t hpi;                     /* number of hypervisor page-ins */
+        u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds) */
+        u_longlong_t hypv_pagesize;           /* Hypervisor page size in KB */
+        uint online_lcpus;                    /* number of online logical cpus */
+        uint smt_thrds;                       /* number of hardware threads that are running */
+        u_longlong_t puser_spurr;             /* number of spurr cycles spent in user mode */
+        u_longlong_t psys_spurr;              /* number of spurr cycles spent in kernel mode */
+        u_longlong_t pidle_spurr;             /* number of spurr cycles spent in idle mode */
+        u_longlong_t pwait_spurr;             /* number of spurr cycles spent in wait mode */
+        int spurrflag;                        /* set if running in spurr mode */
+        char hardwareid[CEC_ID_LEN];          /* CEC Identifier */
+        uint power_save_mode;                 /* Power save mode for the LPAR. Introduced through LI 53K PRF : Feature 728 292*/
+        ushort ame_version;                   /* AME Version */
+        u_longlong_t true_memory;             /* True Memory Size in 4KB pages */
+        u_longlong_t expanded_memory;         /* Expanded Memory Size in 4KB pages */
+        u_longlong_t target_memexp_factr;     /* Target Memory Expansion Factor scaled by 100 */
+        u_longlong_t current_memexp_factr;    /* Current Memory Expansion Factor scaled by 100 */
+        u_longlong_t target_cpool_size;       /* Target Compressed Pool Size in bytes */
+        u_longlong_t max_cpool_size;          /* Max Size of Compressed Pool in bytes */
+        u_longlong_t min_ucpool_size;         /* Min Size of Uncompressed Pool in bytes */
+        u_longlong_t ame_deficit_size;        /* Deficit memory size in bytes */
+        u_longlong_t version;                 /* version number (1, 2, etc.) */
+        u_longlong_t cmcs_total_time;         /* Total CPU time spent due to active memory expansion */
+        u_longlong_t purr_coalescing;         /* PURR cycles consumed to coalesce data, if the calling partition is authorized to see pool-wide statistics; else zero */
+        u_longlong_t spurr_coalescing;        /* SPURR cycles consumed to coalesce data, if the calling partition is authorized to see pool-wide statistics; else zero */
+        u_longlong_t MemPoolSize;             /* memory pool size of the pool that the partition belongs to (in bytes). mpsz */
+        u_longlong_t IOMemEntInUse;           /* I/O memory entitlement of the LPAR in use in bytes. iomu */
+        u_longlong_t IOMemEntFree;            /* free I/O memory entitlement in bytes. iomf */
+        u_longlong_t IOHighWaterMark;         /* high water mark of I/O memory entitlement usage in bytes. iohwn */
+        u_longlong_t purr_counter;            /* number of purr cycles spent in user + kernel mode */
+        u_longlong_t spurr_counter;           /* number of spurr cycles spent in user + kernel mode */
+
+        /* Marketing Requirement (MR): MR1124083744  */
+        u_longlong_t real_free;               /* free real memory (in 4KB pages) */
+        u_longlong_t real_avail;              /* number of pages available for user application (memfree + numperm - minperm - minfree) */
+        /*      >>>>> END OF STRUCTURE DEFINITION <<<<<         */
+#define CURR_VERSION_PARTITION_TOTAL 5        /* Incremented by one for every new release     *
+                                               * of perfstat_partition_total_t data structure */
+} perfstat_partition_total_t_71_1;
+
+typedef union { /* WPAR Type & Flags */
+        uint    w;
+        struct {
+                unsigned app_wpar :1;        /* Application WPAR */
+                unsigned cpu_rset :1;        /* WPAR restricted to CPU resource set */
+                unsigned cpu_xrset:1;        /* WPAR restricted to CPU Exclusive resource set */
+                unsigned cpu_limits :1;      /* CPU resource limits enforced */
+                unsigned mem_limits :1;      /* Memory resource limits enforced */
+                unsigned spare :27;          /* reserved for future usage */
+        } b;
+} perfstat_wpar_type_t;
+
+typedef struct { /* Workload partition Information AIX 5.3 & 6.1 */
+       char name[MAXCORRALNAMELEN+1]; /* name of the Workload Partition */
+       perfstat_wpar_type_t type;     /* set of bits describing the wpar */
+       cid_t wpar_id;                 /* workload partition identifier */
+       uint  online_cpus;             /* Number of Virtual CPUs in partition rset or number of virtual CPUs currently online on the Global partition */
+       int   cpu_limit;               /* CPU limit in 100ths of % - 1..10000 */
+       int   mem_limit;               /* Memory limit in 100ths of % - 1..10000 */
+       u_longlong_t online_memory;    /* amount of memory currently online in Global Partition */
+       int entitled_proc_capacity;    /* number of processor units this partition is entitled to receive */
+} perfstat_wpar_total_t_61;
+
+typedef struct { /* Workload partition Information AIX 7.1 */
+       char name[MAXCORRALNAMELEN+1]; /* name of the Workload Partition */
+       perfstat_wpar_type_t type;     /* set of bits describing the wpar */
+       cid_t wpar_id;                 /* workload partition identifier */
+       uint  online_cpus;             /* Number of Virtual CPUs in partition rset or number of virtual CPUs currently online on the Global partition */
+       int   cpu_limit;               /* CPU limit in 100ths of % - 1..10000 */
+       int   mem_limit;               /* Memory limit in 100ths of % - 1..10000 */
+       u_longlong_t online_memory;    /* amount of memory currently online in Global Partition */
+       int entitled_proc_capacity;    /* number of processor units this partition is entitled to receive */
+       u_longlong_t version;          /* version number (1, 2, etc.)                   */
+/*      >>>>> END OF STRUCTURE DEFINITION <<<<<         */
+#define CURR_VERSION_WPAR_TOTAL 1     /* Incremented by one for every new release      *
+                                       * of perfstat_wpar_total_t data structure       */
+} perfstat_wpar_total_t_71;
+
+typedef void * rsethandle_t;  /* Type to identify a resource set handle: rsethandle_t */
+
+typedef enum { WPARNAME, WPARID, RSETHANDLE } wparid_specifier; /* Type of wparid_specifier */
+
+typedef struct { /* WPAR identifier */
+        wparid_specifier spec;  /* Specifier to choose wpar id or name */
+        union  {
+                cid_t wpar_id;                      /* WPAR ID */
+                rsethandle_t rset;                  /* Rset Handle */
+                char wparname[MAXCORRALNAMELEN+1];  /* WPAR NAME */
+        } u;
+        char name[IDENTIFIER_LENGTH]; /* name of the structure element identifier */
+} perfstat_id_wpar_t;
+
+
+
+// end: libperfstat.h (AIX 5.2, 5.3, 6.1, 7.1)
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define PERFSTAT_PARTITON_TOTAL_T_LATEST perfstat_partition_total_t_71_1 /* latest perfstat_partition_total_t structure */
+#define PERFSTAT_CPU_TOTAL_T_LATEST perfstat_cpu_total_t_71             /* latest perfstat_cpu_total_t structure */
+#define PERFSTAT_WPAR_TOTAL_T_LATEST perfstat_wpar_total_t_71           /* latest perfstat_wpar_total_t structure */
 
 class libperfstat {
 
@@ -41,19 +766,107 @@
   // Load the libperfstat library (must be in LIBPATH).
   // Returns true if succeeded, false if error.
   static bool init();
-
-  // cleanup of the libo4 porting library.
   static void cleanup();
 
-  // direct wrappers for the libperfstat functionality. All they do is
+  // Direct wrappers for the libperfstat functionality. All they do is
   // to call the functions with the same name via function pointers.
-  static int perfstat_cpu_total(perfstat_id_t *name, perfstat_cpu_total_t* userbuff,
+  // The latest structure versions are used so that all data available on newer AIX versions is returned (PERFSTAT_CPU_TOTAL_T_LATEST).
+  static int perfstat_cpu_total(perfstat_id_t *name, PERFSTAT_CPU_TOTAL_T_LATEST* userbuff,
                                 int sizeof_userbuff, int desired_number);
 
   static int perfstat_memory_total(perfstat_id_t *name, perfstat_memory_total_t* userbuff,
                                    int sizeof_userbuff, int desired_number);
 
+  static int perfstat_partition_total(perfstat_id_t *name, PERFSTAT_PARTITON_TOTAL_T_LATEST* userbuff,
+                                      int sizeof_userbuff, int desired_number);
+
   static void perfstat_reset();
+
+  static int perfstat_wpar_total(perfstat_id_wpar_t *name, PERFSTAT_WPAR_TOTAL_T_LATEST* userbuff,
+                                 int sizeof_userbuff, int desired_number);
+
+  static cid_t wpar_getcid();
+
+
+  ////////////////////////////////////////////////////////////////
+  // The convenience functions get_partitioninfo(), get_cpuinfo(), get_wparinfo() return
+  // information about partition, cpu and wpars, respectively. They can be used without
+  // regard for which OS release we are on. On older AIX releases, some output structure
+  // members will be 0.
+
+  // Result struct for get_partitioninfo().
+  struct partitioninfo_t {
+    // partition type info
+    unsigned smt_capable :1;          /* OS supports SMT mode */
+    unsigned smt_enabled :1;          /* SMT mode is on */
+    unsigned lpar_capable :1;         /* OS supports logical partitioning */
+    unsigned lpar_enabled :1;         /* logical partitioning is on */
+    unsigned shared_capable :1;       /* OS supports shared processor LPAR */
+    unsigned shared_enabled :1;       /* partition runs in shared mode */
+    unsigned dlpar_capable :1;        /* OS supports dynamic LPAR */
+    unsigned capped :1;               /* partition is capped */
+    unsigned kernel_is_64 :1;         /* kernel is 64 bit */
+    unsigned pool_util_authority :1;  /* pool utilization available */
+    unsigned donate_capable :1;       /* capable of donating cycles */
+    unsigned donate_enabled :1;       /* enabled for donating cycles */
+    unsigned ams_capable:1;           /* 1 = AMS (Active Memory Sharing) capable, 0 = Not AMS capable */
+    unsigned ams_enabled:1;           /* 1 = AMS (Active Memory Sharing) enabled, 0 = Not AMS enabled */
+    unsigned power_save:1;            /* 1 = Power saving mode is enabled */
+    unsigned ame_enabled:1;           /* Active Memory Expansion is enabled */
+    // partition total info
+    int online_cpus;                  /* number of virtual CPUs currently online on the partition */
+    int entitled_proc_capacity;       /* number of processor units this partition is entitled to receive */
+    int var_proc_capacity_weight;     /* partition priority weight to receive extra capacity */
+    int phys_cpus_pool;               /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+    int pool_id;                      /* identifier of the shared pool of physical processors this partition is a member of */
+    u_longlong_t entitled_pool_capacity;  /* Entitled processor capacity of partitions pool */
+    char name[IDENTIFIER_LENGTH];     /* name of the logical partition */
+
+    u_longlong_t timebase_last;       /* most recent cpu time base (an incremented long int on PowerPC) */
+    u_longlong_t pool_idle_time;      /* pool idle time = number of clock tics a processor in the shared pool was idle */
+    u_longlong_t pcpu_tics_user;      /* raw number of physical processor tics in user mode */
+    u_longlong_t pcpu_tics_sys;       /* raw number of physical processor tics in system mode */
+    u_longlong_t pcpu_tics_idle;      /* raw number of physical processor tics idle */
+    u_longlong_t pcpu_tics_wait;      /* raw number of physical processor tics waiting for I/O */
+
+    u_longlong_t true_memory;          /* True Memory Size in 4KB pages */
+    u_longlong_t expanded_memory;      /* Expanded Memory Size in 4KB pages */
+    u_longlong_t target_memexp_factr;  /* Target Memory Expansion Factor scaled by 100 */
+    u_longlong_t current_memexp_factr; /* Current Memory Expansion Factor scaled by 100 */
+    u_longlong_t cmcs_total_time;      /* Total CPU time spent due to active memory expansion */
+  };
+
+  // Result struct for get_cpuinfo().
+  struct cpuinfo_t {
+    char description[IDENTIFIER_LENGTH];  // processor description (type/official name)
+    u_longlong_t processorHZ;             // processor speed in Hz
+    int ncpus;                            // number of active logical processors
+    double loadavg[3];                    // (1<<SBITS) times the average number of runnable processes during the last 1, 5 and 15 minutes.
+                                          // To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>.
+    char version[20];                     // processor version from _system_configuration (sys/systemcfg.h)
+    unsigned long long user_clock_ticks;  // raw total number of clock ticks spent in user mode
+    unsigned long long sys_clock_ticks;   // raw total number of clock ticks spent in system mode
+    unsigned long long idle_clock_ticks;  // raw total number of clock ticks spent idle
+    unsigned long long wait_clock_ticks;  // raw total number of clock ticks spent waiting for I/O
+  };
+
+  // Result struct for get_wparinfo().
+  struct wparinfo_t {
+    char name[MAXCORRALNAMELEN+1];  /* name of the Workload Partition */
+    unsigned short wpar_id;         /* workload partition identifier */
+    unsigned app_wpar :1;           /* Application WPAR */
+    unsigned cpu_rset :1;           /* WPAR restricted to CPU resource set */
+    unsigned cpu_xrset:1;           /* WPAR restricted to CPU Exclusive resource set */
+    unsigned cpu_limits :1;         /* CPU resource limits enforced */
+    unsigned mem_limits :1;         /* Memory resource limits enforced */
+    int cpu_limit;                  /* CPU limit in 100ths of % - 1..10000 */
+    int mem_limit;                  /* Memory limit in 100ths of % - 1..10000 */
+  };
+
+  static bool get_partitioninfo(partitioninfo_t* ppi);
+  static bool get_cpuinfo(cpuinfo_t* pci);
+  static bool get_wparinfo(wparinfo_t* pwi);
+
 };
 
 #endif // OS_AIX_VM_LIBPERFSTAT_AIX_HPP
--- a/hotspot/src/os/aix/vm/loadlib_aix.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/loadlib_aix.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,6 @@
 #endif
 
 #include "loadlib_aix.hpp"
-// for CritSect
 #include "misc_aix.hpp"
 #include "porting_aix.hpp"
 #include "utilities/debug.hpp"
--- a/hotspot/src/os/aix/vm/misc_aix.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/misc_aix.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -26,6 +26,8 @@
 #include "runtime/stubRoutines.hpp"
 
 #include <pthread.h>
+#include <unistd.h>
+#include <errno.h>
 
 void MiscUtils::init_critsect(MiscUtils::critsect_t* cs) {
   const int rc = pthread_mutex_init(cs, NULL);
--- a/hotspot/src/os/aix/vm/misc_aix.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/misc_aix.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -29,6 +29,8 @@
 // misc_aix.hpp, misc_aix.cpp: convenience functions needed for the OpenJDK AIX
 // port.
 #include "utilities/globalDefinitions.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/debug.hpp"
 
 #include <pthread.h>
 
@@ -40,7 +42,6 @@
   } \
 }
 #define ERRBYE(s) { trcVerbose(s); return -1; }
-#define trc(fmt, ...)
 
 #define assert0(b) assert((b), "")
 #define guarantee0(b) guarantee((b), "")
--- a/hotspot/src/os/aix/vm/osThread_aix.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/osThread_aix.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -35,7 +35,7 @@
 void OSThread::pd_initialize() {
   assert(this != NULL, "check");
   _thread_id        = 0;
-  _pthread_id       = 0;
+  _kernel_thread_id = 0;
   _siginfo = NULL;
   _ucontext = NULL;
   _expanding_stack = 0;
--- a/hotspot/src/os/aix/vm/osThread_aix.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/osThread_aix.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -27,7 +27,7 @@
 #define OS_AIX_VM_OSTHREAD_AIX_HPP
 
  public:
-  typedef pid_t thread_id_t;
+  typedef pthread_t thread_id_t;
 
  private:
   int _thread_type;
@@ -43,9 +43,13 @@
 
  private:
 
-  // _pthread_id is the pthread id, which is used by library calls
-  // (e.g. pthread_kill).
-  pthread_t _pthread_id;
+  // On AIX, we use the pthread id as OSThread::thread_id and keep the kernel thread id
+  // separately for diagnostic purposes.
+  //
+  // Note: this kernel thread id is saved at thread start. Depending on the
+  // AIX scheduling mode, this may not be the current thread id (usually not
+  // a problem though as we run with AIXTHREAD_SCOPE=S).
+  tid_t _kernel_thread_id;
 
   sigset_t _caller_sigmask; // Caller's signal mask
 
@@ -66,11 +70,16 @@
     return false;
   }
 #endif // ASSERT
-  pthread_t pthread_id() const {
-    return _pthread_id;
+  tid_t kernel_thread_id() const {
+    return _kernel_thread_id;
   }
-  void set_pthread_id(pthread_t tid) {
-    _pthread_id = tid;
+  void set_kernel_thread_id(tid_t tid) {
+    _kernel_thread_id = tid;
+  }
+
+  pthread_t pthread_id() const {
+    // Here: same as OSThread::thread_id()
+    return _thread_id;
   }
 
   // ***************************************************************
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -36,6 +36,7 @@
 #include "compiler/compileBroker.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_aix.h"
+#include "libo4.hpp"
 #include "libperfstat_aix.hpp"
 #include "loadlib_aix.hpp"
 #include "memory/allocation.inline.hpp"
@@ -108,25 +109,14 @@
 #include <sys/vminfo.h>
 #include <sys/wait.h>
 
-// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
-// getrusage() is prepared to handle the associated failure.
-#ifndef RUSAGE_THREAD
-#define RUSAGE_THREAD   (1)               /* only the calling thread */
-#endif
-
-// PPC port
-static const uintx Use64KPagesThreshold       = 1*M;
-static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;
-
-// Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
+// Missing prototypes for various system APIs.
+extern "C"
+int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
+
 #if !defined(_AIXVERSION_610)
-extern "C" {
-  int getthrds64(pid_t ProcessIdentifier,
-                 struct thrdentry64* ThreadBuffer,
-                 int ThreadSize,
-                 tid64_t* IndexPointer,
-                 int Count);
-}
+extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
+extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
+extern "C" int getargs   (procsinfo*, int, char*, int);
 #endif
 
 #define MAX_PATH (2 * K)
@@ -150,18 +140,9 @@
 typedef unsigned long stackslot_t;
 typedef stackslot_t* stackptr_t;
 
-// Excerpts from systemcfg.h definitions newer than AIX 5.3.
-#ifndef PV_7
-#define PV_7 0x200000          /* Power PC 7 */
-#define PV_7_Compat 0x208000   /* Power PC 7 */
-#endif
-#ifndef PV_8
-#define PV_8 0x300000          /* Power PC 8 */
-#define PV_8_Compat 0x308000   /* Power PC 8 */
-#endif
-
 // Query dimensions of the stack of the calling thread.
 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
+static address resolve_function_descriptor_to_code_pointer(address p);
 
 // Function to check a given stack pointer against given stack limits.
 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
@@ -185,7 +166,7 @@
   if (((uintptr_t)p) & 0x3) {
     return false;
   }
-  if (!LoadedLibraries::find_for_text_address(p, NULL)) {
+  if (LoadedLibraries::find_for_text_address(p, NULL) == NULL) {
     return false;
   }
   return true;
@@ -203,31 +184,44 @@
   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 }
 
+static void vmembk_print_on(outputStream* os);
+
 ////////////////////////////////////////////////////////////////////////////////
 // global variables (for a description see os_aix.hpp)
 
 julong    os::Aix::_physical_memory = 0;
+
 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 int       os::Aix::_page_size = -1;
+
+// -1 = uninitialized, 0 if AIX, 1 if OS/400 PASE
 int       os::Aix::_on_pase = -1;
+
+// -1 = uninitialized, otherwise os version in the form 0xMMmm - MM:major, mm:minor
+//  E.g. 0x0601 for AIX 6.1 or 0x0504 for OS/400 V5R4
 int       os::Aix::_os_version = -1;
+
 int       os::Aix::_stack_page_size = -1;
+
+// -1 = uninitialized, 0 - no, 1 - yes
 int       os::Aix::_xpg_sus_mode = -1;
+
+// -1 = uninitialized, 0 - no, 1 - yes
 int       os::Aix::_extshm = -1;
-int       os::Aix::_logical_cpus = -1;
 
 ////////////////////////////////////////////////////////////////////////////////
 // local variables
 
-static int      g_multipage_error  = -1;   // error analysis for multipage initialization
 static jlong    initial_time_count = 0;
 static int      clock_tics_per_sec = 100;
 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 static bool     check_signals      = true;
-static pid_t    _initial_pid       = 0;
 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 static sigset_t SR_sigset;
 
+// Process break recorded at startup.
+static address g_brk_at_startup = NULL;
+
 // This describes the state of multipage support of the underlying
 // OS. Note that this is of no interest to the outside world and
 // therefore should not be defined in AIX class.
@@ -278,8 +272,9 @@
 // a specific wish address, e.g. to place the heap in a
 // compressed-oops-friendly way.
 static bool is_close_to_brk(address a) {
-  address a1 = (address) sbrk(0);
-  if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
+  assert0(g_brk_at_startup != NULL);
+  if (a >= g_brk_at_startup &&
+      a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
     return true;
   }
   return false;
@@ -290,11 +285,15 @@
 }
 
 julong os::Aix::available_memory() {
+  // Avoid expensive API call here, as returned value will always be null.
+  if (os::Aix::on_pase()) {
+    return 0x0LL;
+  }
   os::Aix::meminfo_t mi;
   if (os::Aix::get_meminfo(&mi)) {
     return mi.real_free;
   } else {
-    return 0xFFFFFFFFFFFFFFFFLL;
+    return ULONG_MAX;
   }
 }
 
@@ -332,7 +331,7 @@
 
   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
-      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
+      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
       return false;
     }
     p += maxDisclaimSize;
@@ -340,7 +339,7 @@
 
   if (lastDisclaimSize > 0) {
     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
-      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
+      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
       return false;
     }
   }
@@ -357,25 +356,30 @@
 #error Add appropriate cpu_arch setting
 #endif
 
+// Wrap the function "vmgetinfo" which is not available on older OS releases.
+static int checked_vmgetinfo(void *out, int command, int arg) {
+  if (os::Aix::on_pase() && os::Aix::os_version() < 0x0601) {
+    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
+  }
+  return ::vmgetinfo(out, command, arg);
+}
 
 // Given an address, returns the size of the page backing that address.
 size_t os::Aix::query_pagesize(void* addr) {
 
+  if (os::Aix::on_pase() && os::Aix::os_version() < 0x0601) {
+    // AS/400 older than V6R1: no vmgetinfo here, default to 4K
+    return SIZE_4K;
+  }
+
   vm_page_info pi;
   pi.addr = (uint64_t)addr;
-  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
+  if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
     return pi.pagesize;
   } else {
-    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
     assert(false, "vmgetinfo failed to retrieve page size");
     return SIZE_4K;
   }
-
-}
-
-// Returns the kernel thread id of the currently running thread.
-pid_t os::Aix::gettid() {
-  return (pid_t) thread_self();
 }
 
 void os::Aix::initialize_system_info() {
@@ -387,7 +391,6 @@
   // Retrieve total physical storage.
   os::Aix::meminfo_t mi;
   if (!os::Aix::get_meminfo(&mi)) {
-    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
     assert(false, "os::Aix::get_meminfo failed.");
   }
   _physical_memory = (julong) mi.real_total;
@@ -400,7 +403,6 @@
     case SIZE_64K: return "64K";
     case SIZE_16M: return "16M";
     case SIZE_16G: return "16G";
-    case -1:       return "not set";
     default:
       assert(false, "surprise");
       return "??";
@@ -431,6 +433,8 @@
   }
 
   // Query default shm page size (LDR_CNTRL SHMPSIZE).
+  // Note that this is pure curiosity. We do not rely on default page size but set
+  // our own page size after allocation.
   {
     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
     guarantee(shmid != -1, "shmget failed");
@@ -447,26 +451,26 @@
   // number of reasons so we may just as well guarantee it here.
   guarantee0(!os::Aix::is_primordial_thread());
 
-  // Query pthread stack page size.
+  // Query pthread stack page size. Should be the same as data page size because
+  // pthread stacks are allocated from C-Heap.
   {
     int dummy = 0;
     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
   }
 
   // Query default text page size (LDR_CNTRL TEXTPSIZE).
-  /* PPC port: so far unused.
   {
     address any_function =
-      (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
+      resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
   }
-  */
 
   // Now probe for support of 64K pages and 16M pages.
 
   // Before OS/400 V6R1, there is no support for pages other than 4K.
   if (os::Aix::on_pase_V5R4_or_older()) {
-    Unimplemented();
+    trcVerbose("OS/400 < V6R1 - no large page support.");
+    g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
     goto query_multipage_support_end;
   }
 
@@ -474,10 +478,10 @@
   {
     const int MAX_PAGE_SIZES = 4;
     psize_t sizes[MAX_PAGE_SIZES];
-    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
+    const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
     if (num_psizes == -1) {
-      trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
-      trc("disabling multipage support.\n");
+      trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
+      trcVerbose("disabling multipage support.");
       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
       goto query_multipage_support_end;
     }
@@ -505,8 +509,8 @@
       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
         const int en = errno;
         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
-        // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
-        // PPC port  MiscUtils::describe_errno(en));
+        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
+          errno);
       } else {
         // Attach and double check pagesize.
         void* p = ::shmat(shmid, NULL, 0);
@@ -532,35 +536,35 @@
 
 query_multipage_support_end:
 
-  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
+  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
       describe_pagesize(g_multipage_support.pagesize));
-  trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
+  trcVerbose("Data page size (C-Heap, bss, etc): %s",
       describe_pagesize(g_multipage_support.datapsize));
-  trcVerbose("Text page size: %s\n",
+  trcVerbose("Text page size: %s",
       describe_pagesize(g_multipage_support.textpsize));
-  trcVerbose("Thread stack page size (pthread): %s\n",
+  trcVerbose("Thread stack page size (pthread): %s",
       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
-  trcVerbose("Default shared memory page size: %s\n",
+  trcVerbose("Default shared memory page size: %s",
       describe_pagesize(g_multipage_support.shmpsize));
-  trcVerbose("Can use 64K pages dynamically with shared meory: %s\n",
+  trcVerbose("Can use 64K pages dynamically with shared meory: %s",
       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
-  trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
+  trcVerbose("Can use 16M pages dynamically with shared memory: %s",
       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
-  trcVerbose("Multipage error details: %d\n",
+  trcVerbose("Multipage error details: %d",
       g_multipage_support.error);
 
   // sanity checks
   assert0(g_multipage_support.pagesize == SIZE_4K);
   assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
-  // PPC port: so far unused.assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
+  assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
   assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
 
-} // end os::Aix::query_multipage_support()
+}
 
 void os::init_system_properties_values() {
 
-#define DEFAULT_LIBPATH "/usr/lib:/lib"
+#define DEFAULT_LIBPATH "/lib:/usr/lib"
 #define EXTENSIONS_DIR  "/lib/ext"
 
   // Buffer that fits several sprintfs.
@@ -578,7 +582,10 @@
 
     // Found the full path to libjvm.so.
     // Now cut the path to <java_home>/jre if we can.
-    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
+    pslash = strrchr(buf, '/');
+    if (pslash != NULL) {
+      *pslash = '\0';            // Get rid of /libjvm.so.
+    }
     pslash = strrchr(buf, '/');
     if (pslash != NULL) {
       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
@@ -753,8 +760,21 @@
   memset(pmi, 0, sizeof(meminfo_t));
 
   if (os::Aix::on_pase()) {
-
-    Unimplemented();
+    // On PASE, use the libo4 porting library.
+
+    unsigned long long virt_total = 0;
+    unsigned long long real_total = 0;
+    unsigned long long real_free = 0;
+    unsigned long long pgsp_total = 0;
+    unsigned long long pgsp_free = 0;
+    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
+      pmi->virt_total = virt_total;
+      pmi->real_total = real_total;
+      pmi->real_free = real_free;
+      pmi->pgsp_total = pgsp_total;
+      pmi->pgsp_free = pgsp_free;
+      return true;
+    }
     return false;
 
   } else {
@@ -770,7 +790,7 @@
     memset (&psmt, '\0', sizeof(psmt));
     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
     if (rc == -1) {
-      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
+      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
       assert(0, "perfstat_memory_total() failed");
       return false;
     }
@@ -798,81 +818,6 @@
   }
 } // end os::Aix::get_meminfo
 
-// Retrieve global cpu information.
-// Returns false if something went wrong;
-// the content of pci is undefined in this case.
-bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
-  assert(pci, "get_cpuinfo: invalid parameter");
-  memset(pci, 0, sizeof(cpuinfo_t));
-
-  perfstat_cpu_total_t psct;
-  memset (&psct, '\0', sizeof(psct));
-
-  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
-    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
-    assert(0, "perfstat_cpu_total() failed");
-    return false;
-  }
-
-  // global cpu information
-  strcpy (pci->description, psct.description);
-  pci->processorHZ = psct.processorHZ;
-  pci->ncpus = psct.ncpus;
-  os::Aix::_logical_cpus = psct.ncpus;
-  for (int i = 0; i < 3; i++) {
-    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
-  }
-
-  // get the processor version from _system_configuration
-  switch (_system_configuration.version) {
-  case PV_8:
-    strcpy(pci->version, "Power PC 8");
-    break;
-  case PV_7:
-    strcpy(pci->version, "Power PC 7");
-    break;
-  case PV_6_1:
-    strcpy(pci->version, "Power PC 6 DD1.x");
-    break;
-  case PV_6:
-    strcpy(pci->version, "Power PC 6");
-    break;
-  case PV_5:
-    strcpy(pci->version, "Power PC 5");
-    break;
-  case PV_5_2:
-    strcpy(pci->version, "Power PC 5_2");
-    break;
-  case PV_5_3:
-    strcpy(pci->version, "Power PC 5_3");
-    break;
-  case PV_5_Compat:
-    strcpy(pci->version, "PV_5_Compat");
-    break;
-  case PV_6_Compat:
-    strcpy(pci->version, "PV_6_Compat");
-    break;
-  case PV_7_Compat:
-    strcpy(pci->version, "PV_7_Compat");
-    break;
-  case PV_8_Compat:
-    strcpy(pci->version, "PV_8_Compat");
-    break;
-  default:
-    strcpy(pci->version, "unknown");
-  }
-
-  return true;
-
-} //end os::Aix::get_cpuinfo
-
-//////////////////////////////////////////////////////////////////////////////
-// detecting pthread library
-
-void os::Aix::libpthread_init() {
-  return;
-}
-
 //////////////////////////////////////////////////////////////////////////////
 // create new thread
 
@@ -889,6 +834,26 @@
     thread->set_stack_size(size);
   }
 
+  const pthread_t pthread_id = ::pthread_self();
+  const tid_t kernel_thread_id = ::thread_self();
+
+  trcVerbose("newborn Thread : pthread-id %u, ktid " UINT64_FORMAT
+    ", stack %p ... %p, stacksize 0x%IX (%IB)",
+    pthread_id, kernel_thread_id,
+    thread->stack_base() - thread->stack_size(),
+    thread->stack_base(),
+    thread->stack_size(),
+    thread->stack_size());
+
+  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
+  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
+  // tools hook pthread_create(). In this case, we may run into problems establishing
+  // guard pages on those stacks, because the stacks may reside in memory which is not
+  // protectable (shmated).
+  if (thread->stack_base() > ::sbrk(0)) {
+    trcVerbose("Thread " UINT64_FORMAT ": stack not in data segment.", (uint64_t) pthread_id);
+  }
+
   // Do some sanity checks.
   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
 
@@ -902,32 +867,35 @@
   int pid = os::current_process_id();
   alloca(((pid ^ counter++) & 7) * 128);
 
-  ThreadLocalStorage::set_thread(thread);
+  thread->initialize_thread_current();
 
   OSThread* osthread = thread->osthread();
 
-  // thread_id is kernel thread id (similar to Solaris LWP id)
-  osthread->set_thread_id(os::Aix::gettid());
-
-  // initialize signal mask for this thread
+  // Thread_id is pthread id.
+  osthread->set_thread_id(pthread_id);
+
+  // ... but keep the kernel thread id too, for diagnostics.
+  osthread->set_kernel_thread_id(kernel_thread_id);
+
+  // Initialize signal mask for this thread.
   os::Aix::hotspot_sigmask(thread);
 
-  // initialize floating point control register
+  // Initialize floating point control register.
   os::Aix::init_thread_fpu_state();
 
   assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
 
-  // call one more level start routine
+  // Call one more level start routine.
   thread->run();
 
+  trcVerbose("Thread finished : pthread-id %u, ktid " UINT64_FORMAT ".",
+    pthread_id, kernel_thread_id);
+
   return 0;
 }
 
 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 
-  // We want the whole function to be synchronized.
-  ThreadCritical cs;
-
   assert(thread->osthread() == NULL, "caller responsible");
 
   // Allocate the OSThread object
@@ -992,8 +960,14 @@
   pthread_attr_destroy(&attr);
 
   if (ret == 0) {
-    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
+    trcVerbose("Created New Thread : pthread-id %u", tid);
   } else {
+    if (os::Aix::on_pase()) {
+      // QIBM_MULTI_THREADED=Y is needed when the launcher is started on iSeries
+      // using QSH. Otherwise pthread_create fails with errno=11.
+      trcVerbose("(Please make sure you set the environment variable "
+              "QIBM_MULTI_THREADED=Y before running this program.)");
+    }
     if (PrintMiscellaneous && (Verbose || WizardMode)) {
       perror("pthread_create()");
     }
@@ -1003,8 +977,8 @@
     return false;
   }
 
-  // Store pthread info into the OSThread
-  osthread->set_pthread_id(tid);
+  // OSThread::thread_id is the pthread id.
+  osthread->set_thread_id(tid);
 
   return true;
 }
@@ -1030,9 +1004,21 @@
     return false;
   }
 
-  // Store pthread info into the OSThread
-  osthread->set_thread_id(os::Aix::gettid());
-  osthread->set_pthread_id(::pthread_self());
+  const pthread_t pthread_id = ::pthread_self();
+  const tid_t kernel_thread_id = ::thread_self();
+
+  trcVerbose("attaching Thread : pthread-id %u, ktid " UINT64_FORMAT ", stack %p ... %p, stacksize 0x%IX (%IB)",
+    pthread_id, kernel_thread_id,
+    thread->stack_base() - thread->stack_size(),
+    thread->stack_base(),
+    thread->stack_size(),
+    thread->stack_size());
+
+  // OSThread::thread_id is the pthread id.
+  osthread->set_thread_id(pthread_id);
+
+  // ... but keep the kernel thread id too, for diagnostics.
+  osthread->set_kernel_thread_id(kernel_thread_id);
 
   // initialize floating point control register
   os::Aix::init_thread_fpu_state();
@@ -1077,32 +1063,6 @@
   delete osthread;
 }
 
-//////////////////////////////////////////////////////////////////////////////
-// thread local storage
-
-int os::allocate_thread_local_storage() {
-  pthread_key_t key;
-  int rslt = pthread_key_create(&key, NULL);
-  assert(rslt == 0, "cannot allocate thread local storage");
-  return (int)key;
-}
-
-// Note: This is currently not used by VM, as we don't destroy TLS key
-// on VM exit.
-void os::free_thread_local_storage(int index) {
-  int rslt = pthread_key_delete((pthread_key_t)index);
-  assert(rslt == 0, "invalid index");
-}
-
-void os::thread_local_storage_at_put(int index, void* value) {
-  int rslt = pthread_setspecific((pthread_key_t)index, value);
-  assert(rslt == 0, "pthread_setspecific failed");
-}
-
-extern "C" Thread* get_thread() {
-  return ThreadLocalStorage::thread();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // time support
 
@@ -1152,17 +1112,15 @@
   nanos = jlong(time.tv_usec) * 1000;
 }
 
-
-// We need to manually declare mread_real_time,
-// because IBM didn't provide a prototype in time.h.
-// (they probably only ever tested in C, not C++)
-extern "C"
-int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
-
 jlong os::javaTimeNanos() {
   if (os::Aix::on_pase()) {
-    Unimplemented();
-    return 0;
+
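+    // PASE: use gettimeofday(), which limits us to microsecond resolution.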
+    timeval time;
+    int status = gettimeofday(&time, NULL);
+    assert(status != -1, "PASE error at gettimeofday()");
+    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
+    return 1000 * usecs;
+
   } else {
     // On AIX use the precision of processors real time clock
     // or time base registers.
@@ -1291,22 +1249,12 @@
   return n;
 }
 
-intx os::current_thread_id() { return (intx)pthread_self(); }
+intx os::current_thread_id() {
+  return (intx)pthread_self();
+}
 
 int os::current_process_id() {
-
-  // This implementation returns a unique pid, the pid of the
-  // launcher thread that starts the vm 'process'.
-
-  // Under POSIX, getpid() returns the same pid as the
-  // launcher thread rather than a unique pid per thread.
-  // Use gettid() if you want the old pre NPTL behaviour.
-
-  // if you are looking for the result of a call to getpid() that
-  // returns a unique pid for the calling thread, then look at the
-  // OSThread::thread_id() method in osThread_linux.hpp file
-
-  return (int)(_initial_pid ? _initial_pid : getpid());
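+  // getpid() is constant for the lifetime of the process; no need to cache it.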
+  return getpid();
 }
 
 // DLL functions
@@ -1343,6 +1291,9 @@
   } else if (strchr(pname, *os::path_separator()) != NULL) {
     int n;
     char** pelements = split_path(pname, &n);
+    if (pelements == NULL) {
+      return false;
+    }
     for (int i = 0; i < n; i++) {
       // Really shouldn't be NULL, but check can't hurt
       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
@@ -1580,62 +1531,98 @@
   os::loadavg(loadavg, 3);
   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
   st->cr();
+
+  // print wpar info
+  libperfstat::wparinfo_t wi;
+  if (libperfstat::get_wparinfo(&wi)) {
+    st->print_cr("wpar info");
+    st->print_cr("name: %s", wi.name);
+    st->print_cr("id:   %d", wi.wpar_id);
+    st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
+  }
+
+  // print partition info
+  libperfstat::partitioninfo_t pi;
+  if (libperfstat::get_partitioninfo(&pi)) {
+    st->print_cr("partition info");
+    st->print_cr(" name: %s", pi.name);
+  }
+
 }
 
 void os::print_memory_info(outputStream* st) {
 
   st->print_cr("Memory:");
 
-  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
-  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
+  st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
+    describe_pagesize(g_multipage_support.pagesize));
+  st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
+    describe_pagesize(g_multipage_support.datapsize));
+  st->print_cr("  Text page size:                         %s",
+    describe_pagesize(g_multipage_support.textpsize));
+  st->print_cr("  Thread stack page size (pthread):       %s",
+    describe_pagesize(g_multipage_support.pthr_stack_pagesize));
   st->print_cr("  Default shared memory page size:        %s",
     describe_pagesize(g_multipage_support.shmpsize));
   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
-  if (g_multipage_error != 0) {
-    st->print_cr("  multipage error: %d", g_multipage_error);
-  }
+  st->print_cr("  Multipage error: %d",
+    g_multipage_support.error);
+  st->cr();
+  st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
+  // not used in OpenJDK st->print_cr("  os::stack_page_size:    %s", describe_pagesize(os::stack_page_size()));
 
   // print out LDR_CNTRL because it affects the default page sizes
   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
 
+  // Print out EXTSHM because it is an unsupported setting.
   const char* const extshm = ::getenv("EXTSHM");
   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
   }
 
-  // Call os::Aix::get_meminfo() to retrieve memory statistics.
+  // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
+  const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
+  st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
+      aixthread_guardpages ? aixthread_guardpages : "<unset>");
+
   os::Aix::meminfo_t mi;
   if (os::Aix::get_meminfo(&mi)) {
     char buffer[256];
     if (os::Aix::on_aix()) {
-      jio_snprintf(buffer, sizeof(buffer),
-                   "  physical total : %llu\n"
-                   "  physical free  : %llu\n"
-                   "  swap total     : %llu\n"
-                   "  swap free      : %llu\n",
-                   mi.real_total,
-                   mi.real_free,
-                   mi.pgsp_total,
-                   mi.pgsp_free);
+      st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
+      st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
+      st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
+      st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
     } else {
-      Unimplemented();
+      // PASE - Numbers are result of QWCRSSTS; they mean:
+      // real_total: Sum of all system pools
+      // real_free: always 0
+      // pgsp_total: we take the size of the system ASP
+      // pgsp_free: size of system ASP times percentage of system ASP unused
+      st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
+      st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
+      st->print_cr("%% system asp used : " SIZE_FORMAT,
+        mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
     }
     st->print_raw(buffer);
-  } else {
-    st->print_cr("  (no more information available)");
-  }
+  }
+  st->cr();
+
+  // Print segments allocated with os::reserve_memory.
+  st->print_cr("internal virtual memory regions used by vm:");
+  vmembk_print_on(st);
 }
 
 // Get a string for the cpuinfo that is a summary of the cpu type
 void os::get_summary_cpu_info(char* buf, size_t buflen) {
   // This looks good
-  os::Aix::cpuinfo_t ci;
-  if (os::Aix::get_cpuinfo(&ci)) {
+  libperfstat::cpuinfo_t ci;
+  if (libperfstat::get_cpuinfo(&ci)) {
     strncpy(buf, ci.version, buflen);
   } else {
     strncpy(buf, "AIX", buflen);
@@ -1643,10 +1630,15 @@
 }
 
 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
+  st->print("CPU:");
+  st->print("total %d", os::processor_count());
+  // It's not safe to query number of active processors after crash.
+  // st->print("(active %d)", os::active_processor_count());
+  st->print(" %s", VM_Version::cpu_features());
+  st->cr();
 }
 
 void os::print_siginfo(outputStream* st, void* siginfo) {
-  // Use common posix version.
   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
   st->cr();
 }
@@ -1785,21 +1777,75 @@
 // a counter for each possible signal value
 static volatile jint pending_signals[NSIG+1] = { 0 };
 
-// Linux(POSIX) specific hand shaking semaphore.
+// Wrapper functions for sem_init(), sem_post() and sem_wait().
+// On AIX, we use the Posix semaphore calls directly.
+// On PASE, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
+// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
+// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
+// on AIX, msem_..() calls are suspected of causing problems.
 static sem_t sig_sem;
+static msemaphore* p_sig_msem = 0;
+
+static void local_sem_init() {
+  if (os::Aix::on_aix()) {
+    int rc = ::sem_init(&sig_sem, 0, 0);
+    guarantee(rc != -1, "sem_init failed");
+  } else {
+    // Memory semaphores must live in shared mem.
+    guarantee0(p_sig_msem == NULL);
+    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
+    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
+    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
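+    // Note that msemaphores are binary, not counting: consecutive unlocks may
+    // coalesce. That should be acceptable here, because check_pending_signals()
+    // re-scans the pending_signals counters after every wakeup.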
+  }
+}
+
+static void local_sem_post() {
+  static bool warn_only_once = false;
+  if (os::Aix::on_aix()) {
+    int rc = ::sem_post(&sig_sem);
+    if (rc == -1 && !warn_only_once) {
+      trcVerbose("sem_post failed (errno = %d, %s)", errno, strerror(errno));
+      warn_only_once = true;
+    }
+  } else {
+    guarantee0(p_sig_msem != NULL);
+    int rc = ::msem_unlock(p_sig_msem, 0);
+    if (rc == -1 && !warn_only_once) {
+      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, strerror(errno));
+      warn_only_once = true;
+    }
+  }
+}
+
+static void local_sem_wait() {
+  static bool warn_only_once = false;
+  if (os::Aix::on_aix()) {
+    int rc = ::sem_wait(&sig_sem);
+    if (rc == -1 && !warn_only_once) {
+      trcVerbose("sem_wait failed (errno = %d, %s)", errno, strerror(errno));
+      warn_only_once = true;
+    }
+  } else {
+    guarantee0(p_sig_msem != NULL); // must init before use
+    int rc = ::msem_lock(p_sig_msem, 0);
+    if (rc == -1 && !warn_only_once) {
+      trcVerbose("msem_lock failed (errno = %d, %s)", errno, strerror(errno));
+      warn_only_once = true;
+    }
+  }
+}
 
 void os::signal_init_pd() {
   // Initialize signal structures
   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
 
   // Initialize signal semaphore
-  int rc = ::sem_init(&sig_sem, 0, 0);
-  guarantee(rc != -1, "sem_init failed");
+  local_sem_init();
 }
 
 void os::signal_notify(int sig) {
   Atomic::inc(&pending_signals[sig]);
-  ::sem_post(&sig_sem);
+  local_sem_post();
 }
 
 static int check_pending_signals(bool wait) {
@@ -1822,7 +1868,7 @@
       thread->set_suspend_equivalent();
       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
 
-      ::sem_wait(&sig_sem);
+      local_sem_wait();
 
       // were we externally suspended while we were waiting?
       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
@@ -1833,7 +1879,8 @@
         // while suspended because that would surprise the thread that
         // suspended us.
         //
-        ::sem_post(&sig_sem);
+
+        local_sem_post();
 
         thread->java_suspend_self();
       }
@@ -1884,14 +1931,14 @@
   // also check that range is fully page aligned to the page size if the block.
   void assert_is_valid_subrange(char* p, size_t s) const {
     if (!contains_range(p, s)) {
-      fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
-              "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
-              p, p + s - 1, addr, addr + size - 1);
+      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
+              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
+              p, p + s, addr, addr + size);
       guarantee0(false);
     }
     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
-      fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
-              " aligned to pagesize (%s)\n", p, p + s);
+      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
+              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
       guarantee0(false);
     }
   }
@@ -1988,7 +2035,7 @@
   // Reserve the shared segment.
   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
   if (shmid == -1) {
-    trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
+    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
     return NULL;
   }
 
@@ -2017,7 +2064,7 @@
 
   // (A) Right after shmat and before handing shmat errors delete the shm segment.
   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
-    trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
+    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
     assert(false, "failed to remove shared memory segment!");
   }
 
@@ -2082,6 +2129,8 @@
   return true;
 }
 
+////////////////////////////////  mmap-based routines /////////////////////////////////
+
 // Reserve memory via mmap.
 // If <requested_addr> is given, an attempt is made to attach at the given address.
 // Failing that, memory is allocated at any address.
@@ -2227,9 +2276,6 @@
   return rc;
 }
 
-// End: shared memory bookkeeping
-////////////////////////////////////////////////////////////////////////////////////////////////////
-
 int os::vm_page_size() {
   // Seems redundant as all get out.
   assert(os::Aix::page_size() != -1, "must call os::init");
@@ -2263,15 +2309,26 @@
 
 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
 
-  assert0(is_aligned_to(addr, os::vm_page_size()));
-  assert0(is_aligned_to(size, os::vm_page_size()));
+  assert(is_aligned_to(addr, os::vm_page_size()),
+    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
+    p2i(addr), os::vm_page_size());
+  assert(is_aligned_to(size, os::vm_page_size()),
+    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
+    size, os::vm_page_size());
 
   vmembk_t* const vmi = vmembk_find(addr);
-  assert0(vmi);
+  guarantee0(vmi);
   vmi->assert_is_valid_subrange(addr, size);
 
   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
 
+  if (UseExplicitCommit) {
+    // AIX commits memory on touch. So, touch all pages to be committed.
+    for (char* p = addr; p < (addr + size); p += SIZE_4K) {
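+      // Stepping in 4K strides touches every page for any supported page size.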
+      *p = '\0';
+    }
+  }
+
   return true;
 }
 
@@ -2287,12 +2344,16 @@
 }
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {
-  assert0(is_aligned_to(addr, os::vm_page_size()));
-  assert0(is_aligned_to(size, os::vm_page_size()));
+  assert(is_aligned_to(addr, os::vm_page_size()),
+    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
+    p2i(addr), os::vm_page_size());
+  assert(is_aligned_to(size, os::vm_page_size()),
+    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
+    size, os::vm_page_size());
 
   // Dynamically do different things for mmap/shmat.
   const vmembk_t* const vmi = vmembk_find(addr);
-  assert0(vmi);
+  guarantee0(vmi);
   vmi->assert_is_valid_subrange(addr, size);
 
   if (vmi->type == VMEM_SHMATED) {
@@ -2390,7 +2451,7 @@
 
   // Dynamically do different things for mmap/shmat.
   vmembk_t* const vmi = vmembk_find(addr);
-  assert0(vmi);
+  guarantee0(vmi);
 
   // Always round to os::vm_page_size(), which may be larger than 4K.
   size = align_size_up(size, os::vm_page_size());
@@ -2466,11 +2527,31 @@
       } else {
         rc = read_protected;
       }
+
+      if (!rc) {
+        if (os::Aix::on_pase()) {
+          // There is an issue on older PASE systems where mprotect() will return success but the
+          // memory will not be protected.
+          // This has nothing to do with the problem of using mprotect() on SPEC1170 incompatible
+          // machines; we only see it rarely, when using mprotect() to protect the guard page of
+          // a stack. It is an OS error.
+          //
+          // A valid strategy is just to try again. This usually works. :-/
+
+          ::usleep(1000);
+          if (::mprotect(addr, size, prot) == 0) {
+            const bool read_protected_2 =
+              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
+              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
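+            // Note: the result of this re-probe is currently unused; a
+            // successful mprotect() on retry is trusted.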
+            rc = true;
+          }
+        }
+      }
     }
   }
-  if (!rc) {
-    assert(false, "mprotect failed.");
-  }
+
+  assert(rc, "mprotect failed.");
+
   return rc;
 }
 
@@ -2507,10 +2588,11 @@
 }
 
 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
-  // "exec" is passed in but not used. Creating the shared image for
-  // the code cache doesn't have an SHM_X executable permission to check.
-  Unimplemented();
-  return 0;
+  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
+  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
+  // so this is not needed.
+  assert(false, "should not be called on AIX");
+  return NULL;
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
@@ -2962,7 +3044,9 @@
   // getting raised while being blocked.
   unblock_program_error_signals();
 
+  int orig_errno = errno;  // Preserve errno value over signal handler.
   JVM_handle_aix_signal(sig, info, uc, true);
+  errno = orig_errno;
 }
 
 // This boolean allows users to forward their own non-matching signals
@@ -3084,7 +3168,6 @@
   void* oldhand = oldAct.sa_sigaction
     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
-  // Renamed 'signalHandler' to avoid collision with other shared libs.
   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
@@ -3108,7 +3191,6 @@
     sigAct.sa_handler = SIG_DFL;
     sigAct.sa_flags = SA_RESTART;
   } else {
-    // Renamed 'signalHandler' to avoid collision with other shared libs.
     sigAct.sa_sigaction = javaSignalHandler;
     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
   }
@@ -3300,7 +3382,7 @@
   struct sigaction act;
   if (os_sigaction == NULL) {
     // only trust the default sigaction, in case it has been interposed
-    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
+    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
     if (os_sigaction == NULL) return;
   }
 
@@ -3317,7 +3399,6 @@
   case SIGPIPE:
   case SIGILL:
   case SIGXFSZ:
-    // Renamed 'signalHandler' to avoid collision with other shared libs.
     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
     break;
 
@@ -3362,20 +3443,6 @@
   }
 }
 
-extern bool signal_name(int signo, char* buf, size_t len);
-
-const char* os::exception_name(int exception_code, char* buf, size_t size) {
-  if (0 < exception_code && exception_code <= SIGRTMAX) {
-    // signal
-    if (!signal_name(exception_code, buf, size)) {
-      jio_snprintf(buf, size, "SIG%d", exception_code);
-    }
-    return buf;
-  } else {
-    return NULL;
-  }
-}
-
 // To install functions for atexit system call
 extern "C" {
   static void perfMemory_exit_helper() {
@@ -3389,6 +3456,10 @@
   // (Shared memory boundary is supposed to be a 256M aligned.)
   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
 
+  // Record process break at startup.
+  g_brk_at_startup = (address) ::sbrk(0);
+  assert(g_brk_at_startup != (address) -1, "sbrk failed");
+
   // First off, we need to know whether we run on AIX or PASE, and
   // the OS level we run on.
   os::Aix::initialize_os_info();
@@ -3396,7 +3467,7 @@
   // Scan environment (SPEC1170 behaviour, etc).
   os::Aix::scan_environment();
 
-  // Check which pages are supported by AIX.
+  // Probe multipage support.
   query_multipage_support();
 
   // Act like we only have one page size by eliminating corner cases which
@@ -3449,9 +3520,9 @@
     }
   } else {
     // datapsize = 64k. Data segment, thread stacks are 64k paged.
-    //   This normally means that we can allocate 64k pages dynamically.
-    //   (There is one special case where this may be false: EXTSHM=on.
-    //    but we decided to not support that mode).
+    // This normally means that we can allocate 64k pages dynamically.
+    // (There is one special case where this may be false: EXTSHM=on.
+    // but we decided to not support that mode).
     assert0(g_multipage_support.can_use_64K_pages);
     Aix::_page_size = SIZE_64K;
     trcVerbose("64K page mode");
@@ -3467,7 +3538,7 @@
   _page_sizes[0] = 0;
 
   // debug trace
-  trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
+  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));
 
   // Next, we need to initialize libo4 and libperfstat libraries.
   if (os::Aix::on_pase()) {
@@ -3485,8 +3556,6 @@
   // need libperfstat etc.
   os::Aix::initialize_system_info();
 
-  _initial_pid = getpid();
-
   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
 
   init_random(1234567);
@@ -3511,11 +3580,21 @@
 // This is called _after_ the global arguments have been parsed.
 jint os::init_2(void) {
 
+  if (os::Aix::on_pase()) {
+    trcVerbose("Running on PASE.");
+  } else {
+    trcVerbose("Running on AIX (not PASE).");
+  }
+
   trcVerbose("processor count: %d", os::_processor_count);
   trcVerbose("physical memory: %lu", Aix::_physical_memory);
 
   // Initially build up the loaded dll map.
   LoadedLibraries::reload();
+  if (Verbose) {
+    trcVerbose("Loaded Libraries: ");
+    LoadedLibraries::print(tty);
+  }
 
   const int page_size = Aix::page_size();
   const int map_size = page_size;
@@ -3553,10 +3632,8 @@
                                      map_size, prot,
                                      flags | MAP_FIXED,
                                      -1, 0);
-      if (Verbose) {
-        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
-                address_wishes[i], map_address + (ssize_t)page_size);
-      }
+      trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
+                   address_wishes[i], map_address + (ssize_t)page_size);
 
       if (map_address + (ssize_t)page_size == address_wishes[i]) {
         // Map succeeded and map_address is at wished address, exit loop.
@@ -3583,11 +3660,9 @@
     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
     os::set_memory_serialize_page(mem_serialize_page);
 
-#ifndef PRODUCT
-    if (Verbose && PrintMiscellaneous) {
-      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
-    }
-#endif
+    trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
+        mem_serialize_page, mem_serialize_page + Aix::page_size(),
+        Aix::page_size(), Aix::page_size());
   }
 
   // initialize suspend/resume support - must do this before signal_sets_init()
@@ -3624,7 +3699,10 @@
   // Note that this can be 0, if no default stacksize was set.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
 
-  Aix::libpthread_init();
+  if (UseNUMA) {
+    UseNUMA = false;
+    warning("NUMA optimizations are not available on this OS.");
+  }
 
   if (MaxFDLimit) {
     // Set the number of file descriptors to max. print out error
@@ -3646,7 +3724,7 @@
 
   if (PerfAllowAtExitRegistration) {
     // Only register atexit functions if PerfAllowAtExitRegistration is set.
-    // Atexit functions can be delayed until process exit time, which
+    // At exit functions can be delayed until process exit time, which
     // can be problematic for embedded VM situations. Embedded VMs should
     // call DestroyJavaVM() to assure that VM resources are released.
 
@@ -3746,16 +3824,6 @@
 ////////////////////////////////////////////////////////////////////////////////
 // debug support
 
-static address same_page(address x, address y) {
-  intptr_t page_bits = -os::vm_page_size();
-  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
-    return x;
-  else if (x > y)
-    return (address)(intptr_t(y) | ~page_bits) + 1;
-  else
-    return (address)(intptr_t(y) & page_bits);
-}
-
 bool os::find(address addr, outputStream* st) {
 
   st->print(PTR_FORMAT ": ", addr);
@@ -4119,24 +4187,28 @@
 // For now just return the system wide load average (no processor sets).
 int os::loadavg(double values[], int nelem) {
 
-  // Implemented using libperfstat on AIX.
-
   guarantee(nelem >= 0 && nelem <= 3, "argument error");
   guarantee(values, "argument error");
 
   if (os::Aix::on_pase()) {
-    Unimplemented();
-    return -1;
+
+    // AS/400 PASE: use libo4 porting library
+    double v[3] = { 0.0, 0.0, 0.0 };
+
+    if (libo4::get_load_avg(v, v + 1, v + 2)) {
+      for (int i = 0; i < nelem; i ++) {
+        values[i] = v[i];
+      }
+      return nelem;
+    } else {
+      return -1;
+    }
+
   } else {
+
     // AIX: use libperfstat
-    //
-    // See also:
-    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
-    // /usr/include/libperfstat.h:
-
-    // Use the already AIX version independent get_cpuinfo.
-    os::Aix::cpuinfo_t ci;
-    if (os::Aix::get_cpuinfo(&ci)) {
+    libperfstat::cpuinfo_t ci;
+    if (libperfstat::get_cpuinfo(&ci)) {
       for (int i = 0; i < nelem; i++) {
         values[i] = ci.loadavg[i];
       }
@@ -4163,8 +4235,7 @@
       (void)::poll(NULL, 0, 100);
     }
   } else {
-    jio_fprintf(stderr,
-      "Could not open pause file '%s', continuing immediately.\n", filename);
+    trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
   }
 }
 
@@ -4186,7 +4257,7 @@
   memset(&uts, 0, sizeof(uts));
   strcpy(uts.sysname, "?");
   if (::uname(&uts) == -1) {
-    trc("uname failed (%d)", errno);
+    trcVerbose("uname failed (%d)", errno);
     guarantee(0, "Could not determine whether we run on AIX or PASE");
   } else {
     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
@@ -4198,15 +4269,22 @@
     assert(minor > 0, "invalid OS release");
     _os_version = (major << 8) | minor;
     if (strcmp(uts.sysname, "OS400") == 0) {
-      Unimplemented();
+      // We run on AS/400 PASE. We do not support versions older than V5R4M0.
+      _on_pase = 1;
+      if (_os_version < 0x0504) {
+        trcVerbose("OS/400 releases older than V5R4M0 not supported.");
+        assert(false, "OS/400 release too old.");
+      } else {
+        trcVerbose("We run on OS/400 (pase) V%dR%d", major, minor);
+      }
     } else if (strcmp(uts.sysname, "AIX") == 0) {
       // We run on AIX. We do not support versions older than AIX 5.3.
       _on_pase = 0;
       if (_os_version < 0x0503) {
-        trc("AIX release older than AIX 5.3 not supported.");
+        trcVerbose("AIX release older than AIX 5.3 not supported.");
         assert(false, "AIX release too old.");
       } else {
-        trcVerbose("We run on AIX %d.%d\n", major, minor);
+        trcVerbose("We run on AIX %d.%d", major, minor);
       }
     } else {
       assert(false, "unknown OS");
@@ -4232,12 +4310,17 @@
   // This switch was needed on AIX 32bit, but on AIX 64bit the general
   // recommendation is (in OSS notes) to switch it off.
   p = ::getenv("EXTSHM");
-  if (Verbose) {
-    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
-  }
+  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
   if (p && strcasecmp(p, "ON") == 0) {
-    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
     _extshm = 1;
+    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
+    if (!AllowExtshm) {
+      // We allow under certain conditions the user to continue. However, we want this
+      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
+      // that the VM is not able to allocate 64k pages for the heap.
+      // We do not want to run with reduced performance.
+      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
+    }
   } else {
     _extshm = 0;
   }
@@ -4254,7 +4337,7 @@
   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
   if (p && strcmp(p, "ON") == 0) {
     _xpg_sus_mode = 1;
-    trc("Unsupported setting: XPG_SUS_ENV=ON");
+    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
     // clobber address ranges. If we ever want to support that, we have to do some
     // testing first.
@@ -4263,35 +4346,46 @@
     _xpg_sus_mode = 0;
   }
 
-  // Switch off AIX internal (pthread) guard pages. This has
-  // immediate effect for any pthread_create calls which follow.
+  if (os::Aix::on_pase()) {
+    p = ::getenv("QIBM_MULTI_THREADED");
+    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
+  }
+
+  p = ::getenv("LDR_CNTRL");
+  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
+  if (os::Aix::on_pase() && os::Aix::os_version() == 0x0701) {
+    if (p && ::strstr(p, "TEXTPSIZE")) {
+      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
+        "you may experience hangs or crashes on OS/400 V7R1.");
+    }
+  }
+
   p = ::getenv("AIXTHREAD_GUARDPAGES");
   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
-  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
-  guarantee(rc == 0, "");
 
 } // end: os::Aix::scan_environment()
 
-// PASE: initialize the libo4 library (AS400 PASE porting library).
+// PASE: initialize the libo4 library (PASE porting library).
 void os::Aix::initialize_libo4() {
-  Unimplemented();
-}
-
-// AIX: initialize the libperfstat library (we load this dynamically
-// because it is only available on AIX.
+  guarantee(os::Aix::on_pase(), "OS/400 only.");
+  if (!libo4::init()) {
+    trcVerbose("libo4 initialization failed.");
+    assert(false, "libo4 initialization failed");
+  } else {
+    trcVerbose("libo4 initialized.");
+  }
+}
+
+// AIX: initialize the libperfstat library.
 void os::Aix::initialize_libperfstat() {
-
   assert(os::Aix::on_aix(), "AIX only");
-
   if (!libperfstat::init()) {
-    trc("libperfstat initialization failed.");
+    trcVerbose("libperfstat initialization failed.");
     assert(false, "libperfstat initialization failed");
   } else {
-    if (Verbose) {
-      fprintf(stderr, "libperfstat initialized.\n");
-    }
-  }
-} // end: os::Aix::initialize_libperfstat
+    trcVerbose("libperfstat initialized.");
+  }
+}
 
 /////////////////////////////////////////////////////////////////////////////
 // thread stack
@@ -4313,7 +4407,7 @@
 
   pthread_t tid = pthread_self();
   struct __pthrdsinfo pinfo;
-  char dummy[1]; // We only need this to satisfy the api and to not get E.
+  char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
   int dummy_size = sizeof(dummy);
 
   memset(&pinfo, 0, sizeof(pinfo));
@@ -4328,44 +4422,47 @@
   }
   guarantee0(pinfo.__pi_stackend);
 
-  // The following can happen when invoking pthread_getthrds_np on a pthread running
-  // on a user provided stack (when handing down a stack to pthread create, see
-  // pthread_attr_setstackaddr).
-  // Not sure what to do here - I feel inclined to forbid this use case completely.
+  // The following may happen when invoking pthread_getthrds_np on a pthread
+  // running on a user provided stack (when handing down a stack to pthread
+  // create, see pthread_attr_setstackaddr).
+  // Not sure what to do then.
+
   guarantee0(pinfo.__pi_stacksize);
 
-  // Note: the pthread stack on AIX seems to look like this:
-  //
-  // ---------------------   real base ? at page border ?
+  // Note: we get three values from pthread_getthrds_np:
+  //       __pi_stackaddr, __pi_stacksize, __pi_stackend
   //
-  //     pthread internal data, like ~2K, see also
-  //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
-  //
-  // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
+  // high addr    ---------------------
   //
-  //     stack
-  //      ....
-  //
-  //     stack
-  //
-  // ---------------------   __pi_stackend  - __pi_stacksize
+  //    |         pthread internal data, like ~2K
+  //    |
+  //    |         ---------------------   __pi_stackend   (usually not page aligned, xxxxF890)
+  //    |
+  //    |
+  //    |
+  //    |
+  //    |
+  //    |
+  //    |          ---------------------   (__pi_stackend - __pi_stacksize)
+  //    |
+  //    |          padding to align the following AIX guard pages, if enabled.
+  //    |
+  //    V          ---------------------   __pi_stackaddr
   //
-  //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
-  // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
-  //
-  //   AIX guard pages (?)
+  // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
   //
 
-  // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
-  // __pi_stackend however is almost never page aligned.
-  //
+  address stack_base = (address)(pinfo.__pi_stackend);
+  address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,
+    os::vm_page_size());
+  size_t stack_size = stack_base - stack_low_addr;
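+  // The range reported to callers thus runs from the page aligned low address
+  // up to __pi_stackend, which itself is usually not page aligned.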
 
   if (p_stack_base) {
-    (*p_stack_base) = (address) (pinfo.__pi_stackend);
+    *p_stack_base = stack_base;
   }
 
   if (p_stack_size) {
-    (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
+    *p_stack_size = stack_size;
   }
 
   return true;
--- a/hotspot/src/os/aix/vm/os_aix.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/os_aix.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -34,9 +34,6 @@
 class Aix {
   friend class os;
 
-  // Length of strings included in the libperfstat structures.
-#define IDENTIFIER_LENGTH 64
-
   static bool libjsig_is_loaded;        // libjsig that interposes sigaction(),
                                         // __sigaction(), signal() is loaded
   static struct sigaction *(*get_signal_action)(int);
@@ -45,13 +42,15 @@
 
   static void check_signal_handler(int sig);
 
- protected:
+ private:
 
   static julong _physical_memory;
   static pthread_t _main_thread;
   static Mutex* _createThread_lock;
   static int _page_size;
-  static int _logical_cpus;
+
+  // Page size of newly created pthreads.
+  static int _stack_page_size;
 
   // -1 = uninitialized, 0 = AIX, 1 = OS/400 (PASE)
   static int _on_pase;
@@ -63,6 +62,9 @@
   //  for OS/400 e.g. 0x0504 for OS/400 V5R4
   static int _os_version;
 
+  // 4 Byte kernel version: Version, Release, Tech Level, Service Pack.
+  static unsigned int _os_kernel_version;
+
   // -1 = uninitialized,
   //  0 - SPEC1170 not requested (XPG_SUS_ENV is OFF or not set)
   //  1 - SPEC1170 requested (XPG_SUS_ENV is ON)
@@ -73,35 +75,6 @@
   //  1 - EXTSHM=ON
   static int _extshm;
 
-  // page sizes on AIX.
-  //
-  //  AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The latter two
-  //  (16M "large" resp. 16G "huge" pages) require special setup and are normally
-  //  not available.
-  //
-  //  AIX supports multiple page sizes per process, for:
-  //  - Stack (of the primordial thread, so not relevant for us)
-  //  - Data - data, bss, heap, for us also pthread stacks
-  //  - Text - text code
-  //  - shared memory
-  //
-  //  Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
-  //  and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...)
-  //
-  //  For shared memory, page size can be set dynamically via shmctl(). Different shared memory
-  //  regions can have different page sizes.
-  //
-  //  More information can be found at AIBM info center:
-  //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
-  //
-  // -----
-  //  We want to support 4K and 64K and, if the machine is set up correctly, 16MB pages.
-  //
-
-  // page size of the stack of newly created pthreads
-  // (should be LDR_CNTRL DATAPSIZE because stack is allocated on heap by pthread lib)
-  static int _stack_page_size;
-
   static julong available_memory();
   static julong physical_memory() { return _physical_memory; }
   static void initialize_system_info();
@@ -125,9 +98,6 @@
  public:
   static void init_thread_fpu_state();
   static pthread_t main_thread(void)                                { return _main_thread; }
-  // returns kernel thread id (similar to LWP id on Solaris), which can be
-  // used to access /proc
-  static pid_t gettid();
   static void set_createThread_lock(Mutex* lk)                      { _createThread_lock = lk; }
   static Mutex* createThread_lock(void)                             { return _createThread_lock; }
   static void hotspot_sigmask(Thread* thread);
@@ -215,6 +185,14 @@
     return _os_version;
   }
 
+  // Get 4 byte AIX kernel version number:
+  // highest 2 bytes: Version, Release
+  // if available: lowest 2 bytes: Tech Level, Service Pack.
+  static unsigned int os_kernel_version() {
+    if (_os_kernel_version) return _os_kernel_version;
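+    // Not yet determined: synthesize it from the 2-byte version/release, with
+    // Tech Level and Service Pack left zero.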
+    return os_version() << 16;
+  }
+
   // Convenience method: returns true if running on PASE V5R4 or older.
   static bool on_pase_V5R4_or_older() {
     return on_pase() && os_version() <= 0x0504;
@@ -257,27 +235,12 @@
 
   };
 
-  // Result struct for get_cpuinfo().
-  struct cpuinfo_t {
-    char description[IDENTIFIER_LENGTH];  // processor description (type/official name)
-    u_longlong_t processorHZ;             // processor speed in Hz
-    int ncpus;                            // number of active logical processors
-    double loadavg[3];                    // (1<<SBITS) times the average number of runnables processes during the last 1, 5 and 15 minutes.
-                                          // To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>.
-    char version[20];                     // processor version from _system_configuration (sys/systemcfg.h)
-  };
-
   // Functions to retrieve memory information on AIX, PASE.
   // (on AIX, using libperfstat, on PASE with libo4.so).
   // Returns true if ok, false if error.
   static bool get_meminfo(meminfo_t* pmi);
 
-  // Function to retrieve cpu information on AIX
-  // (on AIX, using libperfstat)
-  // Returns true if ok, false if error.
-  static bool get_cpuinfo(cpuinfo_t* pci);
-
-}; // os::Aix class
+};
 
 
 class PlatformEvent : public CHeapObj<mtInternal> {
--- a/hotspot/src/os/aix/vm/os_aix.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/aix/vm/os_aix.inline.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -36,10 +36,6 @@
 #include <sys/ioctl.h>
 #include <netdb.h>
 
-inline void* os::thread_local_storage_at(int index) {
-  return pthread_getspecific((pthread_key_t)index);
-}
-
 // File names are case-sensitive on windows only.
 inline int os::file_name_strcmp(const char* s1, const char* s2) {
   return strcmp(s1, s2);
@@ -64,6 +60,8 @@
 // On Aix, reservations are made on a page by page basis, nothing to do.
 inline void os::pd_split_reserved_memory(char *base, size_t size,
                                          size_t split, bool realloc) {
+  // TODO: Determine whether Sys V memory is split. If yes, we need to treat
+  // this the same way Windows treats its VirtualAlloc allocations.
 }
 
 // Bang the shadow pages if they need to be touched to be mapped.
--- a/hotspot/src/os/aix/vm/thread_aix.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_AIX_VM_THREAD_AIX_INLINE_HPP
-#define OS_AIX_VM_THREAD_AIX_INLINE_HPP
-
-#include "runtime/thread.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Contains inlined functions for class Thread and ThreadLocalStorage
-
-inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-
-#endif // OS_AIX_VM_THREAD_AIX_INLINE_HPP
--- a/hotspot/src/os/bsd/vm/jvm_bsd.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/bsd/vm/jvm_bsd.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -108,84 +108,3 @@
   return JNI_TRUE;
 JVM_END
 
-/*
-  All the defined signal names for Bsd.
-
-  NOTE that not all of these names are accepted by our Java implementation
-
-  Via an existing claim by the VM, sigaction restrictions, or
-  the "rules of Unix" some of these names will be rejected at runtime.
-  For example the VM sets up to handle USR1, sigaction returns EINVAL for
-  STOP, and Bsd simply doesn't allow catching of KILL.
-
-  Here are the names currently accepted by a user of sun.misc.Signal with
-  1.4.1 (ignoring potential interaction with use of chaining, etc):
-
-    HUP, INT, TRAP, ABRT, IOT, BUS, USR2, PIPE, ALRM, TERM, STKFLT,
-    CLD, CHLD, CONT, TSTP, TTIN, TTOU, URG, XCPU, XFSZ, VTALRM, PROF,
-    WINCH, POLL, IO, PWR, SYS
-
-*/
-
-struct siglabel {
-  const char *name;
-  int   number;
-};
-
-struct siglabel siglabels[] = {
-  /* derived from /usr/include/bits/signum.h on RH7.2 */
-   "HUP",       SIGHUP,         /* Hangup (POSIX).  */
-  "INT",        SIGINT,         /* Interrupt (ANSI).  */
-  "QUIT",       SIGQUIT,        /* Quit (POSIX).  */
-  "ILL",        SIGILL,         /* Illegal instruction (ANSI).  */
-  "TRAP",       SIGTRAP,        /* Trace trap (POSIX).  */
-  "ABRT",       SIGABRT,        /* Abort (ANSI).  */
-  "EMT",        SIGEMT,         /* EMT trap  */
-  "FPE",        SIGFPE,         /* Floating-point exception (ANSI).  */
-  "KILL",       SIGKILL,        /* Kill, unblockable (POSIX).  */
-  "BUS",        SIGBUS,         /* BUS error (4.2 BSD).  */
-  "SEGV",       SIGSEGV,        /* Segmentation violation (ANSI).  */
-  "SYS",        SIGSYS,         /* Bad system call. Only on some Bsden! */
-  "PIPE",       SIGPIPE,        /* Broken pipe (POSIX).  */
-  "ALRM",       SIGALRM,        /* Alarm clock (POSIX).  */
-  "TERM",       SIGTERM,        /* Termination (ANSI).  */
-  "URG",        SIGURG,         /* Urgent condition on socket (4.2 BSD).  */
-  "STOP",       SIGSTOP,        /* Stop, unblockable (POSIX).  */
-  "TSTP",       SIGTSTP,        /* Keyboard stop (POSIX).  */
-  "CONT",       SIGCONT,        /* Continue (POSIX).  */
-  "CHLD",       SIGCHLD,        /* Child status has changed (POSIX).  */
-  "TTIN",       SIGTTIN,        /* Background read from tty (POSIX).  */
-  "TTOU",       SIGTTOU,        /* Background write to tty (POSIX).  */
-  "IO",         SIGIO,          /* I/O now possible (4.2 BSD).  */
-  "XCPU",       SIGXCPU,        /* CPU limit exceeded (4.2 BSD).  */
-  "XFSZ",       SIGXFSZ,        /* File size limit exceeded (4.2 BSD).  */
-  "VTALRM",     SIGVTALRM,      /* Virtual alarm clock (4.2 BSD).  */
-  "PROF",       SIGPROF,        /* Profiling alarm clock (4.2 BSD).  */
-  "WINCH",      SIGWINCH,       /* Window size change (4.3 BSD, Sun).  */
-  "INFO",       SIGINFO,        /* Information request.  */
-  "USR1",       SIGUSR1,        /* User-defined signal 1 (POSIX).  */
-  "USR2",       SIGUSR2         /* User-defined signal 2 (POSIX).  */
-  };
-
-JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
-
-  /* find and return the named signal's number */
-
-  for(uint i=0; i<ARRAY_SIZE(siglabels); i++)
-    if(!strcmp(name, siglabels[i].name))
-      return siglabels[i].number;
-
-  return -1;
-
-JVM_END
-
-// used by os::exception_name()
-extern bool signal_name(int signo, char* buf, size_t len) {
-  for(uint i = 0; i < ARRAY_SIZE(siglabels); i++) {
-    if (signo == siglabels[i].number) {
-      jio_snprintf(buf, len, "SIG%s", siglabels[i].name);
-      return true;
-    }
-  }
-  return false;
-}
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -674,7 +674,7 @@
   int pid = os::current_process_id();
   alloca(((pid ^ counter++) & 7) * 128);
 
-  ThreadLocalStorage::set_thread(thread);
+  thread->initialize_thread_current();
 
   OSThread* osthread = thread->osthread();
   Monitor* sync = osthread->startThread_lock();
@@ -882,44 +882,6 @@
   delete osthread;
 }
 
-//////////////////////////////////////////////////////////////////////////////
-// thread local storage
-
-// Restore the thread pointer if the destructor is called. This is in case
-// someone from JNI code sets up a destructor with pthread_key_create to run
-// detachCurrentThread on thread death. Unless we restore the thread pointer we
-// will hang or crash. When detachCurrentThread is called the key will be set
-// to null and we will not be called again. If detachCurrentThread is never
-// called we could loop forever depending on the pthread implementation.
-static void restore_thread_pointer(void* p) {
-  Thread* thread = (Thread*) p;
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
-
-int os::allocate_thread_local_storage() {
-  pthread_key_t key;
-  int rslt = pthread_key_create(&key, restore_thread_pointer);
-  assert(rslt == 0, "cannot allocate thread local storage");
-  return (int)key;
-}
-
-// Note: This is currently not used by VM, as we don't destroy TLS key
-// on VM exit.
-void os::free_thread_local_storage(int index) {
-  int rslt = pthread_key_delete((pthread_key_t)index);
-  assert(rslt == 0, "invalid index");
-}
-
-void os::thread_local_storage_at_put(int index, void* value) {
-  int rslt = pthread_setspecific((pthread_key_t)index, value);
-  assert(rslt == 0, "pthread_setspecific failed");
-}
-
-extern "C" Thread* get_thread() {
-  return ThreadLocalStorage::thread();
-}
-
-
 ////////////////////////////////////////////////////////////////////////////////
 // time support
 
@@ -3435,20 +3397,6 @@
 extern void report_error(char* file_name, int line_no, char* title,
                          char* format, ...);
 
-extern bool signal_name(int signo, char* buf, size_t len);
-
-const char* os::exception_name(int exception_code, char* buf, size_t size) {
-  if (0 < exception_code && exception_code <= SIGRTMAX) {
-    // signal
-    if (!signal_name(exception_code, buf, size)) {
-      jio_snprintf(buf, size, "SIG%d", exception_code);
-    }
-    return buf;
-  } else {
-    return NULL;
-  }
-}
-
 // this is called _before_ most of the global arguments have been parsed
 void os::init(void) {
   char dummy;   // used to get a guess on initial stack address
--- a/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,10 +34,6 @@
 #include <sys/poll.h>
 #include <netdb.h>
 
-inline void* os::thread_local_storage_at(int index) {
-  return pthread_getspecific((pthread_key_t)index);
-}
-
 // File names are case-sensitive on windows only
 inline int os::file_name_strcmp(const char* s1, const char* s2) {
   return strcmp(s1, s2);
--- a/hotspot/src/os/bsd/vm/thread_bsd.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_BSD_VM_THREAD_BSD_INLINE_HPP
-#define OS_BSD_VM_THREAD_BSD_INLINE_HPP
-
-#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
-#error "This file should only be included from thread.inline.hpp"
-#endif
-
-#include "runtime/thread.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Contains inlined functions for class Thread and ThreadLocalStorage
-
-inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-
-#endif // OS_BSD_VM_THREAD_BSD_INLINE_HPP
--- a/hotspot/src/os/linux/vm/jvm_linux.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/linux/vm/jvm_linux.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -108,91 +108,3 @@
   return JNI_TRUE;
 JVM_END
 
-/*
-  All the defined signal names for Linux.
-
-  NOTE that not all of these names are accepted by our Java implementation
-
-  Via an existing claim by the VM, sigaction restrictions, or
-  the "rules of Unix" some of these names will be rejected at runtime.
-  For example the VM sets up to handle USR1, sigaction returns EINVAL for
-  STOP, and Linux simply doesn't allow catching of KILL.
-
-  Here are the names currently accepted by a user of sun.misc.Signal with
-  1.4.1 (ignoring potential interaction with use of chaining, etc):
-
-    HUP, INT, TRAP, ABRT, IOT, BUS, USR2, PIPE, ALRM, TERM, STKFLT,
-    CLD, CHLD, CONT, TSTP, TTIN, TTOU, URG, XCPU, XFSZ, VTALRM, PROF,
-    WINCH, POLL, IO, PWR, SYS
-
-*/
-
-struct siglabel {
-  const char *name;
-  int   number;
-};
-
-struct siglabel siglabels[] = {
-  /* derived from /usr/include/bits/signum.h on RH7.2 */
-   "HUP",       SIGHUP,         /* Hangup (POSIX).  */
-  "INT",        SIGINT,         /* Interrupt (ANSI).  */
-  "QUIT",       SIGQUIT,        /* Quit (POSIX).  */
-  "ILL",        SIGILL,         /* Illegal instruction (ANSI).  */
-  "TRAP",       SIGTRAP,        /* Trace trap (POSIX).  */
-  "ABRT",       SIGABRT,        /* Abort (ANSI).  */
-  "IOT",        SIGIOT,         /* IOT trap (4.2 BSD).  */
-  "BUS",        SIGBUS,         /* BUS error (4.2 BSD).  */
-  "FPE",        SIGFPE,         /* Floating-point exception (ANSI).  */
-  "KILL",       SIGKILL,        /* Kill, unblockable (POSIX).  */
-  "USR1",       SIGUSR1,        /* User-defined signal 1 (POSIX).  */
-  "SEGV",       SIGSEGV,        /* Segmentation violation (ANSI).  */
-  "USR2",       SIGUSR2,        /* User-defined signal 2 (POSIX).  */
-  "PIPE",       SIGPIPE,        /* Broken pipe (POSIX).  */
-  "ALRM",       SIGALRM,        /* Alarm clock (POSIX).  */
-  "TERM",       SIGTERM,        /* Termination (ANSI).  */
-#ifdef SIGSTKFLT
-  "STKFLT",     SIGSTKFLT,      /* Stack fault.  */
-#endif
-  "CLD",        SIGCLD,         /* Same as SIGCHLD (System V).  */
-  "CHLD",       SIGCHLD,        /* Child status has changed (POSIX).  */
-  "CONT",       SIGCONT,        /* Continue (POSIX).  */
-  "STOP",       SIGSTOP,        /* Stop, unblockable (POSIX).  */
-  "TSTP",       SIGTSTP,        /* Keyboard stop (POSIX).  */
-  "TTIN",       SIGTTIN,        /* Background read from tty (POSIX).  */
-  "TTOU",       SIGTTOU,        /* Background write to tty (POSIX).  */
-  "URG",        SIGURG,         /* Urgent condition on socket (4.2 BSD).  */
-  "XCPU",       SIGXCPU,        /* CPU limit exceeded (4.2 BSD).  */
-  "XFSZ",       SIGXFSZ,        /* File size limit exceeded (4.2 BSD).  */
-  "VTALRM",     SIGVTALRM,      /* Virtual alarm clock (4.2 BSD).  */
-  "PROF",       SIGPROF,        /* Profiling alarm clock (4.2 BSD).  */
-  "WINCH",      SIGWINCH,       /* Window size change (4.3 BSD, Sun).  */
-  "POLL",       SIGPOLL,        /* Pollable event occurred (System V).  */
-  "IO",         SIGIO,          /* I/O now possible (4.2 BSD).  */
-  "PWR",        SIGPWR,         /* Power failure restart (System V).  */
-#ifdef SIGSYS
-  "SYS",        SIGSYS          /* Bad system call. Only on some Linuxen! */
-#endif
-  };
-
-JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
-
-  /* find and return the named signal's number */
-
-  for(uint i=0; i<ARRAY_SIZE(siglabels); i++)
-    if(!strcmp(name, siglabels[i].name))
-      return siglabels[i].number;
-
-  return -1;
-
-JVM_END
-
-// used by os::exception_name()
-extern bool signal_name(int signo, char* buf, size_t len) {
-  for(uint i = 0; i < ARRAY_SIZE(siglabels); i++) {
-    if (signo == siglabels[i].number) {
-      jio_snprintf(buf, len, "SIG%s", siglabels[i].name);
-      return true;
-    }
-  }
-  return false;
-}
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -646,7 +646,7 @@
   int pid = os::current_process_id();
   alloca(((pid ^ counter++) & 7) * 128);
 
-  ThreadLocalStorage::set_thread(thread);
+  thread->initialize_thread_current();
 
   OSThread* osthread = thread->osthread();
   Monitor* sync = osthread->startThread_lock();
@@ -874,43 +874,6 @@
 }
 
 //////////////////////////////////////////////////////////////////////////////
-// thread local storage
-
-// Restore the thread pointer if the destructor is called. This is in case
-// someone from JNI code sets up a destructor with pthread_key_create to run
-// detachCurrentThread on thread death. Unless we restore the thread pointer we
-// will hang or crash. When detachCurrentThread is called the key will be set
-// to null and we will not be called again. If detachCurrentThread is never
-// called we could loop forever depending on the pthread implementation.
-static void restore_thread_pointer(void* p) {
-  Thread* thread = (Thread*) p;
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
-
-int os::allocate_thread_local_storage() {
-  pthread_key_t key;
-  int rslt = pthread_key_create(&key, restore_thread_pointer);
-  assert(rslt == 0, "cannot allocate thread local storage");
-  return (int)key;
-}
-
-// Note: This is currently not used by VM, as we don't destroy TLS key
-// on VM exit.
-void os::free_thread_local_storage(int index) {
-  int rslt = pthread_key_delete((pthread_key_t)index);
-  assert(rslt == 0, "invalid index");
-}
-
-void os::thread_local_storage_at_put(int index, void* value) {
-  int rslt = pthread_setspecific((pthread_key_t)index, value);
-  assert(rslt == 0, "pthread_setspecific failed");
-}
-
-extern "C" Thread* get_thread() {
-  return ThreadLocalStorage::thread();
-}
-
-//////////////////////////////////////////////////////////////////////////////
 // initial thread
 
 // Check if current thread is the initial thread, similar to Solaris thr_main.
@@ -4585,20 +4548,6 @@
 extern void report_error(char* file_name, int line_no, char* title,
                          char* format, ...);
 
-extern bool signal_name(int signo, char* buf, size_t len);
-
-const char* os::exception_name(int exception_code, char* buf, size_t size) {
-  if (0 < exception_code && exception_code <= SIGRTMAX) {
-    // signal
-    if (!signal_name(exception_code, buf, size)) {
-      jio_snprintf(buf, size, "SIG%d", exception_code);
-    }
-    return buf;
-  } else {
-    return NULL;
-  }
-}
-
 // this is called _before_ most of the global arguments have been parsed
 void os::init(void) {
   char dummy;   // used to get a guess on initial stack address
--- a/hotspot/src/os/linux/vm/os_linux.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.inline.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,10 +34,6 @@
 #include <sys/poll.h>
 #include <netdb.h>
 
-inline void* os::thread_local_storage_at(int index) {
-  return pthread_getspecific((pthread_key_t)index);
-}
-
 // File names are case-sensitive on windows only
 inline int os::file_name_strcmp(const char* s1, const char* s2) {
   return strcmp(s1, s2);
--- a/hotspot/src/os/linux/vm/thread_linux.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_LINUX_VM_THREAD_LINUX_INLINE_HPP
-#define OS_LINUX_VM_THREAD_LINUX_INLINE_HPP
-
-#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
-#error "This file should only be included from thread.inline.hpp"
-#endif
-
-#include "runtime/thread.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Contains inlined functions for class Thread and ThreadLocalStorage
-
-inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-
-#endif // OS_LINUX_VM_THREAD_LINUX_INLINE_HPP
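
The deleted thread_<os>.inline.hpp stubs existed only to splice per-OS
ThreadLocalStorage hooks into thread.inline.hpp. After this changeset the
current thread is cached in a compiler-level thread-local, with a pthread
key retained only as a backup for detach corner cases. A minimal stand-alone
sketch of the compiler-TLS shape, using hypothetical names rather than
HotSpot source:

    #include <cstdio>

    struct Thread { int id; };

    // One slot per OS thread; reading it is a single TLS load, with no
    // pthread_getspecific call on the hot path.
    static thread_local Thread* g_current = nullptr;

    void initialize_thread_current(Thread* t) { g_current = t; }
    Thread* current_or_null() { return g_current; }

    int main() {
      Thread t = { 42 };
      initialize_thread_current(&t);
      std::printf("current thread id = %d\n", current_or_null()->id);
      return 0;
    }

Because such a read compiles down to one TLS load, the hand-written assembly
get_thread() helpers further down can be deleted as well.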
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -493,166 +493,171 @@
   return interrupted;
 }
 
-// Returned string is a constant. For unknown signals "UNKNOWN" is returned.
-const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
+
 
-  static const struct {
-    int sig; const char* name;
-  }
-  info[] =
+static const struct {
+  int sig; const char* name;
+}
+g_signal_info[] =
   {
-    {  SIGABRT,     "SIGABRT" },
+  {  SIGABRT,     "SIGABRT" },
 #ifdef SIGAIO
-    {  SIGAIO,      "SIGAIO" },
+  {  SIGAIO,      "SIGAIO" },
 #endif
-    {  SIGALRM,     "SIGALRM" },
+  {  SIGALRM,     "SIGALRM" },
 #ifdef SIGALRM1
-    {  SIGALRM1,    "SIGALRM1" },
+  {  SIGALRM1,    "SIGALRM1" },
 #endif
-    {  SIGBUS,      "SIGBUS" },
+  {  SIGBUS,      "SIGBUS" },
 #ifdef SIGCANCEL
-    {  SIGCANCEL,   "SIGCANCEL" },
+  {  SIGCANCEL,   "SIGCANCEL" },
 #endif
-    {  SIGCHLD,     "SIGCHLD" },
+  {  SIGCHLD,     "SIGCHLD" },
 #ifdef SIGCLD
-    {  SIGCLD,      "SIGCLD" },
+  {  SIGCLD,      "SIGCLD" },
 #endif
-    {  SIGCONT,     "SIGCONT" },
+  {  SIGCONT,     "SIGCONT" },
 #ifdef SIGCPUFAIL
-    {  SIGCPUFAIL,  "SIGCPUFAIL" },
+  {  SIGCPUFAIL,  "SIGCPUFAIL" },
 #endif
 #ifdef SIGDANGER
-    {  SIGDANGER,   "SIGDANGER" },
+  {  SIGDANGER,   "SIGDANGER" },
 #endif
 #ifdef SIGDIL
-    {  SIGDIL,      "SIGDIL" },
+  {  SIGDIL,      "SIGDIL" },
 #endif
 #ifdef SIGEMT
-    {  SIGEMT,      "SIGEMT" },
+  {  SIGEMT,      "SIGEMT" },
 #endif
-    {  SIGFPE,      "SIGFPE" },
+  {  SIGFPE,      "SIGFPE" },
 #ifdef SIGFREEZE
-    {  SIGFREEZE,   "SIGFREEZE" },
+  {  SIGFREEZE,   "SIGFREEZE" },
 #endif
 #ifdef SIGGFAULT
-    {  SIGGFAULT,   "SIGGFAULT" },
+  {  SIGGFAULT,   "SIGGFAULT" },
 #endif
 #ifdef SIGGRANT
-    {  SIGGRANT,    "SIGGRANT" },
+  {  SIGGRANT,    "SIGGRANT" },
 #endif
-    {  SIGHUP,      "SIGHUP" },
-    {  SIGILL,      "SIGILL" },
-    {  SIGINT,      "SIGINT" },
+  {  SIGHUP,      "SIGHUP" },
+  {  SIGILL,      "SIGILL" },
+  {  SIGINT,      "SIGINT" },
 #ifdef SIGIO
-    {  SIGIO,       "SIGIO" },
+  {  SIGIO,       "SIGIO" },
 #endif
 #ifdef SIGIOINT
-    {  SIGIOINT,    "SIGIOINT" },
+  {  SIGIOINT,    "SIGIOINT" },
 #endif
 #ifdef SIGIOT
-  // SIGIOT is there for BSD compatibility, but on most Unices just a
-  // synonym for SIGABRT. The result should be "SIGABRT", not
-  // "SIGIOT".
-  #if (SIGIOT != SIGABRT )
-    {  SIGIOT,      "SIGIOT" },
-  #endif
+// SIGIOT is there for BSD compatibility, but on most Unices just a
+// synonym for SIGABRT. The result should be "SIGABRT", not
+// "SIGIOT".
+#if (SIGIOT != SIGABRT )
+  {  SIGIOT,      "SIGIOT" },
+#endif
 #endif
 #ifdef SIGKAP
-    {  SIGKAP,      "SIGKAP" },
+  {  SIGKAP,      "SIGKAP" },
 #endif
-    {  SIGKILL,     "SIGKILL" },
+  {  SIGKILL,     "SIGKILL" },
 #ifdef SIGLOST
-    {  SIGLOST,     "SIGLOST" },
+  {  SIGLOST,     "SIGLOST" },
 #endif
 #ifdef SIGLWP
-    {  SIGLWP,      "SIGLWP" },
+  {  SIGLWP,      "SIGLWP" },
 #endif
 #ifdef SIGLWPTIMER
-    {  SIGLWPTIMER, "SIGLWPTIMER" },
+  {  SIGLWPTIMER, "SIGLWPTIMER" },
 #endif
 #ifdef SIGMIGRATE
-    {  SIGMIGRATE,  "SIGMIGRATE" },
+  {  SIGMIGRATE,  "SIGMIGRATE" },
 #endif
 #ifdef SIGMSG
-    {  SIGMSG,      "SIGMSG" },
+  {  SIGMSG,      "SIGMSG" },
 #endif
-    {  SIGPIPE,     "SIGPIPE" },
+  {  SIGPIPE,     "SIGPIPE" },
 #ifdef SIGPOLL
-    {  SIGPOLL,     "SIGPOLL" },
+  {  SIGPOLL,     "SIGPOLL" },
 #endif
 #ifdef SIGPRE
-    {  SIGPRE,      "SIGPRE" },
+  {  SIGPRE,      "SIGPRE" },
 #endif
-    {  SIGPROF,     "SIGPROF" },
+  {  SIGPROF,     "SIGPROF" },
 #ifdef SIGPTY
-    {  SIGPTY,      "SIGPTY" },
+  {  SIGPTY,      "SIGPTY" },
 #endif
 #ifdef SIGPWR
-    {  SIGPWR,      "SIGPWR" },
+  {  SIGPWR,      "SIGPWR" },
 #endif
-    {  SIGQUIT,     "SIGQUIT" },
+  {  SIGQUIT,     "SIGQUIT" },
 #ifdef SIGRECONFIG
-    {  SIGRECONFIG, "SIGRECONFIG" },
+  {  SIGRECONFIG, "SIGRECONFIG" },
 #endif
 #ifdef SIGRECOVERY
-    {  SIGRECOVERY, "SIGRECOVERY" },
+  {  SIGRECOVERY, "SIGRECOVERY" },
 #endif
 #ifdef SIGRESERVE
-    {  SIGRESERVE,  "SIGRESERVE" },
+  {  SIGRESERVE,  "SIGRESERVE" },
 #endif
 #ifdef SIGRETRACT
-    {  SIGRETRACT,  "SIGRETRACT" },
+  {  SIGRETRACT,  "SIGRETRACT" },
 #endif
 #ifdef SIGSAK
-    {  SIGSAK,      "SIGSAK" },
+  {  SIGSAK,      "SIGSAK" },
 #endif
-    {  SIGSEGV,     "SIGSEGV" },
+  {  SIGSEGV,     "SIGSEGV" },
 #ifdef SIGSOUND
-    {  SIGSOUND,    "SIGSOUND" },
+  {  SIGSOUND,    "SIGSOUND" },
+#endif
+#ifdef SIGSTKFLT
+  {  SIGSTKFLT,   "SIGSTKFLT" },
 #endif
-    {  SIGSTOP,     "SIGSTOP" },
-    {  SIGSYS,      "SIGSYS" },
+  {  SIGSTOP,     "SIGSTOP" },
+  {  SIGSYS,      "SIGSYS" },
 #ifdef SIGSYSERROR
-    {  SIGSYSERROR, "SIGSYSERROR" },
+  {  SIGSYSERROR, "SIGSYSERROR" },
 #endif
 #ifdef SIGTALRM
-    {  SIGTALRM,    "SIGTALRM" },
+  {  SIGTALRM,    "SIGTALRM" },
 #endif
-    {  SIGTERM,     "SIGTERM" },
+  {  SIGTERM,     "SIGTERM" },
 #ifdef SIGTHAW
-    {  SIGTHAW,     "SIGTHAW" },
+  {  SIGTHAW,     "SIGTHAW" },
 #endif
-    {  SIGTRAP,     "SIGTRAP" },
+  {  SIGTRAP,     "SIGTRAP" },
 #ifdef SIGTSTP
-    {  SIGTSTP,     "SIGTSTP" },
+  {  SIGTSTP,     "SIGTSTP" },
 #endif
-    {  SIGTTIN,     "SIGTTIN" },
-    {  SIGTTOU,     "SIGTTOU" },
+  {  SIGTTIN,     "SIGTTIN" },
+  {  SIGTTOU,     "SIGTTOU" },
 #ifdef SIGURG
-    {  SIGURG,      "SIGURG" },
+  {  SIGURG,      "SIGURG" },
 #endif
-    {  SIGUSR1,     "SIGUSR1" },
-    {  SIGUSR2,     "SIGUSR2" },
+  {  SIGUSR1,     "SIGUSR1" },
+  {  SIGUSR2,     "SIGUSR2" },
 #ifdef SIGVIRT
-    {  SIGVIRT,     "SIGVIRT" },
+  {  SIGVIRT,     "SIGVIRT" },
 #endif
-    {  SIGVTALRM,   "SIGVTALRM" },
+  {  SIGVTALRM,   "SIGVTALRM" },
 #ifdef SIGWAITING
-    {  SIGWAITING,  "SIGWAITING" },
+  {  SIGWAITING,  "SIGWAITING" },
 #endif
 #ifdef SIGWINCH
-    {  SIGWINCH,    "SIGWINCH" },
+  {  SIGWINCH,    "SIGWINCH" },
 #endif
 #ifdef SIGWINDOW
-    {  SIGWINDOW,   "SIGWINDOW" },
+  {  SIGWINDOW,   "SIGWINDOW" },
 #endif
-    {  SIGXCPU,     "SIGXCPU" },
-    {  SIGXFSZ,     "SIGXFSZ" },
+  {  SIGXCPU,     "SIGXCPU" },
+  {  SIGXFSZ,     "SIGXFSZ" },
 #ifdef SIGXRES
-    {  SIGXRES,     "SIGXRES" },
+  {  SIGXRES,     "SIGXRES" },
 #endif
-    { -1, NULL }
-  };
+  { -1, NULL }
+};
+
+// Returned string is a constant. For unknown signals "UNKNOWN" is returned.
+const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
 
   const char* ret = NULL;
 
@@ -670,9 +675,9 @@
 #endif
 
   if (sig > 0) {
-    for (int idx = 0; info[idx].sig != -1; idx ++) {
-      if (info[idx].sig == sig) {
-        ret = info[idx].name;
+    for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
+      if (g_signal_info[idx].sig == sig) {
+        ret = g_signal_info[idx].name;
         break;
       }
     }
@@ -693,6 +698,25 @@
   return out;
 }
 
+int os::Posix::get_signal_number(const char* signal_name) {
+  char tmp[30];
+  const char* s = signal_name;
+  if (s[0] != 'S' || s[1] != 'I' || s[2] != 'G') {
+    jio_snprintf(tmp, sizeof(tmp), "SIG%s", signal_name);
+    s = tmp;
+  }
+  for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
+    if (strcmp(g_signal_info[idx].name, s) == 0) {
+      return g_signal_info[idx].sig;
+    }
+  }
+  return -1;
+}
+
+int os::get_signal_number(const char* signal_name) {
+  return os::Posix::get_signal_number(signal_name);
+}
+
 // Returns true if signal number is valid.
 bool os::Posix::is_valid_signal(int sig) {
   // MacOS not really POSIX compliant: sigaddset does not return
@@ -711,6 +735,22 @@
 #endif
 }
 
+// Returns:
+// "invalid (<num>)" for an invalid signal number
+// "SIG<num>" for a valid but unknown signal number
+// signal name otherwise.
+const char* os::exception_name(int sig, char* buf, size_t size) {
+  if (!os::Posix::is_valid_signal(sig)) {
+    jio_snprintf(buf, size, "invalid (%d)", sig);
+    return buf;
+  }
+  const char* const name = os::Posix::get_signal_name(sig, buf, size);
+  if (strcmp(name, "UNKNOWN") == 0) {
+    jio_snprintf(buf, size, "SIG%d", sig);
+  }
+  return buf;
+}
+
 #define NUM_IMPORTANT_SIGS 32
 // Returns one-line short description of a signal set in a user provided buffer.
 const char* os::Posix::describe_signal_set_short(const sigset_t* set, char* buffer, size_t buf_size) {
--- a/hotspot/src/os/posix/vm/os_posix.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/posix/vm/os_posix.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -51,6 +51,12 @@
   // Returned string is a constant. For unknown signals "UNKNOWN" is returned.
   static const char* get_signal_name(int sig, char* out, size_t outlen);
 
+  // Helper function, returns a signal number for a given signal name, e.g. 11
+  // for "SIGSEGV". Name can be given with or without "SIG" prefix, so both
+  // "SEGV" or "SIGSEGV" work. Name must be uppercase.
+  // Returns -1 for an unknown signal name.
+  static int get_signal_number(const char* signal_name);
+
   // Returns one-line short description of a signal set in a user provided buffer.
   static const char* describe_signal_set_short(const sigset_t* set, char* buffer, size_t size);
 
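
Both spellings mentioned above work because the helper simply prepends "SIG"
when the prefix is missing. A stand-alone model of that contract, not HotSpot
source and with the table truncated to two entries:

    #include <csignal>
    #include <cstdio>
    #include <cstring>

    static const struct { int sig; const char* name; } g_signal_info[] = {
      { SIGSEGV, "SIGSEGV" }, { SIGTERM, "SIGTERM" }, { -1, NULL }
    };

    int get_signal_number(const char* signal_name) {
      char tmp[30];
      const char* s = signal_name;
      if (std::strncmp(s, "SIG", 3) != 0) {   // accept "SEGV" as well as "SIGSEGV"
        std::snprintf(tmp, sizeof(tmp), "SIG%s", signal_name);
        s = tmp;
      }
      for (int i = 0; g_signal_info[i].sig != -1; i++) {
        if (std::strcmp(g_signal_info[i].name, s) == 0) {
          return g_signal_info[i].sig;
        }
      }
      return -1;                              // unknown name
    }

    int main() {
      std::printf("%d %d %d\n",
                  get_signal_number("SEGV"),    // 11 on typical platforms
                  get_signal_number("SIGSEGV"), // same value
                  get_signal_number("BOGUS"));  // -1
      return 0;
    }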
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os/posix/vm/threadLocalStorage_posix.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "runtime/threadLocalStorage.hpp"
+#include <pthread.h>
+
+static pthread_key_t _thread_key;
+static bool _initialized = false;
+
+// Restore the thread pointer if the destructor is called. This is in case
+// someone from JNI code sets up a destructor with pthread_key_create to run
+// detachCurrentThread on thread death. Unless we restore the thread pointer we
+// will hang or crash. When detachCurrentThread is called the key will be set
+// to null and we will not be called again. If detachCurrentThread is never
+// called we could loop forever depending on the pthread implementation.
+extern "C" void restore_thread_pointer(void* p) {
+  ThreadLocalStorage::set_thread((Thread*) p);
+}
+
+void ThreadLocalStorage::init() {
+  assert(!_initialized, "initializing TLS more than once!");
+  int rslt = pthread_key_create(&_thread_key, restore_thread_pointer);
+  // If this assert fails we will get a recursive assertion failure
+  // and not see the actual error message or get a hs_err file
+  assert_status(rslt == 0, rslt, "pthread_key_create");
+  _initialized = true;
+}
+
+bool ThreadLocalStorage::is_initialized() {
+  return _initialized;
+}
+
+Thread* ThreadLocalStorage::thread() {
+  // If this assert fails we will get a recursive assertion failure
+  // and not see the actual error message or get a hs_err file.
+  // Such a failure most likely means an error path was taken early in
+  // the initialization process that uses Thread::current without first
+  // checking that TLS is initialized - see java.cpp vm_exit
+  assert(_initialized, "TLS not initialized yet!");
+  return (Thread*) pthread_getspecific(_thread_key); // may be NULL
+}
+
+void ThreadLocalStorage::set_thread(Thread* current) {
+  assert(_initialized, "TLS not initialized yet!");
+  int rslt = pthread_setspecific(_thread_key, current);
+  assert_status(rslt == 0, rslt, "pthread_setspecific");
+}
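
The re-store in the destructor matters because POSIX nulls a key's slot
before running its destructor: without it, a later JNI-installed destructor
calling back into the VM would find no current thread. A stand-alone
demonstration of the underlying mechanism, not HotSpot source (link with
-lpthread):

    #include <pthread.h>
    #include <cstdio>

    static pthread_key_t key;

    static void restore(void* p) {
      std::printf("destructor saw %p, restoring it\n", p);
      pthread_setspecific(key, p);  // POSIX nulled the slot before calling us
    }

    static void* worker(void*) {
      int dummy = 0;
      pthread_setspecific(key, &dummy);
      return NULL;                  // key destructors run after this returns
    }

    int main() {
      pthread_key_create(&key, restore);
      pthread_t t;
      pthread_create(&t, NULL, worker, NULL);
      pthread_join(t, NULL);
      return 0;
    }

On glibc the destructor fires at most PTHREAD_DESTRUCTOR_ITERATIONS times;
as the comment above warns, an implementation that keeps honoring the
restored value could iterate indefinitely.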
--- a/hotspot/src/os/solaris/vm/jvm_solaris.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/solaris/vm/jvm_solaris.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -106,40 +106,3 @@
   return JNI_TRUE;
 JVM_END
 
-
-/*
-  All the defined signal names for Solaris are defined by str2sig().
-
-  NOTE that not all of these names are accepted by our Java implementation
-
-  Via an existing claim by the VM, sigaction restrictions, or
-  the "rules of Unix" some of these names will be rejected at runtime.
-  For example the VM sets up to handle USR1, sigaction returns EINVAL for
-  CANCEL, and Solaris simply doesn't allow catching of KILL.
-
-  Here are the names currently accepted by a user of sun.misc.Signal with
-  1.4.1 (ignoring potential interaction with use of chaining, etc):
-
-      HUP, INT, TRAP, IOT, ABRT, EMT, BUS, SYS, PIPE, ALRM, TERM, USR2,
-      CLD, CHLD, PWR, WINCH, URG, POLL, IO, TSTP, CONT, TTIN, TTOU, VTALRM,
-      PROF, XCPU, XFSZ, FREEZE, THAW, LOST
-*/
-
-JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
-
-  int sig;
-
-  /* return the named signal's number */
-
-  if(str2sig(name, &sig))
-    return -1;
-  else
-    return sig;
-
-JVM_END
-
-
-//Reconciliation History
-// 1.4 98/10/07 13:39:41 jvm_win32.cpp
-// 1.6 99/06/22 16:39:00 jvm_win32.cpp
-//End
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -728,6 +728,9 @@
 
   int prio;
   Thread* thread = (Thread*)thread_addr;
+
+  thread->initialize_thread_current();
+
   OSThread* osthr = thread->osthread();
 
   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
@@ -4144,32 +4147,6 @@
 void report_error(const char* file_name, int line_no, const char* title,
                   const char* format, ...);
 
-const char * signames[] = {
-  "SIG0",
-  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
-  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
-  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
-  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
-  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
-  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
-  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
-  "SIGCANCEL", "SIGLOST"
-};
-
-const char* os::exception_name(int exception_code, char* buf, size_t size) {
-  if (0 < exception_code && exception_code <= SIGRTMAX) {
-    // signal
-    if (exception_code < sizeof(signames)/sizeof(const char*)) {
-      jio_snprintf(buf, size, "%s", signames[exception_code]);
-    } else {
-      jio_snprintf(buf, size, "SIG%d", exception_code);
-    }
-    return buf;
-  } else {
-    return NULL;
-  }
-}
-
 // (Static) wrapper for getisax(2) call.
 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
 
@@ -5605,7 +5582,7 @@
 
   // fork is async-safe, fork1 is not so can't use in signal handler
   pid_t pid;
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
   if (t != NULL && t->is_inside_signal_handler()) {
     pid = fork();
   } else {
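
The handlers switch from ThreadLocalStorage::get_thread_slow() to
Thread::current_or_null_safe() because a signal can be delivered on a thread
the VM never attached, and the lookup must then yield NULL instead of
asserting. A reduced stand-alone model with hypothetical names, not HotSpot
source:

    #include <cstdio>

    struct Thread { bool inside_signal_handler; };

    // The compiler-level slot sketched earlier; NULL on unattached threads.
    static thread_local Thread* g_current = nullptr;

    Thread* current_or_null_safe() { return g_current; }  // never asserts

    void handler_body() {
      Thread* t = current_or_null_safe();
      if (t != nullptr && t->inside_signal_handler) {
        std::puts("reentrant signal on an attached thread: fork() only");
      } else {
        std::puts("unattached or ordinary thread: fork1() is allowed");
      }
    }

    int main() {
      handler_body();   // g_current is NULL here: the unattached case
      return 0;
    }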
--- a/hotspot/src/os/solaris/vm/thread_solaris.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP
-#define OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP
-
-#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
-#error "This file should only be included from thread.inline.hpp"
-#endif
-
-#include "runtime/thread.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Thread::current is "hot" it's called > 128K times in the 1st 500 msecs of
-// startup.
-// ThreadLocalStorage::thread is warm -- it's called > 16K times in the same
-// period.   Thread::current() now calls ThreadLocalStorage::thread() directly.
-// For SPARC, to avoid excessive register window spill-fill faults,
-// we aggressively inline these routines.
-
-inline void ThreadLocalStorage::set_thread(Thread* thread) {
-  _thr_current = thread;
-}
-
-inline Thread* ThreadLocalStorage::thread()  {
-  return _thr_current;
-}
-
-#endif // OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP
--- a/hotspot/src/os/windows/vm/jvm_windows.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/windows/vm/jvm_windows.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -89,39 +89,3 @@
 JVM_END
 
 
-/*
-  All the defined signal names for Windows.
-
-  NOTE that not all of these names are accepted by FindSignal!
-
-  For various reasons some of these may be rejected at runtime.
-
-  Here are the names currently accepted by a user of sun.misc.Signal with
-  1.4.1 (ignoring potential interaction with use of chaining, etc):
-
-     (LIST TBD)
-
-*/
-struct siglabel {
-  char *name;
-  int   number;
-};
-
-struct siglabel siglabels[] =
-  /* derived from version 6.0 VC98/include/signal.h */
-  {"ABRT",      SIGABRT,        /* abnormal termination triggered by abort cl */
-  "FPE",        SIGFPE,         /* floating point exception */
-  "SEGV",       SIGSEGV,        /* segment violation */
-  "INT",        SIGINT,         /* interrupt */
-  "TERM",       SIGTERM,        /* software term signal from kill */
-  "BREAK",      SIGBREAK,       /* Ctrl-Break sequence */
-  "ILL",        SIGILL};        /* illegal instruction */
-
-JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
-  /* find and return the named signal's number */
-
-  for(int i=0;i<sizeof(siglabels)/sizeof(struct siglabel);i++)
-    if(!strcmp(name, siglabels[i].name))
-      return siglabels[i].number;
-  return -1;
-JVM_END
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -419,6 +419,8 @@
   int pid = os::current_process_id();
   _alloca(((pid ^ counter++) & 7) * 128);
 
+  thread->initialize_thread_current();
+
   OSThread* osthr = thread->osthread();
   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 
@@ -1799,24 +1801,32 @@
 void os::print_siginfo(outputStream *st, void *siginfo) {
   EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
   st->print("siginfo:");
-  st->print(" ExceptionCode=0x%x", er->ExceptionCode);
-
-  if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
-      er->NumberParameters >= 2) {
+
+  char tmp[64];
+  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
+    strcpy(tmp, "EXCEPTION_??");
+  }
+  st->print(" %s (0x%x)", tmp, er->ExceptionCode);
+
+  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
+       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
+       er->NumberParameters >= 2) {
     switch (er->ExceptionInformation[0]) {
     case 0: st->print(", reading address"); break;
     case 1: st->print(", writing address"); break;
+    case 8: st->print(", data execution prevention violation at address"); break;
     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                        er->ExceptionInformation[0]);
     }
     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
-  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
-             er->NumberParameters >= 2 && UseSharedSpaces) {
-    FileMapInfo* mapinfo = FileMapInfo::current_info();
-    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
-      st->print("\n\nError accessing class data sharing archive."       \
-                " Mapped file inaccessible during execution, "          \
-                " possible disk/network problem.");
+
+    if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && UseSharedSpaces) {
+      FileMapInfo* mapinfo = FileMapInfo::current_info();
+      if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
+        st->print("\n\nError accessing class data sharing archive."       \
+                  " Mapped file inaccessible during execution, "          \
+                  " possible disk/network problem.");
+      }
     }
   } else {
     int num = er->NumberParameters;
@@ -2146,7 +2156,7 @@
 
 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                       address handler) {
-  JavaThread* thread = JavaThread::current();
+  JavaThread* thread = (JavaThread*) Thread::current_or_null();
   // Save pc in thread
 #ifdef _M_IA64
   // Do not blow up if no thread info available.
@@ -2384,7 +2394,7 @@
   address pc = (address) exceptionInfo->ContextRecord->Eip;
   #endif
 #endif
-  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady
+  Thread* t = Thread::current_or_null_safe();
 
   // Handle SafeFetch32 and SafeFetchN exceptions.
   if (StubRoutines::is_safefetch_fault(pc)) {
@@ -4011,27 +4021,6 @@
   return result == IDYES;
 }
 
-int os::allocate_thread_local_storage() {
-  return TlsAlloc();
-}
-
-
-void os::free_thread_local_storage(int index) {
-  TlsFree(index);
-}
-
-
-void os::thread_local_storage_at_put(int index, void* value) {
-  TlsSetValue(index, value);
-  assert(thread_local_storage_at(index) == value, "Just checking");
-}
-
-
-void* os::thread_local_storage_at(int index) {
-  return TlsGetValue(index);
-}
-
-
 #ifndef PRODUCT
 #ifndef _WIN64
 // Helpers to check whether NX protection is enabled
@@ -4079,6 +4068,9 @@
     fatal("DuplicateHandle failed\n");
   }
   main_thread_id = (int) GetCurrentThreadId();
+
+  // initialize fast thread access - only used for 32-bit
+  win32::initialize_thread_ptr_offset();
 }
 
 // To install functions for atexit processing
@@ -5177,9 +5169,7 @@
     }
   }
 
-  JavaThread* thread = (JavaThread*)(Thread::current());
-  assert(thread->is_Java_thread(), "Must be JavaThread");
-  JavaThread *jt = (JavaThread *)thread;
+  JavaThread* thread = JavaThread::current();
 
   // Don't wait if interrupted or already triggered
   if (Thread::is_interrupted(thread, false) ||
@@ -5187,16 +5177,16 @@
     ResetEvent(_ParkEvent);
     return;
   } else {
-    ThreadBlockInVM tbivm(jt);
+    ThreadBlockInVM tbivm(thread);
     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
-    jt->set_suspend_equivalent();
+    thread->set_suspend_equivalent();
 
     WaitForSingleObject(_ParkEvent, time);
     ResetEvent(_ParkEvent);
 
     // If externally suspended while waiting, re-suspend
-    if (jt->handle_special_suspend_equivalent_condition()) {
-      jt->java_suspend_self();
+    if (thread->handle_special_suspend_equivalent_condition()) {
+      thread->java_suspend_self();
     }
   }
 }
@@ -5299,7 +5289,7 @@
   DWORD exception_code = e->ExceptionRecord->ExceptionCode;
 
   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
-    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
+    JavaThread* thread = JavaThread::current();
     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
     address addr = (address) exceptionRecord->ExceptionInformation[1];
 
@@ -6033,3 +6023,48 @@
   UseNUMAInterleaving = old_use_numa_interleaving;
 }
 #endif // PRODUCT
+
+/*
+  All the defined signal names for Windows.
+
+  NOTE that not all of these names are accepted by FindSignal!
+
+  For various reasons some of these may be rejected at runtime.
+
+  Here are the names currently accepted by a user of sun.misc.Signal with
+  1.4.1 (ignoring potential interaction with use of chaining, etc):
+
+     (LIST TBD)
+
+*/
+int os::get_signal_number(const char* name) {
+  static const struct {
+    const char* name;
+    int   number;
+  } siglabels [] =
+    // derived from version 6.0 VC98/include/signal.h
+  {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
+  "FPE",        SIGFPE,         // floating point exception
+  "SEGV",       SIGSEGV,        // segment violation
+  "INT",        SIGINT,         // interrupt
+  "TERM",       SIGTERM,        // software term signal from kill
+  "BREAK",      SIGBREAK,       // Ctrl-Break sequence
+  "ILL",        SIGILL};        // illegal instruction
+  for (size_t i = 0; i < sizeof(siglabels) / sizeof(siglabels[0]); i++)
+    if(!strcmp(name, siglabels[i].name))
+      return siglabels[i].number;
+  return -1;
+}
+
+// Fast current thread access
+
+int os::win32::_thread_ptr_offset = 0;
+
+static void call_wrapper_dummy() {}
+
+// We need to call the os_exception_wrapper once so that it sets
+// up the offset from FS of the thread pointer.
+void os::win32::initialize_thread_ptr_offset() {
+  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
+                           NULL, NULL, NULL, NULL);
+}
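
Because the struct behind siglabels is anonymous after this move, the
element count has to be derived from the array itself rather than from a
named type. Two equivalent stand-alone idioms, with illustrative values
rather than HotSpot source:

    #include <cstdio>
    #include <cstddef>

    static const struct { const char* name; int number; } siglabels[] = {
      { "ABRT", 22 }, { "FPE", 8 }, { "SEGV", 11 }  // numbers illustrative only
    };

    // Type-safe alternative: the bound is deduced, never the element type.
    template <typename T, std::size_t N>
    constexpr std::size_t array_size(const T (&)[N]) { return N; }

    int main() {
      std::printf("%zu %zu\n",
                  sizeof(siglabels) / sizeof(siglabels[0]),  // classic idiom
                  array_size(siglabels));
      return 0;
    }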
--- a/hotspot/src/os/windows/vm/os_windows.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -117,6 +117,17 @@
 
   // filter function to ignore faults on serializations page
   static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
+
+  // Fast access to current thread
+protected:
+  static int _thread_ptr_offset;
+private:
+  static void initialize_thread_ptr_offset();
+public:
+  static inline void set_thread_ptr_offset(int offset) {
+    _thread_ptr_offset = offset;
+  }
+  static inline int get_thread_ptr_offset() { return _thread_ptr_offset; }
 };
 
 /*
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os/windows/vm/threadLocalStorage_windows.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/threadLocalStorage.hpp"
+#include <windows.h>
+
+static DWORD _thread_key;
+static bool _initialized = false;
+
+
+void ThreadLocalStorage::init() {
+  assert(!_initialized, "initializing TLS more than once!");
+  _thread_key = TlsAlloc();
+  // If this assert fails we will get a recursive assertion failure
+  // and not see the actual error message or get a hs_err file
+  assert(_thread_key != TLS_OUT_OF_INDEXES, "TlsAlloc failed: out of indices");
+  _initialized = true;
+}
+
+bool ThreadLocalStorage::is_initialized() {
+  return _initialized;
+}
+
+Thread* ThreadLocalStorage::thread() {
+  // If this assert fails we will get a recursive assertion failure
+  // and not see the actual error message or get a hs_err file.
+  // Such a failure most likely means an error path was taken early in
+  // the initialization process that uses Thread::current without first
+  // checking that TLS is initialized - see java.cpp vm_exit
+  assert(_initialized, "TLS not initialized yet!");
+  Thread* current = (Thread*) TlsGetValue(_thread_key);
+  assert(current != 0 || GetLastError() == ERROR_SUCCESS,
+         "TlsGetValue failed with error code: %lu", GetLastError());
+  return current;
+}
+
+void ThreadLocalStorage::set_thread(Thread* current) {
+  assert(_initialized, "TLS not initialized yet!");
+  BOOL res = TlsSetValue(_thread_key, current);
+  assert(res, "TlsSetValue failed with error code: %lu", GetLastError());
+}
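
A stand-alone Win32 sketch of the TlsAlloc/TlsSetValue/TlsGetValue slot used
above, not HotSpot source and with error handling reduced to asserts:

    #include <windows.h>
    #include <cassert>
    #include <cstdio>

    int main() {
      DWORD key = TlsAlloc();
      assert(key != TLS_OUT_OF_INDEXES);

      int value = 42;
      BOOL ok = TlsSetValue(key, &value);
      assert(ok);

      int* back = static_cast<int*>(TlsGetValue(key));
      // NULL is ambiguous here - it may be a legally stored value or an
      // error - which is why the code above also consults GetLastError().
      assert(back != NULL || GetLastError() == ERROR_SUCCESS);
      std::printf("stored %d, read back %d\n", value, *back);

      TlsFree(key);
      return 0;
    }

The GetLastError() check mirrors the assert in the new file: TlsGetValue
signals success by setting the last error to ERROR_SUCCESS, since a NULL
slot value is otherwise indistinguishable from failure.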
--- a/hotspot/src/os/windows/vm/thread_windows.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_WINDOWS_VM_THREAD_WINDOWS_INLINE_HPP
-#define OS_WINDOWS_VM_THREAD_WINDOWS_INLINE_HPP
-
-#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
-#error "This file should only be included from thread.inline.hpp"
-#endif
-
-#include "runtime/thread.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Contains inlined functions for class Thread and ThreadLocalStorage
-
-inline void ThreadLocalStorage::pd_invalidate_all()            { return; }
-
-#endif // OS_WINDOWS_VM_THREAD_WINDOWS_INLINE_HPP
--- a/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -167,7 +167,7 @@
 
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();   // slow & steady
+  Thread* t = Thread::current_or_null_safe();
 
   SignalHandlerMark shm(t);
 
--- a/hotspot/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/threadLocalStorage.hpp"
-#include "runtime/thread.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-  // Nothing we can do here for user-level thread.
-}
-
-void ThreadLocalStorage::pd_init() {
-  // Nothing to do.
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/hotspot/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
-#define OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-public:
-  static Thread* thread() {
-    return (Thread *) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
--- a/hotspot/src/os_cpu/aix_ppc/vm/vmStructs_aix_ppc.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/aix_ppc/vm/vmStructs_aix_ppc.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -35,8 +35,7 @@
   /******************************/                                                                                                   \
   /* Threads (NOTE: incomplete) */                                                                                                   \
   /******************************/                                                                                                   \
-  nonstatic_field(OSThread,                      _thread_id,                                      pid_t)                             \
-  nonstatic_field(OSThread,                      _pthread_id,                                     pthread_t)
+  nonstatic_field(OSThread,                      _thread_id,                                      pthread_t)                         \
 
 
 #define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
@@ -45,7 +44,6 @@
   /* Posix Thread IDs   */                                                \
   /**********************/                                                \
                                                                           \
-  declare_integer_type(pid_t)                                             \
   declare_unsigned_integer_type(pthread_t)
 
 #define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
--- a/hotspot/src/os_cpu/bsd_x86/vm/assembler_bsd_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/bsd_x86/vm/assembler_bsd_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,62 +26,7 @@
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
 
-#ifndef _LP64
-void MacroAssembler::int3() {
-  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MacroAssembler::get_thread(Register thread) {
-  movl(thread, rsp);
-  shrl(thread, PAGE_SHIFT);
-
-  ExternalAddress tls_base((address)ThreadLocalStorage::sp_map_addr());
-  Address index(noreg, thread, Address::times_4);
-  ArrayAddress tls(tls_base, index);
-
-  movptr(thread, tls);
-}
-#else
 void MacroAssembler::int3() {
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 }
-
-void MacroAssembler::get_thread(Register thread) {
-  // call pthread_getspecific
-  // void * pthread_getspecific(pthread_key_t key);
-   if (thread != rax) {
-     push(rax);
-   }
-   push(rdi);
-   push(rsi);
-   push(rdx);
-   push(rcx);
-   push(r8);
-   push(r9);
-   push(r10);
-   // XXX
-   mov(r10, rsp);
-   andq(rsp, -16);
-   push(r10);
-   push(r11);
-
-   movl(rdi, ThreadLocalStorage::thread_index());
-   call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
-
-   pop(r11);
-   pop(rsp);
-   pop(r10);
-   pop(r9);
-   pop(r8);
-   pop(rcx);
-   pop(rdx);
-   pop(rsi);
-   pop(rdi);
-   if (thread != rax) {
-       mov(thread, rax);
-       pop(rax);
-   }
-}
-#endif
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -405,7 +405,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
--- a/hotspot/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Map stack pointer (%esp) to thread pointer for faster TLS access
-//
-// Here we use a flat table for better performance. Getting current thread
-// is down to one memory access (read _sp_map[%esp>>12]) in generated code
-// and two in runtime code (-fPIC code needs an extra load for _sp_map).
-//
-// This code assumes stack page is not shared by different threads. It works
-// in 32-bit VM when page size is 4K (or a multiple of 4K, if that matters).
-//
-// Notice that _sp_map is allocated in the bss segment, which is ZFOD
-// (zero-fill-on-demand). While it reserves 4M address space upfront,
-// actual memory pages are committed on demand.
-//
-// If an application creates and destroys a lot of threads, usually the
-// stack space freed by a thread will soon get reused by new thread
-// (this is especially true in NPTL or BsdThreads in fixed-stack mode).
-// No memory page in _sp_map is wasted.
-//
-// However, it's still possible that we might end up populating &
-// committing a large fraction of the 4M table over time, but the actual
-// amount of live data in the table could be quite small. The max wastage
-// is less than 4M bytes. If it becomes an issue, we could use madvise()
-// with MADV_DONTNEED to reclaim unused (i.e. all-zero) pages in _sp_map.
-// MADV_DONTNEED on Bsd keeps the virtual memory mapping, but zaps the
-// physical memory page (i.e. similar to MADV_FREE on Solaris).
-
-#ifndef AMD64
-Thread* ThreadLocalStorage::_sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-#endif // !AMD64
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
-}
-
-void ThreadLocalStorage::pd_init() {
-#ifndef AMD64
-  assert(align_size_down(os::vm_page_size(), PAGE_SIZE) == os::vm_page_size(),
-         "page size must be multiple of PAGE_SIZE");
-#endif // !AMD64
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-
-#ifndef AMD64
-  address stack_top = os::current_stack_base();
-  size_t stack_size = os::current_stack_size();
-
-  for (address p = stack_top - stack_size; p < stack_top; p += PAGE_SIZE) {
-    // pd_set_thread() is called with non-NULL value when a new thread is
-    // created/attached, or with NULL value when a thread is about to exit.
-    // If both "thread" and the corresponding _sp_map[] entry are non-NULL,
-    // they should have the same value. Otherwise it might indicate that the
-    // stack page is shared by multiple threads. However, a more likely cause
-    // for this assertion to fail is that an attached thread exited without
-    // detaching itself from VM, which is a program error and could cause VM
-    // to crash.
-    assert(thread == NULL || _sp_map[(uintptr_t)p >> PAGE_SHIFT] == NULL ||
-           thread == _sp_map[(uintptr_t)p >> PAGE_SHIFT],
-           "thread exited without detaching from VM??");
-    _sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread;
-  }
-#endif // !AMD64
-}
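
The file deleted here implemented the retired 32-bit fast path: every 4K
page of a thread's stack indexed a flat table of Thread*, so the current
thread fell out of a single load keyed by %esp. A stand-alone model of that
mapping, not HotSpot source (the modulo is added so the demo is safe on
64-bit hosts):

    #include <cstdint>
    #include <cstdio>

    struct Thread { int id; };

    static const int PAGE_SHIFT = 12;  // 4K pages, as in the deleted code
    static const std::uintptr_t ENTRIES =
        std::uintptr_t(1) << (32 - PAGE_SHIFT);
    static Thread* sp_map[ENTRIES];    // flat table, zero-filled in bss

    static std::uintptr_t slot(std::uintptr_t sp) {
      return (sp >> PAGE_SHIFT) % ENTRIES;  // original used sp >> PAGE_SHIFT
    }

    int main() {
      Thread t = { 7 };
      int on_stack = 0;  // any object on this thread's stack
      std::uintptr_t sp = reinterpret_cast<std::uintptr_t>(&on_stack);
      sp_map[slot(sp)] = &t;  // what pd_set_thread() did for each stack page
      std::printf("thread id = %d\n", sp_map[slot(sp)]->id);
      return 0;
    }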
--- a/hotspot/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_X86_VM_THREADLS_BSD_X86_HPP
-#define OS_CPU_BSD_X86_VM_THREADLS_BSD_X86_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-#ifndef AMD64
-  // map stack pointer to thread pointer - see notes in threadLS_bsd_x86.cpp
-  #define SP_BITLENGTH  32
-#ifndef PAGE_SHIFT
-  #define PAGE_SHIFT    12
-  #define PAGE_SIZE     (1UL << PAGE_SHIFT)
-#endif
-  static Thread* _sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-#endif // !AMD64
-
-public:
-
-#ifndef AMD64
-  static Thread** sp_map_addr() { return _sp_map; }
-#endif // !AMD64
-
-  static Thread* thread() {
-#ifdef AMD64
-    return (Thread*) os::thread_local_storage_at(thread_index());
-#else
-    uintptr_t sp;
-    __asm__ volatile ("movl %%esp, %0" : "=r" (sp));
-    return _sp_map[sp >> PAGE_SHIFT];
-#endif // AMD64
-  }
-
-#endif // OS_CPU_BSD_X86_VM_THREADLS_BSD_X86_HPP
--- a/hotspot/src/os_cpu/bsd_zero/vm/assembler_bsd_zero.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/bsd_zero/vm/assembler_bsd_zero.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -23,10 +23,4 @@
  *
  */
 
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_zero.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
 // This file is intentionally empty
--- a/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -134,7 +134,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   SignalHandlerMark shm(t);
 
--- a/hotspot/src/os_cpu/bsd_zero/vm/threadLS_bsd_zero.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-  // nothing to do
-}
-
-void ThreadLocalStorage::pd_init() {
-  // nothing to do
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/hotspot/src/os_cpu/bsd_zero/vm/threadLS_bsd_zero.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_ZERO_VM_THREADLS_BSD_ZERO_HPP
-#define OS_CPU_BSD_ZERO_VM_THREADLS_BSD_ZERO_HPP
-
-// Processor dependent parts of ThreadLocalStorage
-
- public:
-  static Thread* thread() {
-    return (Thread*) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_BSD_ZERO_VM_THREADLS_BSD_ZERO_HPP
--- a/hotspot/src/os_cpu/linux_aarch64/vm/assembler_linux_aarch64.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/assembler_linux_aarch64.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -23,32 +23,6 @@
  *
  */
 
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
+// nothing required here
 
 
-// get_thread can be called anywhere inside generated code so we need
-// to save whatever non-callee save context might get clobbered by the
-// call to the C thread_local lookup call or, indeed, the call setup
-// code. x86 appears to save C arg registers.
-
-void MacroAssembler::get_thread(Register dst) {
-  // call pthread_getspecific
-  // void * pthread_getspecific(pthread_key_t key);
-
-  // Save all call-clobbered regs except dst, plus r19 and r20.
-  RegSet saved_regs = RegSet::range(r0, r20) + lr - dst;
-  push(saved_regs, sp);
-  mov(c_rarg0, ThreadLocalStorage::thread_index());
-  mov(r19, CAST_FROM_FN_PTR(address, pthread_getspecific));
-  blrt(r19, 1, 0, 1);
-  if (dst != c_rarg0) {
-    mov(dst, c_rarg0);
-  }
-  // restore pushed registers
-  pop(saved_regs, sp);
-}
-
--- a/hotspot/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -249,7 +249,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
--- a/hotspot/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/threadLocalStorage.hpp"
-#include "runtime/thread.inline.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
-}
-
-void ThreadLocalStorage::pd_init() {
-}
-
-__thread Thread *aarch64_currentThread;
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-  aarch64_currentThread = thread;
-}
--- a/hotspot/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_AARCH64_VM_THREADLS_LINUX_AARCH64_HPP
-#define OS_CPU_LINUX_AARCH64_VM_THREADLS_LINUX_AARCH64_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-public:
-
-  static Thread *thread() {
-    return aarch64_currentThread;
-  }
-
-#endif // OS_CPU_LINUX_AARCH64_VM_THREADLS_LINUX_AARCH64_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.s	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,44 @@
+// Copyright (c) 2015, Red Hat Inc. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+
+        // JavaThread::aarch64_get_thread_helper()
+        //
+        // Return the current thread pointer in x0.
+        // Clobbers x1 and the condition flags.
+        // All other registers are preserved.
+
+	.global	_ZN10JavaThread25aarch64_get_thread_helperEv
+	.type	_ZN10JavaThread25aarch64_get_thread_helperEv, %function
+
+_ZN10JavaThread25aarch64_get_thread_helperEv:
+	stp x29, x30, [sp, -16]!
+	adrp x0, :tlsdesc:_ZN6Thread12_thr_currentE
+	ldr x1, [x0, #:tlsdesc_lo12:_ZN6Thread12_thr_currentE]
+	add x0, x0, :tlsdesc_lo12:_ZN6Thread12_thr_currentE
+	.tlsdesccall _ZN6Thread12_thr_currentE
+	blr x1
+	mrs x1, tpidr_el0
+	add x0, x1, x0
+	ldr x0, [x0]
+	ldp x29, x30, [sp], 16
+	ret
+
+	.size _ZN10JavaThread25aarch64_get_thread_helperEv, .-_ZN10JavaThread25aarch64_get_thread_helperEv
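
The new helper reaches the compiler-level TLS variable (Thread::_thr_current, per the mangled symbol _ZN6Thread12_thr_currentE) through the general-dynamic TLS descriptor sequence, and exists to pin the clobber set down to x0, x1 and the flags so generated code need not spill anything else. A rough C++ equivalent of what it computes, with the class layout assumed for illustration (the real declaration lives elsewhere in the runtime code):

    // Assumed shape of the compiler-level TLS the helper reads.
    class Thread {
      static __thread Thread* _thr_current;   // _ZN6Thread12_thr_currentE
     public:
      static Thread* current_or_null() { return _thr_current; }
    };
    __thread Thread* Thread::_thr_current = nullptr;

    // Stand-in for JavaThread::aarch64_get_thread_helper(): plain C++
    // gives no register-preservation guarantee, hence the hand-written asm.
    Thread* aarch64_get_thread_helper() {
      return Thread::current_or_null();   // compiler emits the tlsdesc access
    }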
--- a/hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -77,6 +77,8 @@
   bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
 public:
 
+  static Thread *aarch64_get_thread_helper();
+
   // These routines are only used on cpu architectures that
   // have separate register stacks (Itanium).
   static bool register_stack_overflow() { return false; }
--- a/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -182,7 +182,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   SignalHandlerMark shm(t);
 
--- a/hotspot/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
-}
-
-void ThreadLocalStorage::pd_init() {
-  // Nothing to do
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/hotspot/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
-#define OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-public:
-  static Thread* thread() {
-    return (Thread *) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -541,7 +541,7 @@
   ucontext_t* ucFake = (ucontext_t*) ucVoid;
   sigcontext* uc = (sigcontext*)ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
--- a/hotspot/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-}
-
-void ThreadLocalStorage::pd_init() {
-   // Nothing to do
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/hotspot/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_SPARC_VM_THREADLS_LINUX_SPARC_HPP
-#define OS_CPU_LINUX_SPARC_VM_THREADLS_LINUX_SPARC_HPP
-
-public:
-  static Thread* thread() {
-    return (Thread*) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_LINUX_SPARC_VM_THREADLS_LINUX_SPARC_HPP
--- a/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,85 +26,7 @@
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
 
-#ifndef _LP64
 void MacroAssembler::int3() {
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 }
-
-#ifdef MINIMIZE_RAM_USAGE
-
-void MacroAssembler::get_thread(Register thread) {
-  // call pthread_getspecific
-  // void * pthread_getspecific(pthread_key_t key);
-  if (thread != rax) push(rax);
-  push(rcx);
-  push(rdx);
-
-  push(ThreadLocalStorage::thread_index());
-  call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
-  increment(rsp, wordSize);
-
-  pop(rdx);
-  pop(rcx);
-  if (thread != rax) {
-    mov(thread, rax);
-    pop(rax);
-  }
-}
-
-#else
-void MacroAssembler::get_thread(Register thread) {
-  movl(thread, rsp);
-  shrl(thread, PAGE_SHIFT);
-
-  ExternalAddress tls_base((address)ThreadLocalStorage::sp_map_addr());
-  Address index(noreg, thread, Address::times_4);
-  ArrayAddress tls(tls_base, index);
-
-  movptr(thread, tls);
-}
-#endif // MINIMIZE_RAM_USAGE
-#else
-void MacroAssembler::int3() {
-  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MacroAssembler::get_thread(Register thread) {
-  // call pthread_getspecific
-  // void * pthread_getspecific(pthread_key_t key);
-   if (thread != rax) {
-     push(rax);
-   }
-   push(rdi);
-   push(rsi);
-   push(rdx);
-   push(rcx);
-   push(r8);
-   push(r9);
-   push(r10);
-   // XXX
-   mov(r10, rsp);
-   andq(rsp, -16);
-   push(r10);
-   push(r11);
-
-   movl(rdi, ThreadLocalStorage::thread_index());
-   call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
-
-   pop(r11);
-   pop(rsp);
-   pop(r10);
-   pop(r9);
-   pop(r8);
-   pop(rcx);
-   pop(rdx);
-   pop(rsi);
-   pop(rdi);
-   if (thread != rax) {
-       mov(thread, rax);
-       pop(rax);
-   }
-}
-#endif
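
Both deleted get_thread stubs wrap a call to pthread_getspecific from generated code, which is why they save every caller-saved register (and, in the 64-bit variant, realign the stack to 16 bytes) around the call. Stripped of that ceremony, the lookup they perform is just the following; g_thread_key is a stand-in for the deleted ThreadLocalStorage::thread_index():

    // The C-level lookup behind the deleted stubs (key name assumed).
    #include <pthread.h>

    class Thread;
    static pthread_key_t g_thread_key;   // stands in for thread_index()

    static inline Thread* current_thread_via_pthread() {
      return (Thread*) pthread_getspecific(g_thread_key);
    }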
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -221,7 +221,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
--- a/hotspot/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Map stack pointer (%esp) to thread pointer for faster TLS access
-//
-// Here we use a flat table for better performance. Getting current thread
-// is down to one memory access (read _sp_map[%esp>>12]) in generated code
-// and two in runtime code (-fPIC code needs an extra load for _sp_map).
-//
-// This code assumes stack page is not shared by different threads. It works
-// in 32-bit VM when page size is 4K (or a multiple of 4K, if that matters).
-//
-// Notice that _sp_map is allocated in the bss segment, which is ZFOD
-// (zero-fill-on-demand). While it reserves 4M address space upfront,
-// actual memory pages are committed on demand.
-//
-// If an application creates and destroys a lot of threads, usually the
-// stack space freed by a thread will soon get reused by new thread.
-// No memory page in _sp_map is wasted.
-//
-// However, it's still possible that we might end up populating &
-// committing a large fraction of the 4M table over time, but the actual
-// amount of live data in the table could be quite small. The max wastage
-// is less than 4M bytes. If it becomes an issue, we could use madvise()
-// with MADV_DONTNEED to reclaim unused (i.e. all-zero) pages in _sp_map.
-// MADV_DONTNEED on Linux keeps the virtual memory mapping, but zaps the
-// physical memory page (i.e. similar to MADV_FREE on Solaris).
-
-#if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
-Thread* ThreadLocalStorage::_sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
-}
-
-void ThreadLocalStorage::pd_init() {
-  assert(align_size_down(os::vm_page_size(), PAGE_SIZE) == os::vm_page_size(),
-         "page size must be multiple of PAGE_SIZE");
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-  address stack_top = os::current_stack_base();
-  size_t stack_size = os::current_stack_size();
-
-  for (address p = stack_top - stack_size; p < stack_top; p += PAGE_SIZE) {
-    // pd_set_thread() is called with non-NULL value when a new thread is
-    // created/attached, or with NULL value when a thread is about to exit.
-    // If both "thread" and the corresponding _sp_map[] entry are non-NULL,
-    // they should have the same value. Otherwise it might indicate that the
-    // stack page is shared by multiple threads. However, a more likely cause
-    // for this assertion to fail is that an attached thread exited without
-    // detaching itself from VM, which is a program error and could cause VM
-    // to crash.
-    assert(thread == NULL || _sp_map[(uintptr_t)p >> PAGE_SHIFT] == NULL ||
-           thread == _sp_map[(uintptr_t)p >> PAGE_SHIFT],
-           "thread exited without detaching from VM??");
-    _sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread;
-  }
-}
-#else
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
-}
-
-void ThreadLocalStorage::pd_init() {
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
-#endif // !AMD64 && !MINIMIZE_RAM_USAGE
--- a/hotspot/src/os_cpu/linux_x86/vm/threadLS_linux_x86.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_X86_VM_THREADLS_LINUX_X86_HPP
-#define OS_CPU_LINUX_X86_VM_THREADLS_LINUX_X86_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-#if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
-
-  // map stack pointer to thread pointer - see notes in threadLS_linux_x86.cpp
-  #define SP_BITLENGTH  32
-  #define PAGE_SHIFT    12
-  #define PAGE_SIZE     (1UL << PAGE_SHIFT)
-  static Thread* _sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-
-public:
-
-  static Thread** sp_map_addr() { return _sp_map; }
-
-  static Thread* thread() {
-    uintptr_t sp;
-    __asm__ volatile ("movl %%esp, %0" : "=r" (sp));
-    return _sp_map[sp >> PAGE_SHIFT];
-  }
-
-#else
-
-public:
-
-   static Thread* thread() {
-     return (Thread*) os::thread_local_storage_at(thread_index());
-   }
-
-#endif // AMD64 || MINIMIZE_RAM_USAGE
-
-#endif // OS_CPU_LINUX_X86_VM_THREADLS_LINUX_X86_HPP
--- a/hotspot/src/os_cpu/linux_zero/vm/assembler_linux_zero.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/linux_zero/vm/assembler_linux_zero.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -23,10 +23,4 @@
  *
  */
 
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_zero.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
 // This file is intentionally empty
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -125,7 +125,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   SignalHandlerMark shm(t);
 
--- a/hotspot/src/os_cpu/linux_zero/vm/threadLS_linux_zero.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-  // nothing to do
-}
-
-void ThreadLocalStorage::pd_init() {
-  // nothing to do
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/hotspot/src/os_cpu/linux_zero/vm/threadLS_linux_zero.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_ZERO_VM_THREADLS_LINUX_ZERO_HPP
-#define OS_CPU_LINUX_ZERO_VM_THREADLS_LINUX_ZERO_HPP
-
-// Processor dependent parts of ThreadLocalStorage
-
- public:
-  static Thread* thread() {
-    return (Thread*) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_LINUX_ZERO_VM_THREADLS_LINUX_ZERO_HPP
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -290,7 +290,7 @@
                           int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
--- a/hotspot/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// True thread-local variable
-__thread Thread * ThreadLocalStorage::_thr_current = NULL;
-
-// Implementations needed to support the shared API
-
-void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-
-bool ThreadLocalStorage::_initialized = false;
-
-void ThreadLocalStorage::init() {
-  _initialized = true;
-}
-
-bool ThreadLocalStorage::is_initialized() {
-  return _initialized;
-}
-
-Thread* ThreadLocalStorage::get_thread_slow() {
-    return thread();
-}
-
-extern "C" Thread* get_thread() {
-  return ThreadLocalStorage::thread();
-}
--- a/hotspot/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
-#define OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
-
-// Solaris specific implementation involves simple, direct use
-// of a compiler-based thread-local variable
-
-private:
-  static __thread Thread * _thr_current;
-
-  static bool _initialized;  // needed for shared API
-
-public:
-  static inline Thread* thread();
-
-#endif // OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
--- a/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -25,8 +25,6 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-#include "runtime/thread.inline.hpp"
 
 void MacroAssembler::int3() {
   push(rax);
@@ -37,33 +35,3 @@
   pop(rdx);
   pop(rax);
 }
-
-// This is simply a call to ThreadLocalStorage::thread()
-void MacroAssembler::get_thread(Register thread) {
-  if (thread != rax) {
-    push(rax);
-  }
-  push(rdi);
-  push(rsi);
-  push(rdx);
-  push(rcx);
-  push(r8);
-  push(r9);
-  push(r10);
-  push(r11);
-
-  call(RuntimeAddress(CAST_FROM_FN_PTR(address, ThreadLocalStorage::thread)));
-
-  pop(r11);
-  pop(r10);
-  pop(r9);
-  pop(r8);
-  pop(rcx);
-  pop(rdx);
-  pop(rsi);
-  pop(rdi);
-  if (thread != rax) {
-    movl(thread, rax);
-    pop(rax);
-  }
-}
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -346,7 +346,7 @@
   }
 #endif // !AMD64
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();  // slow & steady
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
--- a/hotspot/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// True thread-local variable
-__thread Thread * ThreadLocalStorage::_thr_current = NULL;
-
-// Implementations needed to support the shared API
-
-void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-
-bool ThreadLocalStorage::_initialized = false;
-
-void ThreadLocalStorage::init() {
-  _initialized = true;
-}
-
-bool ThreadLocalStorage::is_initialized() {
-  return _initialized;
-}
-
-Thread* ThreadLocalStorage::get_thread_slow() {
-    return thread();
-}
-
-extern "C" Thread* get_thread() {
-  return ThreadLocalStorage::thread();
-}
--- a/hotspot/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
-#define OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
-
-// Solaris specific implementation involves simple, direct use
-// of a compiler-based thread-local variable
-
-private:
-  static __thread Thread * _thr_current;
-
-  static bool _initialized;  // needed for shared API
-
-public:
-  static inline Thread* thread();
-
-#endif // OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
--- a/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,6 @@
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
 
 void MacroAssembler::int3() {
   emit_int8((unsigned char)0xCC);
@@ -58,44 +56,11 @@
 
   prefix(FS_segment);
   movptr(thread, null);
-  assert(ThreadLocalStorage::get_thread_ptr_offset() != 0,
+  assert(os::win32::get_thread_ptr_offset() != 0,
          "Thread Pointer Offset has not been initialized");
-  movl(thread, Address(thread, ThreadLocalStorage::get_thread_ptr_offset()));
+  movl(thread, Address(thread, os::win32::get_thread_ptr_offset()));
 }
-#else
-// call (Thread*)TlsGetValue(thread_index());
-void MacroAssembler::get_thread(Register thread) {
-   if (thread != rax) {
-     push(rax);
-   }
-   push(rdi);
-   push(rsi);
-   push(rdx);
-   push(rcx);
-   push(r8);
-   push(r9);
-   push(r10);
-   // XXX
-   mov(r10, rsp);
-   andq(rsp, -16);
-   push(r10);
-   push(r11);
 
-   movl(c_rarg0, ThreadLocalStorage::thread_index());
-   call(RuntimeAddress((address)TlsGetValue));
+// #else - use shared x86 implementation in cpu/x86/vm/macroAssembler_x86.cpp
 
-   pop(r11);
-   pop(rsp);
-   pop(r10);
-   pop(r9);
-   pop(r8);
-   pop(rcx);
-   pop(rdx);
-   pop(rsi);
-   pop(rdi);
-   if (thread != rax) {
-       mov(thread, rax);
-       pop(rax);
-   }
-}
 #endif
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -85,14 +85,14 @@
     //
     volatile Thread* wrapperthread = thread;
 
-    if ( ThreadLocalStorage::get_thread_ptr_offset() == 0 ) {
+    if (os::win32::get_thread_ptr_offset() == 0) {
       int thread_ptr_offset;
       __asm {
         lea eax, dword ptr wrapperthread;
         sub eax, dword ptr FS:[0H];
         mov thread_ptr_offset, eax
       };
-      ThreadLocalStorage::set_thread_ptr_offset(thread_ptr_offset);
+      os::win32::set_thread_ptr_offset(thread_ptr_offset);
     }
 #ifdef ASSERT
     // Verify that the offset hasn't changed since we initally captured
@@ -105,7 +105,7 @@
         sub eax, dword ptr FS:[0H];
         mov test_thread_ptr_offset, eax
       };
-      assert(test_thread_ptr_offset == ThreadLocalStorage::get_thread_ptr_offset(),
+      assert(test_thread_ptr_offset == os::win32::get_thread_ptr_offset(),
              "thread pointer offset from SEH changed");
     }
 #endif // ASSERT
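
The 32-bit Windows path keeps its fast path: the hunk above captures, once, the byte offset of a known stack slot relative to FS:[0] (the thread's SEH registration base), and generated code then recovers the Thread* with two FS-relative loads. A sketch of that read in MSVC inline assembly (x86 only; tp_offset is whatever os::win32::get_thread_ptr_offset() captured):

    // Sketch of the FS-relative read performed by the generated 32-bit code.
    static Thread* read_thread_via_fs(int tp_offset) {
      Thread* t;
      __asm {
        mov eax, dword ptr fs:[0H]    // SEH base for this thread
        add eax, tp_offset            // slot pinned by os_exception_wrapper
        mov eax, dword ptr [eax]      // load the Thread* stored there
        mov t, eax
      }
      return t;
    }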
--- a/hotspot/src/os_cpu/windows_x86/vm/threadLS_windows_x86.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Provides an entry point we can link against and
-// a buffer we can emit code into. The buffer is
-// filled by ThreadLocalStorage::generate_code_for_get_thread
-// and called from ThreadLocalStorage::thread()
-
-int ThreadLocalStorage::_thread_ptr_offset = 0;
-
-static void call_wrapper_dummy() {}
-
-// We need to call the os_exception_wrapper once so that it sets
-// up the offset from FS of the thread pointer.
-void ThreadLocalStorage::generate_code_for_get_thread() {
-      os::os_exception_wrapper( (java_call_t)call_wrapper_dummy,
-                                NULL, NULL, NULL, NULL);
-}
-
-void ThreadLocalStorage::pd_init() { }
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread)  {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/hotspot/src/os_cpu/windows_x86/vm/threadLS_windows_x86.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_WINDOWS_X86_VM_THREADLS_WINDOWS_X86_HPP
-#define OS_CPU_WINDOWS_X86_VM_THREADLS_WINDOWS_X86_HPP
-
-// Processor dependent parts of ThreadLocalStorage
-
-protected:
-
-  static int _thread_ptr_offset;
-
-public:
-
-  // Java Thread
-  static inline Thread* thread() {
-    return (Thread*)TlsGetValue(thread_index());
-  }
-
-  static inline Thread* get_thread() {
-    return (Thread*)TlsGetValue(thread_index());
-  }
-
-  static inline void set_thread_ptr_offset( int offset ) { _thread_ptr_offset = offset; }
-
-  static inline int get_thread_ptr_offset() { return _thread_ptr_offset; }
-
-#endif // OS_CPU_WINDOWS_X86_VM_THREADLS_WINDOWS_X86_HPP
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -48,6 +48,7 @@
 #include "runtime/thread.hpp"
 #include "services/threadService.hpp"
 #include "utilities/bytes.hpp"
+#include "logging/log.hpp"
 
 #define NOFAILOVER_MAJOR_VERSION                       51
 #define NONZERO_PADDING_BYTES_IN_SWITCH_MAJOR_VERSION  51
@@ -111,6 +112,18 @@
   }
 }
 
+// Prints the end-verification message to the appropriate output.
+void Verifier::log_end_verification(outputStream* st, const char* klassName, Symbol* exception_name, TRAPS) {
+  if (HAS_PENDING_EXCEPTION) {
+    st->print("Verification for %s has", klassName);
+    st->print_cr(" exception pending %s ",
+                 PENDING_EXCEPTION->klass()->external_name());
+  } else if (exception_name != NULL) {
+    st->print_cr("Verification for %s failed", klassName);
+  }
+  st->print_cr("End class verification for: %s", klassName);
+}
+
 bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, TRAPS) {
   HandleMark hm;
   ResourceMark rm(THREAD);
@@ -155,9 +168,7 @@
   bool can_failover = FailOverToOldVerifier &&
      klass->major_version() < NOFAILOVER_MAJOR_VERSION;
 
-  if (TraceClassInitialization) {
-    tty->print_cr("Start class verification for: %s", klassName);
-  }
+  log_info(classinit)("Start class verification for: %s", klassName);
   if (klass->major_version() >= STACKMAP_ATTRIBUTE_MAJOR_VERSION) {
     ClassVerifier split_verifier(klass, THREAD);
     split_verifier.verify_class(THREAD);
@@ -165,10 +176,10 @@
     if (can_failover && !HAS_PENDING_EXCEPTION &&
         (exception_name == vmSymbols::java_lang_VerifyError() ||
          exception_name == vmSymbols::java_lang_ClassFormatError())) {
-      if (TraceClassInitialization || VerboseVerification) {
-        tty->print_cr(
-          "Fail over class verification to old verifier for: %s", klassName);
+      if (VerboseVerification) {
+        tty->print_cr("Fail over class verification to old verifier for: %s", klassName);
       }
+      log_info(classinit)("Fail over class verification to old verifier for: %s", klassName);
       exception_name = inference_verify(
         klass, message_buffer, message_buffer_len, THREAD);
     }
@@ -180,15 +191,11 @@
         klass, message_buffer, message_buffer_len, THREAD);
   }
 
-  if (TraceClassInitialization || VerboseVerification) {
-    if (HAS_PENDING_EXCEPTION) {
-      tty->print("Verification for %s has", klassName);
-      tty->print_cr(" exception pending %s ",
-        PENDING_EXCEPTION->klass()->external_name());
-    } else if (exception_name != NULL) {
-      tty->print_cr("Verification for %s failed", klassName);
-    }
-    tty->print_cr("End class verification for: %s", klassName);
+  if (log_is_enabled(Info, classinit)) {
+    log_end_verification(LogHandle(classinit)::info_stream(), klassName, exception_name, THREAD);
+  }
+  if (VerboseVerification) {
+    log_end_verification(tty, klassName, exception_name, THREAD);
   }
 
   if (HAS_PENDING_EXCEPTION) {
@@ -598,10 +605,13 @@
     verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
   }
 
-  if (VerboseVerification || TraceClassInitialization) {
-    if (was_recursively_verified())
+  if (was_recursively_verified()) {
+    if (VerboseVerification) {
       tty->print_cr("Recursive verification detected for: %s",
-          _klass->external_name());
+                    _klass->external_name());
+    }
+    log_info(classinit)("Recursive verification detected for: %s",
+                        _klass->external_name());
   }
 }
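
The verifier's tracing now goes through Unified Logging under the classinit tag, while VerboseVerification keeps writing to tty. The pattern in isolation, as a sketch (the -Xlog spelling is an assumption for this vintage of the framework; later releases renamed the tag):

    // UL pattern used above: one-liner form plus a guarded stream form
    // for multi-line output. Enabled at run time with, e.g.,
    //   java -Xlog:classinit=info ...
    #include "logging/log.hpp"

    static void trace_verification(const char* klassName) {
      log_info(classinit)("Start class verification for: %s", klassName);
      if (log_is_enabled(Info, classinit)) {
        outputStream* st = LogHandle(classinit)::info_stream();
        st->print_cr("End class verification for: %s", klassName);
      }
    }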
 
--- a/hotspot/src/share/vm/classfile/verifier.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/classfile/verifier.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -50,6 +50,7 @@
    * Otherwise, no exception is thrown and the return indicates the
    * error.
    */
+  static void log_end_verification(outputStream* st, const char* klassName, Symbol* exception_name, TRAPS);
   static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, TRAPS);
 
   // Return false if the class is loaded by the bootstrap loader,
--- a/hotspot/src/share/vm/code/nmethod.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -2609,7 +2609,7 @@
   int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
 #ifdef ASSERT
   if (cont_offset == 0) {
-    Thread* thread = ThreadLocalStorage::get_thread_slow();
+    Thread* thread = Thread::current();
     ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
     HandleMark hm(thread);
     ResourceMark rm(thread);
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -144,9 +144,6 @@
     _cmst = NULL;
     Terminator_lock->notify();
   }
-
-  // Thread destructor usually does this..
-  ThreadLocalStorage::set_thread(NULL);
 }
 
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -112,18 +112,37 @@
 
 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
  private:
-  size_t _num_processed;
+  size_t _num_dirtied;
+  G1CollectedHeap* _g1h;
+  G1SATBCardTableLoggingModRefBS* _g1_bs;
+
+  HeapRegion* region_for_card(jbyte* card_ptr) const {
+    return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
+  }
+
+  bool will_become_free(HeapRegion* hr) const {
+    // A region will be freed by free_collection_set if the region is in the
+    // collection set and has not had an evacuation failure.
+    return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
+  }
 
  public:
-  RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
+  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
+    _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
 
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    *card_ptr = CardTableModRefBS::dirty_card_val();
-    _num_processed++;
+    HeapRegion* hr = region_for_card(card_ptr);
+
+    // Should only dirty cards in regions that won't be freed.
+    if (!will_become_free(hr)) {
+      *card_ptr = CardTableModRefBS::dirty_card_val();
+      _num_dirtied++;
+    }
+
     return true;
   }
 
-  size_t num_processed() const { return _num_processed; }
+  size_t num_dirtied()   const { return _num_dirtied; }
 };
 
 
@@ -2268,15 +2287,21 @@
   return blk.result();
 }
 
+bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
+  switch (cause) {
+    case GCCause::_java_lang_system_gc:                 return ExplicitGCInvokesConcurrent;
+    case GCCause::_dcmd_gc_run:                         return ExplicitGCInvokesConcurrent;
+    case GCCause::_update_allocation_context_stats_inc: return true;
+    case GCCause::_wb_conc_mark:                        return true;
+    default:                                            return false;
+  }
+}
+
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
-    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
-    case GCCause::_dcmd_gc_run:             return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
-    case GCCause::_update_allocation_context_stats_inc: return true;
-    case GCCause::_wb_conc_mark:            return true;
-    default:                                return false;
+    default:                                return is_user_requested_concurrent_full_gc(cause);
   }
 }
 
@@ -4619,24 +4644,26 @@
 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
  private:
   DirtyCardQueueSet* _queue;
+  G1CollectedHeap* _g1h;
  public:
-  G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
+  G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
+    _queue(queue), _g1h(g1h) { }
 
   virtual void work(uint worker_id) {
-    G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
+    G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
 
-    RedirtyLoggedCardTableEntryClosure cl;
+    RedirtyLoggedCardTableEntryClosure cl(_g1h);
     _queue->par_apply_closure_to_all_completed_buffers(&cl);
 
-    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());
+    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
   }
 };
 
 void G1CollectedHeap::redirty_logged_cards() {
   double redirty_logged_cards_start = os::elapsedTime();
 
-  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
+  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
   dirty_card_queue_set().reset_for_par_iteration();
   workers()->run_task(&redirty_task);
 
@@ -5471,36 +5498,36 @@
         return true;
       }
       if (cset_state.is_in_cset()) {
-        gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
+        gclog_or_tty->print_cr("\n## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
-        gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
+        gclog_or_tty->print_cr("\n## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
     } else {
       if (cset_state.is_humongous()) {
-        gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
+        gclog_or_tty->print_cr("\n## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (hr->in_collection_set() != cset_state.is_in_cset()) {
-        gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
+        gclog_or_tty->print_cr("\n## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                                hr->in_collection_set(), cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (cset_state.is_in_cset()) {
         if (hr->is_young() != (cset_state.is_young())) {
-          gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
+          gclog_or_tty->print_cr("\n## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                                  hr->is_young(), cset_state.value(), i);
           _failures = true;
           return true;
         }
         if (hr->is_old() != (cset_state.is_old())) {
-          gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
+          gclog_or_tty->print_cr("\n## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                                  hr->is_old(), cset_state.value(), i);
           _failures = true;
           return true;
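Taken together, region_for_card() and will_become_free() give the redirty closure a three-way decision. A standalone sketch of that decision (RegionState and the literal values are stand-ins; the real inputs come from G1CollectedHeap::is_in_cset() and HeapRegion::evacuation_failed()):

#include <cstdio>

struct RegionState { bool in_cset; bool evac_failed; };

// Mirrors will_become_free(): a region is about to be freed only if it is
// in the collection set and was evacuated successfully.
static bool will_become_free(const RegionState& r) {
  return r.in_cset && !r.evac_failed;
}

int main() {
  const RegionState cases[] = { {false, false}, {true, true}, {true, false} };
  for (const RegionState& r : cases) {
    // Cards are redirtied only for regions that survive the cleanup phase.
    printf("in_cset=%d evac_failed=%d -> dirtied=%d\n",
           r.in_cset, r.evac_failed, (int)!will_become_free(r));
  }
  return 0;
}

Only the {true, false} case is skipped: dirtying a card in a region that free_collection_set() is about to free would be wasted work.
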
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -245,9 +245,11 @@
   // instead of doing a STW GC. Currently, a concurrent cycle is
   // explicitly started if:
   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
-  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
-  // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
-  // (d) cause == _g1_humongous_allocation
+  // (b) cause == _g1_humongous_allocation
+  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
+  // (e) cause == _update_allocation_context_stats_inc
+  // (f) cause == _wb_conc_mark
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // indicates whether we are in young or mixed GC mode
@@ -579,6 +581,8 @@
     _in_cset_fast_test.clear();
   }
 
+  bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
+
   // This is called at the start of either a concurrent cycle or a Full
   // GC to update the number of old marking cycles started.
   void increment_old_marking_cycles_started();
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -291,7 +291,7 @@
   // for the first time during initialization.
   _reserve_regions = 0;
 
-  _collectionSetChooser = new CollectionSetChooser();
+  _cset_chooser = new CollectionSetChooser();
 }
 
 G1CollectorPolicy::~G1CollectorPolicy() {
@@ -854,7 +854,7 @@
   _survivor_surv_rate_group->reset();
   update_young_list_max_and_target_length();
   update_rs_lengths_prediction();
-  _collectionSetChooser->clear();
+  cset_chooser()->clear();
 
   _bytes_allocated_in_old_since_last_gc = 0;
 
@@ -1237,7 +1237,7 @@
                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                                update_rs_time_goal_ms);
 
-  _collectionSetChooser->verify();
+  cset_chooser()->verify();
 }
 
 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
@@ -1710,6 +1710,11 @@
   }
 }
 
+void G1CollectorPolicy::initiate_conc_mark() {
+  collector_state()->set_during_initial_mark_pause(true);
+  collector_state()->set_initiate_conc_mark_if_possible(false);
+}
+
 void G1CollectorPolicy::decide_on_conc_mark_initiation() {
   // We are about to decide on whether this pause will be an
   // initial-mark pause.
@@ -1726,17 +1731,22 @@
     // concurrent marking cycle. So we might initiate one.
 
     if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
-      // Initiate a new initial mark only if there is no marking or reclamation going
-      // on.
+      // Initiate a new initial mark if there is no marking or reclamation going on.
+      initiate_conc_mark();
+      ergo_verbose0(ErgoConcCycles,
+                    "initiate concurrent cycle",
+                    ergo_format_reason("concurrent cycle initiation requested"));
+    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
+      // Initiate a user requested initial mark. An initial mark must be a young-only
+      // GC, so the collector state must be updated to reflect this.
+      collector_state()->set_gcs_are_young(true);
+      collector_state()->set_last_young_gc(false);
 
-      collector_state()->set_during_initial_mark_pause(true);
-      // And we can now clear initiate_conc_mark_if_possible() as
-      // we've already acted on it.
-      collector_state()->set_initiate_conc_mark_if_possible(false);
-
+      abort_time_to_mixed_tracking();
+      initiate_conc_mark();
       ergo_verbose0(ErgoConcCycles,
-                  "initiate concurrent cycle",
-                  ergo_format_reason("concurrent cycle initiation requested"));
+                    "initiate concurrent cycle",
+                    ergo_format_reason("user requested concurrent cycle"));
     } else {
       // The concurrent marking thread is still finishing up the
       // previous cycle. If we start one right now the two cycles
@@ -1807,18 +1817,18 @@
 }
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
-  _collectionSetChooser->clear();
+  cset_chooser()->clear();
 
   WorkGang* workers = _g1->workers();
   uint n_workers = workers->active_workers();
 
   uint n_regions = _g1->num_regions();
   uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
-  _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
-  ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
+  cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
+  ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
   workers->run_task(&par_known_garbage_task);
 
-  _collectionSetChooser->sort_regions();
+  cset_chooser()->sort_regions();
 
   double end_sec = os::elapsedTime();
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
@@ -2097,8 +2107,7 @@
 
 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                 const char* false_action_str) const {
-  CollectionSetChooser* cset_chooser = _collectionSetChooser;
-  if (cset_chooser->is_empty()) {
+  if (cset_chooser()->is_empty()) {
     ergo_verbose0(ErgoMixedGCs,
                   false_action_str,
                   ergo_format_reason("candidate old regions not available"));
@@ -2106,7 +2115,7 @@
   }
 
   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
-  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
+  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
   double threshold = (double) G1HeapWastePercent;
   if (reclaimable_perc <= threshold) {
@@ -2116,7 +2125,7 @@
               ergo_format_region("candidate old regions")
               ergo_format_byte_perc("reclaimable")
               ergo_format_perc("threshold"),
-              cset_chooser->remaining_regions(),
+              cset_chooser()->remaining_regions(),
               reclaimable_bytes,
               reclaimable_perc, threshold);
     return false;
@@ -2128,7 +2137,7 @@
                 ergo_format_region("candidate old regions")
                 ergo_format_byte_perc("reclaimable")
                 ergo_format_perc("threshold"),
-                cset_chooser->remaining_regions(),
+                cset_chooser()->remaining_regions(),
                 reclaimable_bytes,
                 reclaimable_perc, threshold);
   return true;
@@ -2145,7 +2154,7 @@
   // to the CSet chooser in the first place, not how many remain, so
   // that the result is the same during all mixed GCs that follow a cycle.
 
-  const size_t region_num = (size_t) _collectionSetChooser->length();
+  const size_t region_num = (size_t) cset_chooser()->length();
   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
   size_t result = region_num / gc_num;
   // emulate ceiling
@@ -2254,15 +2263,14 @@
 
 
   if (!collector_state()->gcs_are_young()) {
-    CollectionSetChooser* cset_chooser = _collectionSetChooser;
-    cset_chooser->verify();
+    cset_chooser()->verify();
     const uint min_old_cset_length = calc_min_old_cset_length();
     const uint max_old_cset_length = calc_max_old_cset_length();
 
     uint expensive_region_num = 0;
     bool check_time_remaining = adaptive_young_list_length();
 
-    HeapRegion* hr = cset_chooser->peek();
+    HeapRegion* hr = cset_chooser()->peek();
     while (hr != NULL) {
       if (old_cset_region_length() >= max_old_cset_length) {
         // Added maximum number of old regions to the CSet.
@@ -2278,7 +2286,7 @@
 
       // Stop adding regions if the remaining reclaimable space is
       // not above G1HeapWastePercent.
-      size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
+      size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
       double threshold = (double) G1HeapWastePercent;
       if (reclaimable_perc <= threshold) {
@@ -2340,11 +2348,11 @@
       // We will add this region to the CSet.
       time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
       predicted_old_time_ms += predicted_time_ms;
-      cset_chooser->pop(); // already have region via peek()
+      cset_chooser()->pop(); // already have region via peek()
       _g1->old_set_remove(hr);
       add_old_region_to_cset(hr);
 
-      hr = cset_chooser->peek();
+      hr = cset_chooser()->peek();
     }
     if (hr == NULL) {
       ergo_verbose0(ErgoCSetConstruction,
@@ -2369,7 +2377,7 @@
                     time_remaining_ms);
     }
 
-    cset_chooser->verify();
+    cset_chooser()->verify();
   }
 
   stop_incremental_cset_building();
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -191,7 +191,7 @@
   void initialize_alignments();
   void initialize_flags();
 
-  CollectionSetChooser* _collectionSetChooser;
+  CollectionSetChooser* _cset_chooser;
 
   double _full_collection_start_sec;
 
@@ -405,6 +405,10 @@
   double non_young_other_time_ms() const;
   double constant_other_time_ms(double pause_time_ms) const;
 
+  CollectionSetChooser* cset_chooser() const {
+    return _cset_chooser;
+  }
+
 private:
   // Statistics kept per GC stoppage, pause or full.
   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
@@ -725,6 +729,11 @@
   // (should not be called directly).
   void add_region_to_incremental_cset_common(HeapRegion* hr);
 
+  // Set the state to start a concurrent marking cycle and clear
+  // _initiate_conc_mark_if_possible because it has now been
+  // acted on.
+  void initiate_conc_mark();
+
 public:
   // Add hr to the LHS of the incremental collection set.
   void add_region_to_incremental_cset_lhs(HeapRegion* hr);
--- a/hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -47,8 +47,9 @@
   virtual void do_oop(      oop* p) { do_oop_work(p); }
   template <class T> void do_oop_work(T* p) {
     assert(_from->is_in_reserved(p), "paranoia");
-    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
-        !_from->is_survivor()) {
+    assert(!_from->is_survivor(), "Unexpected evac failure in survivor region");
+
+    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p))) {
       size_t card_index = _ct_bs->index_for(p);
       if (_ct_bs->mark_card_deferred(card_index)) {
         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -141,6 +141,7 @@
   assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
   _active_gc_threads = active_gc_threads;
   _cur_expand_heap_time_ms = 0.0;
+  _external_accounted_time_ms = 0.0;
 
   for (int i = 0; i < GCParPhasesSentinel; i++) {
     _gc_par_phases[i]->reset();
@@ -185,9 +186,12 @@
 }
 
 double G1GCPhaseTimes::accounted_time_ms() {
+    // First subtract any externally accounted time
+    double misc_time_ms = _external_accounted_time_ms;
+
     // Subtract the root region scanning wait time. It's initialized to
     // zero at the start of the pause.
-    double misc_time_ms = _root_region_scan_wait_time_ms;
+    misc_time_ms += _root_region_scan_wait_time_ms;
 
     misc_time_ms += _cur_collection_par_time_ms;
 
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -99,6 +99,8 @@
   double _cur_collection_start_sec;
   double _root_region_scan_wait_time_ms;
 
+  double _external_accounted_time_ms;
+
   double _recorded_young_cset_choice_time_ms;
   double _recorded_non_young_cset_choice_time_ms;
 
@@ -244,6 +246,10 @@
     _cur_verify_after_time_ms = time_ms;
   }
 
+  void inc_external_accounted_time_ms(double time_ms) {
+    _external_accounted_time_ms += time_ms;
+  }
+
   double accounted_time_ms();
 
   double cur_collection_start_sec() {
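The new accumulator is presumably fed by pause-related work that runs outside the tracked parallel phases; a hypothetical caller (do_external_work() and the surrounding context are stand-ins, not code from this changeset):

  double start_sec = os::elapsedTime();
  do_external_work();  // stand-in for pause work not covered by a G1GCParPhaseTimesTracker
  phase_times()->inc_external_accounted_time_ms((os::elapsedTime() - start_sec) * 1000.0);

Because accounted_time_ms() now starts from _external_accounted_time_ms, such time is no longer misreported in the untracked remainder of the pause.
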
--- a/hotspot/src/share/vm/gc/g1/g1HotCardCache.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1HotCardCache.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -123,7 +123,7 @@
   // Resets the hot card cache and discards the entries.
   void reset_hot_cache() {
     assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
-    assert(Thread::current_noinline()->is_VM_thread(), "Current thread should be the VMthread");
+    assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
     if (default_use_cache()) {
         reset_hot_cache_internal();
     }
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -91,7 +91,7 @@
       if (state.is_humongous()) {
         _g1->set_humongous_is_live(obj);
       }
-      _par_scan_state->update_rs(_from, p);
+      _par_scan_state->update_rs(_from, p, obj);
     }
   }
 }
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -98,10 +98,10 @@
 
   template <class T> void push_on_queue(T* ref);
 
-  template <class T> void update_rs(HeapRegion* from, T* p) {
+  template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
     // If the new value of the field points to the same region or
     // is the to-space, we don't need to include it in the Rset updates.
-    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
+    if (!HeapRegion::is_in_same_region(p, o) && !from->is_young()) {
       size_t card_index = ctbs()->index_for(p);
       // If the card hasn't been added to the buffer, do it.
       if (ctbs()->mark_card_deferred(card_index)) {
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -40,14 +40,13 @@
   // processed multiple times. So redo this check.
   const InCSetState in_cset_state = _g1h->in_cset_state(obj);
   if (in_cset_state.is_in_cset()) {
-    oop forwardee;
     markOop m = obj->mark();
     if (m->is_marked()) {
-      forwardee = (oop) m->decode_pointer();
+      obj = (oop) m->decode_pointer();
     } else {
-      forwardee = copy_to_survivor_space(in_cset_state, obj, m);
+      obj = copy_to_survivor_space(in_cset_state, obj, m);
     }
-    oopDesc::encode_store_heap_oop(p, forwardee);
+    oopDesc::encode_store_heap_oop(p, obj);
   } else if (in_cset_state.is_humongous()) {
     _g1h->set_humongous_is_live(obj);
   } else {
@@ -56,7 +55,7 @@
   }
 
   assert(obj != NULL, "Must be");
-  update_rs(from, p);
+  update_rs(from, p, obj);
 }
 
 template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
--- a/hotspot/src/share/vm/gc/g1/g1Predictions.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1Predictions.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_GC_G1_G1PREDICTIONS_HPP
 #define SHARE_VM_GC_G1_G1PREDICTIONS_HPP
 
-#include "memory/allocation.inline.hpp"
 #include "utilities/numberSeq.hpp"
 
 // Utility class containing various helper methods for prediction.
--- a/hotspot/src/share/vm/gc/g1/g1RootClosures.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1RootClosures.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -23,7 +23,38 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/g1RootClosures.inline.hpp"
+#include "gc/g1/g1OopClosures.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
+#include "gc/g1/g1SharedClosures.hpp"
+
+// Closures used for standard G1 evacuation.
+class G1EvacuationClosures : public G1EvacuationRootClosures {
+  G1SharedClosures<G1MarkNone> _closures;
+
+public:
+  G1EvacuationClosures(G1CollectedHeap* g1h,
+                       G1ParScanThreadState* pss,
+                       bool gcs_are_young) :
+      _closures(g1h, pss, gcs_are_young, /* must_claim_cld */ false) {}
+
+  OopClosure* weak_oops()   { return &_closures._buffered_oops; }
+  OopClosure* strong_oops() { return &_closures._buffered_oops; }
+
+  CLDClosure* weak_clds()             { return &_closures._clds; }
+  CLDClosure* strong_clds()           { return &_closures._clds; }
+  CLDClosure* thread_root_clds()      { return NULL; }
+  CLDClosure* second_pass_weak_clds() { return NULL; }
+
+  CodeBlobClosure* strong_codeblobs()      { return &_closures._codeblobs; }
+  CodeBlobClosure* weak_codeblobs()        { return &_closures._codeblobs; }
+
+  void flush()                 { _closures._buffered_oops.done(); }
+  double closure_app_seconds() { return _closures._buffered_oops.closure_app_seconds(); }
+
+  OopClosure* raw_strong_oops() { return &_closures._oops; }
+
+  bool trace_metadata()         { return false; }
+};
 
 // Closures used during initial mark.
 // The treatment of "weak" roots is selectable through the template parameter,
--- a/hotspot/src/share/vm/gc/g1/g1RootClosures.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "gc/g1/bufferingOopClosure.hpp"
-#include "gc/g1/g1CodeBlobClosure.hpp"
-#include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1OopClosures.inline.hpp"
-#include "gc/g1/g1RootClosures.hpp"
-
-// Simple holder object for a complete set of closures used by the G1 evacuation code.
-template <G1Mark Mark>
-class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
-public:
-  G1ParCopyClosure<G1BarrierNone,  Mark> _oops;
-  G1ParCopyClosure<G1BarrierKlass, Mark> _oop_in_klass;
-  G1KlassScanClosure                     _klass_in_cld_closure;
-  CLDToKlassAndOopClosure                _clds;
-  G1CodeBlobClosure                      _codeblobs;
-  BufferingOopClosure                    _buffered_oops;
-
-  G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) :
-    _oops(g1h, pss),
-    _oop_in_klass(g1h, pss),
-    _klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses),
-    _clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
-    _codeblobs(&_oops),
-    _buffered_oops(&_oops) {}
-};
-
-class G1EvacuationClosures : public G1EvacuationRootClosures {
-  G1SharedClosures<G1MarkNone> _closures;
-
-public:
-  G1EvacuationClosures(G1CollectedHeap* g1h,
-                       G1ParScanThreadState* pss,
-                       bool gcs_are_young) :
-      _closures(g1h, pss, gcs_are_young, /* must_claim_cld */ false) {}
-
-  OopClosure* weak_oops()   { return &_closures._buffered_oops; }
-  OopClosure* strong_oops() { return &_closures._buffered_oops; }
-
-  CLDClosure* weak_clds()             { return &_closures._clds; }
-  CLDClosure* strong_clds()           { return &_closures._clds; }
-  CLDClosure* thread_root_clds()      { return NULL; }
-  CLDClosure* second_pass_weak_clds() { return NULL; }
-
-  CodeBlobClosure* strong_codeblobs()      { return &_closures._codeblobs; }
-  CodeBlobClosure* weak_codeblobs()        { return &_closures._codeblobs; }
-
-  void flush()                 { _closures._buffered_oops.done(); }
-  double closure_app_seconds() { return _closures._buffered_oops.closure_app_seconds(); }
-
-  OopClosure* raw_strong_oops() { return &_closures._oops; }
-
-  bool trace_metadata()         { return false; }
-};
--- a/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -85,11 +85,6 @@
     return false;
   }
 
-  if  (val == g1_young_gen) {
-    // the card is for a young gen region. We don't need to keep track of all pointers into young
-    return false;
-  }
-
   // Cached bit can be installed either on a clean card or on a claimed card.
   jbyte new_val = val;
   if (val == clean_card_val()) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1SharedClosures.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc/g1/bufferingOopClosure.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
+#include "gc/g1/g1OopClosures.hpp"
+#include "memory/iterator.hpp"
+
+class G1CollectedHeap;
+class G1ParScanThreadState;
+
+// Simple holder object for a complete set of closures used by the G1 evacuation code.
+template <G1Mark Mark>
+class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
+public:
+  G1ParCopyClosure<G1BarrierNone,  Mark> _oops;
+  G1ParCopyClosure<G1BarrierKlass, Mark> _oop_in_klass;
+  G1KlassScanClosure                     _klass_in_cld_closure;
+  CLDToKlassAndOopClosure                _clds;
+  G1CodeBlobClosure                      _codeblobs;
+  BufferingOopClosure                    _buffered_oops;
+
+  G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) :
+    _oops(g1h, pss),
+    _oop_in_klass(g1h, pss),
+    _klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses),
+    _clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
+    _codeblobs(&_oops),
+    _buffered_oops(&_oops) {}
+};
--- a/hotspot/src/share/vm/gc/g1/g1_globals.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1_globals.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -33,9 +33,11 @@
 
 #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, range, constraint) \
                                                                             \
-  product(bool, G1UseAdaptiveIHOP, false,                                   \
-          "Adaptively adjust InitiatingHeapOccupancyPercent from the "      \
-          "initial value.")                                                 \
+  product(bool, G1UseAdaptiveIHOP, true,                                    \
+          "Adaptively adjust the initiating heap occupancy from the "       \
+          "initial value of InitiatingHeapOccupancyPercent. The policy "    \
+          "attempts to start marking in time based on application "         \
+          "behavior.")                                                      \
                                                                             \
   experimental(size_t, G1AdaptiveIHOPNumInitialSamples, 3,                  \
           "How many completed time periods from initial mark to first "     \
--- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -351,6 +351,15 @@
                                       ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   }
 
+
+  // Returns whether a field is in the same region as the obj it points to.
+  template <typename T>
+  static bool is_in_same_region(T* p, oop obj) {
+    assert(p != NULL, "p can't be NULL");
+    assert(obj != NULL, "obj can't be NULL");
+    return (((uintptr_t) p ^ cast_from_oop<uintptr_t>(obj)) >> LogOfHRGrainBytes) == 0;
+  }
+
   static size_t max_region_size();
   static size_t min_region_size_in_words();
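The shift-and-XOR test works because two addresses lie in the same region exactly when all bits above the region-size granularity agree. A self-contained illustration (the 1 MB region size, i.e. a grain shift of 20, and the raw addresses are made-up values, not taken from the VM):

#include <cassert>
#include <cstdint>

static const int kLogRegionBytes = 20;  // assume 1 MB regions for the example

static bool in_same_region(uintptr_t p, uintptr_t obj) {
  // Identical high bits XOR to zero, so the shifted result is 0 iff both
  // addresses fall inside the same 2^kLogRegionBytes-aligned region.
  return ((p ^ obj) >> kLogRegionBytes) == 0;
}

int main() {
  assert( in_same_region(0x100040, 0x1FFFF8));  // both in region [0x100000, 0x200000)
  assert(!in_same_region(0x100040, 0x200010));  // neighbouring regions
  return 0;
}

Compared with the old from->is_in_reserved(...) test in update_rs(), this needs no HeapRegion lookup at all.
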
 
--- a/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1020,24 +1020,6 @@
 }
 
 #ifndef PRODUCT
-void PerRegionTable::test_fl_mem_size() {
-  PerRegionTable* dummy = alloc(NULL);
-
-  size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
-  assert(dummy->mem_size() > min_prt_size,
-         "PerRegionTable memory usage is suspiciously small, only has " SIZE_FORMAT " bytes. "
-         "Should be at least " SIZE_FORMAT " bytes.", dummy->mem_size(), min_prt_size);
-  free(dummy);
-  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
-  // try to reset the state
-  _free_list = NULL;
-  delete dummy;
-}
-
-void HeapRegionRemSet::test_prt() {
-  PerRegionTable::test_fl_mem_size();
-}
-
 void HeapRegionRemSet::test() {
   os::sleep(Thread::current(), (jlong)5000, false);
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
--- a/hotspot/src/share/vm/gc/g1/heapRegionRemSet.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegionRemSet.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -392,7 +392,6 @@
 
   // Run unit tests.
 #ifndef PRODUCT
-  static void test_prt();
   static void test();
 #endif
 };
--- a/hotspot/src/share/vm/gc/parallel/gcTaskThread.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/parallel/gcTaskThread.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,4 +1,3 @@
-
 /*
  * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -96,7 +95,6 @@
 void GCTaskThread::run() {
   // Set up the thread for stack overflow support
   this->record_stack_base_and_size();
-  this->initialize_thread_local_storage();
   this->initialize_named_thread();
   // Bind yourself to your processor.
   if (processor_id() != GCTaskManager::sentinel_worker()) {
--- a/hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -51,7 +51,6 @@
 
 void ConcurrentGCThread::initialize_in_thread() {
   this->record_stack_base_and_size();
-  this->initialize_thread_local_storage();
   this->initialize_named_thread();
   this->set_active_handles(JNIHandleBlock::allocate_block());
   // From this time Thread::current() should be working.
@@ -74,9 +73,6 @@
     _has_terminated = true;
     Terminator_lock->notify();
   }
-
-  // Thread destructor usually does this..
-  ThreadLocalStorage::set_thread(NULL);
 }
 
 static void _sltLoop(JavaThread* thread, TRAPS) {
--- a/hotspot/src/share/vm/gc/shared/workgroup.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/gc/shared/workgroup.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -275,7 +275,6 @@
 }
 
 void AbstractGangWorker::initialize() {
-  this->initialize_thread_local_storage();
   this->record_stack_base_and_size();
   this->initialize_named_thread();
   assert(_gang != NULL, "No gang to run in");
--- a/hotspot/src/share/vm/logging/logConfiguration.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/logging/logConfiguration.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -39,6 +39,7 @@
 
 LogOutput** LogConfiguration::_outputs = NULL;
 size_t      LogConfiguration::_n_outputs = 0;
+bool        LogConfiguration::_post_initialized = false;
 
 void LogConfiguration::post_initialize() {
   assert(LogConfiguration_lock != NULL, "Lock must be initialized before post-initialization");
@@ -51,6 +52,8 @@
     MutexLocker ml(LogConfiguration_lock);
     describe(log.trace_stream());
   }
+
+  _post_initialized = true;
 }
 
 void LogConfiguration::initialize(jlong vm_start_time) {
@@ -422,3 +425,12 @@
               "\t Turn off all logging, including warnings and errors,\n"
               "\t and then enable messages tagged with 'rt' using 'trace' level to file 'rttrace.txt'.\n");
 }
+
+void LogConfiguration::rotate_all_outputs() {
+  for (size_t idx = 0; idx < _n_outputs; idx++) {
+    if (_outputs[idx]->is_rotatable()) {
+      _outputs[idx]->rotate(true);
+    }
+  }
+}
+
--- a/hotspot/src/share/vm/logging/logConfiguration.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/logging/logConfiguration.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -40,6 +40,7 @@
  private:
   static LogOutput**  _outputs;
   static size_t       _n_outputs;
+  static bool         _post_initialized;
 
   // Create a new output. Returns NULL if failed.
   static LogOutput* new_output(char* name, const char* options = NULL);
@@ -94,6 +95,13 @@
 
   // Prints usage help for command line log configuration.
   static void print_command_line_help(FILE* out);
+
+  static bool is_post_initialized() {
+    return _post_initialized;
+  }
+
+  // Rotates all LogOutputs that are rotatable.
+  static void rotate_all_outputs();
 };
 
 #endif // SHARE_VM_LOGGING_LOGCONFIGURATION_HPP
--- a/hotspot/src/share/vm/logging/logDecorations.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/logging/logDecorations.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -96,7 +96,7 @@
 
 char * LogDecorations::create_tid_decoration(char* pos) {
   int written = jio_snprintf(pos, DecorationsBufferSize - (pos - _decorations_buffer),
-                             INTX_FORMAT, Thread::current()->osthread()->thread_id());
+                             INTX_FORMAT, os::current_thread_id());
   ASSERT_AND_RETURN(written, pos)
 }
 
--- a/hotspot/src/share/vm/logging/logDiagnosticCommand.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/logging/logDiagnosticCommand.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -35,13 +35,15 @@
     _what("what", "Configures what tags to log.", "STRING", false),
     _decorators("decorators", "Configures which decorators to use. Use 'none' or an empty value to remove all.", "STRING", false),
     _disable("disable", "Turns off all logging and clears the log configuration.", "BOOLEAN", false),
-    _list("list", "Lists current log configuration.", "BOOLEAN", false) {
+    _list("list", "Lists current log configuration.", "BOOLEAN", false),
+    _rotate("rotate", "Rotates all logs.", "BOOLEAN", false) {
   _dcmdparser.add_dcmd_option(&_output);
   _dcmdparser.add_dcmd_option(&_output_options);
   _dcmdparser.add_dcmd_option(&_what);
   _dcmdparser.add_dcmd_option(&_decorators);
   _dcmdparser.add_dcmd_option(&_disable);
   _dcmdparser.add_dcmd_option(&_list);
+  _dcmdparser.add_dcmd_option(&_rotate);
 }
 
 int LogDiagnosticCommand::num_arguments() {
@@ -86,6 +88,11 @@
     any_command = true;
   }
 
+  if (_rotate.has_value()) {
+    LogConfiguration::rotate_all_outputs();
+    any_command = true;
+  }
+
   if (!any_command) {
     // If no argument was provided, print usage
     print_help(LogDiagnosticCommand::name());
--- a/hotspot/src/share/vm/logging/logDiagnosticCommand.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/logging/logDiagnosticCommand.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -43,6 +43,7 @@
   DCmdArgument<char *> _decorators;
   DCmdArgument<bool> _disable;
   DCmdArgument<bool> _list;
+  DCmdArgument<bool> _rotate;
 
  public:
   LogDiagnosticCommand(outputStream* output, bool heap_allocated);
@@ -55,7 +56,7 @@
   }
 
   static const char* description() {
-    return "Lists, enables, disables or changes a log output configuration.";
+    return "Lists current log configuration, enables/disables/configures a log output, or rotates all logs.";
   }
 
   // Used by SecurityManager. This DCMD requires ManagementPermission = control.
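With the new option wired into the parser, rotation can be requested at runtime through the diagnostic command framework; assuming the command is registered under the VM.log name used for the unified logging dcmd, the invocation would look like:

  jcmd <pid> VM.log rotate

Each output answers is_rotatable() for itself, so non-file outputs (stdout/stderr) are simply skipped.
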
--- a/hotspot/src/share/vm/logging/logFileOutput.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/logging/logFileOutput.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -155,12 +155,7 @@
   int written = LogFileStreamOutput::write(decorations, msg);
   _current_size += written;
 
-  if (should_rotate()) {
-    MutexLockerEx ml(&_rotation_lock, true /* no safepoint check */);
-    if (should_rotate()) {
-      rotate();
-    }
-  }
+  rotate(false);
 
   return written;
 }
@@ -182,7 +177,14 @@
   }
 }
 
-void LogFileOutput::rotate() {
+void LogFileOutput::rotate(bool force) {
+
+  if (!should_rotate(force)) {
+    return;
+  }
+
+  MutexLockerEx ml(&_rotation_lock, true /* no safepoint check */);
+
   // Archive the current log file
   archive();
 
--- a/hotspot/src/share/vm/logging/logFileOutput.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/logging/logFileOutput.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -58,13 +58,13 @@
   size_t  _current_size;
 
   void archive();
-  void rotate();
   bool configure_rotation(const char* options);
   char *make_file_name(const char* file_name, const char* pid_string, const char* timestamp_string);
   static size_t parse_value(const char* value_str);
 
-  bool should_rotate() const {
-    return _file_count > 0 && _rotate_size > 0 && _current_size >= _rotate_size;
+  bool should_rotate(bool force) {
+    return is_rotatable() &&
+             (force || (_rotate_size > 0 && _current_size >= _rotate_size));
   }
 
  public:
@@ -73,6 +73,12 @@
   virtual bool initialize(const char* options);
   virtual int write(const LogDecorations& decorations, const char* msg);
 
+  virtual bool is_rotatable() {
+    return LogConfiguration::is_post_initialized() && (_file_count > 0);
+  }
+
+  virtual void rotate(bool force);
+
   virtual const char* name() const {
     return _name;
   }
--- a/hotspot/src/share/vm/logging/logOutput.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/logging/logOutput.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -75,6 +75,14 @@
   virtual const char* name() const = 0;
   virtual bool initialize(const char* options) = 0;
   virtual int write(const LogDecorations &decorations, const char* msg) = 0;
+
+  virtual bool is_rotatable() {
+    return false;
+  }
+
+  virtual void rotate(bool force) {
+    // Do nothing by default.
+  }
 };
 
 #endif // SHARE_VM_LOGGING_LOGOUTPUT_HPP
--- a/hotspot/src/share/vm/logging/logTag.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/logging/logTag.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -31,6 +31,7 @@
 // (The tags 'all', 'disable' and 'help' are special tags that can
 // not be used in log calls, and should not be listed below.)
 #define LOG_TAG_LIST \
+  LOG_TAG(classinit) \
   LOG_TAG(defaultmethods) \
   LOG_TAG(gc) \
   LOG_TAG(logging) \
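With classinit in the tag list, the messages converted elsewhere in this change become reachable from the unified logging command line; presumably something like:

  java -Xlog:classinit=info ...

replaces the old -XX:+TraceClassInitialization develop flag that this changeset removes from globals.hpp.
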
--- a/hotspot/src/share/vm/memory/allocation.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -790,7 +790,7 @@
 
 ReallocMark::ReallocMark() {
 #ifdef ASSERT
-  Thread *thread = ThreadLocalStorage::get_thread_slow();
+  Thread *thread = Thread::current();
   _nesting = thread->resource_area()->nesting();
 #endif
 }
--- a/hotspot/src/share/vm/memory/resourceArea.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/memory/resourceArea.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,7 +121,7 @@
     debug_only(_area->_nesting++;)
     assert( _area->_nesting > 0, "must stack allocate RMs" );
 #ifdef ASSERT
-    Thread* thread = ThreadLocalStorage::thread();
+    Thread* thread = Thread::current_or_null();
     if (thread != NULL) {
       _thread = thread;
       _previous_resource_mark = thread->current_resource_mark();
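Thread::current_or_null() comes from the ThreadLocalStorage removal this merge pulls in. A rough sketch of the shape of that API, inferred only from the call sites in this diff (THREAD_LOCAL_DECL and the field name are assumptions, not the committed declaration):

class Thread {
  // Compiler-level TLS replaces the old ThreadLocalStorage lookup.
  static THREAD_LOCAL_DECL Thread* _thr_current;
 public:
  // Assertion-free variant, safe on threads that were never attached.
  static inline Thread* current_or_null() { return _thr_current; }
  static inline Thread* current() {
    Thread* t = current_or_null();
    assert(t != NULL, "Thread::current() called on a detached thread");
    return t;
  }
  // Called once during startup/attach (see the jni.cpp hunk below).
  void initialize_thread_current() { _thr_current = this; }
};

This is why the old get_thread_slow()/thread() pair collapses into current()/current_or_null() throughout the files below.
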
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -63,6 +63,7 @@
 #include "services/threadService.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
+#include "logging/log.hpp"
 #ifdef COMPILER1
 #include "c1/c1_Compiler.hpp"
 #endif
@@ -491,9 +492,9 @@
     this_k->set_init_state (fully_initialized);
     this_k->fence_and_clear_init_lock();
     // trace
-    if (TraceClassInitialization) {
+    if (log_is_enabled(Info, classinit)) {
       ResourceMark rm(THREAD);
-      tty->print_cr("[Initialized %s without side effects]", this_k->external_name());
+      log_info(classinit)("[Initialized %s without side effects]", this_k->external_name());
     }
   }
 }
@@ -1129,10 +1130,12 @@
 
   methodHandle h_method(THREAD, this_k->class_initializer());
   assert(!this_k->is_initialized(), "we cannot initialize twice");
-  if (TraceClassInitialization) {
-    tty->print("%d Initializing ", call_class_initializer_impl_counter++);
-    this_k->name()->print_value();
-    tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", p2i(this_k()));
+  if (log_is_enabled(Info, classinit)) {
+    ResourceMark rm;
+    outputStream* log = LogHandle(classinit)::info_stream();
+    log->print("%d Initializing ", call_class_initializer_impl_counter++);
+    this_k->name()->print_value_on(log);
+    log->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", p2i(this_k()));
   }
   if (h_method() != NULL) {
     JavaCallArguments args; // No arguments
--- a/hotspot/src/share/vm/oops/oopsHierarchy.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/oops/oopsHierarchy.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -35,7 +35,7 @@
   assert (CheckUnhandledOops, "should only call when CheckUnhandledOops");
   if (!Universe::is_fully_initialized()) return;
   // This gets expensive, which is why checking unhandled oops is on a switch.
-  Thread* t = ThreadLocalStorage::thread();
+  Thread* t = Thread::current_or_null();
   if (t != NULL && t->is_Java_thread()) {
      frame fr = os::current_frame();
      // This points to the oop creator, I guess current frame points to caller
@@ -48,7 +48,7 @@
   assert (CheckUnhandledOops, "should only call when CheckUnhandledOops");
   if (!Universe::is_fully_initialized()) return;
   // This gets expensive, which is why checking unhandled oops is on a switch.
-  Thread* t = ThreadLocalStorage::thread();
+  Thread* t = Thread::current_or_null();
   if (t != NULL && t->is_Java_thread()) {
     t->unhandled_oops()->unregister_unhandled_oop(this);
   }
--- a/hotspot/src/share/vm/precompiled/precompiled.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/precompiled/precompiled.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -203,7 +203,6 @@
 # include "runtime/stubRoutines.hpp"
 # include "runtime/synchronizer.hpp"
 # include "runtime/thread.hpp"
-# include "runtime/threadLocalStorage.hpp"
 # include "runtime/timer.hpp"
 # include "runtime/unhandledOops.hpp"
 # include "runtime/vframe.hpp"
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/prims/jni.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -3933,7 +3933,6 @@
 #if INCLUDE_ALL_GCS
     run_unit_test(TestOldFreeSpaceCalculation_test());
     run_unit_test(TestG1BiasedArray_test());
-    run_unit_test(HeapRegionRemSet::test_prt());
     run_unit_test(TestBufferingOopClosure_test());
     run_unit_test(TestCodeCacheRemSet_test());
     if (UseG1GC) {
@@ -4175,7 +4174,7 @@
   }
   */
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null();
   if (t != NULL) {
     // If the thread has been attached this operation is a no-op
     *(JNIEnv**)penv = ((JavaThread*) t)->jni_environment();
@@ -4190,10 +4189,8 @@
   // initializing the Java level thread object. Hence, the correct state must
   // be set in order for the Safepoint code to deal with it correctly.
   thread->set_thread_state(_thread_in_vm);
-  // Must do this before initialize_thread_local_storage
   thread->record_stack_base_and_size();
-
-  thread->initialize_thread_local_storage();
+  thread->initialize_thread_current();
 
   if (!os::create_attached_thread(thread)) {
     delete thread;
@@ -4300,8 +4297,8 @@
 
   JNIWrapper("DetachCurrentThread");
 
-  // If the thread has been deattacted the operations is a no-op
-  if (ThreadLocalStorage::thread() == NULL) {
+  // If the thread has already been detached the operation is a no-op
+  if (Thread::current_or_null() == NULL) {
   HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK);
     return JNI_OK;
   }
@@ -4358,7 +4355,7 @@
 #define JVMPI_VERSION_1_2 ((jint)0x10000003)
 #endif // !JVMPI_VERSION_1
 
-  Thread* thread = ThreadLocalStorage::thread();
+  Thread* thread = Thread::current_or_null();
   if (thread != NULL && thread->is_Java_thread()) {
     if (Threads::is_supported_jni_version_including_1_1(version)) {
       *(JNIEnv**)penv = ((JavaThread*) thread)->jni_environment();
--- a/hotspot/src/share/vm/prims/jniCheck.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/prims/jniCheck.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -87,9 +87,9 @@
 #define JNI_ENTRY_CHECKED(result_type, header)                           \
 extern "C" {                                                             \
   result_type JNICALL header {                                           \
-    JavaThread* thr = (JavaThread*)ThreadLocalStorage::get_thread_slow();\
+    JavaThread* thr = (JavaThread*) Thread::current_or_null();           \
     if (thr == NULL || !thr->is_Java_thread()) {                         \
-      tty->print_cr("%s", fatal_using_jnienv_in_nonjava);                      \
+      tty->print_cr("%s", fatal_using_jnienv_in_nonjava);                \
       os::abort(true);                                                   \
     }                                                                    \
     JNIEnv* xenv = thr->jni_environment();                               \
--- a/hotspot/src/share/vm/prims/jvm.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -3719,3 +3719,8 @@
   info->is_attachable = AttachListener::is_attach_supported();
 }
 JVM_END
+
+JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
+  return os::get_signal_number(name);
+JVM_END
+
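A sketch of how the new entry point would be used from the JDK side (the lookup semantics are an assumption; os::get_signal_number() presumably returns a negative value for an unknown signal name):

  jint sig = JVM_FindSignal("INT");  // e.g. SIGINT's number on POSIX platforms
  if (sig < 0) {
    // signal name not supported on this platform
  }
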
--- a/hotspot/src/share/vm/prims/jvmtiEnter.xsl	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiEnter.xsl	Mon Dec 07 17:04:42 2015 +0000
@@ -494,7 +494,7 @@
   }</xsl:text>  
 
       <xsl:text>  
-  Thread* this_thread = (Thread*)ThreadLocalStorage::thread(); </xsl:text>
+  Thread* this_thread = Thread::current_or_null(); </xsl:text>
 
       <xsl:apply-templates select="." mode="transition"/>
     </xsl:when>
@@ -528,7 +528,7 @@
     </xsl:if>
     <xsl:text>    return JVMTI_ERROR_WRONG_PHASE;
   }
-  Thread* this_thread = (Thread*)ThreadLocalStorage::thread(); </xsl:text>
+  Thread* this_thread = Thread::current_or_null(); </xsl:text>
       <xsl:apply-templates select="." mode="transition"/>
       </xsl:if>
     </xsl:otherwise>
@@ -558,7 +558,7 @@
       <xsl:choose>
         <xsl:when test="count(@callbacksafe)=0 or not(contains(@callbacksafe,'safe'))">
           <xsl:text>  if (Threads::number_of_threads() != 0) {
-    Thread* this_thread = (Thread*)ThreadLocalStorage::thread();</xsl:text>
+    Thread* this_thread = Thread::current_or_null();</xsl:text>
         </xsl:when>
         <xsl:otherwise>
 
@@ -567,7 +567,7 @@
   if (Threads::number_of_threads() == 0) {
     transition = false;
   } else {
-    this_thread = (Thread*)ThreadLocalStorage::thread();
+    this_thread = Thread::current_or_null();
     transition = ((this_thread != NULL) &amp;&amp; !this_thread->is_VM_thread() &amp;&amp; !this_thread->is_ConcurrentGC_thread());
   }
   if (transition) {</xsl:text>
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -374,7 +374,7 @@
   }
 
   if (JvmtiEnv::get_phase() == JVMTI_PHASE_LIVE) {
-    JavaThread* current_thread = (JavaThread*) ThreadLocalStorage::thread();
+    JavaThread* current_thread = JavaThread::current();
     // transition code: native to VM
     ThreadInVMfromNative __tiv(current_thread);
     VM_ENTRY_BASE(jvmtiEnv*, JvmtiExport::get_jvmti_interface, current_thread)
@@ -1901,7 +1901,7 @@
 
 // Collect all the vm internally allocated objects which are visible to java world
 void JvmtiExport::record_vm_internal_object_allocation(oop obj) {
-  Thread* thread = ThreadLocalStorage::thread();
+  Thread* thread = Thread::current_or_null();
   if (thread != NULL && thread->is_Java_thread())  {
     // Can not take safepoint here.
     No_Safepoint_Verifier no_sfpt;
@@ -2436,7 +2436,7 @@
   if (!JvmtiExport::should_post_vm_object_alloc()) {
     return;
   }
-  Thread* thread = ThreadLocalStorage::thread();
+  Thread* thread = Thread::current_or_null();
   if (thread != NULL && thread->is_Java_thread())  {
     JavaThread* current_thread = (JavaThread*)thread;
     JvmtiThreadState *state = current_thread->jvmti_thread_state();
--- a/hotspot/src/share/vm/prims/jvmtiUtil.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiUtil.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@
     if (Threads::number_of_threads() == 0) {
       return JvmtiUtil::single_threaded_resource_area();
     }
-    thread = ThreadLocalStorage::thread();
+    thread = Thread::current_or_null();
     if (thread == NULL) {
       return JvmtiUtil::single_threaded_resource_area();
     }
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -571,6 +571,23 @@
   thread->dec_in_deopt_handler();
 }
 
+// Moved from cpu directories because none of the cpus has callee save values.
+// If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
+void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
+
+  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
+  // the days we had adapter frames. When we deoptimize a situation where a
+  // compiled caller calls a compiled callee, the caller will have registers it
+  // expects to survive the call to the callee. If we deoptimize the callee, the only
+  // way we can restore these registers is to have the oldest interpreter
+  // frame that we create restore these values. That is what this routine
+  // will accomplish.
+
+  // At the moment we have modified c2 to not have any callee save registers
+  // so this problem does not exist and this routine is just a placeholder.
+
+  assert(f->is_interpreted_frame(), "must be interpreted");
+}
 
 // Return BasicType of value being returned
 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1449,9 +1449,6 @@
   develop(bool, TraceBytecodes, false,                                      \
           "Trace bytecode execution")                                       \
                                                                             \
-  develop(bool, TraceClassInitialization, false,                            \
-          "Trace class initialization")                                     \
-                                                                            \
   product(bool, TraceExceptions, false,                                     \
           "Trace exceptions")                                               \
                                                                             \
--- a/hotspot/src/share/vm/runtime/interfaceSupport.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -32,7 +32,6 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
 #include "runtime/vframe.hpp"
 #include "utilities/preserveException.hpp"
 
--- a/hotspot/src/share/vm/runtime/interfaceSupport.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -562,7 +562,7 @@
 #define JVM_ENTRY_NO_ENV(result_type, header)                        \
 extern "C" {                                                         \
   result_type JNICALL header {                                       \
-    JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();  \
+    JavaThread* thread = JavaThread::current();                      \
     ThreadInVMfromNative __tiv(thread);                              \
     debug_only(VMNativeEntryWrapper __vew;)                          \
     VM_ENTRY_BASE(result_type, header, thread)
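With this change, JVM_ENTRY_NO_ENV picks up the current thread straight from
the thread-local Thread::current() path instead of the old TLS lookup. As a
rough orientation, an illustrative sketch of a wrapped entry point (not the
exact preprocessor output; JVM_Foo is a hypothetical entry point):

    extern "C" {
      jint JNICALL JVM_Foo(jint x) {
        JavaThread* thread = JavaThread::current();  // TLS lookup, no JNIEnv needed
        ThreadInVMfromNative __tiv(thread);          // transition native -> in-VM
        debug_only(VMNativeEntryWrapper __vew;)
        // body supplied by VM_ENTRY_BASE(jint, JVM_Foo(jint x), thread) ...
      }
    }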
--- a/hotspot/src/share/vm/runtime/java.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/java.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -512,10 +512,10 @@
 }
 
 void vm_exit(int code) {
-  Thread* thread = ThreadLocalStorage::is_initialized() ?
-    ThreadLocalStorage::get_thread_slow() : NULL;
+  Thread* thread =
+      ThreadLocalStorage::is_initialized() ? Thread::current_or_null() : NULL;
   if (thread == NULL) {
-    // we have serious problems -- just exit
+    // very early initialization failure -- just exit
     vm_direct_exit(code);
   }
 
@@ -551,8 +551,7 @@
   // Calling 'exit_globals()' will disable thread-local-storage and cause all
   // kinds of assertions to trigger in debug mode.
   if (is_init_completed()) {
-    Thread* thread = ThreadLocalStorage::is_initialized() ?
-                     ThreadLocalStorage::get_thread_slow() : NULL;
+    Thread* thread = Thread::current_or_null();
     if (thread != NULL && thread->is_Java_thread()) {
       // We are leaving the VM, set state to native (in case any OS exit
       // handlers call back to the VM)
@@ -606,7 +605,7 @@
   // If there are exceptions on this thread it must be cleared
   // first and here. Any future calls to EXCEPTION_MARK requires
   // that no pending exceptions exist.
-  Thread *THREAD = Thread::current();
+  Thread *THREAD = Thread::current(); // can't be NULL
   if (HAS_PENDING_EXCEPTION) {
     CLEAR_PENDING_EXCEPTION;
   }
--- a/hotspot/src/share/vm/runtime/mutex.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/mutex.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1035,10 +1035,10 @@
  Exeunt:
     assert(ILocked(), "invariant");
     assert(_owner == NULL, "invariant");
-    // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
+    // This can potentially be called by non-java Threads. Thus, Thread::current_or_null()
     // might return NULL. Don't call set_owner since it will break on an NULL owner
     // Consider installing a non-null "ANON" distinguished value instead of just NULL.
-    _owner = ThreadLocalStorage::thread();
+    _owner = Thread::current_or_null();
     return;
   }
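An illustrative sketch (not part of this change) of the guard pattern the
comment above describes, for code reachable from threads that were never
attached to the VM:

    Thread* t = Thread::current_or_null();
    if (t != NULL) {
      // attached: per-thread bookkeeping is safe
    } else {
      // non-attached native thread: tolerate the NULL, don't assert
    }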
 
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -27,7 +27,6 @@
 #include "runtime/os.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
 #include "runtime/vmThread.hpp"
 
 // Mutexes used in the VM (see comment in mutexLocker.hpp):
--- a/hotspot/src/share/vm/runtime/os.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/os.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -420,28 +420,6 @@
     }
 #endif
   }
-  static jboolean onLoaded = JNI_FALSE;
-  if (onLoaded) {
-    // We may have to wait to fire OnLoad until TLS is initialized.
-    if (ThreadLocalStorage::is_initialized()) {
-      // The JNI_OnLoad handling is normally done by method load in
-      // java.lang.ClassLoader$NativeLibrary, but the VM loads the base library
-      // explicitly so we have to check for JNI_OnLoad as well
-      const char *onLoadSymbols[] = JNI_ONLOAD_SYMBOLS;
-      JNI_OnLoad_t JNI_OnLoad = CAST_TO_FN_PTR(
-          JNI_OnLoad_t, dll_lookup(_native_java_library, onLoadSymbols[0]));
-      if (JNI_OnLoad != NULL) {
-        JavaThread* thread = JavaThread::current();
-        ThreadToNativeFromVM ttn(thread);
-        HandleMark hm(thread);
-        jint ver = (*JNI_OnLoad)(&main_vm, NULL);
-        onLoaded = JNI_TRUE;
-        if (!Threads::is_supported_jni_version_including_1_1(ver)) {
-          vm_exit_during_initialization("Unsupported JNI version");
-        }
-      }
-    }
-  }
   return _native_java_library;
 }
 
@@ -574,7 +552,7 @@
   // exists and has crash protection.
   WatcherThread *wt = WatcherThread::watcher_thread();
   if (wt != NULL && wt->has_crash_protection()) {
-    Thread* thread = ThreadLocalStorage::get_thread_slow();
+    Thread* thread = Thread::current_or_null();
     if (thread == wt) {
       assert(!wt->has_crash_protection(),
           "Can't malloc with crash protection from WatcherThread");
--- a/hotspot/src/share/vm/runtime/os.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/os.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -642,6 +642,9 @@
   // returns NULL if exception_code is not an OS exception/signal.
   static const char* exception_name(int exception_code, char* buf, size_t buflen);
 
+  // Returns the signal number (e.g. 11) for a given signal name (SIGSEGV).
+  static int get_signal_number(const char* signal_name);
+
   // Returns native Java library, loads if necessary
   static void*    native_java_library();
 
@@ -667,12 +670,6 @@
   static jlong current_file_offset(int fd);
   static jlong seek_to_file_offset(int fd, jlong offset);
 
-  // Thread Local Storage
-  static int   allocate_thread_local_storage();
-  static void  thread_local_storage_at_put(int index, void* value);
-  static void* thread_local_storage_at(int index);
-  static void  free_thread_local_storage(int index);
-
   // Retrieve native stack frames.
   // Parameter:
   //   stack:  an array to storage stack pointers.
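An illustrative call site for the new os::get_signal_number() declared above
(assuming, as is conventional, that an unknown name yields a negative result;
the platform-specific parsing is not part of this hunk):

    int sig = os::get_signal_number("SIGSEGV");
    if (sig >= 0) {
      // e.g. 11 for SIGSEGV on Linux/x86 -- usable when installing handlers
    }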
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -30,7 +30,6 @@
 #include "interpreter/linkResolver.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
-#include "runtime/threadLocalStorage.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/macros.hpp"
 
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -78,7 +78,6 @@
 #include "runtime/task.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
-#include "runtime/threadLocalStorage.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
 #include "runtime/vframe_hp.hpp"
@@ -142,6 +141,10 @@
 
 #endif // ndef DTRACE_ENABLED
 
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+// Current thread is maintained as a thread-local variable
+THREAD_LOCAL_DECL Thread* Thread::_thr_current = NULL;
+#endif
 
 // Class hierarchy
 // - Thread
@@ -281,22 +284,22 @@
 #endif // ASSERT
 }
 
-// Non-inlined version to be used where thread.inline.hpp shouldn't be included.
-Thread* Thread::current_noinline() {
-  return Thread::current();
+void Thread::initialize_thread_current() {
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+  assert(_thr_current == NULL, "Thread::current already initialized");
+  _thr_current = this;
+#endif
+  assert(ThreadLocalStorage::thread() == NULL, "ThreadLocalStorage::thread already initialized");
+  ThreadLocalStorage::set_thread(this);
+  assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
 }
 
-void Thread::initialize_thread_local_storage() {
-  // Note: Make sure this method only calls
-  // non-blocking operations. Otherwise, it might not work
-  // with the thread-startup/safepoint interaction.
-
-  // During Java thread startup, safepoint code should allow this
-  // method to complete because it may need to allocate memory to
-  // store information for the new thread.
-
-  // initialize structure dependent on thread local storage
-  ThreadLocalStorage::set_thread(this);
+void Thread::clear_thread_current() {
+  assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+  _thr_current = NULL;
+#endif
+  ThreadLocalStorage::set_thread(NULL);
 }
 
 void Thread::record_stack_base_and_size() {
@@ -364,15 +367,12 @@
 
   delete _SR_lock;
 
-  // clear thread local storage if the Thread is deleting itself
+  // Clear Thread::current if the thread is deleting itself.
+  // Needed to ensure JNI correctly detects non-attached threads.
   if (this == Thread::current()) {
-    ThreadLocalStorage::set_thread(NULL);
-  } else {
-    // In the case where we're not the current thread, invalidate all the
-    // caches in case some code tries to get the current thread or the
-    // thread that was destroyed, and gets stale information.
-    ThreadLocalStorage::invalidate_all();
+    clear_thread_current();
   }
+
   CHECK_UNHANDLED_OOPS_ONLY(if (CheckUnhandledOops) delete unhandled_oops();)
 }
 
@@ -1273,7 +1273,6 @@
   assert(this == watcher_thread(), "just checking");
 
   this->record_stack_base_and_size();
-  this->initialize_thread_local_storage();
   this->set_native_thread_name(this->name());
   this->set_active_handles(JNIHandleBlock::allocate_block());
   while (true) {
@@ -1326,9 +1325,6 @@
     _watcher_thread = NULL;
     Terminator_lock->notify();
   }
-
-  // Thread destructor usually does this..
-  ThreadLocalStorage::set_thread(NULL);
 }
 
 void WatcherThread::start() {
@@ -1663,9 +1659,6 @@
   // Record real stack base and size.
   this->record_stack_base_and_size();
 
-  // Initialize thread local storage; set before calling MutexLocker
-  this->initialize_thread_local_storage();
-
   this->create_stack_guard_pages();
 
   this->cache_global_variables();
@@ -1997,8 +1990,7 @@
 
 
 JavaThread* JavaThread::active() {
-  Thread* thread = ThreadLocalStorage::thread();
-  assert(thread != NULL, "just checking");
+  Thread* thread = Thread::current();
   if (thread->is_Java_thread()) {
     return (JavaThread*) thread;
   } else {
@@ -3407,7 +3399,7 @@
   jint adjust_after_os_result = Arguments::adjust_after_os();
   if (adjust_after_os_result != JNI_OK) return adjust_after_os_result;
 
-  // initialize TLS
+  // Initialize library-based TLS
   ThreadLocalStorage::init();
 
   // Initialize output stream logging
@@ -3444,14 +3436,9 @@
   // Attach the main thread to this os thread
   JavaThread* main_thread = new JavaThread();
   main_thread->set_thread_state(_thread_in_vm);
-  // must do this before set_active_handles and initialize_thread_local_storage
-  // Note: on solaris initialize_thread_local_storage() will (indirectly)
-  // change the stack size recorded here to one based on the java thread
-  // stacksize. This adjusted size is what is used to figure the placement
-  // of the guard pages.
+  main_thread->initialize_thread_current();
+  // must do this before set_active_handles
   main_thread->record_stack_base_and_size();
-  main_thread->initialize_thread_local_storage();
-
   main_thread->set_active_handles(JNIHandleBlock::allocate_block());
 
   if (!main_thread->set_as_starting_thread()) {
--- a/hotspot/src/share/vm/runtime/thread.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -102,6 +102,12 @@
 class Thread: public ThreadShadow {
   friend class VMStructs;
  private:
+
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+  // Current thread is maintained as a thread-local variable
+  static THREAD_LOCAL_DECL Thread* _thr_current;
+#endif
+
   // Exception handling
   // (Note: _pending_exception and friends are in ThreadShadow)
   //oop       _pending_exception;                // pending exception for current thread
@@ -260,7 +266,6 @@
   friend class No_Alloc_Verifier;
   friend class No_Safepoint_Verifier;
   friend class Pause_No_Safepoint_Verifier;
-  friend class ThreadLocalStorage;
   friend class GC_locker;
 
   ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
@@ -307,9 +312,12 @@
   Thread();
   virtual ~Thread();
 
-  // initializtion
-  void initialize_thread_local_storage();
+  // Manage Thread::current()
+  void initialize_thread_current();
+ private:
+  void clear_thread_current(); // needed for detaching JNI threads
 
+ public:
   // thread entry point
   virtual void run();
 
@@ -337,10 +345,13 @@
 
   virtual char* name() const { return (char*)"Unknown thread"; }
 
-  // Returns the current thread
+  // Returns the current thread (ASSERTS if NULL)
   static inline Thread* current();
-  // ... without having to include thread.inline.hpp.
-  static Thread* current_noinline();
+  // Returns the current thread, or NULL if not attached
+  static inline Thread* current_or_null();
+  // Returns the current thread, or NULL if not attached, and is
+  // safe for use from signal-handlers
+  static inline Thread* current_or_null_safe();
 
   // Common thread operations
   static void set_priority(Thread* thread, ThreadPriority priority);
@@ -649,25 +660,22 @@
 };
 
 // Inline implementation of Thread::current()
-// Thread::current is "hot" it's called > 128K times in the 1st 500 msecs of
-// startup.
-// ThreadLocalStorage::thread is warm -- it's called > 16K times in the same
-// period.   This is inlined in thread_<os_family>.inline.hpp.
+inline Thread* Thread::current() {
+  Thread* current = current_or_null();
+  assert(current != NULL, "Thread::current() called on detached thread");
+  return current;
+}
 
-inline Thread* Thread::current() {
-#ifdef ASSERT
-  // This function is very high traffic. Define PARANOID to enable expensive
-  // asserts.
-#ifdef PARANOID
-  // Signal handler should call ThreadLocalStorage::get_thread_slow()
-  Thread* t = ThreadLocalStorage::get_thread_slow();
-  assert(t != NULL && !t->is_inside_signal_handler(),
-         "Don't use Thread::current() inside signal handler");
+inline Thread* Thread::current_or_null() {
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+  return _thr_current;
+#else
+  return ThreadLocalStorage::thread();
 #endif
-#endif
-  Thread* thread = ThreadLocalStorage::thread();
-  assert(thread != NULL, "just checking");
-  return thread;
+}
+
+inline Thread* Thread::current_or_null_safe() {
+  return ThreadLocalStorage::thread();
 }
 
 // Name support for threads.  non-JavaThread subclasses with multiple
@@ -1842,8 +1850,8 @@
 
 // Inline implementation of JavaThread::current
 inline JavaThread* JavaThread::current() {
-  Thread* thread = ThreadLocalStorage::thread();
-  assert(thread != NULL && thread->is_Java_thread(), "just checking");
+  Thread* thread = Thread::current();
+  assert(thread->is_Java_thread(), "just checking");
   return (JavaThread*)thread;
 }
 
--- a/hotspot/src/share/vm/runtime/thread.inline.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/thread.inline.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,21 +30,6 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/thread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "thread_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
 
 #undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
 
--- a/hotspot/src/share/vm/runtime/threadLocalStorage.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/os.inline.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Solaris no longer has this kind of ThreadLocalStorage implementation.
-// This will be removed from all platforms in the near future.
-
-#ifndef SOLARIS
-
-// static member initialization
-int ThreadLocalStorage::_thread_index = -1;
-
-Thread* ThreadLocalStorage::get_thread_slow() {
-  return (Thread*) os::thread_local_storage_at(ThreadLocalStorage::thread_index());
-}
-
-void ThreadLocalStorage::set_thread(Thread* thread) {
-  pd_set_thread(thread);
-
-  // The following ensure that any optimization tricks we have tried
-  // did not backfire on us:
-  guarantee(get_thread()      == thread, "must be the same thread, quickly");
-  guarantee(get_thread_slow() == thread, "must be the same thread, slowly");
-}
-
-void ThreadLocalStorage::init() {
-  assert(!is_initialized(),
-         "More than one attempt to initialize threadLocalStorage");
-  pd_init();
-  set_thread_index(os::allocate_thread_local_storage());
-  generate_code_for_get_thread();
-}
-
-bool ThreadLocalStorage::is_initialized() {
-    return (thread_index() != -1);
-}
-
-#endif // SOLARIS
--- a/hotspot/src/share/vm/runtime/threadLocalStorage.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/threadLocalStorage.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -25,86 +25,26 @@
 #ifndef SHARE_VM_RUNTIME_THREADLOCALSTORAGE_HPP
 #define SHARE_VM_RUNTIME_THREADLOCALSTORAGE_HPP
 
-#include "gc/shared/gcUtil.hpp"
-#include "runtime/os.hpp"
 #include "utilities/top.hpp"
 
-// Interface for thread local storage
+// forward-decl as we can't have an include cycle
+class Thread;
 
-// Fast variant of ThreadLocalStorage::get_thread_slow
-extern "C" Thread*   get_thread();
-
-// Get raw thread id: e.g., %g7 on sparc, fs or gs on x86
-extern "C" uintptr_t _raw_thread_id();
+// Wrapper class for library-based (as opposed to compiler-based)
+// thread-local storage (TLS). All platforms require this for
+// signal-handler based TLS access (which, while not strictly
+// async-signal-safe in theory, has long been so in practice).
+// Platforms without compiler-based TLS (i.e. the __thread storage-class
+// modifier) use this implementation for all TLS access - see thread.hpp/cpp.
 
 class ThreadLocalStorage : AllStatic {
 
  // Exported API
  public:
-  static void    set_thread(Thread* thread);
-  static Thread* get_thread_slow();
-  static void    invalidate_all() { pd_invalidate_all(); }
+  static Thread* thread(); // return current thread, if attached
+  static void    set_thread(Thread* thread); // set current thread
   static void    init();
-  static bool    is_initialized();
-
-  // Machine dependent stuff
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "threadLS_linux_x86.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "threadLS_linux_sparc.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "threadLS_linux_zero.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "threadLS_solaris_x86.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "threadLS_solaris_sparc.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "threadLS_windows_x86.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "threadLS_linux_arm.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "threadLS_linux_ppc.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_aarch64
-# include "threadLS_linux_aarch64.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "threadLS_aix_ppc.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "threadLS_bsd_x86.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "threadLS_bsd_zero.hpp"
-#endif
-
-#ifndef SOLARIS
- public:
-  // Accessor
-  static inline int  thread_index()              { return _thread_index; }
-  static inline void set_thread_index(int index) { _thread_index = index; }
-
- private:
-  static int     _thread_index;
-
-  static void    generate_code_for_get_thread();
-
-  // Processor dependent parts of set_thread and initialization
-  static void pd_set_thread(Thread* thread);
-  static void pd_init();
-
-#endif // SOLARIS
-
-  // Invalidate any thread cacheing or optimization schemes.
-  static void pd_invalidate_all();
-
+  static bool    is_initialized(); // can't use TLS prior to initialization
 };
 
 #endif // SHARE_VM_RUNTIME_THREADLOCALSTORAGE_HPP
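For orientation, a minimal library-based TLS sketch for POSIX platforms (an
assumption for illustration only; the real per-platform implementations live
in the os-specific sources):

    #include <pthread.h>

    static pthread_key_t _key;

    void    tls_init()                 { pthread_key_create(&_key, NULL); }
    void    tls_set_thread(Thread* t)  { pthread_setspecific(_key, t); }
    Thread* tls_thread()               { return (Thread*) pthread_getspecific(_key); }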
--- a/hotspot/src/share/vm/runtime/vframe.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/vframe.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -232,14 +232,12 @@
             // disable the extra printing below.
             mark = NULL;
           }
-        } else if (frame_count != 0 && ObjectMonitor::Knob_Verbose) {
+        } else if (frame_count != 0) {
           // This is not the first frame so we either own this monitor
           // or we owned the monitor before and called wait(). Because
           // wait() could have been called on any monitor in a lower
           // numbered frame on the stack, we have to check all the
           // monitors on the list for this frame.
-          // Note: Only enable this new output line in verbose mode
-          // since existing tests are not ready for it.
           mark = monitor->owner()->mark();
           if (mark->has_monitor() &&
               ( // we have marked ourself as pending on this monitor
--- a/hotspot/src/share/vm/runtime/vmThread.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/vmThread.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -240,7 +240,6 @@
 void VMThread::run() {
   assert(this == vm_thread(), "check");
 
-  this->initialize_thread_local_storage();
   this->initialize_named_thread();
   this->record_stack_base_and_size();
   // Notify_lock wait checks on active_handles() to rewait in
@@ -308,9 +307,6 @@
     _terminate_lock->notify();
   }
 
-  // Thread destructor usually does this.
-  ThreadLocalStorage::set_thread(NULL);
-
   // Deletion must be done synchronously by the JNI DestroyJavaVM thread
   // so that the VMThread deletion completes before the main thread frees
   // up the CodeHeap.
--- a/hotspot/src/share/vm/runtime/vm_operations.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/runtime/vm_operations.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -378,7 +378,7 @@
 int VM_Exit::set_vm_exited() {
   CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::LastStep);
 
-  Thread * thr_cur = ThreadLocalStorage::get_thread_slow();
+  Thread * thr_cur = Thread::current();
 
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");
 
@@ -400,7 +400,7 @@
   // to wait for threads in _thread_in_native state to be quiescent.
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");
 
-  Thread * thr_cur = ThreadLocalStorage::get_thread_slow();
+  Thread * thr_cur = Thread::current();
   Monitor timer(Mutex::leaf, "VM_Exit timer", true,
                 Monitor::_safepoint_check_never);
 
@@ -477,7 +477,7 @@
 
 void VM_Exit::wait_if_vm_exited() {
   if (_vm_exited &&
-      ThreadLocalStorage::get_thread_slow() != _shutdown_thread) {
+      Thread::current_or_null() != _shutdown_thread) {
     // _vm_exited is set at safepoint, and the Threads_lock is never released
     // we will block here until the process dies
     Threads_lock->lock_without_safepoint_check();
--- a/hotspot/src/share/vm/utilities/debug.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/utilities/debug.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -215,7 +215,7 @@
   if (Debugging || error_is_suppressed(file, line)) return;
   va_list detail_args;
   va_start(detail_args, detail_fmt);
-  VMError::report_and_die(ThreadLocalStorage::get_thread_slow(), file, line, error_msg, detail_fmt, detail_args);
+  VMError::report_and_die(Thread::current_or_null(), file, line, error_msg, detail_fmt, detail_args);
   va_end(detail_args);
 }
 
@@ -224,7 +224,7 @@
   if (Debugging || error_is_suppressed(file, line)) return;
   va_list detail_args;
   va_start(detail_args, detail_fmt);
-  VMError::report_and_die(ThreadLocalStorage::get_thread_slow(), file, line, "fatal error", detail_fmt, detail_args);
+  VMError::report_and_die(Thread::current_or_null(), file, line, "fatal error", detail_fmt, detail_args);
   va_end(detail_args);
 }
 
@@ -233,7 +233,7 @@
   if (Debugging) return;
   va_list detail_args;
   va_start(detail_args, detail_fmt);
-  VMError::report_and_die(ThreadLocalStorage::get_thread_slow(), file, line, size, vm_err_type, detail_fmt, detail_args);
+  VMError::report_and_die(Thread::current_or_null(), file, line, size, vm_err_type, detail_fmt, detail_args);
   va_end(detail_args);
 
   // The UseOSErrorReporting option in report_and_die() may allow a return
@@ -536,7 +536,7 @@
 #endif // !PRODUCT
 
 extern "C" void ps() { // print stack
-  if (Thread::current() == NULL) return;
+  if (Thread::current_or_null() == NULL) return;
   Command c("ps");
 
 
@@ -615,7 +615,7 @@
 #endif // !PRODUCT
 
 extern "C" void pss() { // print all stacks
-  if (Thread::current() == NULL) return;
+  if (Thread::current_or_null() == NULL) return;
   Command c("pss");
   Threads::print(true, PRODUCT_ONLY(false) NOT_PRODUCT(true));
 }
@@ -772,7 +772,7 @@
 extern "C" void pns(void* sp, void* fp, void* pc) { // print native stack
   Command c("pns");
   static char buf[O_BUFLEN];
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null();
   // Call generic frame constructor (certain arguments may be ignored)
   frame fr(sp, fp, pc);
   print_native_stack(tty, fr, t, buf, sizeof(buf));
--- a/hotspot/src/share/vm/utilities/elfSymbolTable.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/utilities/elfSymbolTable.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -69,6 +69,26 @@
   }
 }
 
+bool ElfSymbolTable::compare(const Elf_Sym* sym, address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable) {
+  if (STT_FUNC == ELF_ST_TYPE(sym->st_info)) {
+    Elf_Word st_size = sym->st_size;
+    address sym_addr;
+    if (funcDescTable != NULL && funcDescTable->get_index() == sym->st_shndx) {
+      // We need to go another step through the function descriptor table (currently PPC64 only)
+      sym_addr = funcDescTable->lookup(sym->st_value);
+    } else {
+      sym_addr = (address)sym->st_value;
+    }
+    if (sym_addr <= addr && (Elf_Word)(addr - sym_addr) < st_size) {
+      *offset = (int)(addr - sym_addr);
+      *posIndex = sym->st_name;
+      *stringtableIndex = m_shdr.sh_link;
+      return true;
+    }
+  }
+  return false;
+}
+
 bool ElfSymbolTable::lookup(address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable) {
   assert(stringtableIndex, "null string table index pointer");
   assert(posIndex, "null string table offset pointer");
@@ -83,21 +103,8 @@
   int count = m_shdr.sh_size / sym_size;
   if (m_symbols != NULL) {
     for (int index = 0; index < count; index ++) {
-      if (STT_FUNC == ELF_ST_TYPE(m_symbols[index].st_info)) {
-        Elf_Word st_size = m_symbols[index].st_size;
-        address sym_addr;
-        if (funcDescTable != NULL && funcDescTable->get_index() == m_symbols[index].st_shndx) {
-          // We need to go another step trough the function descriptor table (currently PPC64 only)
-          sym_addr = funcDescTable->lookup(m_symbols[index].st_value);
-        } else {
-          sym_addr = (address)m_symbols[index].st_value;
-        }
-        if (sym_addr <= addr && (Elf_Word)(addr - sym_addr) < st_size) {
-          *offset = (int)(addr - sym_addr);
-          *posIndex = m_symbols[index].st_name;
-          *stringtableIndex = m_shdr.sh_link;
-          return true;
-        }
+      if (compare(&m_symbols[index], addr, stringtableIndex, posIndex, offset, funcDescTable)) {
+        return true;
       }
     }
   } else {
@@ -111,21 +118,8 @@
     Elf_Sym sym;
     for (int index = 0; index < count; index ++) {
       if (fread(&sym, sym_size, 1, m_file) == 1) {
-        if (STT_FUNC == ELF_ST_TYPE(sym.st_info)) {
-          Elf_Word st_size = sym.st_size;
-          address sym_addr;
-          if (funcDescTable != NULL && funcDescTable->get_index() == sym.st_shndx) {
-            // We need to go another step trough the function descriptor table (currently PPC64 only)
-            sym_addr = funcDescTable->lookup(sym.st_value);
-          } else {
-            sym_addr = (address)sym.st_value;
-          }
-          if (sym_addr <= addr && (Elf_Word)(addr - sym_addr) < st_size) {
-            *offset = (int)(addr - sym_addr);
-            *posIndex = sym.st_name;
-            *stringtableIndex = m_shdr.sh_link;
-            return true;
-          }
+        if (compare(&sym, addr, stringtableIndex, posIndex, offset, funcDescTable)) {
+          return true;
         }
       } else {
         m_status = NullDecoder::file_invalid;
@@ -134,7 +128,7 @@
     }
     fseek(m_file, cur_pos, SEEK_SET);
   }
-  return true;
+  return false;
 }
 
 #endif // !_WINDOWS && !__APPLE__
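The extracted compare() helper centralizes the address-containment test that
the in-memory and on-file lookup loops previously duplicated. A worked example
of that test, with illustrative numbers:

    // sym_addr = 0x1000, st_size = 0x40, addr = 0x1010:
    //   0x1000 <= 0x1010  &&  (0x1010 - 0x1000) == 0x10 < 0x40  -> match
    //   *offset = 0x10, i.e. addr's displacement inside the function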
--- a/hotspot/src/share/vm/utilities/elfSymbolTable.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/utilities/elfSymbolTable.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -63,6 +63,8 @@
   Elf_Shdr            m_shdr;
 
   NullDecoder::decoder_status  m_status;
+
+  bool compare(const Elf_Sym* sym, address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable);
 };
 
 #endif // !_WINDOWS and !__APPLE__
--- a/hotspot/src/share/vm/utilities/events.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/utilities/events.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -29,7 +29,6 @@
 #include "runtime/osThread.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
-#include "runtime/threadLocalStorage.hpp"
 #include "runtime/timer.hpp"
 #include "utilities/events.hpp"
 
--- a/hotspot/src/share/vm/utilities/events.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/utilities/events.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -248,8 +248,8 @@
 
 template <class T>
 inline void EventLogBase<T>::print_log_on(outputStream* out) {
-  if (ThreadLocalStorage::get_thread_slow() == NULL) {
-    // Not a regular Java thread so don't bother locking
+  if (Thread::current_or_null() == NULL) {
+    // Not yet attached? Don't try to use locking
     print_log_impl(out);
   } else {
     MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
--- a/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -326,4 +326,8 @@
 #define JLONG_FORMAT           "%ld"
 #endif // _LP64 && __APPLE__
 
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+#define THREAD_LOCAL_DECL __thread
+#endif
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_GCC_HPP
--- a/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -273,4 +273,8 @@
 
 #define offset_of(klass,field) offsetof(klass,field)
 
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+#define THREAD_LOCAL_DECL __thread
+#endif
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP
--- a/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -234,4 +234,8 @@
 
 #define offset_of(klass,field) offsetof(klass,field)
 
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+#define THREAD_LOCAL_DECL __declspec( thread )
+#endif
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP
--- a/hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp	Mon Dec 07 17:04:42 2015 +0000
@@ -180,5 +180,8 @@
 #define SIZE_64G  ((uint64_t) UCONST64( 0x1000000000))
 #define SIZE_1T   ((uint64_t) UCONST64(0x10000000000))
 
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+#define THREAD_LOCAL_DECL __thread
+#endif
 
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP
--- a/hotspot/src/share/vm/utilities/ostream.cpp	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/src/share/vm/utilities/ostream.cpp	Mon Dec 07 17:04:42 2015 +0000
@@ -738,7 +738,7 @@
   }
 
 #ifdef ASSERT
-  Thread *thread = Thread::current();
+  Thread *thread = Thread::current_or_null();
   assert(thread == NULL ||
          (thread->is_VM_thread() && SafepointSynchronize::is_at_safepoint()),
          "Must be VMThread at safepoint");
@@ -1058,8 +1058,8 @@
       // bootstrap problem
       tty_lock == NULL ||
 
-      // can't grab a lock or call Thread::current() if TLS isn't initialized
-      ThreadLocalStorage::thread() == NULL ||
+      // can't grab a lock if current Thread isn't set
+      Thread::current_or_null() == NULL ||
 
       // developer hook
       !SerializeVMOutput ||
--- a/hotspot/test/TEST.ROOT	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/TEST.ROOT	Mon Dec 07 17:04:42 2015 +0000
@@ -30,7 +30,7 @@
 keys=cte_test jcmd nmt regression gc stress
 
 groups=TEST.groups [closed/TEST.groups]
-requires.properties=sun.arch.data.model
+requires.properties=sun.arch.data.model java.version
 
 # Tests using jtreg 4.1 b12 features
 requiredVersion=4.1 b12
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/parallel/TestPrintGCDetailsVerbose.java	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestPrintGCDetailsVerbose
+ * @bug 8016740
+ * @summary Tests that a JVM with the PrintGCDetails and Verbose flags does not crash when ParOldGC has no memory
+ * @key gc
+ * @requires java.version ~= ".*fastdebug"
+ * @requires vm.gc=="Parallel" | vm.gc=="null"
+ * @library /testlibrary
+ * @run main/othervm -Xmx50m -XX:+UseParallelOldGC -XX:+PrintGCDetails -XX:+Verbose TestPrintGCDetailsVerbose
+ */
+public class TestPrintGCDetailsVerbose {
+
+    public static void main(String[] args) {
+        for (int t = 0; t <= 10; t++) {
+            byte a[][] = new byte[100000][];
+            try {
+                for (int i = 0; i < a.length; i++) {
+                    a[i] = new byte[100000];
+                }
+            } catch (OutOfMemoryError oome) {
+                a = null;
+                System.out.println("OOM!");
+                continue;
+            }
+        }
+    }
+}
+
--- a/hotspot/test/runtime/CommandLine/IgnoreUnrecognizedVMOptions.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/runtime/CommandLine/IgnoreUnrecognizedVMOptions.java	Mon Dec 07 17:04:42 2015 +0000
@@ -163,9 +163,9 @@
       -IgnoreUnrecognizedVMOptions               ERR                           ERR
       +IgnoreUnrecognizedVMOptions               ERR                           ERR
     */
-    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
-    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
-    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
-    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
   }
 }
--- a/hotspot/test/runtime/Thread/Fibonacci.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/runtime/Thread/Fibonacci.java	Mon Dec 07 17:04:42 2015 +0000
@@ -29,7 +29,7 @@
  *     make this test inherently unstable on Windows with 32-bit VM data model.
  * @requires !(os.family == "windows" & sun.arch.data.model == "32")
  * @library /testlibrary
- * @run main Fibonacci 15
+ * @run main/othervm Fibonacci 15
  */
 
 import jdk.test.lib.Asserts;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/logging/BadMap50.jasm	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * This class should throw VerifyError because the StackMap for bytecode
+ * index 45 is incorrect. The stack maps for bytecode indexes 45 and 49 are
+ * incompatible because 45 doesn't supply enough locals to satisfy 49.
+ *
+ * The astore_2 bytecode at bytecode index 45 changes the type state,
+ * preventing the stackmap mismatch.  But, if the incoming type state is used,
+ * as required by JVM Spec 8, then the verifier will detect the stackmap
+ * mismatch, and throw VerifyError.
+ */
+
+super public class BadMap50
+    version 50:0
+{
+
+
+public Method "<init>":"()V"
+    stack 1 locals 1
+{
+        aload_0;
+        invokespecial    Method java/lang/Object."<init>":"()V";
+        return;
+}
+
+public static Method main:"([Ljava/lang/String;)V"
+    throws java/lang/Throwable
+    stack 0 locals 1
+{
+        return;
+}
+
+public static Method foo:"()V"
+    stack 3 locals 5
+{
+        iconst_0;
+        ifne    L5;
+        nop;
+        try t7;
+    L5:    stack_frame_type full;
+        aconst_null;
+        astore_0;
+        iconst_3;
+        istore_1;
+        try t0;
+        aconst_null;
+        astore_0;
+        endtry t0;
+        goto    L19;
+        catch t0 java/io/IOException;
+        stack_frame_type full;
+        locals_map class java/lang/Object, int;
+        stack_map class java/io/IOException;
+        astore_2;
+        aconst_null;
+        astore_0;
+        iconst_2;
+        istore_1;
+        try t1;
+    L19:    stack_frame_type full;
+        locals_map class java/lang/Object, int;
+        iconst_0;
+        istore_2;
+        endtry t1;
+        iload_1;
+        ifeq    L37;
+        nop;
+        goto    L37;
+        catch t1 #0;
+        catch t2 #0;
+        try t2;
+        stack_frame_type full;
+        locals_map class java/lang/Object, int;
+        stack_map class java/lang/Throwable;
+        astore_3;
+        iconst_2;
+        istore_2;
+        endtry t2;
+        iload_1;
+        ifeq    L35;
+        nop;
+    L35:    stack_frame_type full;
+        locals_map class java/lang/Object, int, bogus, class java/lang/Throwable;
+        aload_3;
+        athrow;
+        try t3, t4;
+    L37:    stack_frame_type full;
+        locals_map class java/lang/Object, int, int;
+        iload_2;
+        ifeq    L42;
+        nop;
+        endtry t3, t4;
+    L42:    stack_frame_type full;
+        locals_map class java/lang/Object, int, int;
+        goto    L54;
+        catch t3 java/lang/Exception;
+        try t5;
+        stack_frame_type full;
+        locals_map class java/lang/Object, int;
+        stack_map class java/lang/Exception;
+        // astore_2;               // an astore_2 at bci 45 would change the type state.
+        // pop;
+        iconst_1;
+        istore_2;                  // istore_2 at bci 45 leaves the stackmap mismatch in place.
+        endtry t5;
+        goto    L54;
+        catch t4 #0;
+        catch t5 #0;
+        catch t6 #0;
+        try t6;
+        stack_frame_type full;
+        locals_map class java/lang/Object, int, int;
+        stack_map class java/lang/Throwable;
+        // astore    3;
+        istore_1;
+        endtry t6;
+        // aload    3;
+        // athrow;
+    L54:    stack_frame_type full;
+        locals_map class java/lang/Object, int, int;
+        goto    L57;
+    L57:    stack_frame_type full;
+        locals_map class java/lang/Object, int, int;
+        nop;
+        endtry t7;
+        return;
+        catch t7 #0;
+        stack_frame_type full;
+        stack_map class java/lang/Throwable;
+        nop;
+        athrow;
+}
+
+} // end Class BadMap50
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/logging/ClassInitializationTest.java	Mon Dec 07 17:04:42 2015 +0000
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test ClassInitializationTest
+ * @bug 8142976
+ * @library /testlibrary
+ * @compile BadMap50.jasm
+ * @run driver ClassInitializationTest
+ */
+
+import jdk.test.lib.*;
+
+public class ClassInitializationTest {
+
+    public static void main(String... args) throws Exception {
+
+        // (1) Expect classinit logging plus verifier failover messages for BadMap50.
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:classinit=info", "-Xverify:all", "-Xmx64m", "BadMap50");
+        OutputAnalyzer out = new OutputAnalyzer(pb.start());
+        out.shouldContain("Start class verification for:");
+        out.shouldContain("End class verification for:");
+        out.shouldContain("Initializing");
+        out.shouldContain("Verification for BadMap50 failed");
+        out.shouldContain("Fail over class verification to old verifier for: BadMap50");
+
+        // (2)
+        if (Platform.isDebugBuild()) {
+          pb = ProcessTools.createJavaProcessBuilder("-Xlog:classinit=info", "-Xverify:all", "-XX:+EagerInitialization", "-Xmx64m", "-version");
+          out = new OutputAnalyzer(pb.start());
+          out.shouldContain("[Initialized").shouldContain("without side effects]");
+          out.shouldHaveExitValue(0);
+        }
+        // (3) Ensure that VerboseVerification still triggers appropriate messages.
+        pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", "-XX:+VerboseVerification", "-Xverify:all", "-Xmx64m", "BadMap50");
+        out = new OutputAnalyzer(pb.start());
+        out.shouldContain("End class verification for:");
+        out.shouldContain("Verification for BadMap50 failed");
+        out.shouldContain("Fail over class verification to old verifier for: BadMap50");
+    }
+}
--- a/hotspot/test/runtime/logging/DefaultMethodsTest.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/runtime/logging/DefaultMethodsTest.java	Mon Dec 07 17:04:42 2015 +0000
@@ -28,7 +28,7 @@
  * @library /testlibrary
  * @modules java.base/sun.misc
  *          java.management
- * @run main DefaultMethodsTest
+ * @run driver DefaultMethodsTest
  */
 
 import jdk.test.lib.*;
--- a/hotspot/test/runtime/logging/SafepointTest.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/runtime/logging/SafepointTest.java	Mon Dec 07 17:04:42 2015 +0000
@@ -26,19 +26,18 @@
  * @bug 8140348
  * @summary safepoint=trace should have output from each log statement in the code
  * @library /testlibrary
- * @compile SafepointTestMain.java
  * @modules java.base/sun.misc
  *          java.management
- * @build SafepointTest
- * @run main SafepointTest
+ * @run driver SafepointTest
  */
 
 import jdk.test.lib.*;
+import java.lang.ref.WeakReference;
 
 public class SafepointTest {
     public static void main(String[] args) throws Exception {
         ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-            "-Xlog:safepoint=trace", "SafepointTestMain");
+            "-Xlog:safepoint=trace", InnerClass.class.getName());
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldContain("Safepoint synchronization initiated. (");
         output.shouldContain("Entering safepoint region: ");
@@ -46,4 +45,25 @@
         output.shouldContain("_at_poll_safepoint");
         output.shouldHaveExitValue(0);
     }
+
+    public static class InnerClass {
+        public static byte[] garbage;
+        public static volatile WeakReference<Object> weakref;
+
+        public static void createweakref() {
+            Object o = new Object();
+            weakref = new WeakReference<>(o);
+        }
+
+        public static void main(String[] args) throws Exception {
+            // Cause several safepoints to run GC to see safepoint messages
+            for (int i = 0; i < 2; i++) {
+                createweakref();
+                while(weakref.get() != null) {
+                    garbage = new byte[8192];
+                    System.gc();
+                }
+            }
+        }
+    }
 }
--- a/hotspot/test/runtime/logging/SafepointTestMain.java	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-import java.lang.ref.WeakReference;
-
-public class SafepointTestMain {
-    public static byte[] garbage;
-    public static volatile WeakReference<Object> weakref;
-
-    public static void createweakref() {
-        Object o = new Object();
-        weakref = new WeakReference<>(o);
-    }
-
-    public static void main(String[] args) throws Exception {
-        // Cause several safepoints to run GC to see safepoint messages
-        for (int i = 0; i < 2; i++) {
-            createweakref();
-            while(weakref.get() != null) {
-                garbage = new byte[8192];
-                System.gc();
-            }
-        }
-    }
-}
--- a/hotspot/test/runtime/logging/VMOperationTest.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/runtime/logging/VMOperationTest.java	Mon Dec 07 17:04:42 2015 +0000
@@ -26,21 +26,41 @@
  * @bug 8143157
  * @summary vmoperation=debug should have logging output
  * @library /testlibrary
- * @compile VMOperationTestMain.java
  * @modules java.base/sun.misc
  *          java.management
- * @run main VMOperationTest
+ * @run driver VMOperationTest
  */
 
 import jdk.test.lib.*;
+import java.lang.ref.WeakReference;
 
 public class VMOperationTest {
     public static void main(String[] args) throws Exception {
         ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-            "-Xlog:vmoperation=debug", "-Xmx64m", "-Xms64m", "VMOperationTestMain");
+            "-Xlog:vmoperation=debug", "-Xmx64m", "-Xms64m",
+            InternalClass.class.getName());
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldContain("VM_Operation (");
         output.shouldHaveExitValue(0);
     }
+
+    public static class InternalClass {
+        public static byte[] garbage;
+        public static volatile WeakReference<Object> weakref;
+
+        public static void createweakref() {
+            Object o = new Object();
+            weakref = new WeakReference<>(o);
+        }
+
+        // Loop until a GC runs.
+        public static void main(String[] args) throws Exception {
+            createweakref();
+            while (weakref.get() != null) {
+                garbage = new byte[8192];
+                System.gc();
+            }
+        }
+    }
 }
 
--- a/hotspot/test/runtime/logging/VMOperationTestMain.java	Thu Dec 03 22:30:17 2015 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-import java.lang.ref.WeakReference;
-
-public class VMOperationTestMain {
-    public static byte[] garbage;
-    public static volatile WeakReference<Object> weakref;
-
-    public static void createweakref() {
-        Object o = new Object();
-        weakref = new WeakReference<>(o);
-    }
-
-    // Loop until a GC runs.
-    public static void main(String[] args) throws Exception {
-        createweakref();
-        while (weakref.get() != null) {
-            garbage = new byte[8192];
-            System.gc();
-        }
-    }
-}
--- a/hotspot/test/serviceability/dcmd/gc/RunFinalizationTest.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/serviceability/dcmd/gc/RunFinalizationTest.java	Mon Dec 07 17:04:42 2015 +0000
@@ -23,20 +23,22 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
 
-import jdk.test.lib.OutputAnalyzer;
-import jdk.test.lib.ProcessTools;
+import jdk.test.lib.process.ProcessTools;
 
 /*
  * @test
  * @summary Test of diagnostic command GC.run_finalization
  * @library /testlibrary
+ * @library /test/lib/share/classes
  * @modules java.base/sun.misc
  *          java.compiler
  *          java.management
  *          jdk.jvmstat/sun.jvmstat.monitor
  * @build jdk.test.lib.*
  * @build jdk.test.lib.dcmd.*
+ * @build jdk.test.lib.process.*
  * @build RunFinalizationTest FinalizationRunner
  * @run main RunFinalizationTest
  */
@@ -50,8 +52,21 @@
         javaArgs.add(TEST_APP_NAME);
         ProcessBuilder testAppPb = ProcessTools.createJavaProcessBuilder(javaArgs.toArray(new String[javaArgs.size()]));
 
-        OutputAnalyzer out = ProcessTools.executeProcess(testAppPb);
-        out.stderrShouldNotMatch("^" + FinalizationRunner.FAILED + ".*")
-           .stdoutShouldMatch("^" + FinalizationRunner.PASSED + ".*");
+        final AtomicBoolean failed = new AtomicBoolean();
+        final AtomicBoolean passed = new AtomicBoolean();
+
+        Process runner = ProcessTools.startProcess(
+            "FinalizationRunner",
+            testAppPb,
+            l -> {
+                failed.compareAndSet(false, l.contains(FinalizationRunner.FAILED));
+                passed.compareAndSet(false, l.contains(FinalizationRunner.PASSED));
+            }
+        );
+        runner.waitFor();
+
+        if (failed.get() || !passed.get()) {
+            throw new Error("RunFinalizationTest failed");
+        }
     }
 }
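
[Editor's note] The rewrite above replaces post-hoc OutputAnalyzer regex matching with ProcessTools.startProcess, which feeds each output line to a listener while the child process runs; the two AtomicBoolean latches record whether a PASSED or FAILED marker was ever seen, since compareAndSet(false, ...) flips to true on the first match and never flips back on later non-matching lines. A minimal standalone sketch of the same latch-on-line pattern, using only the JDK's ProcessBuilder (the echo command and marker string are placeholders, assuming a POSIX environment where echo exists):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class LineLatchDemo {
        public static void main(String[] args) throws Exception {
            // Placeholder child process standing in for the test app.
            Process p = new ProcessBuilder("echo", "PASSED").start();

            AtomicBoolean passed = new AtomicBoolean();
            try (BufferedReader r = new BufferedReader(
                    new InputStreamReader(p.getInputStream()))) {
                String line;
                while ((line = r.readLine()) != null) {
                    // Latch: set once on the first matching line,
                    // unaffected by subsequent non-matching lines.
                    passed.compareAndSet(false, line.contains("PASSED"));
                }
            }
            p.waitFor();
            if (!passed.get()) {
                throw new Error("expected PASSED marker not seen");
            }
        }
    }
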
--- a/hotspot/test/testlibrary/jdk/test/lib/Asserts.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/testlibrary/jdk/test/lib/Asserts.java	Mon Dec 07 17:04:42 2015 +0000
@@ -41,7 +41,10 @@
  * multiple times, then the line number won't provide enough context to
  * understand the failure.
  * </pre>
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib}
  */
+@Deprecated
 public class Asserts {
 
     /**
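
[Editor's note] This file and the remaining testlibrary files below all apply the same two-part Java deprecation convention: the @deprecated javadoc tag tells readers where the replacement lives, while the @Deprecated annotation makes javac emit warnings at call sites. In isolation (OldHelper and NewHelper are hypothetical names, not from the repository):

    /**
     * @deprecated Use {@code jdk.test.lib.NewHelper} instead (hypothetical target).
     */
    @Deprecated
    public class OldHelper {
        // Body unchanged; deprecating a class does not alter its behavior,
        // it only produces compile-time warnings for callers.
    }
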
--- a/hotspot/test/testlibrary/jdk/test/lib/JDKToolFinder.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/testlibrary/jdk/test/lib/JDKToolFinder.java	Mon Dec 07 17:04:42 2015 +0000
@@ -27,6 +27,11 @@
 import java.nio.file.Path;
 import java.nio.file.Paths;
 
+/**
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib}
+ */
+@Deprecated
 public final class JDKToolFinder {
 
     private JDKToolFinder() {
--- a/hotspot/test/testlibrary/jdk/test/lib/JDKToolLauncher.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/testlibrary/jdk/test/lib/JDKToolLauncher.java	Mon Dec 07 17:04:42 2015 +0000
@@ -46,7 +46,10 @@
  * Process p = pb.start();
  * }
  * </pre>
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib}
  */
+@Deprecated
 public class JDKToolLauncher {
     private final String executable;
     private final List<String> vmArgs = new ArrayList<String>();
--- a/hotspot/test/testlibrary/jdk/test/lib/OutputAnalyzer.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/testlibrary/jdk/test/lib/OutputAnalyzer.java	Mon Dec 07 17:04:42 2015 +0000
@@ -41,7 +41,11 @@
    *
    * @param process Process to analyze
    * @throws IOException If an I/O error occurs.
+   *
+   * @deprecated This class is deprecated. Use the one from
+   *             {@code <root>/test/lib/share/classes/jdk/test/lib/process}
    */
+  @Deprecated
   public OutputAnalyzer(Process process) throws IOException {
     OutputBuffer output = ProcessTools.getOutput(process);
     exitValue = process.exitValue();
--- a/hotspot/test/testlibrary/jdk/test/lib/OutputBuffer.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/testlibrary/jdk/test/lib/OutputBuffer.java	Mon Dec 07 17:04:42 2015 +0000
@@ -23,6 +23,11 @@
 
 package jdk.test.lib;
 
+/**
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib/process}
+ */
+@Deprecated
 public class OutputBuffer {
   private final String stdout;
   private final String stderr;
--- a/hotspot/test/testlibrary/jdk/test/lib/Platform.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/testlibrary/jdk/test/lib/Platform.java	Mon Dec 07 17:04:42 2015 +0000
@@ -25,6 +25,11 @@
 
 import java.util.regex.Pattern;
 
+/**
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib}
+ */
+@Deprecated
 public class Platform {
     private static final String osName      = System.getProperty("os.name");
     private static final String dataModel   = System.getProperty("sun.arch.data.model");
--- a/hotspot/test/testlibrary/jdk/test/lib/ProcessTools.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/testlibrary/jdk/test/lib/ProcessTools.java	Mon Dec 07 17:04:42 2015 +0000
@@ -31,6 +31,11 @@
 import java.util.Collections;
 import java.util.List;
 
+/**
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib/process}
+ */
+@Deprecated
 public final class ProcessTools {
 
   private ProcessTools() {
--- a/hotspot/test/testlibrary/jdk/test/lib/StreamPumper.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/testlibrary/jdk/test/lib/StreamPumper.java	Mon Dec 07 17:04:42 2015 +0000
@@ -27,6 +27,11 @@
 import java.io.InputStream;
 import java.io.IOException;
 
+/**
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib/process}
+ */
+@Deprecated
 public final class StreamPumper implements Runnable {
 
   private static final int BUF_SIZE = 256;
--- a/hotspot/test/testlibrary/jdk/test/lib/Utils.java	Thu Dec 03 22:30:17 2015 -0800
+++ b/hotspot/test/testlibrary/jdk/test/lib/Utils.java	Mon Dec 07 17:04:42 2015 +0000
@@ -55,7 +55,11 @@
 
 /**
  * Common library for various test helper functions.
+ *
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib}
  */
+@Deprecated
 public final class Utils {
 
     /**