7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
author twisti
Mon, 28 Feb 2011 06:07:12 -0800
changeset 8495 a4959965eaa3
parent 8494 4258c78226d9
child 8496 2d29a13b66ed
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
Reviewed-by: never, bdelsart
hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp
hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp
hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
hotspot/src/share/vm/c1/c1_Runtime1.cpp
hotspot/src/share/vm/c1/c1_Runtime1.hpp
hotspot/src/share/vm/code/nmethod.cpp
hotspot/src/share/vm/code/nmethod.hpp
hotspot/src/share/vm/runtime/sharedRuntime.cpp
hotspot/src/share/vm/utilities/macros.hpp
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Mon Feb 28 06:07:12 2011 -0800
@@ -395,9 +395,9 @@
 
   int offset = code_offset();
 
-  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
-  debug_only(__ stop("should have gone to the caller");)
+  __ should_not_reach_here();
   assert(code_offset() - offset <= exception_handler_size, "overflow");
   __ end_a_stub();
 
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Mon Feb 28 06:07:12 2011 -0800
@@ -148,7 +148,7 @@
 
 static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
   assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
-         " mismatch in calculation");
+         "mismatch in calculation");
   sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
   int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
   OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
@@ -176,9 +176,8 @@
 
 static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
   assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
-         " mismatch in calculation");
+         "mismatch in calculation");
   __ save_frame_c1(frame_size_in_bytes);
-  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
 
   // Record volatile registers as callee-save values in an OopMap so their save locations will be
   // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
@@ -367,23 +366,7 @@
   switch (id) {
     case forward_exception_id:
       {
-        // we're handling an exception in the context of a compiled
-        // frame.  The registers have been saved in the standard
-        // places.  Perform an exception lookup in the caller and
-        // dispatch to the handler if found.  Otherwise unwind and
-        // dispatch to the callers exception handler.
-
-        oop_maps = new OopMapSet();
-        OopMap* oop_map = generate_oop_map(sasm, true);
-
-        // transfer the pending exception to the exception_oop
-        __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
-        __ ld_ptr(Oexception, 0, G0);
-        __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
-        __ add(I7, frame::pc_return_offset, Oissuing_pc);
-
-        generate_handle_exception(sasm, oop_maps, oop_map);
-        __ should_not_reach_here();
+        oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
@@ -671,15 +654,14 @@
       break;
 
     case handle_exception_id:
-      {
-        __ set_info("handle_exception", dont_gc_arguments);
-        // make a frame and preserve the caller's caller-save registers
+      { __ set_info("handle_exception", dont_gc_arguments);
+        oop_maps = generate_handle_exception(id, sasm);
+      }
+      break;
 
-        oop_maps = new OopMapSet();
-        OopMap* oop_map = save_live_registers(sasm);
-        __ mov(Oexception->after_save(),  Oexception);
-        __ mov(Oissuing_pc->after_save(), Oissuing_pc);
-        generate_handle_exception(sasm, oop_maps, oop_map);
+    case handle_exception_from_callee_id:
+      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
+        oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
@@ -696,7 +678,7 @@
                         G2_thread, Oissuing_pc->after_save());
         __ verify_not_null_oop(Oexception->after_save());
 
-        // Restore SP from L7 if the exception PC is a MethodHandle call site.
+        // Restore SP from L7 if the exception PC is a method handle call site.
         __ mov(O0, G5);  // Save the target address.
         __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
         __ tst(L0);  // Condition codes are preserved over the restore.
@@ -1006,48 +988,89 @@
 }
 
 
-void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
-  Label no_deopt;
+OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
+  __ block_comment("generate_handle_exception");
+
+  // Save registers, if required.
+  OopMapSet* oop_maps = new OopMapSet();
+  OopMap* oop_map = NULL;
+  switch (id) {
+  case forward_exception_id:
+    // We're handling an exception in the context of a compiled frame.
+    // The registers have been saved in the standard places.  Perform
+    // an exception lookup in the caller and dispatch to the handler
+    // if found.  Otherwise unwind and dispatch to the caller's
+    // exception handler.
+    oop_map = generate_oop_map(sasm, true);
+
+    // transfer the pending exception to the exception_oop
+    __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
+    __ ld_ptr(Oexception, 0, G0);
+    __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
+    __ add(I7, frame::pc_return_offset, Oissuing_pc);
+    break;
+  case handle_exception_id:
+    // At this point all registers MAY be live.
+    oop_map = save_live_registers(sasm);
+    __ mov(Oexception->after_save(),  Oexception);
+    __ mov(Oissuing_pc->after_save(), Oissuing_pc);
+    break;
+  case handle_exception_from_callee_id:
+    // At this point all registers except exception oop (Oexception)
+    // and exception pc (Oissuing_pc) are dead.
+    oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
+    sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
+    __ save_frame_c1(frame_size_in_bytes);
+    __ mov(Oexception->after_save(),  Oexception);
+    __ mov(Oissuing_pc->after_save(), Oissuing_pc);
+    break;
+  default:  ShouldNotReachHere();
+  }
 
   __ verify_not_null_oop(Oexception);
 
   // save the exception and issuing pc in the thread
-  __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
+  __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
   __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
 
-  // save the real return address and use the throwing pc as the return address to lookup (has bci & oop map)
-  __ mov(I7, L0);
+  // use the throwing pc as the return address to lookup (has bci & oop map)
   __ mov(Oissuing_pc, I7);
   __ sub(I7, frame::pc_return_offset, I7);
   int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
+  oop_maps->add_gc_map(call_offset, oop_map);
 
   // Note: if nmethod has been deoptimized then regardless of
   // whether it had a handler or not we will deoptimize
   // by entering the deopt blob with a pending exception.
 
-#ifdef ASSERT
-  Label done;
-  __ tst(O0);
-  __ br(Assembler::notZero, false, Assembler::pn, done);
-  __ delayed()->nop();
-  __ stop("should have found address");
-  __ bind(done);
-#endif
+  // Restore the registers that were saved at the beginning, remove
+  // the frame and jump to the exception handler.
+  switch (id) {
+  case forward_exception_id:
+  case handle_exception_id:
+    restore_live_registers(sasm);
+    __ jmp(O0, 0);
+    __ delayed()->restore();
+    break;
+  case handle_exception_from_callee_id:
+    // Restore SP from L7 if the exception PC is a method handle call site.
+    __ mov(O0, G5);  // Save the target address.
+    __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
+    __ tst(L0);  // Condition codes are preserved over the restore.
+    __ restore();
 
-  // restore the registers that were saved at the beginning and jump to the exception handler.
-  restore_live_registers(sasm);
+    __ jmp(G5, 0);  // jump to the exception handler
+    __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
+    break;
+  default:  ShouldNotReachHere();
+  }
 
-  __ jmp(O0, 0);
-  __ delayed()->restore();
-
-  oop_maps->add_gc_map(call_offset, oop_map);
+  return oop_maps;
 }
 
 
 #undef __
 
-#define __ masm->
-
 const char *Runtime1::pd_name_for_address(address entry) {
   return "<unknown function>";
 }
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Mon Feb 28 06:07:12 2011 -0800
@@ -417,6 +417,7 @@
 
   // Some handy addresses:
   Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));
+  Address G5_method_fce(    G5_method,        in_bytes(methodOopDesc::from_compiled_offset()));
 
   Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());
 
@@ -444,12 +445,10 @@
   case _raise_exception:
     {
       // Not a real MH entry, but rather shared code for raising an
-      // exception.  Since we use a C2I adapter to set up the
-      // interpreter state, arguments are expected in compiler
-      // argument registers.
+      // exception.  Since we use the compiled entry, arguments are
+      // expected in compiler argument registers.
       assert(raise_exception_method(), "must be set");
-      address c2i_entry = raise_exception_method()->get_c2i_entry();
-      assert(c2i_entry, "method must be linked");
+      assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
 
       __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.
 
@@ -468,10 +467,9 @@
       __ delayed()->nop();
 
       __ verify_oop(G5_method);
-      __ jump_to(AddressLiteral(c2i_entry), O3_scratch);
+      __ jump_indirect_to(G5_method_fce, O3_scratch);  // jump to compiled entry
       __ delayed()->nop();
 
-      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
       __ bind(L_no_method);
       __ unimplemented("call throw_WrongMethodType_entry");
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Mon Feb 28 06:07:12 2011 -0800
@@ -456,10 +456,8 @@
   __ verify_not_null_oop(rax);
 
   // search an exception handler (rax: exception oop, rdx: throwing pc)
-  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
-
-  __ stop("should not reach here");
-
+  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
+  __ should_not_reach_here();
   assert(code_offset() - offset <= exception_handler_size, "overflow");
   __ end_a_stub();
 
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon Feb 28 06:07:12 2011 -0800
@@ -248,11 +248,14 @@
 #ifdef _LP64
   align_dummy_0, align_dummy_1,
 #endif // _LP64
-  dummy1, SLOT2(dummy1H)                                                                    // 0, 4
-  dummy2, SLOT2(dummy2H)                                                                    // 8, 12
-  // Two temps to be used as needed by users of save/restore callee registers
-  temp_2_off, SLOT2(temp_2H_off)                                                            // 16, 20
-  temp_1_off, SLOT2(temp_1H_off)                                                            // 24, 28
+#ifdef _WIN64
+  // Windows always allocates space for it's argument registers (see
+  // frame::arg_reg_save_area_bytes).
+  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
+  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
+  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
+  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
+#endif // _WIN64
   xmm_regs_as_doubles_off,                                                                  // 32
   float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
   fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
@@ -282,24 +285,7 @@
   rax_off, SLOT2(raxH_off)                                                                  // 480, 484
   saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
   return_off, SLOT2(returnH_off)                                                            // 496, 500
-  reg_save_frame_size,  // As noted: neglects any parameters to runtime                     // 504
-
-#ifdef _WIN64
-  c_rarg0_off = rcx_off,
-#else
-  c_rarg0_off = rdi_off,
-#endif // WIN64
-
-  // equates
-
-  // illegal instruction handler
-  continue_dest_off = temp_1_off,
-
-  // deoptimization equates
-  fp0_off = float_regs_as_doubles_off, // slot for java float/double return value
-  xmm0_off = xmm_regs_as_doubles_off,  // slot for java float/double return value
-  deopt_type = temp_2_off,             // slot for type of deopt in progress
-  ret_type = temp_1_off                // slot for return type
+  reg_save_frame_size   // As noted: neglects any parameters to runtime                     // 504
 };
 
 
@@ -405,11 +391,6 @@
                                    bool save_fpu_registers = true) {
   __ block_comment("save_live_registers");
 
-  // 64bit passes the args in regs to the c++ runtime
-  int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread
-  // frame_size = round_to(frame_size, 4);
-  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
-
   __ pusha();         // integer registers
 
   // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
@@ -642,19 +623,58 @@
 }
 
 
-void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool save_fpu_registers) {
+OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+  __ block_comment("generate_handle_exception");
+
   // incoming parameters
   const Register exception_oop = rax;
-  const Register exception_pc = rdx;
+  const Register exception_pc  = rdx;
   // other registers used in this stub
-  const Register real_return_addr = rbx;
   const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
 
-  __ block_comment("generate_handle_exception");
+  // Save registers, if required.
+  OopMapSet* oop_maps = new OopMapSet();
+  OopMap* oop_map = NULL;
+  switch (id) {
+  case forward_exception_id:
+    // We're handling an exception in the context of a compiled frame.
+    // The registers have been saved in the standard places.  Perform
+    // an exception lookup in the caller and dispatch to the handler
+    // if found.  Otherwise unwind and dispatch to the caller's
+    // exception handler.
+    oop_map = generate_oop_map(sasm, 1 /*thread*/);
+
+    // load and clear pending exception oop into RAX
+    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
+    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+
+    // load issuing PC (the return address for this stub) into rdx
+    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
+
+    // make sure that the vm_results are cleared (may be unnecessary)
+    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
+    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+    break;
+  case handle_exception_nofpu_id:
+  case handle_exception_id:
+    // At this point all registers MAY be live.
+    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
+    break;
+  case handle_exception_from_callee_id: {
+    // At this point all registers except exception oop (RAX) and
+    // exception pc (RDX) are dead.
+    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
+    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
+    sasm->set_frame_size(frame_size);
+    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
+    break;
+  }
+  default:  ShouldNotReachHere();
+  }
 
 #ifdef TIERED
   // C2 can leave the fpu stack dirty
-  if (UseSSE < 2 ) {
+  if (UseSSE < 2) {
     __ empty_FPU_stack();
   }
 #endif // TIERED
@@ -686,11 +706,7 @@
   // save exception oop and issuing pc into JavaThread
   // (exception handler will load it from here)
   __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
-  __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
-
-  // save real return address (pc that called this stub)
-  __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord));
-  __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr);
+  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);
 
   // patch throwing pc into return address (has bci & oop map)
   __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);
@@ -700,33 +716,41 @@
   int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
   oop_maps->add_gc_map(call_offset, oop_map);
 
-  // rax,: handler address
+  // rax: handler address
   //      will be the deopt blob if nmethod was deoptimized while we looked up
   //      handler regardless of whether handler existed in the nmethod.
 
   // only rax, is valid at this time, all other registers have been destroyed by the runtime call
   __ invalidate_registers(false, true, true, true, true, true);
 
-#ifdef ASSERT
-  // Do we have an exception handler in the nmethod?
-  Label done;
-  __ testptr(rax, rax);
-  __ jcc(Assembler::notZero, done);
-  __ stop("no handler found");
-  __ bind(done);
-#endif
-
-  // exception handler found
-  // patch the return address -> the stub will directly return to the exception handler
+  // patch the return address; this stub will directly return to the exception handler
   __ movptr(Address(rbp, 1*BytesPerWord), rax);
 
-  // restore registers
-  restore_live_registers(sasm, save_fpu_registers);
+  switch (id) {
+  case forward_exception_id:
+  case handle_exception_nofpu_id:
+  case handle_exception_id:
+    // Restore the registers that were saved at the beginning.
+    restore_live_registers(sasm, id == handle_exception_nofpu_id);
+    break;
+  case handle_exception_from_callee_id:
+    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
+    // since we do a leave anyway.
 
-  // return to exception handler
-  __ leave();
-  __ ret(0);
+    // Pop the return address since we are possibly changing SP (restoring from BP).
+    __ leave();
+    __ pop(rcx);
 
+    // Restore SP from BP if the exception PC is a method handle call site.
+    NOT_LP64(__ get_thread(thread);)
+    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
+    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
+    __ jmp(rcx);  // jump to exception handler
+    break;
+  default:  ShouldNotReachHere();
+  }
+
+  return oop_maps;
 }
 
 
@@ -791,7 +815,7 @@
   // the pop is also necessary to simulate the effect of a ret(0)
   __ pop(exception_pc);
 
-  // Restore SP from BP if the exception PC is a MethodHandle call site.
+  // Restore SP from BP if the exception PC is a method handle call site.
   NOT_LP64(__ get_thread(thread);)
   __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
   __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
@@ -934,7 +958,6 @@
   __ ret(0);
 
   return oop_maps;
-
 }
 
 
@@ -952,35 +975,9 @@
   switch (id) {
     case forward_exception_id:
       {
-        // we're handling an exception in the context of a compiled
-        // frame.  The registers have been saved in the standard
-        // places.  Perform an exception lookup in the caller and
-        // dispatch to the handler if found.  Otherwise unwind and
-        // dispatch to the callers exception handler.
-
-        const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
-        const Register exception_oop = rax;
-        const Register exception_pc = rdx;
-
-        // load pending exception oop into rax,
-        __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
-        // clear pending exception
-        __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
-
-        // load issuing PC (the return address for this stub) into rdx
-        __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
-
-        // make sure that the vm_results are cleared (may be unnecessary)
-        __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
-        __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
-
-        // verify that that there is really a valid exception in rax,
-        __ verify_not_null_oop(exception_oop);
-
-        oop_maps = new OopMapSet();
-        OopMap* oop_map = generate_oop_map(sasm, 1);
-        generate_handle_exception(sasm, oop_maps, oop_map);
-        __ stop("should not reach here");
+        oop_maps = generate_handle_exception(id, sasm);
+        __ leave();
+        __ ret(0);
       }
       break;
 
@@ -1315,13 +1312,15 @@
       break;
 
     case handle_exception_nofpu_id:
-      save_fpu_registers = false;
-      // fall through
     case handle_exception_id:
       { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
-        oop_maps = new OopMapSet();
-        OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers);
-        generate_handle_exception(sasm, oop_maps, oop_map, save_fpu_registers);
+        oop_maps = generate_handle_exception(id, sasm);
+      }
+      break;
+
+    case handle_exception_from_callee_id:
+      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
+        oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
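
For the new handle_exception_from_callee_id case above, the frame size is assembled from configuration-dependent pieces: two words for saved RBP and the return address, one more word for the thread argument on 32-bit, plus the Win64 argument register save area. A small sketch of that arithmetic; the constants are illustrative assumptions, not values read out of a build:

#include <cstdio>

// Illustrative configuration record; real values come from the VM's platform headers.
struct Config {
  bool lp64;                     // 64-bit VM?
  bool win64;                    // Windows x64 ABI?
  int  arg_reg_save_area_bytes;  // frame::arg_reg_save_area_bytes (assumed 32 on Win64)
  int  bytes_per_word;           // 8 on LP64, 4 on 32-bit
};

// Mirrors: 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/)
//          WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord)
int callee_handler_frame_words(const Config& c) {
  int words = 2;                                  // saved BP + return address
  if (!c.lp64) words += 1;                        // thread is passed on the stack
  if (c.win64) words += c.arg_reg_save_area_bytes / c.bytes_per_word;
  return words;
}

int main() {
  Config linux64 = { true,  false,  0, 8 };
  Config win64   = { true,  true,  32, 8 };
  Config x86_32  = { false, false,  0, 4 };
  printf("linux64: %d words\n", callee_handler_frame_words(linux64));  // 2
  printf("win64:   %d words\n", callee_handler_frame_words(win64));    // 6
  printf("x86_32:  %d words\n", callee_handler_frame_words(x86_32));   // 3
  return 0;
}
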
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Mon Feb 28 06:07:12 2011 -0800
@@ -419,6 +419,7 @@
 
   // some handy addresses
   Address rbx_method_fie(     rbx,      methodOopDesc::from_interpreted_offset() );
+  Address rbx_method_fce(     rbx,      methodOopDesc::from_compiled_offset() );
 
   Address rcx_mh_vmtarget(    rcx_recv, java_dyn_MethodHandle::vmtarget_offset_in_bytes() );
   Address rcx_dmh_vmindex(    rcx_recv, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes() );
@@ -448,12 +449,10 @@
   case _raise_exception:
     {
       // Not a real MH entry, but rather shared code for raising an
-      // exception.  Since we use a C2I adapter to set up the
-      // interpreter state, arguments are expected in compiler
-      // argument registers.
+      // exception.  Since we use the compiled entry, arguments are
+      // expected in compiler argument registers.
       assert(raise_exception_method(), "must be set");
-      address c2i_entry = raise_exception_method()->get_c2i_entry();
-      assert(c2i_entry, "method must be linked");
+      assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
 
       const Register rdi_pc = rax;
       __ pop(rdi_pc);  // caller PC
@@ -472,13 +471,10 @@
       __ jccb(Assembler::zero, L_no_method);
       __ verify_oop(rbx_method);
 
-      // 32-bit: push remaining arguments as if coming from the compiler.
       NOT_LP64(__ push(rarg2_required));
+      __ push(rdi_pc);         // restore caller PC
+      __ jmp(rbx_method_fce);  // jump to compiled entry
 
-      __ push(rdi_pc);  // restore caller PC
-      __ jump(ExternalAddress(c2i_entry));  // do C2I transition
-
-      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
       __ bind(L_no_method);
       __ push(rarg2_required);
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon Feb 28 06:07:12 2011 -0800
@@ -439,10 +439,6 @@
     // Verify that there is really a valid exception in RAX.
     __ verify_oop(exception_oop);
 
-    // Restore SP from BP if the exception PC is a MethodHandle call site.
-    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
-    __ cmovptr(Assembler::notEqual, rsp, rbp);
-
     // continue at exception handler (return address removed)
     // rax: exception
     // rbx: exception handler
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Mon Feb 28 06:07:12 2011 -0800
@@ -426,10 +426,9 @@
 // been deoptimized. If that is the case we return the deopt blob
 // unpack_with_exception entry instead. This makes life for the exception blob easier
 // because making that same check and diverting is painful from assembly language.
-//
-
-
 JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
+  // Reset method handle flag.
+  thread->set_is_method_handle_return(false);
 
   Handle exception(thread, ex);
   nm = CodeCache::find_nmethod(pc);
@@ -480,11 +479,12 @@
     return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
   }
 
-  // ExceptionCache is used only for exceptions at call and not for implicit exceptions
+  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
   if (guard_pages_enabled) {
     address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
     if (fast_continuation != NULL) {
-      if (fast_continuation == ExceptionCache::unwind_handler()) fast_continuation = NULL;
+      // Set flag if return address is a method handle call site.
+      thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
       return fast_continuation;
     }
   }
@@ -522,14 +522,14 @@
     thread->set_exception_pc(pc);
 
     // the exception cache is used only by non-implicit exceptions
-    if (continuation == NULL) {
-      nm->add_handler_for_exception_and_pc(exception, pc, ExceptionCache::unwind_handler());
-    } else {
+    if (continuation != NULL) {
       nm->add_handler_for_exception_and_pc(exception, pc, continuation);
     }
   }
 
   thread->set_vm_result(exception());
+  // Set flag if return address is a method handle call site.
+  thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
 
   if (TraceExceptions) {
     ttyLocker ttyl;
@@ -542,20 +542,19 @@
 JRT_END
 
 // Enter this method from compiled code only if there is a Java exception handler
-// in the method handling the exception
+// in the method handling the exception.
 // We are entering here from exception stub. We don't do a normal VM transition here.
 // We do it in a helper. This is so we can check to see if the nmethod we have just
 // searched for an exception handler has been deoptimized in the meantime.
-address  Runtime1::exception_handler_for_pc(JavaThread* thread) {
+address Runtime1::exception_handler_for_pc(JavaThread* thread) {
   oop exception = thread->exception_oop();
   address pc = thread->exception_pc();
   // Still in Java mode
-  debug_only(ResetNoHandleMark rnhm);
+  DEBUG_ONLY(ResetNoHandleMark rnhm);
   nmethod* nm = NULL;
   address continuation = NULL;
   {
     // Enter VM mode by calling the helper
-
     ResetNoHandleMark rnhm;
     continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);
   }
@@ -563,11 +562,11 @@
 
   // Now check to see if the nmethod we were called from is now deoptimized.
   // If so we must return to the deopt blob and deoptimize the nmethod
-
   if (nm != NULL && caller_is_deopted()) {
     continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
   }
 
+  assert(continuation != NULL, "no handler found");
   return continuation;
 }
 
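
With the ExceptionCache::unwind_handler sentinel removed (see the nmethod changes below), the helper above caches only real continuations and instead records on the thread whether the throwing pc is a method handle call site. A reduced sketch of that control flow, with hypothetical types standing in for nmethod's cache and JavaThread:

#include <cstdint>
#include <cstdio>
#include <map>

using address = uintptr_t;

// Hypothetical handler cache: only real handlers are stored, so a miss just
// means "compute it again" and no sentinel value is needed.
struct HandlerCache {
  std::map<address, address> handlers;
  address lookup(address pc) const {
    std::map<address, address>::const_iterator it = handlers.find(pc);
    return it == handlers.end() ? 0 : it->second;
  }
  void add(address pc, address handler) { handlers[pc] = handler; }
};

struct Thread {
  bool is_method_handle_return = false;
};

// Analogue of exception_handler_for_pc_helper: reset the flag, consult the
// cache, cache only non-null continuations, and always set the flag from the pc.
address handler_for_pc(Thread& t, HandlerCache& cache, address pc,
                       bool pc_is_method_handle_call, address computed) {
  t.is_method_handle_return = false;           // reset first
  if (address fast = cache.lookup(pc)) {
    t.is_method_handle_return = pc_is_method_handle_call;
    return fast;
  }
  if (computed != 0) cache.add(pc, computed);  // never cache "no handler"
  t.is_method_handle_return = pc_is_method_handle_call;
  return computed;
}

int main() {
  Thread t; HandlerCache c;
  address h = handler_for_pc(t, c, 0x1000, true, 0x2000);
  printf("handler=%#llx mh_return=%d\n", (unsigned long long)h, (int)t.is_method_handle_return);
  return 0;
}
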
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Mon Feb 28 06:07:12 2011 -0800
@@ -54,6 +54,7 @@
   stub(new_multi_array)              \
   stub(handle_exception_nofpu)         /* optimized version that does not preserve fpu registers */ \
   stub(handle_exception)             \
+  stub(handle_exception_from_callee) \
   stub(throw_array_store_exception)  \
   stub(throw_class_cast_exception)   \
   stub(throw_incompatible_class_change_error)   \
@@ -116,11 +117,11 @@
   static const char* _blob_names[];
 
   // stub generation
-  static void generate_blob_for(BufferBlob* blob, StubID id);
-  static OopMapSet* generate_code_for(StubID id, StubAssembler* masm);
+  static void       generate_blob_for(BufferBlob* blob, StubID id);
+  static OopMapSet* generate_code_for(StubID id, StubAssembler* sasm);
   static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument);
-  static void generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool ignore_fpu_registers = false);
-  static void generate_unwind_exception(StubAssembler *sasm);
+  static OopMapSet* generate_handle_exception(StubID id, StubAssembler* sasm);
+  static void       generate_unwind_exception(StubAssembler *sasm);
   static OopMapSet* generate_patching(StubAssembler* sasm, address target);
 
   static OopMapSet* generate_stub_call(StubAssembler* sasm, Register result, address entry,
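
Adding stub(handle_exception_from_callee) to the list above is all that is needed to get both a new StubID constant and a printable blob name, because the list is expanded twice with different definitions of stub(). A simplified, self-contained sketch of that X-macro pattern (the macro names here are invented, not HotSpot's exact ones):

#include <cstdio>

// Shortened stub list; the real list in this header is much longer.
#define MY_STUBS_DO(stub)            \
  stub(handle_exception_nofpu)       \
  stub(handle_exception)             \
  stub(handle_exception_from_callee) \
  stub(unwind_exception)

// First expansion: one enum constant per stub, named <stub>_id.
#define DECLARE_STUB_ID(x) x##_id,
enum StubID { MY_STUBS_DO(DECLARE_STUB_ID) number_of_ids };
#undef DECLARE_STUB_ID

// Second expansion: a parallel table of human-readable names.
#define STUB_NAME(x) #x,
static const char* const stub_names[] = { MY_STUBS_DO(STUB_NAME) };
#undef STUB_NAME

int main() {
  printf("%d stubs\n", (int)number_of_ids);
  printf("id %d is \"%s\"\n", (int)handle_exception_from_callee_id,
         stub_names[handle_exception_from_callee_id]);
  return 0;
}

Keeping the ids and the names in one list means a new stub such as handle_exception_from_callee cannot get out of sync between the enum and the name table.
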
--- a/hotspot/src/share/vm/code/nmethod.cpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Mon Feb 28 06:07:12 2011 -0800
@@ -190,15 +190,10 @@
 } nmethod_stats;
 #endif //PRODUCT
 
+
 //---------------------------------------------------------------------------------
 
 
-// The _unwind_handler is a special marker address, which says that
-// for given exception oop and address, the frame should be removed
-// as the tuple cannot be caught in the nmethod
-address ExceptionCache::_unwind_handler = (address) -1;
-
-
 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
   assert(pc != NULL, "Must be non null");
   assert(exception.not_null(), "Must be non null");
--- a/hotspot/src/share/vm/code/nmethod.hpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Mon Feb 28 06:07:12 2011 -0800
@@ -34,7 +34,6 @@
 class ExceptionCache : public CHeapObj {
   friend class VMStructs;
  private:
-  static address _unwind_handler;
   enum { cache_size = 16 };
   klassOop _exception_type;
   address  _pc[cache_size];
@@ -62,8 +61,6 @@
   bool    match_exception_with_space(Handle exception) ;
   address test_address(address addr);
   bool    add_address_and_handler(address addr, address handler) ;
-
-  static address unwind_handler() { return _unwind_handler; }
 };
 
 
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Mon Feb 28 06:07:12 2011 -0800
@@ -431,25 +431,24 @@
 // previous frame depending on the return address.
 
 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
-  assert(frame::verify_return_pc(return_address), "must be a return pc");
-
-  // Reset MethodHandle flag.
+  assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address));
+
+  // Reset method handle flag.
   thread->set_is_method_handle_return(false);
 
-  // the fastest case first
+  // The fastest case first
   CodeBlob* blob = CodeCache::find_blob(return_address);
-  if (blob != NULL && blob->is_nmethod()) {
-    nmethod* code = (nmethod*)blob;
-    assert(code != NULL, "nmethod must be present");
-    // Check if the return address is a MethodHandle call site.
-    thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
+  nmethod* nm = (blob != NULL) ? blob->as_nmethod_or_null() : NULL;
+  if (nm != NULL) {
+    // Set flag if return address is a method handle call site.
+    thread->set_is_method_handle_return(nm->is_method_handle_return(return_address));
     // native nmethods don't have exception handlers
-    assert(!code->is_native_method(), "no exception handler");
-    assert(code->header_begin() != code->exception_begin(), "no exception handler");
-    if (code->is_deopt_pc(return_address)) {
+    assert(!nm->is_native_method(), "no exception handler");
+    assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
+    if (nm->is_deopt_pc(return_address)) {
       return SharedRuntime::deopt_blob()->unpack_with_exception();
     } else {
-      return code->exception_begin();
+      return nm->exception_begin();
     }
   }
 
@@ -462,22 +461,9 @@
     return Interpreter::rethrow_exception_entry();
   }
 
-  // Compiled code
-  if (CodeCache::contains(return_address)) {
-    CodeBlob* blob = CodeCache::find_blob(return_address);
-    if (blob->is_nmethod()) {
-      nmethod* code = (nmethod*)blob;
-      assert(code != NULL, "nmethod must be present");
-      // Check if the return address is a MethodHandle call site.
-      thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
-      assert(code->header_begin() != code->exception_begin(), "no exception handler");
-      return code->exception_begin();
-    }
-    if (blob->is_runtime_stub()) {
-      ShouldNotReachHere();   // callers are responsible for skipping runtime stub frames
-    }
-  }
+  guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
+
 #ifndef PRODUCT
   { ResourceMark rm;
     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
@@ -485,6 +471,7 @@
     tty->print_cr("b) other problem");
   }
 #endif // PRODUCT
+
   ShouldNotReachHere();
   return NULL;
 }
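
raw_exception_handler_for_return_address now narrows the blob once with as_nmethod_or_null() instead of testing is_nmethod() and casting in two separate places, which is what let the duplicated "Compiled code" branch above be deleted. A sketch of that narrowing idiom on a hypothetical blob hierarchy (not HotSpot's real CodeBlob):

#include <cstdio>

struct nmethod;

// Hypothetical base class; the narrowing helper returns NULL for any blob
// that is not an nmethod, so callers need only one test.
struct CodeBlob {
  virtual ~CodeBlob() {}
  virtual bool is_nmethod() const { return false; }
  nmethod* as_nmethod_or_null();
};

struct nmethod : CodeBlob {
  bool is_nmethod() const override { return true; }
};

nmethod* CodeBlob::as_nmethod_or_null() {
  return is_nmethod() ? static_cast<nmethod*>(this) : nullptr;
}

int main() {
  CodeBlob plain;
  nmethod  nm;
  CodeBlob* blobs[] = { &plain, &nm, nullptr };
  for (CodeBlob* blob : blobs) {
    // One narrowing step replaces the is_nmethod()/cast pattern.
    nmethod* n = (blob != nullptr) ? blob->as_nmethod_or_null() : nullptr;
    printf("blob %p -> nmethod? %s\n", (void*)blob, n != nullptr ? "yes" : "no");
  }
  return 0;
}
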
--- a/hotspot/src/share/vm/utilities/macros.hpp	Sat Feb 26 12:10:54 2011 -0800
+++ b/hotspot/src/share/vm/utilities/macros.hpp	Mon Feb 28 06:07:12 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,6 +161,14 @@
 #define NOT_WINDOWS(code) code
 #endif
 
+#ifdef _WIN64
+#define WIN64_ONLY(code) code
+#define NOT_WIN64(code)
+#else
+#define WIN64_ONLY(code)
+#define NOT_WIN64(code) code
+#endif
+
 #if defined(IA32) || defined(AMD64)
 #define X86
 #define X86_ONLY(code) code
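
The new WIN64_ONLY/NOT_WIN64 pair follows the same shape as the other *_ONLY/NOT_* macros in this header: the code argument is kept on one configuration and compiled away on the other, which is how c1_Runtime1_x86.cpp conditionally adds the argument register save area. A small usage sketch; the macro definitions are repeated locally so the example is self-contained, and the constants are illustrative:

#include <cstdio>

#ifdef _WIN64
#define WIN64_ONLY(code) code
#define NOT_WIN64(code)
#else
#define WIN64_ONLY(code)
#define NOT_WIN64(code) code
#endif

// Illustrative stand-ins for frame::arg_reg_save_area_bytes and BytesPerWord.
static const int arg_reg_save_area_bytes = 32;
static const int BytesPerWord            = 8;

int main() {
  // Windows x64 reserves home slots for the four register arguments, so a
  // stub frame must include them; other platforms add nothing here.
  int frame_words = 2 WIN64_ONLY(+ arg_reg_save_area_bytes / BytesPerWord);
  WIN64_ONLY(printf("Win64: "));
  NOT_WIN64(printf("not Win64: "));
  printf("frame is %d words\n", frame_words);
  return 0;
}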