hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
changeset 24462 0676642e6560
parent 24453 3863651dc319
child 28947 2ea471384931
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Sat May 17 19:34:38 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue May 20 20:35:39 2014 +0200
@@ -2484,31 +2484,6 @@
     __ bind(done);
 
   }
-
-  {
-    // Normally we do not post method_entry and method_exit events from
-    // compiled code, only from the interpreter. If method_entry/exit
-    // events are switched on at runtime, we will deoptimize everything
-    // (see VM_EnterInterpOnlyMode) on the stack and call method_entry/exit
-    // from the interpreter. But when we do that, we will not deoptimize
-    // this native wrapper frame. Thus we have an extra check here to see
-    // if we are now in interp_only_mode and in that case we do the jvmti
-    // callback.
-    Label skip_jvmti_method_exit;
-    __ cmpl(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
-    __ jcc(Assembler::zero, skip_jvmti_method_exit, true);
-
-    save_native_result(masm, ret_type, stack_slots);
-    __ mov_metadata(c_rarg1, method());
-    __ call_VM(
-        noreg,
-        CAST_FROM_FN_PTR(address, SharedRuntime::jvmti_method_exit),
-        r15_thread, c_rarg1,
-        true);
-    restore_native_result(masm, ret_type, stack_slots);
-    __ bind(skip_jvmti_method_exit);
-  }
-
   {
     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
     save_native_result(masm, ret_type, stack_slots);