diff -r 07e8e98ca6af -r 3863651dc319 hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Wed May 14 22:54:45 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Wed May 14 20:44:33 2014 +0200
@@ -2239,6 +2239,30 @@
   }
 
   {
+    // Normally we do not post method_entry and method_exit events from
+    // compiled code, only from the interpreter. If method_entry/exit
+    // events are switched on at runtime, we will deoptimize everything
+    // (see VM_EnterInterpOnlyMode) on the stack and call method_entry/exit
+    // from the interpreter. But when we do that, we will not deoptimize
+    // this native wrapper frame. Thus we have an extra check here to see
+    // if we are now in interp_only_mode and in that case we do the jvmti
+    // callback.
+    Label skip_jvmti_method_exit;
+    __ cmpl(Address(thread, JavaThread::interp_only_mode_offset()), 0);
+    __ jcc(Assembler::zero, skip_jvmti_method_exit, true);
+
+    save_native_result(masm, ret_type, stack_slots);
+    __ mov_metadata(rax, method());
+    __ call_VM(
+        noreg,
+        CAST_FROM_FN_PTR(address, SharedRuntime::jvmti_method_exit),
+        thread, rax,
+        true);
+    restore_native_result(masm, ret_type, stack_slots);
+    __ bind(skip_jvmti_method_exit);
+  }
+
+  {
     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
     // Tell dtrace about this method exit
     save_native_result(masm, ret_type, stack_slots);