8041934: com/sun/jdi/RepStep.java fails in RT_Baseline on all platforms with assert(_cur_stack_depth == count_frames()) failed: cur_stack_depth out of sync
author sla
Wed, 14 May 2014 20:44:33 +0200
changeset 24453 3863651dc319
parent 24452 07e8e98ca6af
child 24454 19b3c54faec3
8041934: com/sun/jdi/RepStep.java fails in RT_Baseline on all platforms with assert(_cur_stack_depth == count_frames()) failed: cur_stack_depth out of sync
Summary: Missing call to jvmti_method_exit from native wrapper code
Reviewed-by: twisti, dcubed, sspitsyn
Contributed-by: rickard.backman@oracle.com
hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
hotspot/src/share/vm/runtime/sharedRuntime.cpp
hotspot/src/share/vm/runtime/sharedRuntime.hpp
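
Background for the diffs below: the failing test drives JVMTI into per-thread frame bookkeeping (the _cur_stack_depth counter from the assert) while threads are already executing compiled code, and the counter drifts because no MethodExit is posted when a compiled native wrapper frame returns. The sketch below is a minimal, illustrative JVMTI agent that switches MethodExit events on at runtime; error handling is omitted and the agent/callback names are made up, but the JVMTI calls are the standard ones. Enabling the event this way is the kind of situation the patched wrapper code has to handle.

  #include <jvmti.h>
  #include <string.h>

  static void JNICALL on_method_exit(jvmtiEnv* jvmti, JNIEnv* jni,
                                     jthread thread, jmethodID method,
                                     jboolean popped_by_exception,
                                     jvalue return_value) {
    // JVMTI must see an exit for every frame, including native wrappers,
    // or per-thread bookkeeping such as _cur_stack_depth goes out of sync.
  }

  JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM* vm, char* options, void* reserved) {
    jvmtiEnv* jvmti = NULL;
    vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_2);

    jvmtiCapabilities caps;
    memset(&caps, 0, sizeof(caps));
    caps.can_generate_method_exit_events = 1;
    jvmti->AddCapabilities(&caps);

    jvmtiEventCallbacks callbacks;
    memset(&callbacks, 0, sizeof(callbacks));
    callbacks.MethodExit = &on_method_exit;
    jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));

    // Enabling the event at runtime puts the affected threads into
    // interp_only_mode, the flag the patched wrapper epilogues test below.
    jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_METHOD_EXIT, NULL);
    return JNI_OK;
  }
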
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed May 14 22:54:45 2014 -0400
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed May 14 20:44:33 2014 +0200
@@ -2657,6 +2657,30 @@
     __ bind(done);
   }
 
+  {
+    // Normally we do not post method_entry and method_exit events from
+    // compiled code, only from the interpreter. If method_entry/exit
+    // events are switched on at runtime, we will deoptimize everything
+    // (see VM_EnterInterpOnlyMode) on the stack and call method_entry/exit
+    // from the interpreter. But when we do that, we will not deoptimize
+    // this native wrapper frame. Thus we have an extra check here to see
+    // if we are now in interp_only_mode and in that case we do the jvmti
+    // callback.
+    Label skip_jvmti_method_exit;
+    __ ld(G2_thread, JavaThread::interp_only_mode_offset(), G3_scratch);
+    __ cmp_and_br_short(G3_scratch, 0, Assembler::zero, Assembler::pt, skip_jvmti_method_exit);
+
+    save_native_result(masm, ret_type, stack_slots);
+    __ set_metadata_constant(method(), G3_scratch);
+    __ call_VM(
+        noreg,
+        CAST_FROM_FN_PTR(address, SharedRuntime::jvmti_method_exit),
+        G2_thread, G3_scratch,
+        true);
+    restore_native_result(masm, ret_type, stack_slots);
+    __ bind(skip_jvmti_method_exit);
+  }
+
   // Tell dtrace about this method exit
   {
     SkipIfEqual skip_if(
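
The comment in the hunk above refers to VM_EnterInterpOnlyMode. Roughly: when JVMTI needs interpreter-only execution for a thread (method entry/exit events, single stepping, and similar), a VM operation raises the thread's interp_only_mode flag and has the compiled Java frames on its stack deoptimized, so they finish in the interpreter and post their events from there. A native wrapper frame is not a deoptimizable compiled Java frame, so it has to test the flag itself on the way out. The following is a heavily condensed, illustrative sketch of that enable path, using HotSpot-internal types, so it is not standalone code; the real logic lives around VM_EnterInterpOnlyMode.

  // Illustrative sketch only; the function name is made up and the details
  // are condensed.
  void enter_interp_only_mode_sketch(JavaThread* thread) {
    thread->increment_interp_only_mode();  // the flag the wrapper epilogue now tests
    // Compiled Java frames currently on the stack are deoptimized so that
    // execution returns through the interpreter, which posts MethodExit.
    // The native wrapper frame is left alone, hence the explicit check
    // added by this changeset.
  }
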
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed May 14 22:54:45 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed May 14 20:44:33 2014 +0200
@@ -2239,6 +2239,30 @@
   }
 
   {
+    // Normally we do not post method_entry and method_exit events from
+    // compiled code, only from the interpreter. If method_entry/exit
+    // events are switched on at runtime, we will deoptimize everything
+    // (see VM_EnterInterpOnlyMode) on the stack and call method_entry/exit
+    // from the interpreter. But when we do that, we will not deoptimize
+    // this native wrapper frame. Thus we have an extra check here to see
+    // if we are now in interp_only_mode and in that case we do the jvmti
+    // callback.
+    Label skip_jvmti_method_exit;
+    __ cmpl(Address(thread, JavaThread::interp_only_mode_offset()), 0);
+    __ jcc(Assembler::zero, skip_jvmti_method_exit, true);
+
+    save_native_result(masm, ret_type, stack_slots);
+    __ mov_metadata(rax, method());
+    __ call_VM(
+        noreg,
+        CAST_FROM_FN_PTR(address, SharedRuntime::jvmti_method_exit),
+        thread, rax,
+        true);
+    restore_native_result(masm, ret_type, stack_slots);
+    __ bind(skip_jvmti_method_exit);
+  }
+
+  {
     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
     // Tell dtrace about this method exit
     save_native_result(masm, ret_type, stack_slots);
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed May 14 22:54:45 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed May 14 20:44:33 2014 +0200
@@ -2484,6 +2484,31 @@
     __ bind(done);
 
   }
+
+  {
+    // Normally we do not post method_entry and method_exit events from
+    // compiled code, only from the interpreter. If method_entry/exit
+    // events are switched on at runtime, we will deoptimize everything
+    // (see VM_EnterInterpOnlyMode) on the stack and call method_entry/exit
+    // from the interpreter. But when we do that, we will not deoptimize
+    // this native wrapper frame. Thus we have an extra check here to see
+    // if we are now in interp_only_mode and in that case we do the jvmti
+    // callback.
+    Label skip_jvmti_method_exit;
+    __ cmpl(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
+    __ jcc(Assembler::zero, skip_jvmti_method_exit, true);
+
+    save_native_result(masm, ret_type, stack_slots);
+    __ mov_metadata(c_rarg1, method());
+    __ call_VM(
+        noreg,
+        CAST_FROM_FN_PTR(address, SharedRuntime::jvmti_method_exit),
+        r15_thread, c_rarg1,
+        true);
+    restore_native_result(masm, ret_type, stack_slots);
+    __ bind(skip_jvmti_method_exit);
+  }
+
   {
     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
     save_native_result(masm, ret_type, stack_slots);
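
All three platform hunks emit the same logic in their respective macro assemblers: load the thread's interp_only_mode field, skip ahead if it is zero, otherwise preserve the native call's return value, load the Method* into an argument register, and call into the VM. As a C++-style rendering, with names taken from the diff where they exist and otherwise illustrative, not standalone code:

  // What each patched wrapper epilogue now does, conceptually. The accessor
  // name is illustrative; the generated code reads the field at
  // JavaThread::interp_only_mode_offset() directly.
  if (thread->is_interp_only_mode()) {
    // save_native_result()/restore_native_result() bracket this call in the
    // generated code because the return-value registers are still live.
    SharedRuntime::jvmti_method_exit(thread, method);
  }
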
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed May 14 22:54:45 2014 -0400
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed May 14 20:44:33 2014 +0200
@@ -993,6 +993,12 @@
   return 0;
 JRT_END
 
+JRT_ENTRY(int, SharedRuntime::jvmti_method_exit(
+    JavaThread* thread, Method* method))
+  JvmtiExport::post_method_exit(thread, method, thread->last_frame());
+  return 0;
+JRT_END
+
 
 // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode)
 // for a call current in progress, i.e., arguments has been pushed on stack
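
The new entry point is declared with JRT_ENTRY rather than, say, a JRT_LEAF: posting a JVMTI event needs a proper transition into the VM so the thread cooperates with safepoints, and thread->last_frame() then refers back to the native wrapper frame that is exiting. As a simplified sketch of roughly what the macro provides here, not the actual expansion from interfaceSupport.hpp:

  int SharedRuntime::jvmti_method_exit(JavaThread* thread, Method* method) {
    ThreadInVMfromJava tiv(thread);  // thread-state transition, safepoint aware
    JvmtiExport::post_method_exit(thread, method, thread->last_frame());
    return 0;
  }
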
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed May 14 22:54:45 2014 -0400
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed May 14 20:44:33 2014 +0200
@@ -263,6 +263,9 @@
   static int dtrace_method_entry(JavaThread* thread, Method* m);
   static int dtrace_method_exit(JavaThread* thread, Method* m);
 
+  // jvmti notification
+  static int jvmti_method_exit(JavaThread* thread, Method* m);
+
   // Utility method for retrieving the Java thread id, returns 0 if the
   // thread is not a well formed Java thread.
   static jlong get_java_tid(Thread* thread);