hotspot/src/share/vm/runtime/sharedRuntime.cpp
changeset 4753 9e7bcf214f71
parent 4752 67a506670cd0
child 4755 eee57ea6d910
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	4752:67a506670cd0
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	4753:9e7bcf214f71
@@ -1359,11 +1359,11 @@
 
 // ---------------------------------------------------------------------------
 // We are calling the interpreter via a c2i. Normally this would mean that
 // we were called by a compiled method. However we could have lost a race
 // where we went int -> i2c -> c2i and so the caller could in fact be
-// interpreted. If the caller is compiled we attampt to patch the caller
+// interpreted. If the caller is compiled we attempt to patch the caller
 // so he no longer calls into the interpreter.
 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
   methodOop moop(method);
 
   address entry_point = moop->from_compiled_entry();
@@ -1375,14 +1375,23 @@
   // Also it is possible that we lost a race in that from_compiled_entry
   // is now back to the i2c in that case we don't need to patch and if
   // we did we'd leap into space because the callsite needs to use
   // "to interpreter" stub in order to load up the methodOop. Don't
   // ask me how I know this...
-  //
 
   CodeBlob* cb = CodeCache::find_blob(caller_pc);
-  if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
+  if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
+    return;
+  }
+
+  // The check above makes sure this is a nmethod.
+  nmethod* nm = cb->as_nmethod_or_null();
+  assert(nm, "must be");
+
+  // Don't fixup MethodHandle call sites as c2i/i2c adapters are used
+  // to implement MethodHandle actions.
+  if (nm->is_method_handle_return(caller_pc)) {
     return;
   }
 
   // There is a benign race here. We could be attempting to patch to a compiled
   // entry point at the same time the callee is being deoptimized. If that is
@@ -1393,11 +1402,11 @@
   // from_compiled_entry and the NULL isn't present yet then we lose the race
   // and patch the code with the same old data. Asi es la vida.
 
   if (moop->code() == NULL) return;
 
-  if (((nmethod*)cb)->is_in_use()) {
+  if (nm->is_in_use()) {
 
     // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
     if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
       NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);