src/hotspot/share/runtime/sharedRuntime.cpp
changeset 52384 d6dc479bcdd3
parent 51591 9183040e34d8
child 52661 4f45c682eab0
--- a/src/hotspot/share/runtime/sharedRuntime.cpp	52383:71564a544d4c
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp	52384:d6dc479bcdd3
@@ -1349,11 +1349,11 @@
     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
   }
 
   // grab lock, check for deoptimization and potentially patch caller
   {
-    MutexLocker ml_patch(CompiledIC_lock);
+    CompiledICLocker ml(caller_nm);
 
     // Lock blocks for safepoint during which both nmethods can change state.
 
     // Now that we are ready to patch if the Method* was redefined then
     // don't update call site and let the caller retry.
@@ -1380,11 +1380,11 @@
         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
         if (ssc->is_clean()) ssc->set(static_call_info);
       }
     }
 
-  } // unlock CompiledIC_lock
+  } // unlock CompiledICLocker
 
   return callee_method;
 }
 
 
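Both sides of this hunk use the same RAII scope-guard discipline: the lock is acquired where the guard is constructed and released at the closing brace, so the static call site is checked and patched entirely under the lock. What changes is the granularity: MutexLocker took the single global CompiledIC_lock, while CompiledICLocker is scoped to one CompiledMethod. A minimal sketch of the pattern, using std::mutex and hypothetical stand-in types rather than the real CompiledICLocker internals:

    #include <mutex>

    // Hypothetical stand-in for a compiled method that carries its own IC lock.
    struct CompiledMethodSketch {
      std::mutex ic_lock;  // fine-grained: guards only this method's call sites
    };

    // RAII guard analogous in shape to CompiledICLocker: the constructor
    // locks, the destructor unlocks at the end of the enclosing block.
    class CompiledICLockerSketch {
      CompiledMethodSketch* _nm;
     public:
      explicit CompiledICLockerSketch(CompiledMethodSketch* nm) : _nm(nm) {
        _nm->ic_lock.lock();
      }
      ~CompiledICLockerSketch() {
        _nm->ic_lock.unlock();
      }
      CompiledICLockerSketch(const CompiledICLockerSketch&) = delete;
      CompiledICLockerSketch& operator=(const CompiledICLockerSketch&) = delete;
    };

    void patch_call_site(CompiledMethodSketch* caller_nm) {
      {
        CompiledICLockerSketch ml(caller_nm);  // lock taken here
        // ... check for deoptimization and potentially patch the caller ...
      }  // lock released here, mirroring "} // unlock CompiledICLocker"
    }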
@@ -1583,15 +1583,17 @@
   // - instead the event will be deferred until the event collector goes
   // out of scope.
   JvmtiDynamicCodeEventCollector event_collector;
 
   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
-  { MutexLocker ml_patch (CompiledIC_lock);
+  {
     RegisterMap reg_map(thread, false);
     frame caller_frame = thread->last_frame().sender(&reg_map);
     CodeBlob* cb = caller_frame.cb();
     CompiledMethod* caller_nm = cb->as_compiled_method_or_null();
+    CompiledICLocker ml(caller_nm);
+
     if (cb->is_compiled()) {
       CompiledIC* inline_cache = CompiledIC_before(((CompiledMethod*)cb), caller_frame.pc());
       bool should_be_mono = false;
       if (inline_cache->is_optimized()) {
         if (TraceCallFixup) {
@@ -1645,11 +1647,11 @@
         // Either clean or megamorphic
       }
     } else {
       fatal("Unimplemented");
     }
-  } // Release CompiledIC_lock
+  } // Release CompiledICLocker
 
   return callee_method;
 }
 
 //
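The opening line shrinks to a bare brace for a reason: the old global MutexLocker could be taken before anything else, but the fine-grained CompiledICLocker needs the CompiledMethod it guards, so the lock is acquired only after caller_nm has been derived from the caller frame. A short sketch of that ordering constraint, reusing the hypothetical stand-in types from the sketch above:

    // Illustration only: with a per-method lock, the guard's subject must
    // exist before the guard itself can be constructed.
    void update_ic_on_miss(CompiledMethodSketch* (*find_caller_nm)()) {
      {
        // Derive the compiled caller first (order was irrelevant with a
        // global lock taken at the opening brace).
        CompiledMethodSketch* caller_nm = find_caller_nm();
        CompiledICLockerSketch ml(caller_nm);
        // ... inspect the inline cache; keep it monomorphic or go megamorphic ...
      }  // mirrors "} // Release CompiledICLocker"
    }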
@@ -1729,12 +1731,11 @@
       // is always done through the same code path. (experience shows that it
       // leads to very hard to track down bugs, if an inline cache gets updated
       // to a wrong method). It should not be performance critical, since the
       // resolve is only done once.
 
-      bool is_nmethod = caller_nm->is_nmethod();
-      MutexLocker ml(CompiledIC_lock);
+      CompiledICLocker ml(caller_nm);
       if (is_static_call) {
         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
         ssc->set_to_clean();
       } else {
         // compiled, dispatched call (which used to call an interpreted method)
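The set_to_clean() here pairs with the "if (ssc->is_clean()) ssc->set(static_call_info);" check in the first hunk: resolution patches a static call site only while it still observes it as clean, and re-resolution returns the site to clean so the next invocation resolves afresh. Both transitions now serialize on the same per-method lock. A self-contained sketch of that check-then-act discipline, with a hypothetical call-site model:

    #include <mutex>

    // Hypothetical model of a static call site, for illustration only.
    struct StaticCallSiteSketch {
      bool clean = true;
      void* entry = nullptr;
    };

    std::mutex ic_lock_sketch;  // stands in for the per-method IC lock

    // Resolution path: patch only if the site is still clean, since another
    // thread may have won the race and patched it first.
    void set_if_clean(StaticCallSiteSketch* ssc, void* new_entry) {
      std::lock_guard<std::mutex> ml(ic_lock_sketch);
      if (ssc->clean) {
        ssc->entry = new_entry;
        ssc->clean = false;
      }
    }

    // Re-resolution path: reset the site so the next call resolves again.
    void set_to_clean(StaticCallSiteSketch* ssc) {
      std::lock_guard<std::mutex> ml(ic_lock_sketch);
      ssc->entry = nullptr;
      ssc->clean = true;
    }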