@@ -1349,11 +1349,11 @@
     CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info);
   }
 
   // grab lock, check for deoptimization and potentially patch caller
   {
-    MutexLocker ml_patch(CompiledIC_lock);
+    CompiledICLocker ml(caller_nm);
 
     // Lock blocks for safepoint during which both nmethods can change state.
 
     // Now that we are ready to patch if the Method* was redefined then
     // don't update call site and let the caller retry.
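All three hunks in this change make the same substitution: patching no longer serializes on the single global CompiledIC_lock mutex, but instead takes a CompiledICLocker, an RAII guard scoped to the particular CompiledMethod whose call sites are being patched. As a rough illustration of the shape of that pattern, here is a minimal standalone sketch; Method, ICLocker, and patch_call_site below are simplified stand-ins, not HotSpot's actual types:

#include <mutex>

// Stand-in for a compiled method that owns its own IC lock,
// rather than every method contending on one global mutex.
struct Method {
  std::mutex ic_lock;
};

// RAII guard: takes the lock of one specific method on construction
// and releases it when the scope ends, mirroring CompiledICLocker's shape.
class ICLocker {
 public:
  explicit ICLocker(Method* m) : _m(m) { _m->ic_lock.lock(); }
  ~ICLocker() { _m->ic_lock.unlock(); }
  ICLocker(const ICLocker&) = delete;
  ICLocker& operator=(const ICLocker&) = delete;
 private:
  Method* _m;
};

void patch_call_site(Method* caller) {
  ICLocker ml(caller);  // held for the rest of this scope
  // ... patch this caller's inline caches ...
}                       // destructor releases the lock here

int main() {
  Method m;
  patch_call_site(&m);
  return 0;
}

Because the guard is constructed from the method being patched, two threads fixing up call sites in different methods no longer block each other.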
@@ -1583,15 +1583,17 @@
   // - instead the event will be deferred until the event collector goes
   // out of scope.
   JvmtiDynamicCodeEventCollector event_collector;
 
   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
-  { MutexLocker ml_patch (CompiledIC_lock);
+  {
     RegisterMap reg_map(thread, false);
     frame caller_frame = thread->last_frame().sender(&reg_map);
     CodeBlob* cb = caller_frame.cb();
     CompiledMethod* caller_nm = cb->as_compiled_method_or_null();
+    CompiledICLocker ml(caller_nm);
+
     if (cb->is_compiled()) {
       CompiledIC* inline_cache = CompiledIC_before(((CompiledMethod*)cb), caller_frame.pc());
       bool should_be_mono = false;
       if (inline_cache->is_optimized()) {
         if (TraceCallFixup) {
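The detail worth noticing in this hunk is the ordering: the guard moves from the top of the block to below the frame walk, because a CompiledICLocker needs the caller_nm it protects, and that is only known once the caller's frame has been found. A small self-contained sketch of that constraint, with hypothetical stand-ins (caller_of_current_frame is a placeholder for the real RegisterMap/frame walk):

#include <mutex>

struct Method {
  std::mutex ic_lock;
};

// Placeholder for the stack walk: in the real code the caller's
// CompiledMethod is discovered at runtime, so it cannot be named up front.
Method* caller_of_current_frame() {
  static Method the_caller;
  return &the_caller;
}

void update_inline_cache() {
  Method* caller_nm = caller_of_current_frame();       // 1. find the lock target
  std::lock_guard<std::mutex> ml(caller_nm->ic_lock);  // 2. only then take its lock
  // ... inspect and update the caller's inline cache ...
}

int main() {
  update_inline_cache();
  return 0;
}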
@@ -1729,12 +1731,11 @@
       // is always done through the same code path. (experience shows that it
       // leads to very hard to track down bugs, if an inline cache gets updated
       // to a wrong method). It should not be performance critical, since the
       // resolve is only done once.
 
-      bool is_nmethod = caller_nm->is_nmethod();
-      MutexLocker ml(CompiledIC_lock);
+      CompiledICLocker ml(caller_nm);
       if (is_static_call) {
         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
         ssc->set_to_clean();
       } else {
         // compiled, dispatched call (which used to call an interpreted method)
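The comment at the top of this hunk captures the design choice that set_to_clean() implements: rather than writing a new destination into a stale call site directly, the site is reset to the clean state, so the next invocation re-resolves through the one shared resolve path. A toy model of that choice (hypothetical stand-ins, not HotSpot code):

#include <cstdio>

enum class SiteState { Clean, Bound };

struct CallSite {
  SiteState state = SiteState::Bound;
  void set_to_clean() { state = SiteState::Clean; }  // force a fresh resolve
};

// The single, shared resolve path: every binding decision funnels through
// here, so a stale site is never patched to a possibly-wrong target in place.
void resolve(CallSite& site) {
  std::printf("resolving via the one code path\n");
  site.state = SiteState::Bound;
}

void invoke(CallSite& site) {
  if (site.state == SiteState::Clean) resolve(site);  // re-resolve on demand
  std::printf("dispatch\n");
}

int main() {
  CallSite site;
  site.set_to_clean();  // callee changed: reset instead of patching in place
  invoke(site);         // the next call re-resolves through the shared path
  return 0;
}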