@@ -48,10 +48,11 @@
 #include "oops/method.inline.hpp"
 #include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/orderAccess.hpp"
@@ -1136,10 +1136,15 @@
   mdo->inc_decompile_count();
 }
 
 bool nmethod::try_transition(int new_state_int) {
   signed char new_state = new_state_int;
+#ifdef DEBUG
+  if (new_state != unloaded) {
+    assert_lock_strong(CompiledMethod_lock);
+  }
+#endif
   for (;;) {
     signed char old_state = Atomic::load(&_state);
     if (old_state >= new_state) {
       // Ensure monotonicity of transitions.
       return false;
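Note: the hunk above shows only the head of the retry loop; the compare-exchange that completes a successful transition lies outside the excerpt. As a rough illustration of the pattern being asserted on (forward-only state changes), here is a minimal, self-contained C++ sketch using std::atomic in place of HotSpot's Atomic wrapper. The names, state values, and the compare_exchange step are illustrative assumptions, not HotSpot code.

#include <atomic>

// Illustrative state values, ordered so that later lifecycle states compare
// greater than earlier ones (mirroring in_use < not_entrant < zombie <
// unloaded; the exact values here are assumptions).
enum State : signed char { in_use = 0, not_entrant = 1, zombie = 2, unloaded = 3 };

std::atomic<signed char> _state{in_use};

// Sketch of a monotonic transition: it succeeds only if it moves the state
// strictly forward, and retries when another thread races it to some state
// that is still below the requested one.
bool try_transition_sketch(signed char new_state) {
  for (;;) {
    signed char old_state = _state.load();
    if (old_state >= new_state) {
      return false;  // someone already got here (or further); stay monotonic
    }
    if (_state.compare_exchange_weak(old_state, new_state)) {
      return true;   // we performed the transition
    }
    // CAS failed: another thread changed _state; re-read and retry.
  }
}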
@@ -1191,15 +1196,11 @@
   // If _method is already NULL the Method* is about to be unloaded,
   // so we don't have to break the cycle. Note that it is possible to
   // have the Method* live here, in case we unload the nmethod because
   // it is pointing to some oop (other than the Method*) being unloaded.
   if (_method != NULL) {
-    // OSR methods point to the Method*, but the Method* does not
-    // point back!
-    if (_method->code() == this) {
-      _method->clear_code(); // Break a cycle
-    }
+    _method->unlink_code(this);
   }
 
   // Make the class unloaded - i.e., change state and notify sweeper
   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
          "must be at safepoint");
@@ -1279,20 +1280,13 @@
   if (PrintCompilation && _state != unloaded) {
     print_on(tty, state_msg);
   }
 }
 
-void nmethod::unlink_from_method(bool acquire_lock) {
-  // We need to check if both the _code and _from_compiled_code_entry_point
-  // refer to this nmethod because there is a race in setting these two fields
-  // in Method* as seen in bugid 4947125.
-  // If the vep() points to the zombie nmethod, the memory for the nmethod
-  // could be flushed and the compiler and vtable stubs could still call
-  // through it.
-  if (method() != NULL && (method()->code() == this ||
-      method()->from_compiled_entry() == verified_entry_point())) {
-    method()->clear_code(acquire_lock);
-  }
-}
+void nmethod::unlink_from_method() {
+  if (method() != NULL) {
+    method()->unlink_code(this);
+  }
+}
 
 /**
  * Common functionality for both make_not_entrant and make_zombie
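With the acquire_lock flag gone, the new unlink_from_method() assumes its caller already holds CompiledMethod_lock instead of deciding about locking itself. A sketch of that calling convention, with a std::mutex stand-in and hypothetical names (in HotSpot the precondition would be checked with assert_lock_strong):

#include <mutex>

// Hypothetical stand-in for CompiledMethod_lock.
std::mutex compiled_method_lock_sketch;

void unlink_from_method_sketch() {
  // Precondition: compiled_method_lock_sketch is held by the caller, so the
  // function no longer needs an acquire_lock parameter.
  // ... method()->unlink_code(this) would go here ...
}

void make_not_entrant_sketch() {
  std::lock_guard<std::mutex> ml(compiled_method_lock_sketch);
  unlink_from_method_sketch();  // invoked with the lock already held
}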
@@ -1315,28 +1309,28 @@
   // This can be called while the system is already at a safepoint which is ok
   NoSafepointVerifier nsv;
 
   // during patching, depending on the nmethod state we must notify the GC that
   // code has been unloaded, unregistering it. We cannot do this right while
-  // holding the Patching_lock because we need to use the CodeCache_lock. This
+  // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
   // would be prone to deadlocks.
   // This flag is used to remember whether we need to later lock and unregister.
   bool nmethod_needs_unregister = false;
 
+  // invalidate osr nmethod before acquiring the patching lock since
+  // they both acquire leaf locks and we don't want a deadlock.
+  // This logic is equivalent to the logic below for patching the
+  // verified entry point of regular methods. We check that the
+  // nmethod is in use to ensure that it is invalidated only once.
+  if (is_osr_method() && is_in_use()) {
+    // this effectively makes the osr nmethod not entrant
+    invalidate_osr_method();
+  }
+
   {
-    // invalidate osr nmethod before acquiring the patching lock since
-    // they both acquire leaf locks and we don't want a deadlock.
-    // This logic is equivalent to the logic below for patching the
-    // verified entry point of regular methods. We check that the
-    // nmethod is in use to ensure that it is invalidated only once.
-    if (is_osr_method() && is_in_use()) {
-      // this effectively makes the osr nmethod not entrant
-      invalidate_osr_method();
-    }
-
     // Enter critical section. Does not block for safepoint.
-    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
 
     if (Atomic::load(&_state) >= state) {
       // another thread already performed this transition so nothing
       // to do, but return false to indicate this.
       return false;
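The new MutexLocker line relies on an idiom worth spelling out: HotSpot's MutexLocker treats a NULL mutex as "do nothing", so passing NULL when the current thread already owns CompiledMethod_lock lets the same code run whether or not the caller holds the lock, without self-deadlocking on a non-reentrant mutex. A minimal illustrative stand-in in plain C++ (not the HotSpot class; std::mutex has no owned_by_self(), so the caller tracks ownership here):

#include <mutex>

// Sketch of a "lock unless already held" guard: a null pointer means the
// caller already owns the lock, so the guard does nothing.
class MaybeLocker {
  std::mutex* _m;
public:
  explicit MaybeLocker(std::mutex* m) : _m(m) {
    if (_m != nullptr) _m->lock();
  }
  ~MaybeLocker() {
    if (_m != nullptr) _m->unlock();
  }
};

// Usage sketch: ownership must be tracked explicitly in this stand-in.
void critical_section(std::mutex& lock, bool owned_by_self) {
  MaybeLocker ml(owned_by_self ? nullptr : &lock);
  // ... state transition protected by 'lock' ...
}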
@@ -1387,12 +1381,13 @@
 
     // Log the transition once
     log_state_change();
 
     // Remove nmethod from method.
-    unlink_from_method(false /* already owns Patching_lock */);
-  } // leave critical region under Patching_lock
+    unlink_from_method();
+
+  } // leave critical region under CompiledMethod_lock
 
 #if INCLUDE_JVMCI
   // Invalidate can't occur while holding the Patching lock
   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
   if (nmethod_data != NULL) {