8213755: Let nmethods be is_unloading() outside of safepoints
Reviewed-by: rehn, coleenp, kvn
--- a/src/hotspot/share/code/codeCache.cpp Thu Nov 22 09:55:44 2018 +0100
+++ b/src/hotspot/share/code/codeCache.cpp Thu Nov 22 10:01:38 2018 +0100
@@ -657,7 +657,7 @@
void CodeCache::nmethods_do(void f(nmethod* nm)) {
assert_locked_or_safepoint(CodeCache_lock);
- NMethodIterator iter;
+ NMethodIterator iter(NMethodIterator::all_blobs);
while(iter.next()) {
f(iter.method());
}
@@ -665,8 +665,8 @@
void CodeCache::metadata_do(void f(Metadata* m)) {
assert_locked_or_safepoint(CodeCache_lock);
- NMethodIterator iter;
- while(iter.next_alive()) {
+ NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
+ while(iter.next()) {
iter.method()->metadata_do(f);
}
AOTLoader::metadata_do(f);
@@ -684,8 +684,8 @@
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock);
UnloadingScope scope(is_alive);
- CompiledMethodIterator iter;
- while(iter.next_alive()) {
+ CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
+ while(iter.next()) {
iter.method()->do_unloading(unloading_occurred);
}
}
@@ -842,8 +842,8 @@
// Temporarily mark nmethods that are claimed to be on the scavenge list.
void CodeCache::mark_scavenge_root_nmethods() {
- NMethodIterator iter;
- while(iter.next_alive()) {
+ NMethodIterator iter(NMethodIterator::only_alive);
+ while(iter.next()) {
nmethod* nm = iter.method();
assert(nm->scavenge_root_not_marked(), "clean state");
if (nm->on_scavenge_root_list())
@@ -854,8 +854,8 @@
// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods is gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
- NMethodIterator iter;
- while(iter.next_alive()) {
+ NMethodIterator iter(NMethodIterator::only_alive);
+ while(iter.next()) {
nmethod* nm = iter.method();
bool call_f = (f_or_null != NULL);
assert(nm->scavenge_root_not_marked(), "must be already processed");
@@ -869,8 +869,8 @@
void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
- NMethodIterator iter;
- while(iter.next_alive()) {
+ NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
+ while(iter.next()) {
nmethod* nm = iter.method();
assert(!nm->is_unloaded(), "Tautology");
nm->verify_clean_inline_caches();
@@ -943,8 +943,8 @@
void CodeCache::verify_oops() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
VerifyOopClosure voc;
- NMethodIterator iter;
- while(iter.next_alive()) {
+ NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
+ while(iter.next()) {
nmethod* nm = iter.method();
nm->oops_do(&voc);
nm->verify_oop_relocations();
@@ -1120,16 +1120,16 @@
void CodeCache::clear_inline_caches() {
assert_locked_or_safepoint(CodeCache_lock);
- CompiledMethodIterator iter;
- while(iter.next_alive()) {
+ CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
+ while(iter.next()) {
iter.method()->clear_inline_caches();
}
}
void CodeCache::cleanup_inline_caches() {
assert_locked_or_safepoint(CodeCache_lock);
- NMethodIterator iter;
- while(iter.next_alive()) {
+ NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
+ while(iter.next()) {
iter.method()->cleanup_inline_caches(/*clean_all=*/true);
}
}
@@ -1199,8 +1199,8 @@
}
}
- CompiledMethodIterator iter;
- while(iter.next_alive()) {
+ CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
+ while(iter.next()) {
CompiledMethod* nm = iter.method();
if (nm->is_marked_for_deoptimization()) {
// ...Already marked in the previous pass; don't count it again.
@@ -1222,8 +1222,8 @@
// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- CompiledMethodIterator iter;
- while(iter.next_alive()) {
+ CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
+ while(iter.next()) {
CompiledMethod* nm = iter.method();
if (!nm->method()->is_method_handle_intrinsic()) {
nm->mark_for_deoptimization();
@@ -1235,8 +1235,8 @@
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
int number_of_marked_CodeBlobs = 0;
- CompiledMethodIterator iter;
- while(iter.next_alive()) {
+ CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
+ while(iter.next()) {
CompiledMethod* nm = iter.method();
if (nm->is_dependent_on_method(dependee)) {
ResourceMark rm;
@@ -1250,8 +1250,8 @@
void CodeCache::make_marked_nmethods_not_entrant() {
assert_locked_or_safepoint(CodeCache_lock);
- CompiledMethodIterator iter;
- while(iter.next_alive()) {
+ CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
+ while(iter.next()) {
CompiledMethod* nm = iter.method();
if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
nm->make_not_entrant();
@@ -1519,7 +1519,7 @@
int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
memset(buckets, 0, sizeof(int) * bucketLimit);
- NMethodIterator iter;
+ NMethodIterator iter(NMethodIterator::all_blobs);
while(iter.next()) {
nmethod* nm = iter.method();
if(nm->method() != NULL && nm->is_java_method()) {
@@ -1659,8 +1659,8 @@
void CodeCache::print_codelist(outputStream* st) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- CompiledMethodIterator iter;
- while (iter.next_alive()) {
+ CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
+ while (iter.next()) {
CompiledMethod* cm = iter.method();
ResourceMark rm;
char* method_name = cm->method()->name_and_sig_as_C_string();
--- a/src/hotspot/share/code/codeCache.hpp Thu Nov 22 09:55:44 2018 +0100
+++ b/src/hotspot/share/code/codeCache.hpp Thu Nov 22 10:01:38 2018 +0100
@@ -334,13 +334,21 @@
// Iterator to iterate over nmethods in the CodeCache.
template <class T, class Filter> class CodeBlobIterator : public StackObj {
+ public:
+ enum LivenessFilter { all_blobs, only_alive, only_alive_and_not_unloading };
+
private:
CodeBlob* _code_blob; // Current CodeBlob
GrowableArrayIterator<CodeHeap*> _heap;
GrowableArrayIterator<CodeHeap*> _end;
+ bool _only_alive;
+ bool _only_not_unloading;
public:
- CodeBlobIterator(T* nm = NULL) {
+ CodeBlobIterator(LivenessFilter filter, T* nm = NULL)
+ : _only_alive(filter == only_alive || filter == only_alive_and_not_unloading),
+ _only_not_unloading(filter == only_alive_and_not_unloading)
+ {
if (Filter::heaps() == NULL) {
return;
}
@@ -360,29 +368,35 @@
bool next() {
assert_locked_or_safepoint(CodeCache_lock);
- bool result = next_blob();
- while (!result && _heap != _end) {
- // Advance to next code heap of segmented code cache
- if (++_heap == _end) {
- break;
+ for (;;) {
+ // Walk through heaps as required
+ if (!next_blob()) {
+ if (_heap == _end) {
+ return false;
+ }
+ ++_heap;
+ continue;
}
- result = next_blob();
+
+ // Filter is_alive as required
+ if (_only_alive && !_code_blob->is_alive()) {
+ continue;
+ }
+
+ // Filter is_unloading as required
+ if (_only_not_unloading) {
+ CompiledMethod* cm = _code_blob->as_compiled_method_or_null();
+ if (cm != NULL && cm->is_unloading()) {
+ continue;
+ }
+ }
+
+ return true;
}
-
- return result;
}
- // Advance iterator to next alive blob
- bool next_alive() {
- bool result = next();
- while(result && !_code_blob->is_alive()) {
- result = next();
- }
- return result;
- }
-
- bool end() const { return _code_blob == NULL; }
- T* method() const { return (T*)_code_blob; }
+ bool end() const { return _code_blob == NULL; }
+ T* method() const { return (T*)_code_blob; }
private:
@@ -422,7 +436,6 @@
static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::nmethod_heaps(); }
};
-
typedef CodeBlobIterator<CompiledMethod, CompiledMethodFilter> CompiledMethodIterator;
typedef CodeBlobIterator<nmethod, NMethodFilter> NMethodIterator;
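Review note: the header change above collapses the old two-step protocol (next() plus next_alive()) into a single next() whose filtering is fixed at construction, which is what lets every call site in codeCache.cpp state its liveness requirement up front. Below is a minimal standalone model of that design; StubNMethod, StubIterator, and the vector-backed heap are illustrative stand-ins, not the real CodeBlob/CodeHeap machinery:

```cpp
#include <cstdio>
#include <vector>

// Stub standing in for a code cache entry; the real classes are far richer.
struct StubNMethod {
  bool alive;
  bool unloading;
};

// Model of the patched CodeBlobIterator: the liveness filter is chosen at
// construction, and a single next() advances and filters in one loop.
class StubIterator {
 public:
  enum LivenessFilter { all_blobs, only_alive, only_alive_and_not_unloading };

  StubIterator(LivenessFilter filter, const std::vector<StubNMethod>* heap)
    : _only_alive(filter == only_alive || filter == only_alive_and_not_unloading),
      _only_not_unloading(filter == only_alive_and_not_unloading),
      _heap(heap), _pos(-1) {}

  bool next() {
    for (;;) {
      // Advance; running off the vector plays the role of walking off the
      // last code heap in the real iterator.
      if (++_pos >= (int)_heap->size()) {
        return false;
      }
      // Filter is_alive as required.
      if (_only_alive && !(*_heap)[_pos].alive) {
        continue;
      }
      // Filter is_unloading as required.
      if (_only_not_unloading && (*_heap)[_pos].unloading) {
        continue;
      }
      return true;
    }
  }

  const StubNMethod* method() const { return &(*_heap)[_pos]; }

 private:
  bool _only_alive;
  bool _only_not_unloading;
  const std::vector<StubNMethod>* _heap;
  int _pos;
};

static int count_visits(StubIterator::LivenessFilter f,
                        const std::vector<StubNMethod>& heap) {
  StubIterator iter(f, &heap);
  int n = 0;
  while (iter.next()) {
    n++;
  }
  return n;
}

int main() {
  // alive+healthy, alive+unloading, dead
  std::vector<StubNMethod> heap = {{true, false}, {true, true}, {false, false}};
  std::printf("all_blobs:                    %d\n",
              count_visits(StubIterator::all_blobs, heap));                    // 3
  std::printf("only_alive:                   %d\n",
              count_visits(StubIterator::only_alive, heap));                   // 2
  std::printf("only_alive_and_not_unloading: %d\n",
              count_visits(StubIterator::only_alive_and_not_unloading, heap)); // 1
  return 0;
}
```

The real iterator additionally skips blobs that are not compiled methods via as_compiled_method_or_null(); that detail is omitted in the model.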
--- a/src/hotspot/share/code/nmethod.cpp Thu Nov 22 09:55:44 2018 +0100
+++ b/src/hotspot/share/code/nmethod.cpp Thu Nov 22 10:01:38 2018 +0100
@@ -1019,13 +1019,20 @@
// there are no activations on the stack, not in use by the VM,
// and not in use by the ServiceThread)
bool nmethod::can_convert_to_zombie() {
- assert(is_not_entrant(), "must be a non-entrant method");
+ // Note that this is called when the sweeper has observed the nmethod to be
+ // not_entrant. However, with concurrent code cache unloading, the state
+ // might have moved on to unloaded if it is_unloading(), due to racing
+ // concurrent GC threads.
+ assert(is_not_entrant() || is_unloading(), "must be a non-entrant method");
// Since the nmethod sweeper only does partial sweep the sweeper's traversal
// count can be greater than the stack traversal count before it hits the
// nmethod for the second time.
- return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
- !is_locked_by_vm();
+ // If an is_unloading() nmethod is still not_entrant, then it is not safe to
+ // convert it to zombie due to GC unloading interactions. However, if it
+ // has become unloaded, then it is okay to convert such nmethods to zombie.
+ return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() &&
+ !is_locked_by_vm() && (!is_unloading() || is_unloaded());
}
void nmethod::inc_decompile_count() {
@@ -1093,8 +1100,6 @@
// Unregister must be done before the state change
Universe::heap()->unregister_nmethod(this);
- _state = unloaded;
-
// Log the unloading.
log_state_change();
@@ -1110,6 +1115,13 @@
set_osr_link(NULL);
NMethodSweeper::report_state_change(this);
+
+ // The release is only needed for compile-time ordering, as accesses
+ // into the nmethod after the store are not safe due to the sweeper
+ // being allowed to free it when the store is observed, during
+ // concurrent nmethod unloading. Therefore, there is no need for
+ // acquire on the loader side.
+ OrderAccess::release_store(&_state, (signed char)unloaded);
}
void nmethod::invalidate_osr_method() {
@@ -1913,11 +1925,11 @@
// Iterate over live nmethods and check dependencies of all nmethods that are not
// marked for deoptimization. A particular dependency is only checked once.
- NMethodIterator iter;
+ NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
while(iter.next()) {
nmethod* nm = iter.method();
// Only notify for live nmethods
- if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
+ if (!nm->is_marked_for_deoptimization()) {
for (Dependencies::DepStream deps(nm); deps.next(); ) {
// Construct abstraction of a dependency.
DependencySignature* current_sig = new DependencySignature(deps);
@@ -2912,7 +2924,7 @@
// Update the values in the InstalledCode instance if it still refers to this nmethod
nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
if (nm == this) {
- if (!is_alive()) {
+ if (!is_alive() || is_unloading()) {
// Break the link between nmethod and InstalledCode such that the nmethod
// can subsequently be flushed safely. The link must be maintained while
// the method could have live activations since invalidateInstalledCode
@@ -2927,7 +2939,7 @@
}
}
}
- if (!is_alive()) {
+ if (!is_alive() || is_unloading()) {
// Clear these out after the nmethod has been unregistered and any
// updates to the InstalledCode instance have been performed.
clear_jvmci_installed_code();
@@ -2951,7 +2963,7 @@
{
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
// This relationship can only be checked safely under a lock
- assert(!nm->is_alive() || nm->jvmci_installed_code() == installedCode(), "sanity check");
+ assert(!nm->is_alive() || nm->is_unloading() || nm->jvmci_installed_code() == installedCode(), "sanity check");
}
#endif
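Review note: the make_unloaded() hunks above move the state transition to the very end of the method and publish it with a release store, because a concurrent sweeper may free the nmethod as soon as it observes the unloaded state. A sketch of that publication pattern using std::atomic; the patch comment argues HotSpot needs no acquire on the reader side thanks to its own synchronization, but this standalone model uses acquire so the assertion holds under the plain C++ memory model:

```cpp
#include <atomic>
#include <cstdio>
#include <thread>

// Model of the ordering in nmethod::make_unloaded(): all tear-down work must
// be ordered before the state is published, because the sweeper may free the
// nmethod as soon as it observes 'unloaded'.
enum State : signed char { in_use = 0, not_entrant = 1, unloaded = 2 };

struct StubNMethod {
  int cleanup_steps_done = 0;               // stands in for unregister, log, OSR unlink...
  std::atomic<signed char> state{in_use};
};

void gc_thread_make_unloaded(StubNMethod* nm) {
  nm->cleanup_steps_done = 3;               // all prior tear-down work
  // Release store: everything above becomes visible before the state does.
  nm->state.store(unloaded, std::memory_order_release);
  // In the real code, no access to *nm is safe after this line: the sweeper
  // may free it once it sees 'unloaded'.
}

void sweeper_thread(StubNMethod* nm) {
  while (nm->state.load(std::memory_order_acquire) != unloaded) { /* spin */ }
  std::printf("cleanup_steps_done = %d\n", nm->cleanup_steps_done); // prints 3
}

int main() {
  StubNMethod nm;
  std::thread sweeper(sweeper_thread, &nm);
  std::thread gc(gc_thread_make_unloaded, &nm);
  gc.join();
  sweeper.join();
  return 0;
}
```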
--- a/src/hotspot/share/gc/shared/parallelCleaning.cpp Thu Nov 22 09:55:44 2018 +0100
+++ b/src/hotspot/share/gc/shared/parallelCleaning.cpp Thu Nov 22 10:01:38 2018 +0100
@@ -74,8 +74,8 @@
_first_nmethod(NULL),
_claimed_nmethod(NULL) {
// Get first alive nmethod
- CompiledMethodIterator iter = CompiledMethodIterator();
- if(iter.next_alive()) {
+ CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
+ if(iter.next()) {
_first_nmethod = iter.method();
}
_claimed_nmethod = _first_nmethod;
@@ -91,18 +91,18 @@
void CodeCacheUnloadingTask::claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
CompiledMethod* first;
- CompiledMethodIterator last;
+ CompiledMethodIterator last(CompiledMethodIterator::only_alive);
do {
*num_claimed_nmethods = 0;
first = _claimed_nmethod;
- last = CompiledMethodIterator(first);
+ last = CompiledMethodIterator(CompiledMethodIterator::only_alive, first);
if (first != NULL) {
for (int i = 0; i < MaxClaimNmethods; i++) {
- if (!last.next_alive()) {
+ if (!last.next()) {
break;
}
claimed_nmethods[i] = last.method();
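Review note: the parallel cleaning hunks keep the batch-claiming scheme intact and only switch it to the only_alive filter: each GC worker claims up to MaxClaimNmethods methods starting at the shared _claimed_nmethod cursor. The retry condition of the do/while falls outside this hunk; the sketch below assumes a CAS on the cursor purely for illustration, with the code cache modeled as an index range:

```cpp
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Model of batch claiming: workers repeatedly claim a batch of up to
// MaxClaimNmethods entries from a shared cursor. The CAS retry is an
// assumption about the elided loop condition, not taken from the hunk.
static const int MaxClaimNmethods = 16;
static const int total_nmethods = 100;

static std::atomic<int> claimed_cursor{0};
static std::atomic<int> processed{0};

void worker() {
  for (;;) {
    int first, last;
    do {
      first = claimed_cursor.load();
      if (first >= total_nmethods) return;   // nothing left to claim
      last = first + MaxClaimNmethods;
      if (last > total_nmethods) last = total_nmethods;
      // Retry if another worker moved the cursor under us.
    } while (!claimed_cursor.compare_exchange_weak(first, last));
    for (int i = first; i < last; i++) {
      processed.fetch_add(1);                // stand-in for do_unloading()
    }
  }
}

int main() {
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) workers.emplace_back(worker);
  for (auto& t : workers) t.join();
  std::printf("processed %d of %d\n", processed.load(), total_nmethods); // 100 of 100
  return 0;
}
```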
--- a/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp Thu Nov 22 09:55:44 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp Thu Nov 22 10:01:38 2018 +0100
@@ -228,8 +228,8 @@
// can be safely skipped.
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// Iterate over non-profiled and profiled nmethods
- NMethodIterator iter;
- while(iter.next_alive()) {
+ NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
+ while(iter.next()) {
nmethod* current = iter.method();
// Lock the nmethod so it can't be freed
nmethodLocker nml(current);
--- a/src/hotspot/share/runtime/javaCalls.cpp Thu Nov 22 09:55:44 2018 +0100
+++ b/src/hotspot/share/runtime/javaCalls.cpp Thu Nov 22 10:01:38 2018 +0100
@@ -427,7 +427,7 @@
#if INCLUDE_JVMCI
if (alternative_target != NULL) {
- if (alternative_target->is_alive()) {
+ if (alternative_target->is_alive() && !alternative_target->is_unloading()) {
thread->set_jvmci_alternate_call_target(alternative_target->verified_entry_point());
entry_point = method->adapter()->get_i2c_entry();
} else {
--- a/src/hotspot/share/runtime/sharedRuntime.cpp Thu Nov 22 09:55:44 2018 +0100
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp Thu Nov 22 10:01:38 2018 +0100
@@ -1279,7 +1279,7 @@
(!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
- assert(caller_nm->is_alive(), "It should be alive");
+ assert(caller_nm->is_alive() && !caller_nm->is_unloading(), "It should be alive");
#ifndef PRODUCT
// tracing/debugging/statistics
@@ -1606,8 +1606,10 @@
} else if (inline_cache->is_icholder_call()) {
CompiledICHolder* ic_oop = inline_cache->cached_icholder();
if (ic_oop != NULL) {
-
- if (receiver()->klass() == ic_oop->holder_klass()) {
+ if (!ic_oop->is_loader_alive()) {
+ // Deferred IC cleaning due to concurrent class unloading
+ inline_cache->set_to_clean();
+ } else if (receiver()->klass() == ic_oop->holder_klass()) {
// This isn't a real miss. We must have seen that compiled code
// is now available and we want the call site converted to a
// monomorphic compiled call site.
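Review note: the sharedRuntime.cpp change above handles inline caches whose CompiledICHolder survived into an IC miss after its class loader died: with concurrent class unloading, the GC may not have cleaned the IC yet, so the miss handler must test loader liveness before dereferencing the holder's klass. A stub-typed sketch of that decision order; Klass, StubICHolder, and resolve_icholder_miss are hypothetical names for illustration:

```cpp
#include <cstdio>

// Stub types modelling the decision added to the IC miss path; the real
// CompiledIC/CompiledICHolder types and set_to_clean() are HotSpot-internal.
struct Klass { int id; };

struct StubICHolder {
  bool loader_alive;
  Klass* holder_klass;
};

enum ICAction { clean_deferred, make_monomorphic, real_miss };

// Mirrors the new ordering: test loader liveness before touching the holder's
// klass, so a holder whose class loader died during concurrent unloading is
// never compared against the receiver.
ICAction resolve_icholder_miss(const StubICHolder* ic, const Klass* receiver_klass) {
  if (!ic->loader_alive) {
    return clean_deferred;     // deferred IC cleaning, as in the patch
  }
  if (receiver_klass == ic->holder_klass) {
    return make_monomorphic;   // not a real miss; compiled code is ready
  }
  return real_miss;
}

int main() {
  Klass k{1};
  StubICHolder dead{false, &k};
  StubICHolder live{true, &k};
  std::printf("%d %d %d\n",
              resolve_icholder_miss(&dead, &k),       // 0: clean_deferred
              resolve_icholder_miss(&live, &k),       // 1: make_monomorphic
              resolve_icholder_miss(&live, nullptr)); // 2: real_miss
  return 0;
}
```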
--- a/src/hotspot/share/runtime/sweeper.cpp Thu Nov 22 09:55:44 2018 +0100
+++ b/src/hotspot/share/runtime/sweeper.cpp Thu Nov 22 10:01:38 2018 +0100
@@ -142,7 +142,7 @@
#define SWEEP(nm)
#endif
-CompiledMethodIterator NMethodSweeper::_current; // Current compiled method
+CompiledMethodIterator NMethodSweeper::_current(CompiledMethodIterator::all_blobs); // Current compiled method
long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
@@ -274,7 +274,7 @@
assert(wait_for_stack_scanning(), "should only happen between sweeper cycles");
_seen = 0;
- _current = CompiledMethodIterator();
+ _current = CompiledMethodIterator(CompiledMethodIterator::all_blobs);
// Initialize to first nmethod
_current.next();
_traversals += 1;
@@ -653,7 +653,7 @@
JavaThread* current = JavaThread::current();
assert (current->is_Code_cache_sweeper_thread(), "Must be");
_thread = (CodeCacheSweeperThread*)current;
- if (!cm->is_zombie() && !cm->is_unloaded()) {
+ if (!cm->is_zombie() && !cm->is_unloading()) {
// Only expose live nmethods for scanning
_thread->set_scanned_compiled_method(cm);
}
@@ -697,7 +697,7 @@
// Skip methods that are currently referenced by the VM
if (cm->is_locked_by_vm()) {
// But still remember to clean-up inline caches for alive nmethods
- if (cm->is_alive()) {
+ if (cm->is_alive() && !cm->is_unloading()) {
// Clean inline caches that point to zombie/non-entrant/unloaded nmethods
CompiledICLocker ml(cm);
cm->cleanup_inline_caches(false);
@@ -786,7 +786,7 @@
void NMethodSweeper::possibly_flush(nmethod* nm) {
if (UseCodeCacheFlushing) {
- if (!nm->is_locked_by_vm() && !nm->is_native_method() && !nm->is_not_installed()) {
+ if (!nm->is_locked_by_vm() && !nm->is_native_method() && !nm->is_not_installed() && !nm->is_unloading()) {
bool make_not_entrant = false;
// Do not make native methods not-entrant
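Review note: the sweeper hunks consistently substitute is_unloading() where is_unloaded() or plain is_alive() used to suffice, matching the relaxed assertion in can_convert_to_zombie(): an nmethod that is_unloading() may only become a zombie once it has actually reached the unloaded state. A small truth-table sketch of that predicate, with stub fields standing in for the real queries:

```cpp
#include <cstdio>

// Stub of the states the sweeper now distinguishes. 'unloading' is a property
// a live nmethod can acquire concurrently; 'unloaded' is the terminal state.
struct StubNMethod {
  bool not_entrant;    // the real code asserts is_not_entrant() || is_unloading()
  bool unloading;
  bool unloaded;
  bool swept_enough;   // stands in for stack_traversal_mark() + 1 < traversal_count()
  bool locked_by_vm;
};

// Mirrors the patched nmethod::can_convert_to_zombie(): an is_unloading()
// nmethod must not become a zombie until it is actually unloaded.
bool can_convert_to_zombie(const StubNMethod& nm) {
  return nm.swept_enough && !nm.locked_by_vm && (!nm.unloading || nm.unloaded);
}

int main() {
  StubNMethod healthy    = {true, false, false, true, false};
  StubNMethod dying      = {true, true,  false, true, false};  // still unloading
  StubNMethod fully_dead = {false, true, true,  true, false};  // reached unloaded
  std::printf("%d %d %d\n",
              can_convert_to_zombie(healthy),     // 1
              can_convert_to_zombie(dying),       // 0: wait for unload to finish
              can_convert_to_zombie(fully_dead)); // 1
  return 0;
}
```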