8220343: Move scavenge_root_nmethods from shared code
Reviewed-by: kvn, eosterlund
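The scavengable-nmethod bookkeeping (the _scavenge_root_nmethods list head, the
per-nmethod link and state byte, and the walk/prune/verify logic) moves out of
the shared CodeCache and nmethod code into a new GC-owned ScavengableNMethods
class. Collectors that track scavengable nmethods (Serial, CMS, Parallel)
install a BoolObjectClosure predicate via ScavengableNMethods::initialize()
during post_initialize(); in return, the CollectedHeap::is_scavengable()
virtual and the CodeCache::gc_prologue()/gc_epilogue() hooks go away.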
--- a/src/hotspot/share/code/codeCache.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/code/codeCache.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -145,7 +145,6 @@
address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
-nmethod* CodeCache::_scavenge_root_nmethods = NULL;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;
// Initialize arrays of CodeHeap subsets
@@ -711,167 +710,6 @@
}
}
-// Walk the list of methods which might contain oops to the java heap.
-void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
- assert_locked_or_safepoint(CodeCache_lock);
-
- const bool fix_relocations = f->fix_relocations();
- debug_only(mark_scavenge_root_nmethods());
-
- nmethod* prev = NULL;
- nmethod* cur = scavenge_root_nmethods();
- while (cur != NULL) {
- debug_only(cur->clear_scavenge_root_marked());
- assert(cur->scavenge_root_not_marked(), "");
- assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
-
- bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
- LogTarget(Trace, gc, nmethod) lt;
- if (lt.is_enabled()) {
- LogStream ls(lt);
- CompileTask::print(&ls, cur,
- is_live ? "scavenge root " : "dead scavenge root", /*short_form:*/ true);
- }
- if (is_live) {
- // Perform cur->oops_do(f), maybe just once per nmethod.
- f->do_code_blob(cur);
- }
- nmethod* const next = cur->scavenge_root_link();
- // The scavengable nmethod list must contain all methods with scavengable
- // oops. It is safe to include more nmethod on the list, but we do not
- // expect any live non-scavengable nmethods on the list.
- if (fix_relocations) {
- if (!is_live || !cur->detect_scavenge_root_oops()) {
- unlink_scavenge_root_nmethod(cur, prev);
- } else {
- prev = cur;
- }
- }
- cur = next;
- }
-
- // Check for stray marks.
- debug_only(verify_perm_nmethods(NULL));
-}
-
-void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
- assert_locked_or_safepoint(CodeCache_lock);
- if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
- add_scavenge_root_nmethod(nm);
- }
-}
-
-void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
- nm->verify_scavenge_root_oops();
-}
-
-void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
- assert_locked_or_safepoint(CodeCache_lock);
-
- nm->set_on_scavenge_root_list();
- nm->set_scavenge_root_link(_scavenge_root_nmethods);
- set_scavenge_root_nmethods(nm);
- print_trace("add_scavenge_root", nm);
-}
-
-void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
- assert_locked_or_safepoint(CodeCache_lock);
-
- assert((prev == NULL && scavenge_root_nmethods() == nm) ||
- (prev != NULL && prev->scavenge_root_link() == nm), "precondition");
-
- print_trace("unlink_scavenge_root", nm);
- if (prev == NULL) {
- set_scavenge_root_nmethods(nm->scavenge_root_link());
- } else {
- prev->set_scavenge_root_link(nm->scavenge_root_link());
- }
- nm->set_scavenge_root_link(NULL);
- nm->clear_on_scavenge_root_list();
-}
-
-void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
- assert_locked_or_safepoint(CodeCache_lock);
-
- print_trace("drop_scavenge_root", nm);
- nmethod* prev = NULL;
- for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
- if (cur == nm) {
- unlink_scavenge_root_nmethod(cur, prev);
- return;
- }
- prev = cur;
- }
- assert(false, "should have been on list");
-}
-
-void CodeCache::prune_scavenge_root_nmethods() {
- assert_locked_or_safepoint(CodeCache_lock);
-
- debug_only(mark_scavenge_root_nmethods());
-
- nmethod* last = NULL;
- nmethod* cur = scavenge_root_nmethods();
- while (cur != NULL) {
- nmethod* next = cur->scavenge_root_link();
- debug_only(cur->clear_scavenge_root_marked());
- assert(cur->scavenge_root_not_marked(), "");
- assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
-
- if (!cur->is_zombie() && !cur->is_unloaded()
- && cur->detect_scavenge_root_oops()) {
- // Keep it. Advance 'last' to prevent deletion.
- last = cur;
- } else {
- // Prune it from the list, so we don't have to look at it any more.
- print_trace("prune_scavenge_root", cur);
- unlink_scavenge_root_nmethod(cur, last);
- }
- cur = next;
- }
-
- // Check for stray marks.
- debug_only(verify_perm_nmethods(NULL));
-}
-
-#ifndef PRODUCT
-void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
- // While we are here, verify the integrity of the list.
- mark_scavenge_root_nmethods();
- for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
- assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
- cur->clear_scavenge_root_marked();
- }
- verify_perm_nmethods(f);
-}
-
-// Temporarily mark nmethods that are claimed to be on the scavenge list.
-void CodeCache::mark_scavenge_root_nmethods() {
- NMethodIterator iter(NMethodIterator::only_alive);
- while(iter.next()) {
- nmethod* nm = iter.method();
- assert(nm->scavenge_root_not_marked(), "clean state");
- if (nm->on_scavenge_root_list())
- nm->set_scavenge_root_marked();
- }
-}
-
-// If the closure is given, run it on the unlisted nmethods.
-// Also make sure that the effects of mark_scavenge_root_nmethods is gone.
-void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
- NMethodIterator iter(NMethodIterator::only_alive);
- while(iter.next()) {
- nmethod* nm = iter.method();
- bool call_f = (f_or_null != NULL);
- assert(nm->scavenge_root_not_marked(), "must be already processed");
- if (nm->on_scavenge_root_list())
- call_f = false; // don't show this one to the client
- Universe::heap()->verify_nmethod(nm);
- if (call_f) f_or_null->do_code_blob(nm);
- }
-}
-#endif //PRODUCT
-
void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
@@ -929,12 +767,6 @@
_exception_cache_purge_list = NULL;
}
-void CodeCache::gc_prologue() { }
-
-void CodeCache::gc_epilogue() {
- prune_scavenge_root_nmethods();
-}
-
uint8_t CodeCache::_unloading_cycle = 1;
void CodeCache::increment_unloading_cycle() {
--- a/src/hotspot/share/code/codeCache.hpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/code/codeCache.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -94,14 +94,10 @@
static address _low_bound; // Lower bound of CodeHeap addresses
static address _high_bound; // Upper bound of CodeHeap addresses
static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies
- static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
static uint8_t _unloading_cycle; // Global state for recognizing old nmethods that need to be unloaded
static ExceptionCache* volatile _exception_cache_purge_list;
- static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
- static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
-
// CodeHeap management
static void initialize_heaps(); // Initializes the CodeHeaps
// Check the code heap sizes set by the user via command line
@@ -124,10 +120,6 @@
static int allocated_segments();
static size_t freelists_length();
- static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
- static void prune_scavenge_root_nmethods();
- static void unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev);
-
// Make private to prevent unsafe calls. Not all CodeBlob*'s are embedded in a CodeHeap.
static bool contains(CodeBlob *p) { fatal("don't call me!"); return false; }
@@ -171,8 +163,6 @@
static int nmethod_count(int code_blob_type);
// GC support
- static void gc_epilogue();
- static void gc_prologue();
static void verify_oops();
// If any oops are not marked this method unloads (i.e., breaks root links
// to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
@@ -189,25 +179,9 @@
static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
static uint8_t unloading_cycle() { return _unloading_cycle; }
static void increment_unloading_cycle();
- static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
static void release_exception_cache(ExceptionCache* entry);
static void purge_exception_caches();
- // Apply f to every live code blob in scavengable nmethods. Prune nmethods
- // from the list of scavengable nmethods if f->fix_relocations() and a nmethod
- // no longer has scavengable oops. If f->fix_relocations(), then f must copy
- // objects to their new location immediately to avoid fixing nmethods on the
- // basis of the old object locations.
- static void scavenge_root_nmethods_do(CodeBlobToOopClosure* f);
-
- static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
- // register_scavenge_root_nmethod() conditionally adds the nmethod to the list
- // if it is not already on the list and has a scavengeable root
- static void register_scavenge_root_nmethod(nmethod* nm);
- static void verify_scavenge_root_nmethod(nmethod* nm);
- static void add_scavenge_root_nmethod(nmethod* nm);
- static void drop_scavenge_root_nmethod(nmethod* nm);
-
// Printing/debugging
static void print(); // prints summary
static void print_internals();
--- a/src/hotspot/share/code/compiledMethod.hpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/code/compiledMethod.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -408,10 +408,6 @@
PcDesc* find_pc_desc(address pc, bool approximate) {
return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
}
-
-protected:
- // Used by some GCs to chain nmethods.
- nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
};
#endif // SHARE_CODE_COMPILEDMETHOD_HPP
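Note for readers of this excerpt: the replacement storage for the removed
_scavenge_root_link is not visible above. The new ScavengableNMethodsData
(added below) packs its link and state into an opaque per-nmethod slot that it
reaches through _nm->gc_data<void>() and _nm->set_gc_data(...). A minimal
stand-in for that slot, with the accessor shapes inferred from those call
sites (the struct and field name here are assumptions for illustration, not
this patch's code):

    // Hypothetical stand-in for the opaque GC data slot that
    // ScavengableNMethodsData expects to find on CompiledMethod/nmethod.
    struct FakeNMethod {
      void* _gc_data = nullptr;   // owned and interpreted by the GC only

      template <typename T>
      T* gc_data() const { return reinterpret_cast<T*>(_gc_data); }

      template <typename T>
      void set_gc_data(T* data) { _gc_data = reinterpret_cast<void*>(data); }
    };

Keeping the slot a plain void* is what lets the shared runtime stay ignorant
of how each GC interprets it.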
--- a/src/hotspot/share/code/nmethod.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/code/nmethod.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -423,8 +423,6 @@
_oops_do_mark_link = NULL;
_jmethod_id = NULL;
_osr_link = NULL;
- _scavenge_root_link = NULL;
- _scavenge_root_state = 0;
#if INCLUDE_RTM_OPT
_rtm_state = NoRTM;
#endif
@@ -1360,10 +1358,6 @@
ec = next;
}
- if (on_scavenge_root_list()) {
- CodeCache::drop_scavenge_root_nmethod(this);
- }
-
#if INCLUDE_JVMCI
assert(_jvmci_installed_code == NULL, "should have been nulled out when transitioned to zombie");
assert(_speculation_log == NULL, "should have been nulled out when transitioned to zombie");
@@ -1777,44 +1771,6 @@
log_trace(gc, nmethod)("oops_do_marking_epilogue");
}
-class DetectScavengeRoot: public OopClosure {
- bool _detected_scavenge_root;
- nmethod* _print_nm;
-public:
- DetectScavengeRoot(nmethod* nm) : _detected_scavenge_root(false), _print_nm(nm) {}
-
- bool detected_scavenge_root() { return _detected_scavenge_root; }
- virtual void do_oop(oop* p) {
- if ((*p) != NULL && Universe::heap()->is_scavengable(*p)) {
- NOT_PRODUCT(maybe_print(p));
- _detected_scavenge_root = true;
- }
- }
- virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-
-#ifndef PRODUCT
- void maybe_print(oop* p) {
- LogTarget(Trace, gc, nmethod) lt;
- if (lt.is_enabled()) {
- LogStream ls(lt);
- if (!_detected_scavenge_root) {
- CompileTask::print(&ls, _print_nm, "new scavenge root", /*short_form:*/ true);
- }
- ls.print("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ") ",
- p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
- p2i(*p), p2i(p));
- ls.cr();
- }
- }
-#endif //PRODUCT
-};
-
-bool nmethod::detect_scavenge_root_oops() {
- DetectScavengeRoot detect_scavenge_root(this);
- oops_do(&detect_scavenge_root);
- return detect_scavenge_root.detected_scavenge_root();
-}
-
inline bool includes(void* p, void* from, void* to) {
return from <= p && p < to;
}
@@ -2266,41 +2222,6 @@
// -----------------------------------------------------------------------------
-// Non-product code
-#ifndef PRODUCT
-
-class DebugScavengeRoot: public OopClosure {
- nmethod* _nm;
- bool _ok;
-public:
- DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
- bool ok() { return _ok; }
- virtual void do_oop(oop* p) {
- if ((*p) == NULL || !Universe::heap()->is_scavengable(*p)) return;
- if (_ok) {
- _nm->print_nmethod(true);
- _ok = false;
- }
- tty->print_cr("*** scavengable oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
- p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
- (*p)->print();
- }
- virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-
-void nmethod::verify_scavenge_root_oops() {
- if (!on_scavenge_root_list()) {
- // Actually look inside, to verify the claim that it's clean.
- DebugScavengeRoot debug_scavenge_root(this);
- oops_do(&debug_scavenge_root);
- if (!debug_scavenge_root.ok())
- fatal("found an unadvertised bad scavengable oop in the code cache");
- }
- assert(scavenge_root_not_marked(), "");
-}
-
-#endif // PRODUCT
-
// Printing operations
void nmethod::print() const {
@@ -2326,7 +2247,6 @@
tty->print(" for method " INTPTR_FORMAT , p2i(method()));
tty->print(" { ");
tty->print_cr("%s ", state());
- if (on_scavenge_root_list()) tty->print("scavenge_root ");
tty->print_cr("}:");
}
if (size () > 0) tty->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
--- a/src/hotspot/share/code/nmethod.hpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/code/nmethod.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -131,8 +131,6 @@
bool _oops_are_stale; // indicates that it's no longer safe to access oops section
#endif
- jbyte _scavenge_root_state;
-
#if INCLUDE_RTM_OPT
// RTM state at compile time. Used during deoptimization to decide
// whether to restart collecting RTM locking abort statistic again.
@@ -410,24 +408,6 @@
void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
- // Scavengable oop support
- bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
- protected:
- enum { sl_on_list = 0x01, sl_marked = 0x10 };
- void set_on_scavenge_root_list() { _scavenge_root_state = sl_on_list; }
- void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
- // assertion-checking and pruning logic uses the bits of _scavenge_root_state
-#ifndef PRODUCT
- void set_scavenge_root_marked() { _scavenge_root_state |= sl_marked; }
- void clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; }
- bool scavenge_root_not_marked() { return (_scavenge_root_state &~ sl_on_list) == 0; }
- // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
-#endif //PRODUCT
- nmethod* scavenge_root_link() const { return _scavenge_root_link; }
- void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
-
- public:
-
// Sweeper support
long stack_traversal_mark() { return _stack_traversal_mark; }
void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }
@@ -504,8 +484,6 @@
public:
void oops_do(OopClosure* f) { oops_do(f, false); }
void oops_do(OopClosure* f, bool allow_zombie);
- bool detect_scavenge_root_oops();
- void verify_scavenge_root_oops() PRODUCT_RETURN;
bool test_set_oops_do_mark();
static void oops_do_marking_prologue();
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -4193,9 +4193,6 @@
CMSHeap* heap = CMSHeap::heap();
- if (should_unload_classes()) {
- CodeCache::gc_prologue();
- }
assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock());
@@ -4251,7 +4248,7 @@
verify_overflow_empty();
if (should_unload_classes()) {
- CodeCache::gc_epilogue();
+ heap->prune_nmethods();
}
JvmtiExport::gc_epilogue();
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -88,11 +88,6 @@
return _space->is_in(p);
}
- virtual bool is_scavengable(oop obj) {
- // No GC is going to happen, therefore no objects ever move.
- return false;
- }
-
virtual bool is_maximal_no_gc() const {
// No GC is going to happen. Return "we are at max", when we are about to fail.
return used() == capacity();
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -4796,13 +4796,6 @@
void do_oop(narrowOop* p) { do_oop_work(p); }
};
-// Returns true if the reference points to an object that
-// can move in an incremental collection.
-bool G1CollectedHeap::is_scavengable(oop obj) {
- HeapRegion* hr = heap_region_containing(obj);
- return !hr->is_pinned();
-}
-
void G1CollectedHeap::register_nmethod(nmethod* nm) {
guarantee(nm != NULL, "sanity");
RegisterNMethodOopClosure reg_cl(this, nm);
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -1307,9 +1307,6 @@
// Optimized nmethod scanning support routines
- // Is an oop scavengeable
- virtual bool is_scavengable(oop obj);
-
// Register the given nmethod with the G1 heap.
virtual void register_nmethod(nmethod* nm);
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -152,10 +152,6 @@
reference_processor()->enable_discovery();
reference_processor()->setup_policy(scope()->should_clear_soft_refs());
- // When collecting the permanent generation Method*s may be moving,
- // so we either have to flush all bcp data or convert it into bci.
- CodeCache::gc_prologue();
-
// We should save the marks of the currently locked biased monitors.
// The marking doesn't preserve the marks of biased objects.
BiasedLocking::preserve_marks();
@@ -187,7 +183,6 @@
update_derived_pointers();
BiasedLocking::restore_marks();
- CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
_heap->prepare_heap_for_mutators();
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -41,6 +41,7 @@
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
+#include "gc/shared/scavengableNMethods.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceCounters.hpp"
#include "oops/oop.inline.hpp"
@@ -150,6 +151,14 @@
}
+class PSIsScavengable : public BoolObjectClosure {
+ bool do_object_b(oop obj) {
+ return ParallelScavengeHeap::heap()->is_in_young(obj);
+ }
+};
+
+static PSIsScavengable _is_scavengable;
+
void ParallelScavengeHeap::post_initialize() {
CollectedHeap::post_initialize();
// Need to init the tenuring threshold
@@ -160,6 +169,8 @@
PSMarkSweepProxy::initialize();
}
PSPromotionManager::initialize();
+
+ ScavengableNMethods::initialize(&_is_scavengable);
}
void ParallelScavengeHeap::update_counters() {
@@ -693,16 +704,24 @@
}
#endif
-bool ParallelScavengeHeap::is_scavengable(oop obj) {
- return is_in_young(obj);
+void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
+ ScavengableNMethods::register_nmethod(nm);
}
-void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
- CodeCache::register_scavenge_root_nmethod(nm);
+void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
+ ScavengableNMethods::unregister_nmethod(nm);
}
void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
- CodeCache::verify_scavenge_root_nmethod(nm);
+ ScavengableNMethods::verify_nmethod(nm);
+}
+
+void ParallelScavengeHeap::flush_nmethod(nmethod* nm) {
+ ScavengableNMethods::flush_nmethod(nm);
+}
+
+void ParallelScavengeHeap::prune_nmethods() {
+ ScavengableNMethods::prune_nmethods();
}
GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
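ParallelScavengeHeap now supplies its notion of "scavengable" to the shared
list code as a closure instead of a virtual on CollectedHeap. A stand-alone
model of that handshake, compilable on its own; Oop, the closure type and the
registry below are simplified stand-ins, not HotSpot code:

    #include <cstdio>

    struct Oop { bool in_young; };

    // Abstract predicate, mirroring BoolObjectClosure's role.
    struct IsScavengableClosure {
      virtual bool do_object_b(Oop* obj) = 0;
      virtual ~IsScavengableClosure() {}
    };

    // Shared side: holds only the abstract predicate, knows no heap.
    static IsScavengableClosure* _is_scavengable = nullptr;
    static void initialize(IsScavengableClosure* p) { _is_scavengable = p; }
    static bool is_scavengable(Oop* o) { return _is_scavengable->do_object_b(o); }

    // Heap side: ParallelScavengeHeap's predicate is "is it in young?".
    struct PSIsScavengableModel : IsScavengableClosure {
      bool do_object_b(Oop* obj) { return obj->in_young; }
    };

    int main() {
      PSIsScavengableModel pred;
      initialize(&pred);              // what post_initialize() does above
      Oop young = {true}, old = {false};
      printf("%d %d\n", is_scavengable(&young), is_scavengable(&old)); // 1 0
      return 0;
    }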
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -157,13 +157,12 @@
// collection.
virtual bool is_maximal_no_gc() const;
- // Return true if the reference points to an object that
- // can be moved in a partial collection. For currently implemented
- // generational collectors that means during a collection of
- // the young gen.
- virtual bool is_scavengable(oop obj);
virtual void register_nmethod(nmethod* nm);
- virtual void verify_nmethod(nmethod* nmethod);
+ virtual void unregister_nmethod(nmethod* nm);
+ virtual void verify_nmethod(nmethod* nm);
+ virtual void flush_nmethod(nmethod* nm);
+
+ void prune_nmethods();
size_t max_capacity() const;
--- a/src/hotspot/share/gc/parallel/pcTasks.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/parallel/pcTasks.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -117,7 +117,7 @@
case code_cache:
// Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
- //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
+ //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
AOTLoader::oops_do(&mark_and_push_closure);
break;
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -187,7 +187,6 @@
// Let the size policy know we're starting
size_policy->major_collection_begin();
- CodeCache::gc_prologue();
BiasedLocking::preserve_marks();
// Capture metadata size before collection for sizing.
@@ -255,7 +254,7 @@
MetaspaceUtils::verify_metrics();
BiasedLocking::restore_marks();
- CodeCache::gc_epilogue();
+ heap->prune_nmethods();
JvmtiExport::gc_epilogue();
#if COMPILER2_OR_JVMCI
@@ -524,7 +523,7 @@
SystemDictionary::oops_do(mark_and_push_closure());
ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
// Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
- //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
+ //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
AOTLoader::oops_do(mark_and_push_closure());
}
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -1061,7 +1061,7 @@
ClassLoaderDataGraph::purge();
MetaspaceUtils::verify_metrics();
- CodeCache::gc_epilogue();
+ heap->prune_nmethods();
JvmtiExport::gc_epilogue();
#if COMPILER2_OR_JVMCI
@@ -1807,8 +1807,6 @@
// Let the size policy know we're starting
size_policy->major_collection_begin();
- CodeCache::gc_prologue();
-
#if COMPILER2_OR_JVMCI
DerivedPointerTable::clear();
#endif
--- a/src/hotspot/share/gc/parallel/psTasks.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/parallel/psTasks.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -28,12 +28,14 @@
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/gcTaskManager.hpp"
+#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psClosure.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
+#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
@@ -97,7 +99,7 @@
case code_cache:
{
MarkingCodeBlobClosure each_scavengable_code_blob(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
- CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
+ ScavengableNMethods::scavengable_nmethods_do(&each_scavengable_code_blob);
AOTLoader::oops_do(&roots_closure);
}
break;
--- a/src/hotspot/share/gc/serial/genMarkSweep.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -75,10 +75,6 @@
gch->trace_heap_before_gc(_gc_tracer);
- // When collecting the permanent generation Method*s may be moving,
- // so we either have to flush all bcp data or convert it into bci.
- CodeCache::gc_prologue();
-
// Increment the invocation count
_total_invocations++;
@@ -128,7 +124,7 @@
rs->invalidate_or_clear(old_gen);
}
- CodeCache::gc_epilogue();
+ gch->prune_nmethods();
JvmtiExport::gc_epilogue();
// refs processing: clean slate
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -509,9 +509,6 @@
void print_heap_before_gc();
void print_heap_after_gc();
- // An object is scavengable if its location may move during a scavenge.
- // (A scavenge is a GC which is not a full GC.)
- virtual bool is_scavengable(oop obj) = 0;
// Registering and unregistering an nmethod (compiled code) with the heap.
// Override with specific mechanism for each specialized heap type.
virtual void register_nmethod(nmethod* nm) {}
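With the list logic owned by the GC, CollectedHeap::is_scavengable() has no
remaining shared callers: the collectors that still need the predicate (Serial
and CMS through GenCollectedHeap, and Parallel) now express it as the
BoolObjectClosure handed to ScavengableNMethods::initialize(), and the other
heaps simply drop their overrides.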
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -47,6 +47,7 @@
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
+#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
@@ -175,6 +176,15 @@
return heap_rs->base();
}
+class GenIsScavengable : public BoolObjectClosure {
+public:
+ bool do_object_b(oop obj) {
+ return GenCollectedHeap::heap()->is_in_young(obj);
+ }
+};
+
+static GenIsScavengable _is_scavengable;
+
void GenCollectedHeap::post_initialize() {
CollectedHeap::post_initialize();
ref_processing_init();
@@ -186,6 +196,8 @@
def_new_gen->from()->capacity());
MarkSweep::initialize();
+
+ ScavengableNMethods::initialize(&_is_scavengable);
}
void GenCollectedHeap::ref_processing_init() {
@@ -699,11 +711,23 @@
}
void GenCollectedHeap::register_nmethod(nmethod* nm) {
- CodeCache::register_scavenge_root_nmethod(nm);
+ ScavengableNMethods::register_nmethod(nm);
+}
+
+void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
+ ScavengableNMethods::unregister_nmethod(nm);
}
void GenCollectedHeap::verify_nmethod(nmethod* nm) {
- CodeCache::verify_scavenge_root_nmethod(nm);
+ ScavengableNMethods::verify_nmethod(nm);
+}
+
+void GenCollectedHeap::flush_nmethod(nmethod* nm) {
+ ScavengableNMethods::flush_nmethod(nm);
+}
+
+void GenCollectedHeap::prune_nmethods() {
+ ScavengableNMethods::prune_nmethods();
}
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
@@ -847,7 +871,7 @@
assert(code_roots != NULL, "must supply closure for code cache");
// We only visit parts of the CodeCache when scavenging.
- CodeCache::scavenge_root_nmethods_do(code_roots);
+ ScavengableNMethods::scavengable_nmethods_do(code_roots);
}
if (so & SO_AllCodeCache) {
assert(code_roots != NULL, "must supply closure for code cache");
@@ -859,7 +883,7 @@
// Verify that the code cache contents are not subject to
// movement by a scavenging collection.
DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
- DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
+ DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}
}
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -249,13 +249,13 @@
bool is_in_partial_collection(const void* p);
#endif
- virtual bool is_scavengable(oop obj) {
- return is_in_young(obj);
- }
-
// Optimized nmethod scanning support routines
virtual void register_nmethod(nmethod* nm);
- virtual void verify_nmethod(nmethod* nmethod);
+ virtual void unregister_nmethod(nmethod* nm);
+ virtual void verify_nmethod(nmethod* nm);
+ virtual void flush_nmethod(nmethod* nm);
+
+ void prune_nmethods();
// Iteration functions.
void oop_iterate(OopIterateClosure* cl);
--- a/src/hotspot/share/gc/shared/parallelCleaning.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/shared/parallelCleaning.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -70,9 +70,6 @@
CodeCacheUnloadingTask::~CodeCacheUnloadingTask() {
CodeCache::verify_clean_inline_caches();
-
- guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
-
CodeCache::verify_icholder_relocations();
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/scavengableNMethods.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeCache.hpp"
+#include "code/nmethod.hpp"
+#include "compiler/compileTask.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/scavengableNMethods.hpp"
+#include "gc/shared/scavengableNMethodsData.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+#include "memory/universe.hpp"
+#include "utilities/debug.hpp"
+
+static ScavengableNMethodsData gc_data(nmethod* nm) {
+ return ScavengableNMethodsData(nm);
+}
+
+nmethod* ScavengableNMethods::_head = NULL;
+BoolObjectClosure* ScavengableNMethods::_is_scavengable = NULL;
+
+void ScavengableNMethods::initialize(BoolObjectClosure* is_scavengable) {
+ _is_scavengable = is_scavengable;
+}
+
+// Conditionally adds the nmethod to the list if it is
+// not already on the list and has at least one scavengable oop.
+void ScavengableNMethods::register_nmethod(nmethod* nm) {
+ assert_locked_or_safepoint(CodeCache_lock);
+
+ ScavengableNMethodsData data = gc_data(nm);
+
+ if (data.on_list() || !has_scavengable_oops(nm)) {
+ return;
+ }
+
+ data.set_on_list();
+ data.set_next(_head);
+
+ _head = nm;
+
+ CodeCache::print_trace("register_nmethod", nm);
+}
+
+void ScavengableNMethods::unregister_nmethod(nmethod* nm) {
+ // Do nothing. Unlinking is currently delayed until the purge phase.
+}
+
+#ifndef PRODUCT
+
+class DebugScavengableOops: public OopClosure {
+ BoolObjectClosure* _is_scavengable;
+ nmethod* _nm;
+ bool _ok;
+public:
+ DebugScavengableOops(BoolObjectClosure* is_scavengable, nmethod* nm) :
+ _is_scavengable(is_scavengable),
+ _nm(nm),
+ _ok(true) { }
+
+ bool ok() { return _ok; }
+ virtual void do_oop(oop* p) {
+ if (*p == NULL || !_is_scavengable->do_object_b(*p)) {
+ return;
+ }
+
+ if (_ok) {
+ _nm->print_nmethod(true);
+ _ok = false;
+ }
+ tty->print_cr("*** scavengable oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
+ p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
+ (*p)->print();
+ }
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+
+#endif // PRODUCT
+
+void ScavengableNMethods::verify_nmethod(nmethod* nm) {
+#ifndef PRODUCT
+ if (!gc_data(nm).on_list()) {
+ // Actually look inside, to verify the claim that it's clean.
+ DebugScavengableOops cl(_is_scavengable, nm);
+ nm->oops_do(&cl);
+ if (!cl.ok())
+ fatal("found an unadvertised bad scavengable oop in the code cache");
+ }
+ assert(gc_data(nm).not_marked(), "");
+#endif // PRODUCT
+}
+
+void ScavengableNMethods::flush_nmethod(nmethod* nm) {
+ assert_locked_or_safepoint(CodeCache_lock);
+
+ // TODO: Should be done in unregister_nmethod, during the "unlink" phase.
+ if (gc_data(nm).on_list()) {
+ CodeCache::print_trace("flush_nmethod", nm);
+ nmethod* prev = NULL;
+ for (nmethod* cur = _head; cur != NULL; cur = gc_data(cur).next()) {
+ if (cur == nm) {
+ unlist_nmethod(cur, prev);
+ return;
+ }
+ prev = cur;
+ }
+ }
+}
+
+class HasScavengableOops: public OopClosure {
+ BoolObjectClosure* _is_scavengable;
+ bool _found;
+ nmethod* _print_nm;
+public:
+ HasScavengableOops(BoolObjectClosure* is_scavengable, nmethod* nm) :
+ _is_scavengable(is_scavengable),
+ _found(false),
+ _print_nm(nm) {}
+
+ bool found() { return _found; }
+ virtual void do_oop(oop* p) {
+ if (*p != NULL && _is_scavengable->do_object_b(*p)) {
+ NOT_PRODUCT(maybe_print(p));
+ _found = true;
+ }
+ }
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
+#ifndef PRODUCT
+ void maybe_print(oop* p) {
+ LogTarget(Trace, gc, nmethod) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ if (!_found) {
+ CompileTask::print(&ls, _print_nm, "new scavengable oop", /*short_form:*/ true);
+ }
+ ls.print("" PTR_FORMAT "[offset=%d] found scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ") ",
+ p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
+ p2i(*p), p2i(p));
+ ls.cr();
+ }
+ }
+#endif //PRODUCT
+};
+
+bool ScavengableNMethods::has_scavengable_oops(nmethod* nm) {
+ HasScavengableOops cl(_is_scavengable, nm);
+ nm->oops_do(&cl);
+ return cl.found();
+}
+
+// Walk the list of nmethods which might contain oops into the Java heap.
+void ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure* f) {
+ assert_locked_or_safepoint(CodeCache_lock);
+
+ const bool fix_relocations = f->fix_relocations();
+ debug_only(mark_on_list_nmethods());
+
+ nmethod* prev = NULL;
+ nmethod* cur = _head;
+ while (cur != NULL) {
+ ScavengableNMethodsData data = gc_data(cur);
+ debug_only(data.clear_marked());
+ assert(data.not_marked(), "");
+ assert(data.on_list(), "else shouldn't be on this list");
+
+ bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
+ LogTarget(Trace, gc, nmethod) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ CompileTask::print(&ls, cur,
+ is_live ? "scavengable root " : "dead scavengable root", /*short_form:*/ true);
+ }
+ if (is_live) {
+ // Perform cur->oops_do(f), maybe just once per nmethod.
+ f->do_code_blob(cur);
+ }
+ nmethod* const next = data.next();
+ // The scavengable nmethod list must contain all methods with scavengable
+ // oops. It is safe to include more nmethods on the list, but we do not
+ // expect any live non-scavengable nmethods on the list.
+ if (fix_relocations) {
+ if (!is_live || !has_scavengable_oops(cur)) {
+ unlist_nmethod(cur, prev);
+ } else {
+ prev = cur;
+ }
+ }
+ cur = next;
+ }
+
+ // Check for stray marks.
+ debug_only(verify_unlisted_nmethods(NULL));
+}
+
+#ifndef PRODUCT
+void ScavengableNMethods::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
+ // While we are here, verify the integrity of the list.
+ mark_on_list_nmethods();
+ for (nmethod* cur = _head; cur != NULL; cur = gc_data(cur).next()) {
+ assert(gc_data(cur).on_list(), "else shouldn't be on this list");
+ gc_data(cur).clear_marked();
+ }
+ verify_unlisted_nmethods(f);
+}
+#endif // PRODUCT
+
+void ScavengableNMethods::unlist_nmethod(nmethod* nm, nmethod* prev) {
+ assert_locked_or_safepoint(CodeCache_lock);
+
+ assert((prev == NULL && _head == nm) ||
+ (prev != NULL && gc_data(prev).next() == nm), "precondition");
+
+ CodeCache::print_trace("unlist_nmethod", nm);
+
+ ScavengableNMethodsData data = gc_data(nm);
+
+ if (prev == NULL) {
+ _head = data.next();
+ } else {
+ gc_data(prev).set_next(data.next());
+ }
+ data.set_next(NULL);
+ data.clear_on_list();
+}
+
+void ScavengableNMethods::prune_nmethods() {
+ assert_locked_or_safepoint(CodeCache_lock);
+
+ debug_only(mark_on_list_nmethods());
+
+ nmethod* last = NULL;
+ nmethod* cur = _head;
+ while (cur != NULL) {
+ nmethod* next = gc_data(cur).next();
+ debug_only(gc_data(cur).clear_marked());
+ assert(gc_data(cur).on_list(), "else shouldn't be on this list");
+
+ if (!cur->is_zombie() && !cur->is_unloaded() && has_scavengable_oops(cur)) {
+ // Keep it. Advance 'last' to prevent deletion.
+ last = cur;
+ } else {
+ // Prune it from the list, so we don't have to look at it any more.
+ CodeCache::print_trace("prune_nmethods", cur);
+ unlist_nmethod(cur, last);
+ }
+ cur = next;
+ }
+
+ // Check for stray marks.
+ debug_only(verify_unlisted_nmethods(NULL));
+}
+
+#ifndef PRODUCT
+// Temporarily mark nmethods that are claimed to be on the scavenge list.
+void ScavengableNMethods::mark_on_list_nmethods() {
+ NMethodIterator iter(NMethodIterator::only_alive);
+ while(iter.next()) {
+ nmethod* nm = iter.method();
+ ScavengableNMethodsData data = gc_data(nm);
+ assert(data.not_marked(), "clean state");
+ if (data.on_list())
+ data.set_marked();
+ }
+}
+
+// If the closure is given, run it on the unlisted nmethods.
+// Also make sure that the effects of mark_on_list_nmethods are gone.
+void ScavengableNMethods::verify_unlisted_nmethods(CodeBlobClosure* f_or_null) {
+ NMethodIterator iter(NMethodIterator::only_alive);
+ while(iter.next()) {
+ nmethod* nm = iter.method();
+
+ verify_nmethod(nm);
+
+ if (f_or_null != NULL && !gc_data(nm).on_list()) {
+ f_or_null->do_code_blob(nm);
+ }
+ }
+}
+
+#endif //PRODUCT
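unlist_nmethod() serves three callers (flush_nmethod, scavengable_nmethods_do
and prune_nmethods), each of which threads its own prev cursor through the
walk. A stand-alone model of the unlink contract it asserts, with Node
standing in for nmethod:

    #include <cassert>
    #include <cstddef>

    // prev == NULL means nm is the head; otherwise prev must link to nm.
    struct Node { Node* next; bool on_list; };

    static Node* _head = NULL;

    static void push(Node* n) {      // mirrors register_nmethod's head insert
      n->on_list = true;
      n->next = _head;
      _head = n;
    }

    static void unlist(Node* nm, Node* prev) {
      assert((prev == NULL && _head == nm) ||
             (prev != NULL && prev->next == nm));
      if (prev == NULL) {
        _head = nm->next;            // unlink the head
      } else {
        prev->next = nm->next;       // splice around an interior node
      }
      nm->next = NULL;
      nm->on_list = false;
    }

    int main() {
      Node a = {}, b = {}, c = {};
      push(&a); push(&b); push(&c);  // list is now c -> b -> a
      unlist(&b, &c);                // interior: c re-linked to a
      assert(c.next == &a);
      unlist(&c, NULL);              // head: _head advances to a
      assert(_head == &a && !c.on_list);
      return 0;
    }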
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/scavengableNMethods.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_SCAVENGABLENMETHODS_HPP
+#define SHARE_GC_SHARED_SCAVENGABLENMETHODS_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/macros.hpp"
+
+class BoolObjectClosure;
+class CodeBlobClosure;
+class CodeBlobToOopClosure;
+class nmethod;
+
+class ScavengableNMethods : public AllStatic {
+ friend class VMStructs;
+
+ static nmethod* _head;
+ static BoolObjectClosure* _is_scavengable;
+
+public:
+ static void initialize(BoolObjectClosure* is_scavengable);
+
+ static void register_nmethod(nmethod* nm);
+ static void unregister_nmethod(nmethod* nm);
+ static void verify_nmethod(nmethod* nm);
+ static void flush_nmethod(nmethod* nm);
+
+ static void prune_nmethods();
+
+ // Apply f to every live code blob in scavengable nmethods. Prune nmethods
+ // from the list of scavengable nmethods if f->fix_relocations() and an nmethod
+ // no longer has scavengable oops. If f->fix_relocations(), then f must copy
+ // objects to their new location immediately to avoid fixing nmethods on the
+ // basis of the old object locations.
+ static void scavengable_nmethods_do(CodeBlobToOopClosure* f);
+
+ static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
+
+private:
+ static void unlist_nmethod(nmethod* nm, nmethod* prev);
+
+ static bool has_scavengable_oops(nmethod* nm);
+
+ static void mark_on_list_nmethods() PRODUCT_RETURN;
+ static void verify_unlisted_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
+};
+
+#endif // SHARE_GC_SHARED_SCAVENGABLENMETHODS_HPP
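The fix_relocations contract documented above is load-bearing in
scavengable_nmethods_do(): after f->do_code_blob(cur) runs, the walk
re-evaluates has_scavengable_oops(cur) to decide whether cur may leave the
list. If f rewrote the embedded oops before their referents were copied to
their final addresses, that re-check would look through stale locations and
could unlist an nmethod that still points into the young generation.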
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/scavengableNMethodsData.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_SCAVENGABLENMETHODSDATA_HPP
+#define SHARE_GC_SHARED_SCAVENGABLENMETHODSDATA_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class nmethod;
+
+class ScavengableNMethodsData : public CHeapObj<mtGC> {
+ // State bits put into the two lower alignment bits.
+ static const uintptr_t state_bits = 2;
+ static const uintptr_t state_mask = (1 << state_bits) - 1;
+ static const uintptr_t state_on_list = 0x1;
+ static const uintptr_t state_marked = 0x2;
+
+ // nmethod containing the GC data.
+ nmethod* const _nm;
+
+ // The data is stored as a bit pattern in a void* inside the nmethod.
+ uintptr_t data() const { return reinterpret_cast<uintptr_t>(_nm->gc_data<void>()); }
+ void set_data(uintptr_t data) const { _nm->set_gc_data(reinterpret_cast<void*>(data)); }
+
+ jbyte state() const { return data() & state_mask; }
+ void set_state(jbyte state) const { set_data((data() & ~state_mask) | state); }
+
+ uintptr_t from_nmethod(nmethod* nm) const { return reinterpret_cast<uintptr_t>(nm); }
+ nmethod* to_nmethod(uintptr_t data) const { return reinterpret_cast<nmethod*>(data); }
+
+public:
+ ScavengableNMethodsData(nmethod* nm) : _nm(nm) {
+ assert(is_aligned(nm, 4), "Must be aligned to fit state bits");
+ }
+
+ // Scavengable oop support
+ bool on_list() const { return (state() & state_on_list) != 0; }
+ void set_on_list() { set_state(state_on_list); }
+ void clear_on_list() { set_state(0); }
+
+#ifndef PRODUCT
+ void set_marked() { set_state(state() | state_marked); }
+ void clear_marked() { set_state(state() & ~state_marked); }
+ bool not_marked() { return (state() & ~state_on_list) == 0; }
+ // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
+#endif //PRODUCT
+
+ nmethod* next() const { return to_nmethod(data() & ~state_mask); }
+ void set_next(nmethod *n) { set_data(from_nmethod(n) | state()); }
+};
+
+#endif // SHARE_GC_SHARED_SCAVENGABLENMETHODSDATA_HPP
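Where the old code spent a separate _scavenge_root_link pointer and a
_scavenge_root_state byte per nmethod, the link and state now share one word.
A stand-alone sketch of the packing; Node stands in for nmethod and the
constants mirror the ones above:

    #include <cassert>
    #include <cstdint>

    static const uintptr_t state_mask    = 0x3;  // two low alignment bits
    static const uintptr_t state_on_list = 0x1;
    static const uintptr_t state_marked  = 0x2;

    struct alignas(4) Node { int payload; };

    int main() {
      Node a, b;
      // Pack: the link to 'b' and the on-list bit share one word.
      uintptr_t data = reinterpret_cast<uintptr_t>(&b) | state_on_list;

      // Unpack: mask the state bits off to recover next() ...
      assert(reinterpret_cast<Node*>(data & ~state_mask) == &b);
      // ... or mask the pointer off to recover state().
      assert((data & state_mask) == state_on_list);
      assert((data & state_marked) == 0);

      // set_next() swaps the link while preserving the state bits.
      data = reinterpret_cast<uintptr_t>(&a) | (data & state_mask);
      assert(reinterpret_cast<Node*>(data & ~state_mask) == &a);
      return 0;
    }

The alignas(4) matches the assert in the constructor above: the two state bits
fit only because nmethods are at least 4-byte aligned, so the low bits of a
real link are always zero.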
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -559,9 +559,6 @@
size_t obj_size(oop obj) const;
virtual ptrdiff_t cell_header_size() const;
- // All objects can potentially move
- bool is_scavengable(oop obj) { return true; };
-
void collect(GCCause::Cause cause);
void do_full_collection(bool clear_all_soft_refs);
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -123,8 +123,6 @@
heap->make_parsable(true);
- CodeCache::gc_prologue();
-
OrderAccess::fence();
phase1_mark_heap();
@@ -168,7 +166,6 @@
}
FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
- CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
heap->set_full_gc_move_in_progress(false);
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -106,10 +106,6 @@
return false;
}
-bool ZCollectedHeap::is_scavengable(oop obj) {
- return false;
-}
-
bool ZCollectedHeap::is_in(const void* p) const {
return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
}
--- a/src/hotspot/share/gc/z/zCollectedHeap.hpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp Thu Mar 14 09:15:51 2019 +0100
@@ -71,7 +71,6 @@
virtual size_t used() const;
virtual bool is_maximal_no_gc() const;
- virtual bool is_scavengable(oop obj);
virtual bool is_in(const void* p) const;
virtual bool is_in_closed_subset(const void* p) const;
--- a/src/hotspot/share/runtime/vmStructs.cpp Thu Mar 14 09:14:20 2019 +0100
+++ b/src/hotspot/share/runtime/vmStructs.cpp Thu Mar 14 09:15:51 2019 +0100
@@ -539,7 +539,6 @@
static_field(CodeCache, _heaps, GrowableArray<CodeHeap*>*) \
static_field(CodeCache, _low_bound, address) \
static_field(CodeCache, _high_bound, address) \
- static_field(CodeCache, _scavenge_root_nmethods, nmethod*) \
\
/*******************************/ \
/* CodeHeap (NOTE: incomplete) */ \
@@ -681,8 +680,6 @@
\
nonstatic_field(nmethod, _entry_bci, int) \
nonstatic_field(nmethod, _osr_link, nmethod*) \
- nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \
- nonstatic_field(nmethod, _scavenge_root_state, jbyte) \
nonstatic_field(nmethod, _state, volatile signed char) \
nonstatic_field(nmethod, _exception_offset, int) \
nonstatic_field(nmethod, _orig_pc_offset, int) \
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeCache.java Thu Mar 14 09:14:20 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeCache.java Thu Mar 14 09:15:51 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,6 @@
public class CodeCache {
private static GrowableArray<CodeHeap> heapArray;
- private static AddressField scavengeRootNMethodsField;
private static VirtualConstructor virtualConstructor;
static {
@@ -56,8 +55,6 @@
AddressField heapsField = type.getAddressField("_heaps");
heapArray = GrowableArray.create(heapsField.getValue(), heapConstructor);
- scavengeRootNMethodsField = type.getAddressField("_scavenge_root_nmethods");
-
virtualConstructor = new VirtualConstructor(db);
// Add mappings for all possible CodeBlob subclasses
virtualConstructor.addMapping("BufferBlob", BufferBlob.class);
@@ -73,10 +70,6 @@
}
}
- public NMethod scavengeRootMethods() {
- return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootNMethodsField.getValue());
- }
-
public boolean contains(Address p) {
for (int i = 0; i < heapArray.length(); ++i) {
if (heapArray.at(i).contains(p)) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java Thu Mar 14 09:14:20 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/NMethod.java Thu Mar 14 09:15:51 2019 +0100
@@ -38,8 +38,6 @@
private static CIntegerField entryBCIField;
/** To support simple linked-list chaining of nmethods */
private static AddressField osrLinkField;
- private static AddressField scavengeRootLinkField;
- private static JByteField scavengeRootStateField;
/** Offsets for different nmethod parts */
private static CIntegerField exceptionOffsetField;
@@ -88,8 +86,6 @@
entryBCIField = type.getCIntegerField("_entry_bci");
osrLinkField = type.getAddressField("_osr_link");
- scavengeRootLinkField = type.getAddressField("_scavenge_root_link");
- scavengeRootStateField = type.getJByteField("_scavenge_root_state");
exceptionOffsetField = type.getCIntegerField("_exception_offset");
origPCOffsetField = type.getCIntegerField("_orig_pc_offset");
@@ -251,14 +247,6 @@
return (NMethod) VMObjectFactory.newObject(NMethod.class, osrLinkField.getValue(addr));
}
- public NMethod getScavengeRootLink() {
- return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootLinkField.getValue(addr));
- }
-
- public int getScavengeRootState() {
- return (int) scavengeRootStateField.getValue(addr);
- }
-
// MethodHandle
public boolean isMethodHandleReturn(Address returnPc) {
// Hard to read a bit fields from Java and it's only there for performance
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbPrintStatics.java Thu Mar 14 09:14:20 2019 +0100
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbPrintStatics.java Thu Mar 14 09:15:51 2019 +0100
@@ -60,8 +60,7 @@
"ClassLoaderDataGraph::_head",
"JNIHandles::_weak_global_handles", "PerfMemory::_top",
"ObjectSynchronizer::gBlockList",
- "java_lang_Class::_oop_size_offset",
- "CodeCache::_scavenge_root_nmethods"));
+ "java_lang_Class::_oop_size_offset"));
expStrMap.put("printstatics SystemDictionary", List.of(
"Static fields of SystemDictionary",
"SystemDictionary::Class_klass_knum",