8038212: Method::is_valid_method() check has performance regression impact for stackwalking
Summary: Prune metaspace virtual spaces only at a safepoint, so that walking them outside a safepoint is safe.
Reviewed-by: mgerdin, mgronlun, hseigel, stefank
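
The fix is a two-phase hand-off: the CMS sweeper no longer purges the ClassLoaderDataGraph itself, it only sets _should_purge, and the actual purge runs during the next safepoint cleanup, so lock-free metaspace walkers running between safepoints never race with virtual space deletion. A minimal sketch of that pattern outside HotSpot, with illustrative names (Graph, at_safepoint, and main are stand-ins, not HotSpot APIs):

    #include <atomic>
    #include <cassert>

    // Illustrative stand-in for ClassLoaderDataGraph: deletion of nodes is
    // deferred from the concurrent sweep to the next stop-the-world pause.
    struct Graph {
      std::atomic<bool> should_purge{false};
      bool at_safepoint = false;            // simplified safepoint state

      void set_should_purge(bool b) { should_purge.store(b); }

      void purge() {
        // Nodes may only be unlinked and freed here, while readers are stopped.
        assert(at_safepoint && "purge must run at a safepoint");
        // ... unlink and delete empty nodes ...
      }

      // Mirrors ClassLoaderDataGraph::purge_if_needed() in the patch below.
      void purge_if_needed() {
        if (should_purge.exchange(false)) {  // reset the flag for next time
          purge();
        }
      }
    };

    int main() {
      Graph g;
      g.set_should_purge(true);   // the concurrent sweep finished
      g.at_safepoint = true;      // the VM reaches the next safepoint
      g.purge_if_needed();        // pruning happens here, not during the sweep
      return 0;
    }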
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Thu May 15 18:23:26 2014 -0400
@@ -549,6 +549,8 @@
ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
+bool ClassLoaderDataGraph::_should_purge = false;
+
// Add a new class loader data node to the list. Assign the newly created
// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
@@ -675,32 +677,6 @@
return array;
}
-// For profiling and hsfind() only. Otherwise, this is unsafe (and slow). This
-// is done lock free to avoid lock inversion problems. It is safe because
-// new ClassLoaderData are added to the end of the CLDG, and only removed at
-// safepoint. The _unloading list can be deallocated concurrently with CMS so
-// this doesn't look in metaspace for classes that have been unloaded.
-bool ClassLoaderDataGraph::contains(const void* x) {
- if (DumpSharedSpaces) {
- // There are only two metaspaces to worry about.
- ClassLoaderData* ncld = ClassLoaderData::the_null_class_loader_data();
- return (ncld->ro_metaspace()->contains(x) || ncld->rw_metaspace()->contains(x));
- }
-
- if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(x)) {
- return true;
- }
-
- for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
- if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
- return true;
- }
- }
-
- // Do not check unloading list because deallocation can be concurrent.
- return false;
-}
-
#ifndef PRODUCT
bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
@@ -759,6 +735,7 @@
}
void ClassLoaderDataGraph::purge() {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
ClassLoaderData* list = _unloading;
_unloading = NULL;
ClassLoaderData* next = list;
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp Thu May 15 18:23:26 2014 -0400
@@ -66,6 +66,7 @@
static ClassLoaderData* _unloading;
// CMS support.
static ClassLoaderData* _saved_head;
+ static bool _should_purge;
static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
static void post_class_unload_events(void);
@@ -87,12 +88,20 @@
static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
static GrowableArray<ClassLoaderData*>* new_clds();
+ static void set_should_purge(bool b) { _should_purge = b; }
+ static void purge_if_needed() {
+ // Only purge the CLDG for CMS once the concurrent sweep is complete.
+ if (_should_purge) {
+ purge();
+ // Reset for the next concurrent sweep.
+ set_should_purge(false);
+ }
+ }
+
static void dump_on(outputStream * const out) PRODUCT_RETURN;
static void dump() { dump_on(tty); }
static void verify();
- // expensive test for pointer in metaspace for debugging
- static bool contains(const void* x);
#ifndef PRODUCT
static bool contains_loader_data(ClassLoaderData* loader_data);
#endif
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu May 15 18:23:26 2014 -0400
@@ -6362,7 +6362,9 @@
verify_overflow_empty();
if (should_unload_classes()) {
- ClassLoaderDataGraph::purge();
+ // Delay the purge until the beginning of the next safepoint, because
+ // Metaspace::contains requires the virtual spaces to be stable and not deleted.
+ ClassLoaderDataGraph::set_should_purge(true);
}
_intra_sweep_timer.stop();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu May 15 18:23:26 2014 -0400
@@ -5401,7 +5401,7 @@
if (_g1h->is_in_g1_reserved(p)) {
_par_scan_state->push_on_queue(p);
} else {
- assert(!ClassLoaderDataGraph::contains((address)p),
+ assert(!Metaspace::contains((const void*)p),
err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
PTR_FORMAT, p));
_copy_non_heap_obj_cl->do_oop(p);
--- a/hotspot/src/share/vm/memory/allocation.cpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/memory/allocation.cpp Thu May 15 18:23:26 2014 -0400
@@ -75,7 +75,7 @@
}
bool MetaspaceObj::is_metaspace_object() const {
- return ClassLoaderDataGraph::contains((void*)this);
+ return Metaspace::contains((void*)this);
}
void MetaspaceObj::print_address_on(outputStream* st) const {
--- a/hotspot/src/share/vm/memory/metaspace.cpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Thu May 15 18:23:26 2014 -0400
@@ -316,6 +316,8 @@
MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
+ bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
+
size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
@@ -557,6 +559,8 @@
void inc_virtual_space_count();
void dec_virtual_space_count();
+ bool contains(const void* ptr);
+
// Unlink empty VirtualSpaceNodes and free it.
void purge(ChunkManager* chunk_manager);
@@ -641,8 +645,6 @@
// Accessors
Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
- // ensure lock-free iteration sees fully initialized node
- OrderAccess::storestore();
_chunks_in_use[index] = v;
}
@@ -757,8 +759,6 @@
void print_on(outputStream* st) const;
void locked_print_chunks_in_use_on(outputStream* st) const;
- bool contains(const void *ptr);
-
void verify();
void verify_chunk_size(Metachunk* chunk);
NOT_PRODUCT(void mangle_freed_chunks();)
@@ -1078,6 +1078,7 @@
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
assert_lock_strong(SpaceManager::expand_lock());
// Don't use a VirtualSpaceListIterator because this
// list is being changed and a straightforward use of an iterator is not safe.
@@ -1111,8 +1112,8 @@
}
#ifdef ASSERT
if (purged_vsl != NULL) {
- // List should be stable enough to use an iterator here.
- VirtualSpaceListIterator iter(virtual_space_list());
+ // List should be stable enough to use an iterator here.
+ VirtualSpaceListIterator iter(virtual_space_list());
while (iter.repeat()) {
VirtualSpaceNode* vsl = iter.get_next();
assert(vsl != purged_vsl, "Purge of vsl failed");
@@ -1121,6 +1122,23 @@
#endif
}
+
+// This function walks the mmapped regions in the metaspace without locking.
+// The virtual space nodes are linked with store ordering and are only deleted
+// during class unloading at a safepoint.
+bool VirtualSpaceList::contains(const void* ptr) {
+ // List should be stable enough to use an iterator here because removing virtual
+ // space nodes is only allowed at a safepoint.
+ VirtualSpaceListIterator iter(virtual_space_list());
+ while (iter.repeat()) {
+ VirtualSpaceNode* vsn = iter.get_next();
+ if (vsn->contains(ptr)) {
+ return true;
+ }
+ }
+ return false;
+}
+
void VirtualSpaceList::retire_current_virtual_space() {
assert_lock_strong(SpaceManager::expand_lock());
@@ -1210,6 +1228,8 @@
} else {
assert(new_entry->reserved_words() == vs_word_size,
"Reserved memory size differs from requested memory size");
+ // Ensure that a lock-free iteration sees a fully initialized node.
+ OrderAccess::storestore();
link_vs(new_entry);
return true;
}
@@ -2434,21 +2454,6 @@
return result;
}
-// This function looks at the chunks in the metaspace without locking.
-// The chunks are added with store ordering and not deleted except for at
-// unloading time.
-bool SpaceManager::contains(const void *ptr) {
- for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i))
- {
- Metachunk* curr = chunks_in_use(i);
- while (curr != NULL) {
- if (curr->contains(ptr)) return true;
- curr = curr->next();
- }
- }
- return false;
-}
-
void SpaceManager::verify() {
// If there are blocks in the dictionary, then
// verification of chunks does not work since
@@ -3538,11 +3543,15 @@
}
bool Metaspace::contains(const void* ptr) {
- if (vsm()->contains(ptr)) return true;
- if (using_class_space()) {
- return class_vsm()->contains(ptr);
+ if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
+ return true;
}
- return false;
+
+ if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
+ return true;
+ }
+
+ return get_space_list(NonClassType)->contains(ptr);
}
void Metaspace::verify() {
@@ -3787,5 +3796,4 @@
TestVirtualSpaceNodeTest::test();
TestVirtualSpaceNodeTest::test_is_available();
}
-
#endif
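
The metaspace.cpp changes combine two ingredients: new VirtualSpaceNodes are fully initialized and published with a store-store barrier before being linked, and nodes are only unlinked at a safepoint, which together make the lock-free VirtualSpaceList::contains() walk safe. A sketch of the same publication pattern in standard C++ (release/acquire ordering standing in for OrderAccess::storestore(); Node and its fields are illustrative, not HotSpot types):

    #include <atomic>

    struct Node {
      const char* low;   // bounds tested by contains()
      const char* high;
      Node* next;
    };

    std::atomic<Node*> head{nullptr};

    // Writer (holds the expand lock in HotSpot): initialize fully, then publish.
    void link_node(Node* n, const char* lo, const char* hi) {
      n->low = lo;
      n->high = hi;
      n->next = head.load(std::memory_order_relaxed);
      // Release ordering plays the role of OrderAccess::storestore(): every
      // initializing store above becomes visible before the link itself does.
      head.store(n, std::memory_order_release);
    }

    // Lock-free reader, analogous to VirtualSpaceList::contains(). It is safe
    // because nodes are only unlinked and deleted at a safepoint, when no such
    // reader can be running.
    bool contains(const void* p) {
      for (Node* n = head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
        if (p >= n->low && p < n->high) return true;
      }
      return false;
    }

    int main() {
      static char arena[64];
      Node n;
      link_node(&n, arena, arena + sizeof(arena));
      return contains(arena + 8) ? 0 : 1;   // 0: the walk finds the pointer
    }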
--- a/hotspot/src/share/vm/memory/metaspace.hpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/memory/metaspace.hpp Thu May 15 18:23:26 2014 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -232,7 +232,8 @@
MetaWord* expand_and_allocate(size_t size,
MetadataType mdtype);
- bool contains(const void* ptr);
+ static bool contains(const void* ptr);
+
void dump(outputStream* const out) const;
// Free empty virtualspaces
--- a/hotspot/src/share/vm/oops/klass.cpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/oops/klass.cpp Thu May 15 18:23:26 2014 -0400
@@ -640,7 +640,7 @@
// This can be expensive, but it is worth checking that this klass is actually
// in the CLD graph but not in production.
- assert(ClassLoaderDataGraph::contains((address)this), "Should be");
+ assert(Metaspace::contains((address)this), "Should be");
guarantee(this->is_klass(),"should be klass");
--- a/hotspot/src/share/vm/oops/method.cpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/oops/method.cpp Thu May 15 18:23:26 2014 -0400
@@ -1866,6 +1866,14 @@
loader_data->jmethod_ids()->clear_all_methods();
}
+bool Method::has_method_vptr(const void* ptr) {
+ Method m;
+ // This assumes that the vtbl pointer is the first word of a C++ object.
+ // This assumption is also in universe.cpp patch_klass_vtble
+ void* vtbl2 = dereference_vptr((const void*)&m);
+ void* this_vtbl = dereference_vptr(ptr);
+ return vtbl2 == this_vtbl;
+}
// Check that this pointer is valid by checking that the vtbl pointer matches
bool Method::is_valid_method() const {
@@ -1874,12 +1882,7 @@
} else if (!is_metaspace_object()) {
return false;
} else {
- Method m;
- // This assumes that the vtbl pointer is the first word of a C++ object.
- // This assumption is also in universe.cpp patch_klass_vtble
- void* vtbl2 = dereference_vptr((void*)&m);
- void* this_vtbl = dereference_vptr((void*)this);
- return vtbl2 == this_vtbl;
+ return has_method_vptr((const void*)this);
}
}
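
Method::has_method_vptr() relies on the ABI assumption spelled out in its comment: the vtable pointer is the first word of a polymorphic C++ object, so the first word of a candidate pointer can be compared against the vptr of a stack-allocated Method. A standalone sketch of the idea with a generic class (illustrative only; this is outside the C++ standard and holds only under the first-word-vptr assumption):

    #include <cstdio>

    struct Base {
      virtual ~Base() {}        // polymorphic: the object starts with a vptr
    };
    struct Derived : Base {
      int extra = 0;
    };

    // Same trick as dereference_vptr(): read the first word of the object.
    static inline void* first_word(const void* addr) {
      return *(void**)addr;
    }

    // Analogous to Method::has_method_vptr(): does ptr start with Base's vptr?
    bool has_base_vptr(const void* ptr) {
      Base probe;   // a stack object of the exact type supplies the reference vptr
      return first_word(&probe) == first_word(ptr);
    }

    int main() {
      Base b;
      Derived d;
      void* not_an_object = nullptr;   // a word-sized dummy, safe to read
      std::printf("%d %d %d\n",
                  (int)has_base_vptr(&b),              // 1: same vtable
                  (int)has_base_vptr(&d),              // 0: Derived has its own vtable
                  (int)has_base_vptr(&not_an_object)); // 0: first word is not a vptr
      return 0;
    }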
--- a/hotspot/src/share/vm/oops/method.hpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/oops/method.hpp Thu May 15 18:23:26 2014 -0400
@@ -861,6 +861,7 @@
const char* internal_name() const { return "{method}"; }
// Check for valid method pointer
+ static bool has_method_vptr(const void* ptr);
bool is_valid_method() const;
// Verify
--- a/hotspot/src/share/vm/runtime/os.cpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/runtime/os.cpp Thu May 15 18:23:26 2014 -0400
@@ -1097,11 +1097,15 @@
}
- // Check if in metaspace.
- if (ClassLoaderDataGraph::contains((address)addr)) {
- // Use addr->print() from the debugger instead (not here)
- st->print_cr(INTPTR_FORMAT
- " is pointing into metadata", addr);
+ // Check if in metaspace and print types that have vptrs (only method now)
+ if (Metaspace::contains(addr)) {
+ if (Method::has_method_vptr((const void*)addr)) {
+ ((Method*)addr)->print_value_on(st);
+ st->cr();
+ } else {
+ // Use addr->print() from the debugger instead (not here)
+ st->print_cr(INTPTR_FORMAT " is pointing into metadata", addr);
+ }
return;
}
--- a/hotspot/src/share/vm/runtime/safepoint.cpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp Thu May 15 18:23:26 2014 -0400
@@ -541,6 +541,13 @@
gclog_or_tty->rotate_log(false);
}
+ {
+ // CMS delays purging the CLDG until the beginning of the next safepoint to
+ // make sure the concurrent sweep is done.
+ TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
+ ClassLoaderDataGraph::purge_if_needed();
+ }
+
if (MemTracker::is_on()) {
MemTracker::sync();
}
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Thu May 15 17:38:50 2014 -0400
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Thu May 15 18:23:26 2014 -0400
@@ -1385,7 +1385,7 @@
// All C++ compilers that we know of have the vtbl pointer in the first
// word. If there are exceptions, this function needs to be made compiler
// specific.
-static inline void* dereference_vptr(void* addr) {
+static inline void* dereference_vptr(const void* addr) {
return *(void**)addr;
}
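
The const-qualification of dereference_vptr() matters because the new callers (Method::has_method_vptr and the metaspace branch of os::print_location) hold const void* pointers; with the old signature those calls would not compile. A minimal illustration (the _old/_new suffixes are hypothetical, marking before and after):

    #include <cstdio>

    // Old signature: rejects const-qualified pointers.
    static inline void* dereference_vptr_old(void* addr) { return *(void**)addr; }
    // New signature: accepts const void*; the access itself is read-only anyway.
    static inline void* dereference_vptr_new(const void* addr) { return *(void**)addr; }

    int main() {
      void* word = nullptr;
      const void* p = &word;              // const pointer, as in has_method_vptr()
      // dereference_vptr_old(p);         // would not compile: discards const
      void* v = dereference_vptr_new(p);  // fine after this change
      std::printf("%p\n", v);
      return 0;
    }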