--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -1050,7 +1050,6 @@
n_copy->set_data((intx) (load_klass()));
} else {
assert(mirror() != NULL, "klass not set");
- // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
n_copy->set_data(cast_from_oop<intx>(mirror()));
}
--- a/hotspot/src/share/vm/ci/ciEnv.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -185,10 +185,6 @@
}
}
- void ensure_metadata_alive(ciMetadata* m) {
- _factory->ensure_metadata_alive(m);
- }
-
ciInstance* get_instance(oop o) {
if (o == NULL) return NULL;
return get_object(o)->as_instance();
--- a/hotspot/src/share/vm/ci/ciKlass.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciKlass.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -43,7 +43,6 @@
friend class ciMethod;
friend class ciMethodData;
friend class ciObjArrayKlass;
- friend class ciReceiverTypeData;
private:
ciSymbol* _name;
--- a/hotspot/src/share/vm/ci/ciMethodData.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciMethodData.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -170,7 +170,6 @@
Klass* k = data->as_ReceiverTypeData()->receiver(row);
if (k != NULL) {
ciKlass* klass = CURRENT_ENV->get_klass(k);
- CURRENT_ENV->ensure_metadata_alive(klass);
set_receiver(row, klass);
}
}
@@ -192,7 +191,6 @@
void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
Method* m = data->as_SpeculativeTrapData()->method();
ciMethod* ci_m = CURRENT_ENV->get_method(m);
- CURRENT_ENV->ensure_metadata_alive(ci_m);
set_method(ci_m);
}
--- a/hotspot/src/share/vm/ci/ciMethodData.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciMethodData.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -70,7 +70,6 @@
Klass* v = TypeEntries::valid_klass(k);
if (v != NULL) {
ciKlass* klass = CURRENT_ENV->get_klass(v);
- CURRENT_ENV->ensure_metadata_alive(klass);
return with_status(klass, k);
}
return with_status(NULL, k);
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -46,9 +46,6 @@
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/fieldType.hpp"
-#if INCLUDE_ALL_GCS
-# include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#endif
// ciObjectFactory
//
@@ -377,37 +374,6 @@
return NULL;
}
-// ------------------------------------------------------------------
-// ciObjectFactory::ensure_metadata_alive
-//
-// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
-// This is primarily useful for metadata which is considered as weak roots
-// by the GC but need to be strong roots if reachable from a current compilation.
-//
-void ciObjectFactory::ensure_metadata_alive(ciMetadata* m) {
- ASSERT_IN_VM; // We're handling raw oops here.
-
-#if INCLUDE_ALL_GCS
- if (!UseG1GC) {
- return;
- }
- Klass* metadata_owner_klass;
- if (m->is_klass()) {
- metadata_owner_klass = m->as_klass()->get_Klass();
- } else if (m->is_method()) {
- metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder();
- } else {
- fatal("Not implemented for other types of metadata");
- }
-
- oop metadata_holder = metadata_owner_klass->klass_holder();
- if (metadata_holder != NULL) {
- G1SATBCardTableModRefBS::enqueue(metadata_holder);
- }
-
-#endif
-}
-
//------------------------------------------------------------------
// ciObjectFactory::get_unloaded_method
//
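
For context on what is being backed out: with this feature, G1 treated class-loader oops as weak roots, so metadata reached from an in-progress compilation had to be explicitly kept alive by enqueuing its holder oop on the SATB queue. A minimal sketch of that keep-alive step, distilled from the removed ensure_metadata_alive above (all calls appear in the removed code; the helper name keep_metadata_owner_alive is illustrative, not a proposed addition):

static void keep_metadata_owner_alive(Klass* owner_klass) {
  // The holder oop is the class loader, or the java mirror for anonymous
  // classes; enqueuing it on the SATB queue makes concurrent marking keep
  // it, and therefore the metadata it owns, alive.
  if (UseG1GC) {
    oop holder = owner_klass->klass_holder();
    if (holder != NULL) {
      G1SATBCardTableModRefBS::enqueue(holder);
    }
  }
}
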
--- a/hotspot/src/share/vm/ci/ciObjectFactory.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -75,8 +75,6 @@
ciObject* create_new_object(oop o);
ciMetadata* create_new_object(Metadata* o);
- void ensure_metadata_alive(ciMetadata* m);
-
static bool is_equal(NonPermObject* p, oop key) {
return p->object()->get_oop() == key;
}
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -332,27 +332,6 @@
}
}
-#ifdef ASSERT
-class AllAliveClosure : public OopClosure {
- BoolObjectClosure* _is_alive_closure;
- bool _found_dead;
- public:
- AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
- template <typename T> void do_oop_work(T* p) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop)) {
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- if (!_is_alive_closure->do_object_b(obj)) {
- _found_dead = true;
- }
- }
- }
- void do_oop(oop* p) { do_oop_work<oop>(p); }
- void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
- bool found_dead() { return _found_dead; }
-};
-#endif
-
oop ClassLoaderData::keep_alive_object() const {
assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
return is_anonymous() ? _klasses->java_mirror() : class_loader();
@@ -362,15 +341,7 @@
bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
|| is_alive_closure->do_object_b(keep_alive_object());
-#ifdef ASSERT
- if (alive) {
- AllAliveClosure all_alive_closure(is_alive_closure);
- KlassToOopClosure klass_closure(&all_alive_closure);
- const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
- assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
- }
-#endif
-
+ assert(!alive || claimed(), "must be claimed");
return alive;
}
@@ -648,9 +619,9 @@
void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
if (ClassUnloading) {
- keep_alive_oops_do(f, klass_closure, must_claim);
+ ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim);
} else {
- oops_do(f, klass_closure, must_claim);
+ ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim);
}
}
@@ -660,27 +631,6 @@
}
}
-void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
- for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->_next) {
- CLDClosure* closure = cld->keep_alive() ? strong : weak;
- if (closure != NULL) {
- closure->do_cld(cld);
- }
- }
-}
-
-void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) {
- roots_cld_do(cl, NULL);
-}
-
-void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
- if (ClassUnloading) {
- keep_alive_cld_do(cl);
- } else {
- cld_do(cl);
- }
-}
-
void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->classes_do(klass_closure);
@@ -736,16 +686,6 @@
return array;
}
-bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
- assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
- for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
- if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
- return true;
- }
- }
- return false;
-}
-
#ifndef PRODUCT
bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
@@ -861,60 +801,6 @@
return _rw_metaspace;
}
-ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
- : _next_klass(NULL) {
- ClassLoaderData* cld = ClassLoaderDataGraph::_head;
- Klass* klass = NULL;
-
- // Find the first klass in the CLDG.
- while (cld != NULL) {
- klass = cld->_klasses;
- if (klass != NULL) {
- _next_klass = klass;
- return;
- }
- cld = cld->next();
- }
-}
-
-Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass) {
- Klass* next = klass->next_link();
- if (next != NULL) {
- return next;
- }
-
- // No more klasses in the current CLD. Time to find a new CLD.
- ClassLoaderData* cld = klass->class_loader_data();
- while (next == NULL) {
- cld = cld->next();
- if (cld == NULL) {
- break;
- }
- next = cld->_klasses;
- }
-
- return next;
-}
-
-Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
- Klass* head = (Klass*)_next_klass;
-
- while (head != NULL) {
- Klass* next = next_klass_in_cldg(head);
-
- Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
-
- if (old_head == head) {
- return head; // Won the CAS.
- }
-
- head = old_head;
- }
-
- // Nothing more for the iterator to hand out.
- assert(head == NULL, err_msg("head is " PTR_FORMAT ", expected not null:", p2i(head)));
- return NULL;
-}
ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() {
_data = ClassLoaderDataGraph::_head;
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -31,6 +31,7 @@
#include "memory/metaspaceCounters.hpp"
#include "runtime/mutex.hpp"
#include "utilities/growableArray.hpp"
+
#if INCLUDE_TRACE
# include "utilities/ticks.hpp"
#endif
@@ -58,7 +59,6 @@
class ClassLoaderDataGraph : public AllStatic {
friend class ClassLoaderData;
friend class ClassLoaderDataGraphMetaspaceIterator;
- friend class ClassLoaderDataGraphKlassIteratorAtomic;
friend class VMStructs;
private:
// All CLDs (except the null CLD) can be reached by walking _head->_next->...
@@ -74,16 +74,10 @@
static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
static void purge();
static void clear_claimed_marks();
- // oops do
static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
- static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
- // cld do
+ static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void cld_do(CLDClosure* cl);
- static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
- static void keep_alive_cld_do(CLDClosure* cl);
- static void always_strong_cld_do(CLDClosure* cl);
- // klass do
static void classes_do(KlassClosure* klass_closure);
static void classes_do(void f(Klass* const));
static void methods_do(void f(Method*));
@@ -109,7 +103,6 @@
static void dump() { dump_on(tty); }
static void verify();
- static bool unload_list_contains(const void* x);
#ifndef PRODUCT
static bool contains_loader_data(ClassLoaderData* loader_data);
#endif
@@ -142,7 +135,6 @@
};
friend class ClassLoaderDataGraph;
- friend class ClassLoaderDataGraphKlassIteratorAtomic;
friend class ClassLoaderDataGraphMetaspaceIterator;
friend class MetaDataFactory;
friend class Method;
@@ -202,6 +194,7 @@
void unload();
bool keep_alive() const { return _keep_alive; }
+ bool is_alive(BoolObjectClosure* is_alive_closure) const;
void classes_do(void f(Klass*));
void loaded_classes_do(KlassClosure* klass_closure);
void classes_do(void f(InstanceKlass*));
@@ -214,9 +207,6 @@
MetaWord* allocate(size_t size);
public:
-
- bool is_alive(BoolObjectClosure* is_alive_closure) const;
-
// Accessors
Metaspace* metaspace_or_null() const { return _metaspace; }
@@ -302,16 +292,6 @@
void initialize_shared_metaspaces();
};
-// An iterator that distributes Klasses to parallel worker threads.
-class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj {
- volatile Klass* _next_klass;
- public:
- ClassLoaderDataGraphKlassIteratorAtomic();
- Klass* next_klass();
- private:
- static Klass* next_klass_in_cldg(Klass* klass);
-};
-
class ClassLoaderDataGraphMetaspaceIterator : public StackObj {
ClassLoaderData* _data;
public:
--- a/hotspot/src/share/vm/classfile/dictionary.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -199,26 +199,6 @@
return class_was_unloaded;
}
-void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
- // Skip the strong roots probe marking if the closures are the same.
- if (strong == weak) {
- oops_do(strong);
- return;
- }
-
- for (int index = 0; index < table_size(); index++) {
- for (DictionaryEntry *probe = bucket(index);
- probe != NULL;
- probe = probe->next()) {
- Klass* e = probe->klass();
- ClassLoaderData* loader_data = probe->loader_data();
- if (is_strongly_reachable(loader_data, e)) {
- probe->set_strongly_reachable();
- }
- }
- }
- _pd_cache_table->roots_oops_do(strong, weak);
-}
void Dictionary::always_strong_oops_do(OopClosure* blk) {
// Follow all system classes and temporary placeholders in dictionary; only
@@ -510,23 +490,6 @@
}
}
-void ProtectionDomainCacheTable::roots_oops_do(OopClosure* strong, OopClosure* weak) {
- for (int index = 0; index < table_size(); index++) {
- for (ProtectionDomainCacheEntry* probe = bucket(index);
- probe != NULL;
- probe = probe->next()) {
- if (probe->is_strongly_reachable()) {
- probe->reset_strongly_reachable();
- probe->oops_do(strong);
- } else {
- if (weak != NULL) {
- probe->oops_do(weak);
- }
- }
- }
- }
-}
-
uint ProtectionDomainCacheTable::bucket_size() {
return sizeof(ProtectionDomainCacheEntry);
}
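
The removed roots_oops_do pair splits a table walk between a strong and a weak closure, short-circuiting to the plain oops_do when both closures are identical, since marking probes as strongly reachable would then be wasted work. A condensed sketch of that dispatch; first()/next() flatten the two-level bucket loop above for brevity and are not real table methods:

void roots_oops_do(OopClosure* strong, OopClosure* weak) {
  if (strong == weak) {
    oops_do(strong);        // one walk covers everything
    return;
  }
  for (ProtectionDomainCacheEntry* p = first(); p != NULL; p = p->next()) {
    OopClosure* c = p->is_strongly_reachable() ? strong : weak;
    if (c != NULL) {
      p->oops_do(c);        // weak may be NULL for a strong-only walk
    }
  }
}
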
--- a/hotspot/src/share/vm/classfile/dictionary.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -89,7 +89,6 @@
// GC support
void oops_do(OopClosure* f);
void always_strong_oops_do(OopClosure* blk);
- void roots_oops_do(OopClosure* strong, OopClosure* weak);
void always_strong_classes_do(KlassClosure* closure);
@@ -219,7 +218,6 @@
// GC support
void oops_do(OopClosure* f);
void always_strong_oops_do(OopClosure* f);
- void roots_oops_do(OopClosure* strong, OopClosure* weak);
static uint bucket_size();
--- a/hotspot/src/share/vm/classfile/metadataOnStackMark.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/classfile/metadataOnStackMark.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -47,11 +47,8 @@
if (_marked_objects == NULL) {
_marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(1000, true);
}
-
Threads::metadata_do(Metadata::mark_on_stack);
- if (JvmtiExport::has_redefined_a_class()) {
- CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
- }
+ CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
CompileBroker::mark_on_stack();
JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
ThreadService::metadata_do(Metadata::mark_on_stack);
--- a/hotspot/src/share/vm/classfile/stringTable.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/classfile/stringTable.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -37,7 +37,6 @@
#include "runtime/mutexLocker.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_ALL_GCS
-#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#endif
@@ -158,26 +157,11 @@
return lookup(chars, length);
}
-// Tell the GC that this string was looked up in the StringTable.
-static void ensure_string_alive(oop string) {
- // A lookup in the StringTable could return an object that was previously
- // considered dead. The SATB part of G1 needs to get notified about this
- // potential resurrection, otherwise the marking might not find the object.
-#if INCLUDE_ALL_GCS
- if (UseG1GC && string != NULL) {
- G1SATBCardTableModRefBS::enqueue(string);
- }
-#endif
-}
oop StringTable::lookup(jchar* name, int len) {
unsigned int hash = hash_string(name, len);
int index = the_table()->hash_to_index(hash);
- oop string = the_table()->lookup(index, name, len, hash);
-
- ensure_string_alive(string);
-
- return string;
+ return the_table()->lookup(index, name, len, hash);
}
@@ -188,10 +172,7 @@
oop found_string = the_table()->lookup(index, name, len, hashValue);
// Found
- if (found_string != NULL) {
- ensure_string_alive(found_string);
- return found_string;
- }
+ if (found_string != NULL) return found_string;
debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
assert(!Universe::heap()->is_in_reserved(name),
@@ -216,17 +197,11 @@
// Grab the StringTable_lock before getting the_table() because it could
// change at safepoint.
- oop added_or_found;
- {
- MutexLocker ml(StringTable_lock, THREAD);
- // Otherwise, add to symbol to table
- added_or_found = the_table()->basic_add(index, string, name, len,
- hashValue, CHECK_NULL);
- }
+ MutexLocker ml(StringTable_lock, THREAD);
- ensure_string_alive(added_or_found);
-
- return added_or_found;
+ // Otherwise, add the string to the table
+ return the_table()->basic_add(index, string, name, len,
+ hashValue, CHECK_NULL);
}
oop StringTable::intern(Symbol* symbol, TRAPS) {
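
Note the shape being restored in intern: the table probe stays lock-free, and StringTable_lock is taken only on a miss, for the insert. This works because basic_add re-probes the table under the lock before inserting, catching a racing writer. A simplified sketch of the restored control flow (arguments as in the hunk above):

oop found = the_table()->lookup(index, name, len, hashValue);  // no lock
if (found != NULL) return found;
MutexLocker ml(StringTable_lock, THREAD);   // serialize writers only
return the_table()->basic_add(index, string, name, len, hashValue, CHECK_NULL);
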
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -1612,7 +1612,13 @@
// system dictionary and follows the remaining classes' contents.
void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
- roots_oops_do(blk, NULL);
+ blk->do_oop(&_java_system_loader);
+ blk->do_oop(&_system_loader_lock_obj);
+
+ dictionary()->always_strong_oops_do(blk);
+
+ // Visit extra methods
+ invoke_method_table()->oops_do(blk);
}
void SystemDictionary::always_strong_classes_do(KlassClosure* closure) {
@@ -1679,17 +1685,6 @@
return unloading_occurred;
}
-void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
- strong->do_oop(&_java_system_loader);
- strong->do_oop(&_system_loader_lock_obj);
-
- // Adjust dictionary
- dictionary()->roots_oops_do(strong, weak);
-
- // Visit extra methods
- invoke_method_table()->oops_do(strong);
-}
-
void SystemDictionary::oops_do(OopClosure* f) {
f->do_oop(&_java_system_loader);
f->do_oop(&_system_loader_lock_obj);
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -330,7 +330,6 @@
// Applies "f->do_oop" to all root oops in the system dictionary.
static void oops_do(OopClosure* f);
- static void roots_oops_do(OopClosure* strong, OopClosure* weak);
// System loader lock
static oop system_loader_lock() { return _system_loader_lock_obj; }
--- a/hotspot/src/share/vm/code/codeCache.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -331,11 +331,6 @@
// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
assert_locked_or_safepoint(CodeCache_lock);
-
- if (UseG1GC) {
- return;
- }
-
debug_only(mark_scavenge_root_nmethods());
for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@@ -361,11 +356,6 @@
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
-
- if (UseG1GC) {
- return;
- }
-
nm->set_on_scavenge_root_list();
nm->set_scavenge_root_link(_scavenge_root_nmethods);
set_scavenge_root_nmethods(nm);
@@ -374,11 +364,6 @@
void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
-
- if (UseG1GC) {
- return;
- }
-
print_trace("drop_scavenge_root", nm);
nmethod* last = NULL;
nmethod* cur = scavenge_root_nmethods();
@@ -400,11 +385,6 @@
void CodeCache::prune_scavenge_root_nmethods() {
assert_locked_or_safepoint(CodeCache_lock);
-
- if (UseG1GC) {
- return;
- }
-
debug_only(mark_scavenge_root_nmethods());
nmethod* last = NULL;
@@ -437,10 +417,6 @@
#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
- if (UseG1GC) {
- return;
- }
-
// While we are here, verify the integrity of the list.
mark_scavenge_root_nmethods();
for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@@ -481,36 +457,9 @@
}
#endif //PRODUCT
-void CodeCache::verify_clean_inline_caches() {
-#ifdef ASSERT
- FOR_ALL_ALIVE_BLOBS(cb) {
- if (cb->is_nmethod()) {
- nmethod* nm = (nmethod*)cb;
- assert(!nm->is_unloaded(), "Tautology");
- nm->verify_clean_inline_caches();
- nm->verify();
- }
- }
-#endif
-}
-
-void CodeCache::verify_icholder_relocations() {
-#ifdef ASSERT
- // make sure that we aren't leaking icholders
- int count = 0;
- FOR_ALL_BLOBS(cb) {
- if (cb->is_nmethod()) {
- nmethod* nm = (nmethod*)cb;
- count += nm->verify_icholder_relocations();
- }
- }
-
- assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
- CompiledICHolder::live_count(), "must agree");
-#endif
-}
void CodeCache::gc_prologue() {
+ assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}
void CodeCache::gc_epilogue() {
@@ -523,15 +472,41 @@
nm->cleanup_inline_caches();
}
DEBUG_ONLY(nm->verify());
- DEBUG_ONLY(nm->verify_oop_relocations());
+ nm->fix_oop_relocations();
}
}
set_needs_cache_clean(false);
prune_scavenge_root_nmethods();
+ assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
- verify_icholder_relocations();
+#ifdef ASSERT
+ // make sure that we aren't leaking icholders
+ int count = 0;
+ FOR_ALL_BLOBS(cb) {
+ if (cb->is_nmethod()) {
+ RelocIterator iter((nmethod*)cb);
+ while(iter.next()) {
+ if (iter.type() == relocInfo::virtual_call_type) {
+ if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
+ CompiledIC *ic = CompiledIC_at(&iter);
+ if (TraceCompiledIC) {
+ tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
+ ic->print();
+ }
+ assert(ic->cached_icholder() != NULL, "must be non-NULL");
+ count++;
+ }
+ }
+ }
+ }
+ }
+
+ assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
+ CompiledICHolder::live_count(), "must agree");
+#endif
}
+
void CodeCache::verify_oops() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
VerifyOopClosure voc;
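
The assert restored at the end of gc_epilogue is a conservation check on CompiledICHolder bookkeeping; in the terms used above:

// holders wired into nmethod inline caches (the 'count' from the
// RelocIterator walk)
//   + holders parked in the InlineCacheBuffer awaiting an IC transition
//   + holders allocated but not yet claimed by any inline cache
// must equal all live CompiledICHolders; any imbalance is a leak.
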
--- a/hotspot/src/share/vm/code/codeCache.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/code/codeCache.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -134,6 +134,10 @@
// to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
// to "true" iff some code got unloaded.
static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
+ static void oops_do(OopClosure* f) {
+ CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
+ blobs_do(&oopc);
+ }
static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
static void scavenge_root_nmethods_do(CodeBlobClosure* f);
@@ -169,9 +173,6 @@
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
- static void verify_clean_inline_caches();
- static void verify_icholder_relocations();
-
// Deoptimization
static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
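
The oops_do added to CodeCache above adapts a per-oop closure into a per-blob walk via CodeBlobToOopClosure. An illustrative call site (VerifyOopClosure is used the same way in codeCache.cpp; this pairing is an example, not code from this change):

VerifyOopClosure voc;
CodeCache::oops_do(&voc);  // wraps voc in a CodeBlobToOopClosure, then blobs_do
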
--- a/hotspot/src/share/vm/code/compiledIC.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/code/compiledIC.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -99,13 +99,13 @@
}
{
- MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
+ MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
- CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
- assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
+ CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
+ assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
- _ic_call->set_destination_mt_safe(entry_point);
- }
+ _ic_call->set_destination_mt_safe(entry_point);
+}
if (is_optimized() || is_icstub) {
// Optimized call sites don't have a cache value and ICStub call
@@ -529,7 +529,7 @@
void CompiledStaticCall::set_to_clean() {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
// Reset call site
- MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
+ MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(this);
assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
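
Both hunks above drop the safepoint-conditional form of the Patching_lock guard. The removed idiom leans on MutexLockerEx treating a NULL mutex as a no-op, so a caller already at a safepoint, where no competing patcher can run, skips locking entirely:

// Removed idiom, shown for reference: lock only when not at a safepoint.
MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock,
                 Mutex::_no_safepoint_check_flag);
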
--- a/hotspot/src/share/vm/code/nmethod.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -51,8 +51,6 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
-unsigned char nmethod::_global_unloading_clock = 0;
-
#ifdef DTRACE_ENABLED
// Only bother with this argument setup if dtrace is available
@@ -448,7 +446,6 @@
// Fill in default values for various flag fields
void nmethod::init_defaults() {
_state = in_use;
- _unloading_clock = 0;
_marked_for_reclamation = 0;
_has_flushed_dependencies = 0;
_has_unsafe_access = 0;
@@ -467,11 +464,7 @@
_oops_do_mark_link = NULL;
_jmethod_id = NULL;
_osr_link = NULL;
- if (UseG1GC) {
- _unloading_next = NULL;
- } else {
- _scavenge_root_link = NULL;
- }
+ _scavenge_root_link = NULL;
_scavenge_root_state = 0;
_compiler = NULL;
#if INCLUDE_RTM_OPT
@@ -1177,77 +1170,6 @@
}
}
-void nmethod::verify_clean_inline_caches() {
- assert_locked_or_safepoint(CompiledIC_lock);
-
- // If the method is not entrant or zombie then a JMP is plastered over the
- // first few bytes. If an oop in the old code was there, that oop
- // should not get GC'd. Skip the first few bytes of oops on
- // not-entrant methods.
- address low_boundary = verified_entry_point();
- if (!is_in_use()) {
- low_boundary += NativeJump::instruction_size;
- // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
- // This means that the low_boundary is going to be a little too high.
- // This shouldn't matter, since oops of non-entrant methods are never used.
- // In fact, why are we bothering to look at oops in a non-entrant method??
- }
-
- ResourceMark rm;
- RelocIterator iter(this, low_boundary);
- while(iter.next()) {
- switch(iter.type()) {
- case relocInfo::virtual_call_type:
- case relocInfo::opt_virtual_call_type: {
- CompiledIC *ic = CompiledIC_at(&iter);
- // Ok, to lookup references to zombies here
- CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
- if( cb != NULL && cb->is_nmethod() ) {
- nmethod* nm = (nmethod*)cb;
- // Verify that inline caches pointing to both zombie and not_entrant methods are clean
- if (!nm->is_in_use() || (nm->method()->code() != nm)) {
- assert(ic->is_clean(), "IC should be clean");
- }
- }
- break;
- }
- case relocInfo::static_call_type: {
- CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
- CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
- if( cb != NULL && cb->is_nmethod() ) {
- nmethod* nm = (nmethod*)cb;
- // Verify that inline caches pointing to both zombie and not_entrant methods are clean
- if (!nm->is_in_use() || (nm->method()->code() != nm)) {
- assert(csc->is_clean(), "IC should be clean");
- }
- }
- break;
- }
- }
- }
-}
-
-int nmethod::verify_icholder_relocations() {
- int count = 0;
-
- RelocIterator iter(this);
- while(iter.next()) {
- if (iter.type() == relocInfo::virtual_call_type) {
- if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
- CompiledIC *ic = CompiledIC_at(&iter);
- if (TraceCompiledIC) {
- tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
- ic->print();
- }
- assert(ic->cached_icholder() != NULL, "must be non-NULL");
- count++;
- }
- }
- }
-
- return count;
-}
-
// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
assert(is_alive(), "Must be an alive method");
@@ -1280,23 +1202,6 @@
mdo->inc_decompile_count();
}
-void nmethod::increase_unloading_clock() {
- _global_unloading_clock++;
- if (_global_unloading_clock == 0) {
- // _nmethods are allocated with _unloading_clock == 0,
- // so 0 is never used as a clock value.
- _global_unloading_clock = 1;
- }
-}
-
-void nmethod::set_unloading_clock(unsigned char unloading_clock) {
- OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
-}
-
-unsigned char nmethod::unloading_clock() {
- return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
-}
-
void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
post_compiled_method_unload();
@@ -1342,10 +1247,6 @@
// for later on.
CodeCache::set_needs_cache_clean(true);
}
-
- // Unregister must be done before the state change
- Universe::heap()->unregister_nmethod(this);
-
_state = unloaded;
// Log the unloading.
@@ -1689,35 +1590,6 @@
set_unload_reported();
}
-void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
- if (ic->is_icholder_call()) {
- // The only exception is compiledICHolder oops which may
- // yet be marked below. (We check this further below).
- CompiledICHolder* cichk_oop = ic->cached_icholder();
- if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
- cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
- return;
- }
- } else {
- Metadata* ic_oop = ic->cached_metadata();
- if (ic_oop != NULL) {
- if (ic_oop->is_klass()) {
- if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
- return;
- }
- } else if (ic_oop->is_method()) {
- if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
- return;
- }
- } else {
- ShouldNotReachHere();
- }
- }
- }
-
- ic->set_to_clean();
-}
-
// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.
@@ -1761,7 +1633,31 @@
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC *ic = CompiledIC_at(&iter);
- clean_ic_if_metadata_is_dead(ic, is_alive);
+ if (ic->is_icholder_call()) {
+ // The only exception is compiledICHolder oops which may
+ // yet be marked below. (We check this further below).
+ CompiledICHolder* cichk_oop = ic->cached_icholder();
+ if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
+ cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
+ continue;
+ }
+ } else {
+ Metadata* ic_oop = ic->cached_metadata();
+ if (ic_oop != NULL) {
+ if (ic_oop->is_klass()) {
+ if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
+ continue;
+ }
+ } else if (ic_oop->is_method()) {
+ if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
+ continue;
+ }
+ } else {
+ ShouldNotReachHere();
+ }
+ }
+ }
+ ic->set_to_clean();
}
}
}
@@ -1799,175 +1695,6 @@
verify_metadata_loaders(low_boundary, is_alive);
}
-template <class CompiledICorStaticCall>
-static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
- // Ok, to lookup references to zombies here
- CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
- if (cb != NULL && cb->is_nmethod()) {
- nmethod* nm = (nmethod*)cb;
-
- if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
- // The nmethod has not been processed yet.
- return true;
- }
-
- // Clean inline caches pointing to both zombie and not_entrant methods
- if (!nm->is_in_use() || (nm->method()->code() != nm)) {
- ic->set_to_clean();
- assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT "not clean %s", from, from->method()->name_and_sig_as_C_string()));
- }
- }
-
- return false;
-}
-
-static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
- return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
-}
-
-static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
- return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
-}
-
-bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
- ResourceMark rm;
-
- // Make sure the oop's ready to receive visitors
- assert(!is_zombie() && !is_unloaded(),
- "should not call follow on zombie or unloaded nmethod");
-
- // If the method is not entrant then a JMP is plastered over the
- // first few bytes. If an oop in the old code was there, that oop
- // should not get GC'd. Skip the first few bytes of oops on
- // not-entrant methods.
- address low_boundary = verified_entry_point();
- if (is_not_entrant()) {
- low_boundary += NativeJump::instruction_size;
- // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
- // (See comment above.)
- }
-
- // The RedefineClasses() API can cause the class unloading invariant
- // to no longer be true. See jvmtiExport.hpp for details.
- // Also, leave a debugging breadcrumb in local flag.
- bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
- if (a_class_was_redefined) {
- // This set of the unloading_occurred flag is done before the
- // call to post_compiled_method_unload() so that the unloading
- // of this nmethod is reported.
- unloading_occurred = true;
- }
-
- // Exception cache
- clean_exception_cache(is_alive);
-
- bool is_unloaded = false;
- bool postponed = false;
-
- RelocIterator iter(this, low_boundary);
- while(iter.next()) {
-
- switch (iter.type()) {
-
- case relocInfo::virtual_call_type:
- if (unloading_occurred) {
- // If class unloading occurred we first iterate over all inline caches and
- // clear ICs where the cached oop is referring to an unloaded klass or method.
- clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
- }
-
- postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
- break;
-
- case relocInfo::opt_virtual_call_type:
- postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
- break;
-
- case relocInfo::static_call_type:
- postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
- break;
-
- case relocInfo::oop_type:
- if (!is_unloaded) {
- // Unload check
- oop_Relocation* r = iter.oop_reloc();
- // Traverse those oops directly embedded in the code.
- // Other oops (oop_index>0) are seen as part of scopes_oops.
- assert(1 == (r->oop_is_immediate()) +
- (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
- "oop must be found in exactly one place");
- if (r->oop_is_immediate() && r->oop_value() != NULL) {
- if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
- is_unloaded = true;
- }
- }
- }
- break;
-
- }
- }
-
- if (is_unloaded) {
- return postponed;
- }
-
- // Scopes
- for (oop* p = oops_begin(); p < oops_end(); p++) {
- if (*p == Universe::non_oop_word()) continue; // skip non-oops
- if (can_unload(is_alive, p, unloading_occurred)) {
- is_unloaded = true;
- break;
- }
- }
-
- if (is_unloaded) {
- return postponed;
- }
-
- // Ensure that all metadata is still alive
- verify_metadata_loaders(low_boundary, is_alive);
-
- return postponed;
-}
-
-void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
- ResourceMark rm;
-
- // Make sure the oop's ready to receive visitors
- assert(!is_zombie(),
- "should not call follow on zombie nmethod");
-
- // If the method is not entrant then a JMP is plastered over the
- // first few bytes. If an oop in the old code was there, that oop
- // should not get GC'd. Skip the first few bytes of oops on
- // not-entrant methods.
- address low_boundary = verified_entry_point();
- if (is_not_entrant()) {
- low_boundary += NativeJump::instruction_size;
- // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
- // (See comment above.)
- }
-
- RelocIterator iter(this, low_boundary);
- while(iter.next()) {
-
- switch (iter.type()) {
-
- case relocInfo::virtual_call_type:
- clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
- break;
-
- case relocInfo::opt_virtual_call_type:
- clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
- break;
-
- case relocInfo::static_call_type:
- clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
- break;
- }
- }
-}
-
#ifdef ASSERT
class CheckClass : AllStatic {
@@ -2184,7 +1911,7 @@
assert(cur != NULL, "not NULL-terminated");
nmethod* next = cur->_oops_do_mark_link;
cur->_oops_do_mark_link = NULL;
- cur->verify_oop_relocations();
+ cur->fix_oop_relocations();
NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
cur = next;
}
@@ -2752,10 +2479,6 @@
};
void nmethod::verify_scavenge_root_oops() {
- if (UseG1GC) {
- return;
- }
-
if (!on_scavenge_root_list()) {
// Actually look inside, to verify the claim that it's clean.
DebugScavengeRoot debug_scavenge_root(this);
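
The unloading-clock machinery deleted above lets parallel unloading decide whether another worker has already cleaned a given nmethod this cycle: an nmethod is up to date exactly when its private clock equals the global one. A standalone sketch of the scheme in standard C++ (NMethodStub, global_clock, and start_unloading_cycle are stand-ins; HotSpot used OrderAccess release_store/load_acquire where this uses memory_order_release/acquire):

#include <atomic>

static std::atomic<unsigned char> global_clock(0);

struct NMethodStub {
  std::atomic<unsigned char> clock;   // 0 at allocation: "never processed"
  NMethodStub() : clock(0) {}
  void mark_processed() {
    clock.store(global_clock.load(), std::memory_order_release);
  }
  bool processed_this_cycle() {
    return clock.load(std::memory_order_acquire) == global_clock.load();
  }
};

static void start_unloading_cycle() {
  unsigned char c = (unsigned char)(global_clock.load() + 1);
  if (c == 0) c = 1;                  // skip 0 on wrap-around; it is reserved
  global_clock.store(c);
}
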
--- a/hotspot/src/share/vm/code/nmethod.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/code/nmethod.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -111,11 +111,6 @@
friend class NMethodSweeper;
friend class CodeCache; // scavengable oops
private:
-
- // GC support to help figure out if an nmethod has been
- // cleaned/unloaded by the current GC.
- static unsigned char _global_unloading_clock;
-
// Shared fields for all nmethod's
Method* _method;
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
@@ -123,13 +118,7 @@
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
-
- union {
- // Used by G1 to chain nmethods.
- nmethod* _unloading_next;
- // Used by non-G1 GCs to chain nmethods.
- nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
- };
+ nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link;
@@ -191,8 +180,6 @@
// Protected by Patching_lock
volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}
- volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod
-
#ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section
#endif
@@ -450,15 +437,6 @@
bool unload_reported() { return _unload_reported; }
void set_unload_reported() { _unload_reported = true; }
- void set_unloading_next(nmethod* next) { _unloading_next = next; }
- nmethod* unloading_next() { return _unloading_next; }
-
- static unsigned char global_unloading_clock() { return _global_unloading_clock; }
- static void increase_unloading_clock();
-
- void set_unloading_clock(unsigned char unloading_clock);
- unsigned char unloading_clock();
-
bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
void mark_for_deoptimization() { _marked_for_deoptimization = true; }
@@ -574,10 +552,6 @@
return (addr >= code_begin() && addr < verified_entry_point());
}
- // Verify calls to dead methods have been cleaned.
- void verify_clean_inline_caches();
- // Verify and count cached icholder relocations.
- int verify_icholder_relocations();
// Check that all metadata is still alive
void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
@@ -603,10 +577,6 @@
// GC support
void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
- // The parallel versions are used by G1.
- bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
- void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
- // Unload a nmethod if the *root object is dead.
bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -1558,11 +1558,11 @@
}
if (MetaspaceGC::should_concurrent_collect()) {
- if (Verbose && PrintGCDetails) {
+ if (Verbose && PrintGCDetails) {
gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
- }
- return true;
- }
+ }
+ return true;
+ }
// CMSTriggerInterval starts a CMS cycle if enough time has passed.
if (CMSTriggerInterval >= 0) {
@@ -2997,21 +2997,20 @@
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
- // Get a clear set of claim bits for the roots processing to work with.
+ // Get a clear set of claim bits for the strong roots processing to work with.
ClassLoaderDataGraph::clear_claimed_marks();
// Mark from roots one level into CMS
MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
- gch->gen_process_roots(_cmsGen->level(),
- true, // younger gens are roots
- true, // activate StrongRootsScope
- SharedHeap::ScanningOption(roots_scanning_options()),
- should_unload_classes(),
- &notOlder,
- NULL,
- NULL); // SSS: Provide correct closure
+ gch->gen_process_strong_roots(_cmsGen->level(),
+ true, // younger gens are roots
+ true, // activate StrongRootsScope
+ SharedHeap::ScanningOption(roots_scanning_options()),
+ &notOlder,
+ NULL,
+ NULL); // SSS: Provide correct closure
// Now mark from the roots
MarkFromRootsClosure markFromRootsClosure(this, _span,
@@ -3062,24 +3061,22 @@
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
- // Get a clear set of claim bits for the roots processing to work with.
+ // Get a clear set of claim bits for the strong roots processing to work with.
ClassLoaderDataGraph::clear_claimed_marks();
// Mark from roots one level into CMS
MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
markBitMap());
- CLDToOopClosure cld_closure(&notOlder, true);
+ KlassToOopClosure klass_closure(&notOlder);
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-
- gch->gen_process_roots(_cmsGen->level(),
- true, // younger gens are roots
- true, // activate StrongRootsScope
- SharedHeap::ScanningOption(roots_scanning_options()),
- should_unload_classes(),
- &notOlder,
- NULL,
- &cld_closure);
+ gch->gen_process_strong_roots(_cmsGen->level(),
+ true, // younger gens are roots
+ true, // activate StrongRootsScope
+ SharedHeap::ScanningOption(roots_scanning_options()),
+ &notOlder,
+ NULL,
+ &klass_closure);
// Now mark from the roots
MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
@@ -3266,10 +3263,12 @@
void CMSCollector::setup_cms_unloading_and_verification_state() {
const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
|| VerifyBeforeExit;
- const int rso = SharedHeap::SO_AllCodeCache;
+ const int rso = SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
// We set the proper root for this CMS cycle here.
if (should_unload_classes()) { // Should unload classes this cycle
+ remove_root_scanning_option(SharedHeap::SO_AllClasses);
+ add_root_scanning_option(SharedHeap::SO_SystemClasses);
remove_root_scanning_option(rso); // Shrink the root set appropriately
set_verifying(should_verify); // Set verification state for this cycle
return; // Nothing else needs to be done at this time
@@ -3277,6 +3276,8 @@
// Not unloading classes this cycle
assert(!should_unload_classes(), "Inconsistency!");
+ remove_root_scanning_option(SharedHeap::SO_SystemClasses);
+ add_root_scanning_option(SharedHeap::SO_AllClasses);
if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
// Include symbols, strings and code cache elements to prevent their resurrection.
@@ -3684,16 +3685,15 @@
gch->set_par_threads(0);
} else {
// The serial version.
- CLDToOopClosure cld_closure(&notOlder, true);
+ KlassToOopClosure klass_closure(&notOlder);
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
- gch->gen_process_roots(_cmsGen->level(),
- true, // younger gens are roots
- true, // activate StrongRootsScope
- SharedHeap::ScanningOption(roots_scanning_options()),
- should_unload_classes(),
- &notOlder,
- NULL,
- &cld_closure);
+ gch->gen_process_strong_roots(_cmsGen->level(),
+ true, // younger gens are roots
+ true, // activate StrongRootsScope
+ SharedHeap::ScanningOption(roots_scanning_options()),
+ &notOlder,
+ NULL,
+ &klass_closure);
}
}
@@ -5139,6 +5139,7 @@
_timer.start();
GenCollectedHeap* gch = GenCollectedHeap::heap();
Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
+ KlassToOopClosure klass_closure(&par_mri_cl);
// ---------- young gen roots --------------
{
@@ -5154,17 +5155,13 @@
// ---------- remaining roots --------------
_timer.reset();
_timer.start();
-
- CLDToOopClosure cld_closure(&par_mri_cl, true);
-
- gch->gen_process_roots(_collector->_cmsGen->level(),
- false, // yg was scanned above
- false, // this is parallel code
- SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
- _collector->should_unload_classes(),
- &par_mri_cl,
- NULL,
- &cld_closure);
+ gch->gen_process_strong_roots(_collector->_cmsGen->level(),
+ false, // yg was scanned above
+ false, // this is parallel code
+ SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+ &par_mri_cl,
+ NULL,
+ &klass_closure);
assert(_collector->should_unload_classes()
|| (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@@ -5293,15 +5290,13 @@
// ---------- remaining roots --------------
_timer.reset();
_timer.start();
- gch->gen_process_roots(_collector->_cmsGen->level(),
- false, // yg was scanned above
- false, // this is parallel code
- SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
- _collector->should_unload_classes(),
- &par_mrias_cl,
- NULL,
- NULL); // The dirty klasses will be handled below
-
+ gch->gen_process_strong_roots(_collector->_cmsGen->level(),
+ false, // yg was scanned above
+ false, // this is parallel code
+ SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+ &par_mrias_cl,
+ NULL,
+ NULL); // The dirty klasses will be handled below
assert(_collector->should_unload_classes()
|| (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@@ -5356,7 +5351,7 @@
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops point to newly allocated objects
// that are guaranteed to be kept alive. Either by the direct allocation
- // code, or when the young collector processes the roots. Hence,
+ // code, or when the young collector processes the strong roots. Hence,
// we don't have to revisit the _handles block during the remark phase.
// ---------- rescan dirty cards ------------
@@ -5778,7 +5773,7 @@
cms_space,
n_workers, workers, task_queues());
- // Set up for parallel process_roots work.
+ // Set up for parallel process_strong_roots work.
gch->set_par_threads(n_workers);
// We won't be iterating over the cards in the card table updating
// the younger_gen cards, so we shouldn't call the following else
@@ -5787,7 +5782,7 @@
// gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
// The young gen rescan work will not be done as part of
- // process_roots (which currently doesn't know how to
+ // process_strong_roots (which currently doesn't know how to
// parallelize such a scan), but rather will be broken up into
// a set of parallel tasks (via the sampling that the [abortable]
// preclean phase did of EdenSpace, plus the [two] tasks of
@@ -5884,15 +5879,13 @@
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
GenCollectedHeap::StrongRootsScope srs(gch);
-
- gch->gen_process_roots(_cmsGen->level(),
- true, // younger gens as roots
- false, // use the local StrongRootsScope
- SharedHeap::ScanningOption(roots_scanning_options()),
- should_unload_classes(),
- &mrias_cl,
- NULL,
- NULL); // The dirty klasses will be handled below
+ gch->gen_process_strong_roots(_cmsGen->level(),
+ true, // younger gens as roots
+ false, // use the local StrongRootsScope
+ SharedHeap::ScanningOption(roots_scanning_options()),
+ &mrias_cl,
+ NULL,
+ NULL); // The dirty klasses will be handled below
assert(should_unload_classes()
|| (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
@@ -5932,7 +5925,7 @@
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops point to newly allocated objects
// that are guaranteed to be kept alive. Either by the direct allocation
- // code, or when the young collector processes the roots. Hence,
+ // code, or when the young collector processes the strong roots. Hence,
// we don't have to revisit the _handles block during the remark phase.
verify_work_stacks_empty();
@@ -6182,14 +6175,15 @@
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
-
- {
- GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
- // Delete entries for dead interned strings.
- StringTable::unlink(&_is_alive_closure);
- }
- }
-
+ }
+
+ // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
+ // Need to check if we really scanned the StringTable.
+ if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
+ GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+ // Delete entries for dead interned strings.
+ StringTable::unlink(&_is_alive_closure);
+ }
// Restore any preserved marks as a result of mark stack or
// work queue overflow
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -32,7 +32,6 @@
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
-#include "memory/iterator.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
-#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
@@ -40,7 +39,6 @@
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
-#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
@@ -60,8 +58,8 @@
_bmWordSize = 0;
}
-HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
- const HeapWord* limit) const {
+HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
+ HeapWord* limit) const {
// First we must round addr *up* to a possible object boundary.
addr = (HeapWord*)align_size_up((intptr_t)addr,
HeapWordSize << _shifter);
@@ -78,8 +76,8 @@
return nextAddr;
}
-HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
- const HeapWord* limit) const {
+HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
+ HeapWord* limit) const {
size_t addrOffset = heapWordToOffset(addr);
if (limit == NULL) {
limit = _bmStartWord + _bmWordSize;
@@ -1225,9 +1223,6 @@
};
void ConcurrentMark::scanRootRegions() {
- // Start of concurrent marking.
- ClassLoaderDataGraph::clear_claimed_marks();
-
// scan_in_progress() will have been set to true only if there was
// at least one root region to scan. So, if it's false, we
// should not attempt to do any further work.
@@ -1276,7 +1271,7 @@
CMConcurrentMarkingTask markingTask(this, cmThread());
if (use_parallel_marking_threads()) {
_parallel_workers->set_active_workers((int)active_workers);
- // Don't set _n_par_threads because it affects MT in process_roots()
+ // Don't set _n_par_threads because it affects MT in process_strong_roots()
// and the decisions on that MT processing is made elsewhere.
assert(_parallel_workers->active_workers() > 0, "Should have been set");
_parallel_workers->run_task(&markingTask);
@@ -2147,29 +2142,23 @@
// Update the soft reference policy with the new heap occupancy.
Universe::update_heap_info_at_gc();
+ // We need to make this be a "collection" so any collection pause that
+ // races with it goes around and waits for completeCleanup to finish.
+ g1h->increment_total_collections();
+
+ // We reclaimed old regions so we should calculate the sizes to make
+ // sure we update the old gen/space data.
+ g1h->g1mm()->update_sizes();
+
if (VerifyDuringGC) {
HandleMark hm; // handle scope
Universe::heap()->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(after)");
}
-
g1h->check_bitmaps("Cleanup End");
g1h->verify_region_sets_optional();
-
- // We need to make this be a "collection" so any collection pause that
- // races with it goes around and waits for completeCleanup to finish.
- g1h->increment_total_collections();
-
- // Clean out dead classes and update Metaspace sizes.
- ClassLoaderDataGraph::purge();
- MetaspaceGC::compute_new_size();
-
- // We reclaimed old regions so we should calculate the sizes to make
- // sure we update the old gen/space data.
- g1h->g1mm()->update_sizes();
-
g1h->trace_heap_after_concurrent_cycle();
}
@@ -2456,26 +2445,6 @@
_g1h->set_par_threads(0);
}
-void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
- G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
-}
-
-// Helper class to get rid of some boilerplate code.
-class G1RemarkGCTraceTime : public GCTraceTime {
- static bool doit_and_prepend(bool doit) {
- if (doit) {
- gclog_or_tty->put(' ');
- }
- return doit;
- }
-
- public:
- G1RemarkGCTraceTime(const char* title, bool doit)
- : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
- G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
- }
-};
-
void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
if (has_overflown()) {
// Skip processing the discovered references if we have
@@ -2588,28 +2557,9 @@
return;
}
- assert(_markStack.isEmpty(), "Marking should have completed");
-
- // Unload Klasses, String, Symbols, Code Cache, etc.
-
- G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
-
- bool purged_classes;
-
- {
- G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
- purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
- }
-
- {
- G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
- weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
- }
-
- if (G1StringDedup::is_enabled()) {
- G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
- G1StringDedup::unlink(&g1_is_alive);
- }
+ g1h->unlink_string_and_symbol_table(&g1_is_alive,
+ /* process_strings */ false, // currently strings are always roots
+ /* process_symbols */ true);
}
void ConcurrentMark::swapMarkBitMaps() {
@@ -2618,57 +2568,6 @@
_nextMarkBitMap = (CMBitMap*) temp;
}
-class CMObjectClosure;
-
-// Closure for iterating over objects, currently only used for
-// processing SATB buffers.
-class CMObjectClosure : public ObjectClosure {
-private:
- CMTask* _task;
-
-public:
- void do_object(oop obj) {
- _task->deal_with_reference(obj);
- }
-
- CMObjectClosure(CMTask* task) : _task(task) { }
-};
-
-class G1RemarkThreadsClosure : public ThreadClosure {
- CMObjectClosure _cm_obj;
- G1CMOopClosure _cm_cl;
- MarkingCodeBlobClosure _code_cl;
- int _thread_parity;
- bool _is_par;
-
- public:
- G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
- _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
- _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
-
- void do_thread(Thread* thread) {
- if (thread->is_Java_thread()) {
- if (thread->claim_oops_do(_is_par, _thread_parity)) {
- JavaThread* jt = (JavaThread*)thread;
-
- // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
- // however the liveness of oops reachable from nmethods have very complex lifecycles:
- // * Alive if on the stack of an executing method
- // * Weakly reachable otherwise
- // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
- // live by the SATB invariant but other oops recorded in nmethods may behave differently.
- jt->nmethods_do(&_code_cl);
-
- jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
- }
- } else if (thread->is_VM_thread()) {
- if (thread->claim_oops_do(_is_par, _thread_parity)) {
- JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
- }
- }
- }
-};
-
class CMRemarkTask: public AbstractGangTask {
private:
ConcurrentMark* _cm;
@@ -2680,14 +2579,6 @@
if (worker_id < _cm->active_tasks()) {
CMTask* task = _cm->task(worker_id);
task->record_start_time();
- {
- ResourceMark rm;
- HandleMark hm;
-
- G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
- Threads::threads_do(&threads_f);
- }
-
do {
task->do_marking_step(1000000000.0 /* something very large */,
true /* do_termination */,
@@ -2710,8 +2601,6 @@
HandleMark hm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
-
g1h->ensure_parsability(false);
if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -3541,6 +3430,20 @@
}
};
+// Closure for iterating over objects, currently only used for
+// processing SATB buffers.
+class CMObjectClosure : public ObjectClosure {
+private:
+ CMTask* _task;
+
+public:
+ void do_object(oop obj) {
+ _task->deal_with_reference(obj);
+ }
+
+ CMObjectClosure(CMTask* task) : _task(task) { }
+};
+
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMTask* task)
@@ -4005,6 +3908,15 @@
}
}
+ if (!concurrent() && !has_aborted()) {
+ // We should only do this during remark.
+ if (G1CollectedHeap::use_parallel_gc_threads()) {
+ satb_mq_set.par_iterate_closure_all_threads(_worker_id);
+ } else {
+ satb_mq_set.iterate_closure_all_threads();
+ }
+ }
+
_draining_satb_buffers = false;
assert(has_aborted() ||
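
For reference, the hunk above restores the pre-existing remark behavior: instead of walking Java threads with a dedicated closure, each CMTask drains the SATB buffers through the queue set at the end of its marking step. A minimal standalone sketch of that drain pattern follows; the types and names are illustrative stand-ins, not HotSpot API:

    #include <vector>
    #include <functional>

    // Minimal model of the restored SATB drain: apply a closure to every
    // per-thread queue's buffered entries, then to the shared queue.
    struct ObjQueue {
        std::vector<void*> buf;
        void apply_and_empty(const std::function<void(void*)>& cl) {
            for (void* p : buf) cl(p);   // process each recorded reference
            buf.clear();                 // buffer is emptied after processing
        }
    };

    struct QueueSet {
        std::vector<ObjQueue*> per_thread;
        ObjQueue shared;
        // Serial form, mirroring iterate_closure_all_threads() in this patch.
        void drain_all(const std::function<void(void*)>& cl) {
            for (ObjQueue* q : per_thread) q->apply_and_empty(cl);
            shared.apply_and_empty(cl);
        }
    };

The parallel variant (restored in satbQueue.cpp further down) additionally claims each thread, and the VMThread for the shared queue, so that every queue is drained exactly once across workers.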
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
-#include "classfile/javaClasses.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/gcId.hpp"
#include "utilities/taskqueue.hpp"
@@ -87,19 +86,19 @@
// Return the address corresponding to the next marked bit at or after
// "addr", and before "limit", if "limit" is non-NULL. If there is no
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
- HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
- const HeapWord* limit = NULL) const;
+ HeapWord* getNextMarkedWordAddress(HeapWord* addr,
+ HeapWord* limit = NULL) const;
// Return the address corresponding to the next unmarked bit at or after
// "addr", and before "limit", if "limit" is non-NULL. If there is no
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
- HeapWord* getNextUnmarkedWordAddress(const HeapWord* addr,
- const HeapWord* limit = NULL) const;
+ HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
+ HeapWord* limit = NULL) const;
// conversion utilities
HeapWord* offsetToHeapWord(size_t offset) const {
return _bmStartWord + (offset << _shifter);
}
- size_t heapWordToOffset(const HeapWord* addr) const {
+ size_t heapWordToOffset(HeapWord* addr) const {
return pointer_delta(addr, _bmStartWord) >> _shifter;
}
int heapWordDiffToOffsetDiff(size_t diff) const;
@@ -477,7 +476,6 @@
ForceOverflowSettings _force_overflow_conc;
ForceOverflowSettings _force_overflow_stw;
- void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
void weakRefsWork(bool clear_all_soft_refs);
void swapMarkBitMaps();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -426,7 +426,7 @@
q = n;
oop obj = oop(q);
if (obj->klass_or_null() == NULL) return q;
- n += block_size(q);
+ n += obj->size();
}
assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
// [q, n) is the block that crosses the boundary.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -113,7 +113,7 @@
q = n;
oop obj = oop(q);
if (obj->klass_or_null() == NULL) return q;
- n += block_size(q);
+ n += obj->size();
}
assert(q <= n, "wrong order for q and addr");
assert(addr < n, "wrong order for addr and n");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -30,52 +30,23 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
-G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL), _free(NULL) {
+G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL) {
_top = bottom();
}
void G1CodeRootChunk::reset() {
_next = _prev = NULL;
- _free = NULL;
_top = bottom();
}
void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
- NmethodOrLink* cur = bottom();
+ nmethod** cur = bottom();
while (cur != _top) {
- if (is_nmethod(cur)) {
- cl->do_code_blob(cur->_nmethod);
- }
+ cl->do_code_blob(*cur);
cur++;
}
}
-bool G1CodeRootChunk::remove_lock_free(nmethod* method) {
- NmethodOrLink* cur = bottom();
-
- for (NmethodOrLink* cur = bottom(); cur != _top; cur++) {
- if (cur->_nmethod == method) {
- bool result = Atomic::cmpxchg_ptr(NULL, &cur->_nmethod, method) == method;
-
- if (!result) {
- // Someone else cleared out this entry.
- return false;
- }
-
- // The method was cleared. Time to link it into the free list.
- NmethodOrLink* prev_free;
- do {
- prev_free = (NmethodOrLink*)_free;
- cur->_link = prev_free;
- } while (Atomic::cmpxchg_ptr(cur, &_free, prev_free) != prev_free);
-
- return true;
- }
- }
-
- return false;
-}
-
G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) {
_free_list.initialize();
_free_list.set_size(G1CodeRootChunk::word_size());
@@ -169,43 +140,34 @@
void G1CodeRootSet::add(nmethod* method) {
if (!contains(method)) {
- // Find the first chunk that isn't full.
- G1CodeRootChunk* cur = _list.head();
- while (cur != NULL) {
- if (!cur->is_full()) {
- break;
- }
- cur = cur->next();
- }
-
- // All chunks are full, get a new chunk.
- if (cur == NULL) {
- cur = new_chunk();
+ // Try to add the nmethod. If there is not enough space, get a new chunk.
+ if (_list.head() == NULL || _list.head()->is_full()) {
+ G1CodeRootChunk* cur = new_chunk();
_list.return_chunk_at_head(cur);
}
-
- // Add the nmethod.
- bool result = cur->add(method);
-
+ bool result = _list.head()->add(method);
guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));
-
_length++;
}
}
-void G1CodeRootSet::remove_lock_free(nmethod* method) {
+void G1CodeRootSet::remove(nmethod* method) {
G1CodeRootChunk* found = find(method);
if (found != NULL) {
- bool result = found->remove_lock_free(method);
- if (result) {
- Atomic::dec_ptr((volatile intptr_t*)&_length);
+ bool result = found->remove(method);
+ guarantee(result, err_msg("could not find nmethod "PTR_FORMAT" during removal although we previously found it", method));
+ // Free the chunk if it has become completely empty.
+ if (found->is_empty()) {
+ _list.remove_chunk(found);
+ free(found);
}
+ _length--;
}
assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
}
nmethod* G1CodeRootSet::pop() {
- while (true) {
+ do {
G1CodeRootChunk* cur = _list.head();
if (cur == NULL) {
assert(_length == 0, "when there are no chunks, there should be no elements");
@@ -218,7 +180,7 @@
} else {
free(_list.get_chunk_at_head());
}
- }
+ } while (true);
}
G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -31,14 +31,6 @@
class CodeBlobClosure;
-// The elements of the G1CodeRootChunk are either:
-// 1) nmethod pointers
-// 2) nodes in an internally chained free list
-typedef union {
- nmethod* _nmethod;
- void* _link;
-} NmethodOrLink;
-
class G1CodeRootChunk : public CHeapObj<mtGC> {
private:
static const int NUM_ENTRIES = 32;
@@ -46,28 +38,16 @@
G1CodeRootChunk* _next;
G1CodeRootChunk* _prev;
- NmethodOrLink* _top;
- // First free position within the chunk.
- volatile NmethodOrLink* _free;
+ nmethod** _top;
- NmethodOrLink _data[NUM_ENTRIES];
+ nmethod* _data[NUM_ENTRIES];
- NmethodOrLink* bottom() const {
- return (NmethodOrLink*) &(_data[0]);
+ nmethod** bottom() const {
+ return (nmethod**) &(_data[0]);
}
- NmethodOrLink* end() const {
- return (NmethodOrLink*) &(_data[NUM_ENTRIES]);
- }
-
- bool is_link(NmethodOrLink* nmethod_or_link) {
- return nmethod_or_link->_link == NULL ||
- (bottom() <= nmethod_or_link->_link
- && nmethod_or_link->_link < end());
- }
-
- bool is_nmethod(NmethodOrLink* nmethod_or_link) {
- return !is_link(nmethod_or_link);
+ nmethod** end() const {
+ return (nmethod**) &(_data[NUM_ENTRIES]);
}
public:
@@ -105,55 +85,46 @@
}
bool is_full() const {
- return _top == end() && _free == NULL;
+ return _top == (nmethod**)end();
}
bool contains(nmethod* method) {
- NmethodOrLink* cur = bottom();
+ nmethod** cur = bottom();
while (cur != _top) {
- if (cur->_nmethod == method) return true;
+ if (*cur == method) return true;
cur++;
}
return false;
}
bool add(nmethod* method) {
- if (is_full()) {
- return false;
- }
-
- if (_free != NULL) {
- // Take from internally chained free list
- NmethodOrLink* first_free = (NmethodOrLink*)_free;
- _free = (NmethodOrLink*)_free->_link;
- first_free->_nmethod = method;
- } else {
- // Take from top.
- _top->_nmethod = method;
- _top++;
- }
-
+ if (is_full()) return false;
+ *_top = method;
+ _top++;
return true;
}
- bool remove_lock_free(nmethod* method);
+ bool remove(nmethod* method) {
+ nmethod** cur = bottom();
+ while (cur != _top) {
+ if (*cur == method) {
+ memmove(cur, cur + 1, (_top - (cur + 1)) * sizeof(nmethod**));
+ _top--;
+ return true;
+ }
+ cur++;
+ }
+ return false;
+ }
void nmethods_do(CodeBlobClosure* blk);
nmethod* pop() {
- if (_free != NULL) {
- // Kill the free list.
- _free = NULL;
+ if (is_empty()) {
+ return NULL;
}
-
- while (!is_empty()) {
- _top--;
- if (is_nmethod(_top)) {
- return _top->_nmethod;
- }
- }
-
- return NULL;
+ _top--;
+ return *_top;
}
};
@@ -222,7 +193,7 @@
// method is likely to be repeatedly called with the same nmethod.
void add(nmethod* method);
- void remove_lock_free(nmethod* method);
+ void remove(nmethod* method);
nmethod* pop();
bool contains(nmethod* method);
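
The restored G1CodeRootChunk is a plain bounded array of nmethod pointers: add() appends at _top and remove() compacts the tail with memmove, with no free list and no CAS. A minimal standalone model of those two operations, assuming this layout (Entry stands in for nmethod*; names are illustrative):

    #include <cstring>

    // Minimal model of the restored chunk: a bounded pointer array with a
    // top cursor; remove() shifts the tail down (cf. the memmove above).
    typedef void* Entry;

    struct Chunk {
        static const int NUM_ENTRIES = 32;
        Entry data[NUM_ENTRIES];
        Entry* top = data;                       // next free slot

        bool add(Entry e) {
            if (top == data + NUM_ENTRIES) return false;  // chunk is full
            *top++ = e;
            return true;
        }
        bool remove(Entry e) {
            for (Entry* cur = data; cur != top; ++cur) {
                if (*cur == e) {
                    // Close the gap by shifting the remaining entries down.
                    std::memmove(cur, cur + 1, (top - (cur + 1)) * sizeof(Entry));
                    --top;
                    return true;
                }
            }
            return false;
        }
    };

The trade-off versus the lock-free variant being removed is linear-time removal under external synchronization, which matches the switch from remove_lock_free() back to remove() in heapRegionRemSet.cpp below.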
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -57,7 +57,6 @@
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
-#include "memory/allocation.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
@@ -92,10 +91,10 @@
// G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
// The number of GC workers is passed to heap_region_par_iterate_chunked().
// It does use run_task() which sets _n_workers in the task.
-// G1ParTask executes g1_process_roots() ->
-// SharedHeap::process_roots() which calls eventually to
+// G1ParTask executes g1_process_strong_roots() ->
+// SharedHeap::process_strong_roots() which calls eventually to
// CardTableModRefBS::par_non_clean_card_iterate_work() which uses
-// SequentialSubTasksDone. SharedHeap::process_roots() also
+// SequentialSubTasksDone. SharedHeap::process_strong_roots() also
// directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
//
@@ -3380,19 +3379,25 @@
if (!silent) { gclog_or_tty->print("Roots "); }
VerifyRootsClosure rootsCl(vo);
VerifyKlassClosure klassCl(this, &rootsCl);
- CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
// We apply the relevant closures to all the oops in the
- // system dictionary, class loader data graph, the string table
- // and the nmethods in the code cache.
+ // system dictionary, class loader data graph and the string table.
+ // Don't verify the code cache here, since it's verified below.
+ const int so = SO_AllClasses | SO_Strings;
+
+ // Need cleared claim bits for the strong roots processing
+ ClassLoaderDataGraph::clear_claimed_marks();
+
+ process_strong_roots(true, // activate StrongRootsScope
+ ScanningOption(so), // roots scanning options
+ &rootsCl,
+ &klassCl
+ );
+
+ // Verify the nmethods in the code cache.
G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
-
- process_all_roots(true, // activate StrongRootsScope
- SO_AllCodeCache, // roots scanning options
- &rootsCl,
- &cldCl,
- &blobsCl);
+ CodeCache::blobs_do(&blobsCl);
bool failures = rootsCl.failures() || codeRootsCl.failures();
@@ -3974,7 +3979,6 @@
increment_gc_time_stamp();
verify_before_gc();
-
check_bitmaps("GC Start");
COMPILER2_PRESENT(DerivedPointerTable::clear());
@@ -4325,7 +4329,11 @@
assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
-void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
+void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+ assert_at_safepoint(true /* should_be_vm_thread */);
+
+ _survivor_gc_alloc_region.init();
+ _old_gc_alloc_region.init();
HeapRegion* retained_region = _retained_old_gc_alloc_region;
_retained_old_gc_alloc_region = NULL;
@@ -4357,15 +4365,6 @@
}
}
-void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
- assert_at_safepoint(true /* should_be_vm_thread */);
-
- _survivor_gc_alloc_region.init();
- _old_gc_alloc_region.init();
-
- use_retained_old_gc_alloc_region(evacuation_info);
-}
-
void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
_old_gc_alloc_region.count());
@@ -4588,7 +4587,7 @@
}
}
-template <G1Barrier barrier, G1Mark do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
@@ -4610,7 +4609,7 @@
}
assert(forwardee != NULL, "forwardee should not be NULL");
oopDesc::encode_store_heap_oop(p, forwardee);
- if (do_mark_object != G1MarkNone && forwardee != obj) {
+ if (do_mark_object && forwardee != obj) {
// If the object is self-forwarded we don't need to explicitly
// mark it, the evacuation failure protocol will do so.
mark_forwarded_object(obj, forwardee);
@@ -4621,8 +4620,9 @@
}
} else {
// The object is not in collection set. If we're a root scanning
- // closure during an initial mark pause then attempt to mark the object.
- if (do_mark_object == G1MarkFromRoot) {
+ // closure during an initial mark pause (i.e. do_mark_object will
+ // be true), then attempt to mark the object.
+ if (do_mark_object) {
mark_object(obj);
}
}
@@ -4632,8 +4632,8 @@
}
}
-template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
-template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
+template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
+template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
class G1ParEvacuateFollowersClosure : public VoidClosure {
protected:
@@ -4746,51 +4746,6 @@
_n_workers = active_workers;
}
- // Helps out with CLD processing.
- //
- // During InitialMark we need to:
- // 1) Scavenge all CLDs for the young GC.
- // 2) Mark all objects directly reachable from strong CLDs.
- template <G1Mark do_mark_object>
- class G1CLDClosure : public CLDClosure {
- G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure;
- G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure;
- G1KlassScanClosure _klass_in_cld_closure;
- bool _claim;
-
- public:
- G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
- bool only_young, bool claim)
- : _oop_closure(oop_closure),
- _oop_in_klass_closure(oop_closure->g1(),
- oop_closure->pss(),
- oop_closure->rp()),
- _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
- _claim(claim) {
-
- }
-
- void do_cld(ClassLoaderData* cld) {
- cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
- }
- };
-
- class G1CodeBlobClosure: public CodeBlobClosure {
- OopClosure* _f;
-
- public:
- G1CodeBlobClosure(OopClosure* f) : _f(f) {}
- void do_code_blob(CodeBlob* blob) {
- nmethod* that = blob->as_nmethod_or_null();
- if (that != NULL) {
- if (!that->test_set_oops_do_mark()) {
- that->oops_do(_f);
- that->fix_oop_relocations();
- }
- }
- }
- };
-
void work(uint worker_id) {
if (worker_id >= _n_workers) return; // no work needed this round
@@ -4808,62 +4763,40 @@
pss.set_evac_failure_closure(&evac_failure_cl);
- bool only_young = _g1h->g1_policy()->gcs_are_young();
-
- // Non-IM young GC.
- G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
- G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
- only_young, // Only process dirty klasses.
- false); // No need to claim CLDs.
- // IM young GC.
- // Strong roots closures.
- G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
- G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
- false, // Process all klasses.
- true); // Need to claim CLDs.
- // Weak roots closures.
- G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
- G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
- false, // Process all klasses.
- true); // Need to claim CLDs.
-
- G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
- G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
- // IM Weak code roots are handled later.
-
- OopClosure* strong_root_cl;
- OopClosure* weak_root_cl;
- CLDClosure* strong_cld_cl;
- CLDClosure* weak_cld_cl;
- CodeBlobClosure* strong_code_cl;
+ G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
+ G1ParScanMetadataClosure only_scan_metadata_cl(_g1h, &pss, rp);
+
+ G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
+ G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
+
+ bool only_young = _g1h->g1_policy()->gcs_are_young();
+ G1KlassScanClosure scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
+ G1KlassScanClosure only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
+
+ OopClosure* scan_root_cl = &only_scan_root_cl;
+ G1KlassScanClosure* scan_klasses_cl = &only_scan_klasses_cl_s;
if (_g1h->g1_policy()->during_initial_mark_pause()) {
// We also need to mark copied objects.
- strong_root_cl = &scan_mark_root_cl;
- weak_root_cl = &scan_mark_weak_root_cl;
- strong_cld_cl = &scan_mark_cld_cl;
- weak_cld_cl = &scan_mark_weak_cld_cl;
- strong_code_cl = &scan_mark_code_cl;
- } else {
- strong_root_cl = &scan_only_root_cl;
- weak_root_cl = &scan_only_root_cl;
- strong_cld_cl = &scan_only_cld_cl;
- weak_cld_cl = &scan_only_cld_cl;
- strong_code_cl = &scan_only_code_cl;
+ scan_root_cl = &scan_mark_root_cl;
+ scan_klasses_cl = &scan_mark_klasses_cl_s;
}
-
- G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
+ G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
+
+ // Don't scan the scavengable methods in the code cache as part
+ // of strong root scanning. The code roots that point into a
+ // region in the collection set are scanned when we scan the
+ // region's RSet.
+ int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
pss.start_strong_roots();
- _g1h->g1_process_roots(strong_root_cl,
- weak_root_cl,
- &push_heap_rs_cl,
- strong_cld_cl,
- weak_cld_cl,
- strong_code_cl,
- worker_id);
-
+ _g1h->g1_process_strong_roots(/* is scavenging */ true,
+ SharedHeap::ScanningOption(so),
+ scan_root_cl,
+ &push_heap_rs_cl,
+ scan_klasses_cl,
+ worker_id);
pss.end_strong_roots();
{
@@ -4901,31 +4834,24 @@
void
G1CollectedHeap::
-g1_process_roots(OopClosure* scan_non_heap_roots,
- OopClosure* scan_non_heap_weak_roots,
- OopsInHeapRegionClosure* scan_rs,
- CLDClosure* scan_strong_clds,
- CLDClosure* scan_weak_clds,
- CodeBlobClosure* scan_strong_code,
- uint worker_i) {
-
- // First scan the shared roots.
+g1_process_strong_roots(bool is_scavenging,
+ ScanningOption so,
+ OopClosure* scan_non_heap_roots,
+ OopsInHeapRegionClosure* scan_rs,
+ G1KlassScanClosure* scan_klasses,
+ uint worker_i) {
+
+ // First scan the strong roots
double ext_roots_start = os::elapsedTime();
double closure_app_time_sec = 0.0;
- bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
-
BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
- BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
-
- process_roots(false, // no scoping; this is parallel code
- SharedHeap::SO_None,
- &buf_scan_non_heap_roots,
- &buf_scan_non_heap_weak_roots,
- scan_strong_clds,
- // Initial Mark handles the weak CLDs separately.
- (during_im ? NULL : scan_weak_clds),
- scan_strong_code);
+
+ process_strong_roots(false, // no scoping; this is parallel code
+ so,
+ &buf_scan_non_heap_roots,
+ scan_klasses
+ );
// Now the CM ref_processor roots.
if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
@@ -4936,21 +4862,10 @@
ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
}
- if (during_im) {
- // Barrier to make sure all workers passed
- // the strong CLD and strong nmethods phases.
- active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
-
- // Now take the complement of the strong CLDs.
- ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
- }
-
// Finish up any enqueued closure apps (attributed as object copy time).
buf_scan_non_heap_roots.done();
- buf_scan_non_heap_weak_roots.done();
-
- double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
- + buf_scan_non_heap_weak_roots.closure_app_seconds();
+
+ double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
@@ -4974,10 +4889,22 @@
}
g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
+ // If this is an initial mark pause, and we're not scanning
+ // the entire code cache, we need to mark the oops in the
+ // strong code root lists for the regions that are not in
+ // the collection set.
+ // Note all threads participate in this set of root tasks.
+ double mark_strong_code_roots_ms = 0.0;
+ if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
+ double mark_strong_roots_start = os::elapsedTime();
+ mark_strong_code_roots(worker_i);
+ mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
+ }
+ g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
+
// Now scan the complement of the collection set.
- MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
-
- g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
+ CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
+ g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
_process_strong_tasks->all_tasks_completed();
}
@@ -4999,8 +4926,7 @@
bool _do_in_parallel;
public:
G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
- AbstractGangTask("String/Symbol Unlinking"),
- _is_alive(is_alive),
+ AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
_do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
_process_strings(process_strings), _strings_processed(0), _strings_removed(0),
_process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
@@ -5022,14 +4948,6 @@
guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
err_msg("claim value %d after unlink less than initial symbol table size %d",
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
-
- if (G1TraceStringSymbolTableScrubbing) {
- gclog_or_tty->print_cr("Cleaned string and symbol table, "
- "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
- "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
- strings_processed(), strings_removed(),
- symbols_processed(), symbols_removed());
- }
}
void work(uint worker_id) {
@@ -5065,279 +4983,12 @@
size_t symbols_removed() const { return (size_t)_symbols_removed; }
};
-class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
-private:
- static Monitor* _lock;
-
- BoolObjectClosure* const _is_alive;
- const bool _unloading_occurred;
- const uint _num_workers;
-
- // Variables used to claim nmethods.
- nmethod* _first_nmethod;
- volatile nmethod* _claimed_nmethod;
-
- // The list of nmethods that need to be processed by the second pass.
- volatile nmethod* _postponed_list;
- volatile uint _num_entered_barrier;
-
- public:
- G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
- _is_alive(is_alive),
- _unloading_occurred(unloading_occurred),
- _num_workers(num_workers),
- _first_nmethod(NULL),
- _claimed_nmethod(NULL),
- _postponed_list(NULL),
- _num_entered_barrier(0)
- {
- nmethod::increase_unloading_clock();
- _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
- _claimed_nmethod = (volatile nmethod*)_first_nmethod;
- }
-
- ~G1CodeCacheUnloadingTask() {
- CodeCache::verify_clean_inline_caches();
-
- CodeCache::set_needs_cache_clean(false);
- guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
-
- CodeCache::verify_icholder_relocations();
- }
-
- private:
- void add_to_postponed_list(nmethod* nm) {
- nmethod* old;
- do {
- old = (nmethod*)_postponed_list;
- nm->set_unloading_next(old);
- } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
- }
-
- void clean_nmethod(nmethod* nm) {
- bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
-
- if (postponed) {
- // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
- add_to_postponed_list(nm);
- }
-
- // Mark that this nmethod has been cleaned/unloaded.
- // After this call, it will be safe to ask if this nmethod was unloaded or not.
- nm->set_unloading_clock(nmethod::global_unloading_clock());
- }
-
- void clean_nmethod_postponed(nmethod* nm) {
- nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
- }
-
- static const int MaxClaimNmethods = 16;
-
- void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
- nmethod* first;
- nmethod* last;
-
- do {
- *num_claimed_nmethods = 0;
-
- first = last = (nmethod*)_claimed_nmethod;
-
- if (first != NULL) {
- for (int i = 0; i < MaxClaimNmethods; i++) {
- last = CodeCache::alive_nmethod(CodeCache::next(last));
-
- if (last == NULL) {
- break;
- }
-
- claimed_nmethods[i] = last;
- (*num_claimed_nmethods)++;
- }
- }
-
- } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
- }
-
- nmethod* claim_postponed_nmethod() {
- nmethod* claim;
- nmethod* next;
-
- do {
- claim = (nmethod*)_postponed_list;
- if (claim == NULL) {
- return NULL;
- }
-
- next = claim->unloading_next();
-
- } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
-
- return claim;
- }
-
- public:
- // Mark that we're done with the first pass of nmethod cleaning.
- void barrier_mark(uint worker_id) {
- MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
- _num_entered_barrier++;
- if (_num_entered_barrier == _num_workers) {
- ml.notify_all();
- }
- }
-
- // See if we have to wait for the other workers to
- // finish their first-pass nmethod cleaning work.
- void barrier_wait(uint worker_id) {
- if (_num_entered_barrier < _num_workers) {
- MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
- while (_num_entered_barrier < _num_workers) {
- ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
- }
- }
- }
-
- // Cleaning and unloading of nmethods. Some work has to be postponed
- // to the second pass, when we know which nmethods survive.
- void work_first_pass(uint worker_id) {
- // The first nmethod is claimed by the first worker.
- if (worker_id == 0 && _first_nmethod != NULL) {
- clean_nmethod(_first_nmethod);
- _first_nmethod = NULL;
- }
-
- int num_claimed_nmethods;
- nmethod* claimed_nmethods[MaxClaimNmethods];
-
- while (true) {
- claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
-
- if (num_claimed_nmethods == 0) {
- break;
- }
-
- for (int i = 0; i < num_claimed_nmethods; i++) {
- clean_nmethod(claimed_nmethods[i]);
- }
- }
- }
-
- void work_second_pass(uint worker_id) {
- nmethod* nm;
- // Take care of postponed nmethods.
- while ((nm = claim_postponed_nmethod()) != NULL) {
- clean_nmethod_postponed(nm);
- }
- }
-};
-
-Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
-
-class G1KlassCleaningTask : public StackObj {
- BoolObjectClosure* _is_alive;
- volatile jint _clean_klass_tree_claimed;
- ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
-
- public:
- G1KlassCleaningTask(BoolObjectClosure* is_alive) :
- _is_alive(is_alive),
- _clean_klass_tree_claimed(0),
- _klass_iterator() {
- }
-
- private:
- bool claim_clean_klass_tree_task() {
- if (_clean_klass_tree_claimed) {
- return false;
- }
-
- return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
- }
-
- InstanceKlass* claim_next_klass() {
- Klass* klass;
- do {
- klass =_klass_iterator.next_klass();
- } while (klass != NULL && !klass->oop_is_instance());
-
- return (InstanceKlass*)klass;
- }
-
-public:
-
- void clean_klass(InstanceKlass* ik) {
- ik->clean_implementors_list(_is_alive);
- ik->clean_method_data(_is_alive);
-
- // G1 specific cleanup work that has
- // been moved here to be done in parallel.
- ik->clean_dependent_nmethods();
- }
-
- void work() {
- ResourceMark rm;
-
- // One worker will clean the subklass/sibling klass tree.
- if (claim_clean_klass_tree_task()) {
- Klass::clean_subklass_tree(_is_alive);
- }
-
- // All workers will help clean the classes.
- InstanceKlass* klass;
- while ((klass = claim_next_klass()) != NULL) {
- clean_klass(klass);
- }
- }
-};
-
-// To minimize the remark pause times, the tasks below are done in parallel.
-class G1ParallelCleaningTask : public AbstractGangTask {
-private:
- G1StringSymbolTableUnlinkTask _string_symbol_task;
- G1CodeCacheUnloadingTask _code_cache_task;
- G1KlassCleaningTask _klass_cleaning_task;
-
-public:
- // The constructor is run in the VMThread.
- G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
- AbstractGangTask("Parallel Cleaning"),
- _string_symbol_task(is_alive, process_strings, process_symbols),
- _code_cache_task(num_workers, is_alive, unloading_occurred),
- _klass_cleaning_task(is_alive) {
- }
-
- // The parallel work done by all worker threads.
- void work(uint worker_id) {
- // Do first pass of code cache cleaning.
- _code_cache_task.work_first_pass(worker_id);
-
- // Let the threads mark that the first pass is done.
- _code_cache_task.barrier_mark(worker_id);
-
- // Clean the Strings and Symbols.
- _string_symbol_task.work(worker_id);
-
- // Wait for all workers to finish the first code cache cleaning pass.
- _code_cache_task.barrier_wait(worker_id);
-
- // Do the second code cache cleaning work, which relies on
- // the liveness information gathered during the first pass.
- _code_cache_task.work_second_pass(worker_id);
-
- // Clean all klasses that were not unloaded.
- _klass_cleaning_task.work();
- }
-};
-
-
-void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
- bool process_strings,
- bool process_symbols,
- bool class_unloading_occurred) {
+void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
+ bool process_strings, bool process_symbols) {
uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
- workers()->active_workers() : 1);
-
- G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
- n_workers, class_unloading_occurred);
+ _g1h->workers()->active_workers() : 1);
+
+ G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
if (G1CollectedHeap::use_parallel_gc_threads()) {
set_par_threads(n_workers);
workers()->run_task(&g1_unlink_task);
@@ -5345,21 +4996,12 @@
} else {
g1_unlink_task.work(0);
}
-}
-
-void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
- bool process_strings, bool process_symbols) {
- {
- uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
- _g1h->workers()->active_workers() : 1);
- G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
- if (G1CollectedHeap::use_parallel_gc_threads()) {
- set_par_threads(n_workers);
- workers()->run_task(&g1_unlink_task);
- set_par_threads(0);
- } else {
- g1_unlink_task.work(0);
- }
+ if (G1TraceStringSymbolTableScrubbing) {
+ gclog_or_tty->print_cr("Cleaned string and symbol table, "
+ "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
+ "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
+ g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
+ g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
}
if (G1StringDedup::is_enabled()) {
@@ -5952,10 +5594,6 @@
{
StrongRootsScope srs(this);
- // InitialMark needs claim bits to keep track of the marked-through CLDs.
- if (g1_policy()->during_initial_mark_pause()) {
- ClassLoaderDataGraph::clear_claimed_marks();
- }
if (G1CollectedHeap::use_parallel_gc_threads()) {
// The individual threads will set their evac-failure closures.
@@ -6991,6 +6629,106 @@
g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
}
+// Mark all the code roots that point into regions *not* in the
+// collection set.
+//
+// Note we do not want to use a "marking" CodeBlobToOopClosure while
+// walking the code roots lists of regions not in the collection
+// set. Suppose we have an nmethod (M) that points to objects in two
+// separate regions - one in the collection set (R1) and one not (R2).
+// Using a "marking" CodeBlobToOopClosure here would result in "marking"
+// nmethod M when walking the code roots for R1. When we come to scan
+// the code roots for R2, we would see that M is already marked and it
+// would be skipped and the objects in R2 that are referenced from M
+// would not be evacuated.
+
+class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
+
+ class MarkStrongCodeRootOopClosure: public OopClosure {
+ ConcurrentMark* _cm;
+ HeapRegion* _hr;
+ uint _worker_id;
+
+ template <class T> void do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ // Only mark objects in the region (which is assumed
+ // to be not in the collection set).
+ if (_hr->is_in(obj)) {
+ _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
+ }
+ }
+ }
+
+ public:
+ MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
+ _cm(cm), _hr(hr), _worker_id(worker_id) {
+ assert(!_hr->in_collection_set(), "sanity");
+ }
+
+ void do_oop(narrowOop* p) { do_oop_work(p); }
+ void do_oop(oop* p) { do_oop_work(p); }
+ };
+
+ MarkStrongCodeRootOopClosure _oop_cl;
+
+public:
+ MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
+ _oop_cl(cm, hr, worker_id) {}
+
+ void do_code_blob(CodeBlob* cb) {
+ nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
+ if (nm != NULL) {
+ nm->oops_do(&_oop_cl);
+ }
+ }
+};
+
+class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
+ G1CollectedHeap* _g1h;
+ uint _worker_id;
+
+public:
+ MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
+ _g1h(g1h), _worker_id(worker_id) {}
+
+ bool doHeapRegion(HeapRegion *hr) {
+ HeapRegionRemSet* hrrs = hr->rem_set();
+ if (hr->continuesHumongous()) {
+ // Code roots should never be attached to a continuation of a humongous region
+ assert(hrrs->strong_code_roots_list_length() == 0,
+ err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
+ " starting at "HR_FORMAT", but has "SIZE_FORMAT,
+ HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
+ hrrs->strong_code_roots_list_length()));
+ return false;
+ }
+
+ if (hr->in_collection_set()) {
+ // Don't mark code roots into regions in the collection set here.
+ // They will be marked when we scan them.
+ return false;
+ }
+
+ MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
+ hr->strong_code_roots_do(&cb_cl);
+ return false;
+ }
+};
+
+void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
+ MarkStrongCodeRootsHRClosure cl(this, worker_id);
+ if (G1CollectedHeap::use_parallel_gc_threads()) {
+ heap_region_par_iterate_chunked(&cl,
+ worker_id,
+ workers()->active_workers(),
+ HeapRegion::ParMarkRootClaimValue);
+ } else {
+ heap_region_iterate(&cl);
+ }
+}
+
class RebuildStrongCodeRootClosure: public CodeBlobClosure {
G1CollectedHeap* _g1h;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -210,7 +210,6 @@
class RefineCardTableEntryClosure;
class G1CollectedHeap : public SharedHeap {
- friend class VM_CollectForMetadataAllocation;
friend class VM_G1CollectForAllocation;
friend class VM_G1CollectFull;
friend class VM_G1IncCollectionPause;
@@ -220,7 +219,7 @@
friend class OldGCAllocRegion;
// Closures used in implementation.
- template <G1Barrier barrier, G1Mark do_mark_object>
+ template <G1Barrier barrier, bool do_mark_object>
friend class G1ParCopyClosure;
friend class G1IsAliveClosure;
friend class G1EvacuateFollowersClosure;
@@ -347,9 +346,6 @@
// It initializes the GC alloc regions at the start of a GC.
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
- // Set up the retained old gc alloc region as the current old gc alloc region.
- void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
-
// It releases the GC alloc regions at the end of a GC.
void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
@@ -831,13 +827,12 @@
// param is for use with parallel roots processing, and should be
// the "i" of the calling parallel worker thread's work(i) function.
// In the sequential case this param will be ignored.
- void g1_process_roots(OopClosure* scan_non_heap_roots,
- OopClosure* scan_non_heap_weak_roots,
- OopsInHeapRegionClosure* scan_rs,
- CLDClosure* scan_strong_clds,
- CLDClosure* scan_weak_clds,
- CodeBlobClosure* scan_strong_code,
- uint worker_i);
+ void g1_process_strong_roots(bool is_scavenging,
+ ScanningOption so,
+ OopClosure* scan_non_heap_roots,
+ OopsInHeapRegionClosure* scan_rs,
+ G1KlassScanClosure* scan_klasses,
+ uint worker_i);
// Notifies all the necessary spaces that the committed space has
// been updated (either expanded or shrunk). It should be called
@@ -1030,7 +1025,7 @@
// of G1CollectedHeap::_gc_time_stamp.
unsigned int* _worker_cset_start_region_time_stamp;
- enum G1H_process_roots_tasks {
+ enum G1H_process_strong_roots_tasks {
G1H_PS_filter_satb_buffers,
G1H_PS_refProcessor_oops_do,
// Leave this one last.
@@ -1612,6 +1607,10 @@
// Free up superfluous code root memory.
void purge_code_root_memory();
+ // During an initial mark pause, mark all the code roots that
+ // point into regions *not* in the collection set.
+ void mark_strong_code_roots(uint worker_id);
+
// Rebuild the strong code root lists for each region
// after a full GC.
void rebuild_strong_code_roots();
@@ -1620,9 +1619,6 @@
// in symbol table, possibly in parallel.
void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
- // Parallel phase of unloading/cleaning after G1 concurrent mark.
- void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred);
-
// Redirty logged cards in the refinement queue.
void redirty_logged_cards();
// Verification
--- a/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -71,9 +71,6 @@
bool _during_initial_mark;
bool _during_conc_mark;
uint _worker_id;
- HeapWord* _end_of_last_gap;
- HeapWord* _last_gap_threshold;
- HeapWord* _last_obj_threshold;
public:
RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
@@ -86,10 +83,7 @@
_update_rset_cl(update_rset_cl),
_during_initial_mark(during_initial_mark),
_during_conc_mark(during_conc_mark),
- _worker_id(worker_id),
- _end_of_last_gap(hr->bottom()),
- _last_gap_threshold(hr->bottom()),
- _last_obj_threshold(hr->bottom()) { }
+ _worker_id(worker_id) { }
size_t marked_bytes() { return _marked_bytes; }
@@ -113,12 +107,7 @@
HeapWord* obj_addr = (HeapWord*) obj;
assert(_hr->is_in(obj_addr), "sanity");
size_t obj_size = obj->size();
- HeapWord* obj_end = obj_addr + obj_size;
-
- if (_end_of_last_gap != obj_addr) {
- // there was a gap before obj_addr
- _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
- }
+ _hr->update_bot_for_object(obj_addr, obj_size);
if (obj->is_forwarded() && obj->forwardee() == obj) {
// The object failed to move.
@@ -126,9 +115,7 @@
// We consider all objects that we find self-forwarded to be
// live: we update the prev marking info so that they are all
// under PTAMS and explicitly marked.
- if (!_cm->isPrevMarked(obj)) {
- _cm->markPrev(obj);
- }
+ _cm->markPrev(obj);
if (_during_initial_mark) {
// For the next marking info we'll only mark the
// self-forwarded objects explicitly if we are during
@@ -158,18 +145,13 @@
// remembered set entries missing given that we skipped cards on
// the collection set. So, we'll recreate such entries now.
obj->oop_iterate(_update_rset_cl);
+ assert(_cm->isPrevMarked(obj), "Should be marked!");
} else {
-
// The object has been either evacuated or is dead. Fill it with a
// dummy object.
- MemRegion mr(obj_addr, obj_size);
+ MemRegion mr((HeapWord*) obj, obj_size);
CollectedHeap::fill_with_object(mr);
-
- // must nuke all dead objects which we skipped when iterating over the region
- _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
}
- _end_of_last_gap = obj_end;
- _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
}
};
@@ -200,6 +182,13 @@
during_conc_mark,
_worker_id);
+ MemRegion mr(hr->bottom(), hr->end());
+ // We'll recreate the prev marking info so we'll first clear
+ // the prev bitmap range for this region. We never mark any
+ // CSet objects explicitly so the next bitmap range should be
+ // cleared anyway.
+ _cm->clearRangePrevBitmap(mr);
+
hr->note_self_forwarding_removal_start(during_initial_mark,
during_conc_mark);
_g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -167,6 +167,7 @@
_last_update_rs_processed_buffers(_max_gc_threads, "%d"),
_last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
_last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
+ _last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
_last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
@@ -193,6 +194,7 @@
_last_update_rs_processed_buffers.reset();
_last_scan_rs_times_ms.reset();
_last_strong_code_root_scan_times_ms.reset();
+ _last_strong_code_root_mark_times_ms.reset();
_last_obj_copy_times_ms.reset();
_last_termination_times_ms.reset();
_last_termination_attempts.reset();
@@ -213,6 +215,7 @@
_last_update_rs_processed_buffers.verify();
_last_scan_rs_times_ms.verify();
_last_strong_code_root_scan_times_ms.verify();
+ _last_strong_code_root_mark_times_ms.verify();
_last_obj_copy_times_ms.verify();
_last_termination_times_ms.verify();
_last_termination_attempts.verify();
@@ -227,6 +230,7 @@
_last_update_rs_times_ms.get(i) +
_last_scan_rs_times_ms.get(i) +
_last_strong_code_root_scan_times_ms.get(i) +
+ _last_strong_code_root_mark_times_ms.get(i) +
_last_obj_copy_times_ms.get(i) +
_last_termination_times_ms.get(i);
@@ -298,6 +302,9 @@
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
}
+ if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
+ _last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
+ }
_last_update_rs_times_ms.print(2, "Update RS (ms)");
_last_update_rs_processed_buffers.print(3, "Processed Buffers");
_last_scan_rs_times_ms.print(2, "Scan RS (ms)");
@@ -315,6 +322,9 @@
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
}
+ if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
+ _last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
+ }
_last_update_rs_times_ms.print(1, "Update RS (ms)");
_last_update_rs_processed_buffers.print(2, "Processed Buffers");
_last_scan_rs_times_ms.print(1, "Scan RS (ms)");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -120,6 +120,7 @@
WorkerDataArray<int> _last_update_rs_processed_buffers;
WorkerDataArray<double> _last_scan_rs_times_ms;
WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
+ WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
WorkerDataArray<double> _last_obj_copy_times_ms;
WorkerDataArray<double> _last_termination_times_ms;
WorkerDataArray<size_t> _last_termination_attempts;
@@ -198,6 +199,10 @@
_last_strong_code_root_scan_times_ms.set(worker_i, ms);
}
+ void record_strong_code_root_mark_time(uint worker_i, double ms) {
+ _last_strong_code_root_mark_times_ms.set(worker_i, ms);
+ }
+
void record_obj_copy_time(uint worker_i, double ms) {
_last_obj_copy_times_ms.set(worker_i, ms);
}
@@ -364,6 +369,10 @@
return _last_strong_code_root_scan_times_ms.average();
}
+ double average_last_strong_code_root_mark_time(){
+ return _last_strong_code_root_mark_times_ms.average();
+ }
+
double average_last_obj_copy_time() {
return _last_obj_copy_times_ms.average();
}
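
The new per-worker timing value is recorded with the same measure-then-record pattern used for the other phase times (cf. the mark_strong_code_roots call site in g1CollectedHeap.cpp above). A minimal standalone model of that pattern, with illustrative names rather than HotSpot code:

    #include <chrono>
    #include <vector>

    // Minimal model of WorkerDataArray-style per-worker phase timing:
    // measure a phase with a wall-clock delta and record it per worker id.
    struct PhaseTimes {
        std::vector<double> code_root_mark_ms;
        explicit PhaseTimes(unsigned max_workers)
            : code_root_mark_ms(max_workers, 0.0) {}
        void record_strong_code_root_mark_time(unsigned worker, double ms) {
            code_root_mark_ms[worker] = ms;
        }
    };

    template <typename Phase>
    void timed_phase(PhaseTimes& pt, unsigned worker_id, Phase phase) {
        auto start = std::chrono::steady_clock::now();
        phase(worker_id);                               // do the phase work
        std::chrono::duration<double, std::milli> d =
            std::chrono::steady_clock::now() - start;
        pt.record_strong_code_root_mark_time(worker_id, d.count());
    }

As in the patch, phases that did no work leave a zero entry, which is why the print paths above guard on sum() > 0.0 before emitting the "Code Root Marking (ms)" line.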
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -129,15 +129,13 @@
SharedHeap* sh = SharedHeap::heap();
- // Need cleared claim bits for the roots processing
+ // Need cleared claim bits for the strong roots processing
ClassLoaderDataGraph::clear_claimed_marks();
- MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
- sh->process_strong_roots(true, // activate StrongRootsScope
- SharedHeap::SO_None,
+ sh->process_strong_roots(true, // activate StrongRootsScope
+ SharedHeap::SO_SystemClasses,
&GenMarkSweep::follow_root_closure,
- &GenMarkSweep::follow_cld_closure,
- &follow_code_closure);
+ &GenMarkSweep::follow_klass_closure);
// Process reference objects found during marking
ReferenceProcessor* rp = GenMarkSweep::ref_processor();
@@ -306,15 +304,13 @@
SharedHeap* sh = SharedHeap::heap();
- // Need cleared claim bits for the roots processing
+ // Need cleared claim bits for the strong roots processing
ClassLoaderDataGraph::clear_claimed_marks();
- CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
- sh->process_all_roots(true, // activate StrongRootsScope
- SharedHeap::SO_AllCodeCache,
- &GenMarkSweep::adjust_pointer_closure,
- &GenMarkSweep::adjust_cld_closure,
- &adjust_code_closure);
+ sh->process_strong_roots(true, // activate StrongRootsScope
+ SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
+ &GenMarkSweep::adjust_pointer_closure,
+ &GenMarkSweep::adjust_klass_closure);
assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -25,8 +25,6 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
-#include "memory/iterator.hpp"
-
class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
@@ -108,7 +106,7 @@
template <class T> void do_klass_barrier(T* p, oop new_obj);
};
-template <G1Barrier barrier, G1Mark do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
private:
template <class T> void do_oop_work(T* p);
@@ -123,19 +121,19 @@
template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-
- G1CollectedHeap* g1() { return _g1; };
- G1ParScanThreadState* pss() { return _par_scan_state; }
- ReferenceProcessor* rp() { return _ref_processor; };
};
-typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> G1ParScanAndMarkWeakExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
+
+
+typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
+
// We use a separate closure to handle references during evacuation
// failure processing.
-typedef G1ParCopyClosure<G1BarrierEvac, G1MarkNone> G1ParScanHeapEvacFailureClosure;
+typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
class FilterIntoCSClosure: public ExtendedOopClosure {
G1CollectedHeap* _g1;
@@ -166,11 +164,10 @@
};
// Closure for iterating over object fields during concurrent marking
-class G1CMOopClosure : public MetadataAwareOopClosure {
-protected:
- ConcurrentMark* _cm;
+class G1CMOopClosure : public ExtendedOopClosure {
private:
G1CollectedHeap* _g1h;
+ ConcurrentMark* _cm;
CMTask* _task;
public:
G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
@@ -180,7 +177,7 @@
};
// Closure to scan the root regions during concurrent marking
-class G1RootRegionScanClosure : public MetadataAwareOopClosure {
+class G1RootRegionScanClosure : public ExtendedOopClosure {
private:
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -32,7 +32,6 @@
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "memory/iterator.inline.hpp"
#include "runtime/prefetch.inline.hpp"
/*
@@ -109,6 +108,10 @@
template <class T>
inline void G1CMOopClosure::do_oop_nv(T* p) {
+ assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
+ assert(!_g1h->is_on_master_free_list(
+ _g1h->heap_region_containing((HeapWord*) p)), "invariant");
+
oop obj = oopDesc::load_decode_heap_oop(p);
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] we're looking at location "
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -30,21 +30,14 @@
// non-virtually, using a mechanism defined in this file. Extend these
// macros in the obvious way to add specializations for new closures.
+// Forward declarations.
enum G1Barrier {
G1BarrierNone,
G1BarrierEvac,
G1BarrierKlass
};
-enum G1Mark {
- G1MarkNone,
- G1MarkFromRoot,
- G1MarkPromotedFromRoot
-};
-
-// Forward declarations.
-
-template<G1Barrier barrier, G1Mark do_mark_object>
+template<G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure;
class G1ParScanClosure;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -400,6 +400,7 @@
// We always recreate the prev marking info and we'll explicitly
// mark all objects we find to be self-forwarded on the prev
// bitmap. So all objects need to be below PTAMS.
+ _prev_top_at_mark_start = top();
_prev_marked_bytes = 0;
if (during_initial_mark) {
@@ -423,7 +424,6 @@
assert(0 <= marked_bytes && marked_bytes <= used(),
err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
marked_bytes, used()));
- _prev_top_at_mark_start = top();
_prev_marked_bytes = marked_bytes;
}
@@ -905,8 +905,7 @@
size_t obj_size = block_size(p);
object_num += 1;
- if (is_humongous != g1->isHumongous(obj_size) &&
- !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
+ if (is_humongous != g1->isHumongous(obj_size)) {
gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
SIZE_FORMAT" words) in a %shumongous region",
p, g1->isHumongous(obj_size) ? "" : "non-",
@@ -917,9 +916,7 @@
// If it returns false, verify_for_object() will output the
// appropriate message.
- if (do_bot_verify &&
- !g1->is_obj_dead(obj, this) &&
- !_offsets.verify_for_object(p, obj_size)) {
+ if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
*failures = true;
return;
}
@@ -927,10 +924,7 @@
if (!g1->is_obj_dead_cond(obj, this, vo)) {
if (obj->is_oop()) {
Klass* klass = obj->klass();
- bool is_metaspace_object = Metaspace::contains(klass) ||
- (vo == VerifyOption_G1UsePrevMarking &&
- ClassLoaderDataGraph::unload_list_contains(klass));
- if (!is_metaspace_object) {
+ if (!klass->is_metaspace_object()) {
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
"not metadata", klass, (void *)obj);
*failures = true;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -247,9 +247,11 @@
bool _evacuation_failed;
// A heap region may be a member of one of a number of special subsets, each
- // represented as linked lists through the field below. Currently, there
- // is only one set:
+ // represented as linked lists through the field below. Currently, these
+ // sets include:
// The collection set.
+ // The set of allocation regions used in a collection pause.
+ // Spaces that may contain gray objects.
HeapRegion* _next_in_special_set;
// next region in the young "generation" region set
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -93,27 +93,18 @@
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- return !g1h->is_obj_dead(oop(p), this);
+ return p < top();
}
inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
- // Old regions' dead objects may have dead classes
- // We need to find the next live object in some other
- // manner than getting the oop size
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- if (g1h->is_obj_dead(oop(addr), this)) {
- HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
- getNextMarkedWordAddress(addr, prev_top_at_mark_start());
-
- assert(next > addr, "must get the next live object");
-
- return pointer_delta(next, addr);
- } else if (addr == top()) {
+ const HeapWord* current_top = top();
+ if (addr < current_top) {
+ return oop(addr)->size();
+ } else {
+ assert(addr == current_top, "just checking");
return pointer_delta(end(), addr);
}
- return oop(addr)->size();
}
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -931,10 +931,7 @@
void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
- assert_locked_or_safepoint(CodeCache_lock);
-
- _code_roots.remove_lock_free(nm);
-
+ _code_roots.remove(nm);
// Check that there were no duplicates
guarantee(!_code_roots.contains(nm), "duplicate entry found");
}
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -285,6 +285,37 @@
_par_closures[i] = par_closure;
}
+void SATBMarkQueueSet::iterate_closure_all_threads() {
+ for (JavaThread* t = Threads::first(); t; t = t->next()) {
+ t->satb_mark_queue().apply_closure_and_empty(_closure);
+ }
+ shared_satb_queue()->apply_closure_and_empty(_closure);
+}
+
+void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) {
+ SharedHeap* sh = SharedHeap::heap();
+ int parity = sh->strong_roots_parity();
+
+ for (JavaThread* t = Threads::first(); t; t = t->next()) {
+ if (t->claim_oops_do(true, parity)) {
+ t->satb_mark_queue().apply_closure_and_empty(_par_closures[worker]);
+ }
+ }
+
+ // We also need to claim the VMThread so that its parity is updated
+ // otherwise the next call to Thread::possibly_parallel_oops_do inside
+ // a StrongRootsScope might skip the VMThread because it has a stale
+ // parity that matches the parity set by the StrongRootsScope
+ //
+ // Whichever worker succeeds in claiming the VMThread gets to do
+ // the shared queue.
+
+ VMThread* vmt = VMThread::vm_thread();
+ if (vmt->claim_oops_do(true, parity)) {
+ shared_satb_queue()->apply_closure_and_empty(_par_closures[worker]);
+ }
+}
+
bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
uint worker) {
BufferNode* nd = NULL;
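The claiming above relies on the strong-roots parity: each JavaThread, and the
VMThread, is handed to exactly one GC worker per pass. A self-contained sketch
of the idea using std::atomic in place of HotSpot's internals (the type and the
simplified signature are hypothetical):

  #include <atomic>

  struct ThreadClaimSketch {
    std::atomic<int> _oops_do_parity;          // parity from the previous pass

    ThreadClaimSketch() : _oops_do_parity(0) {}

    // Returns true for exactly one claimant per parity epoch.
    bool claim_oops_do(int strong_roots_parity) {
      int seen = _oops_do_parity.load(std::memory_order_relaxed);
      if (seen == strong_roots_parity) {
        return false;                          // already claimed in this pass
      }
      return _oops_do_parity.compare_exchange_strong(seen, strong_roots_parity);
    }
  };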
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -33,9 +33,7 @@
// A ptrQueue whose elements are "oops", pointers to object heads.
class ObjPtrQueue: public PtrQueue {
- friend class Threads;
friend class SATBMarkQueueSet;
- friend class G1RemarkThreadsClosure;
private:
// Filter out unwanted entries from the buffer.
@@ -121,6 +119,13 @@
// closures, one for each parallel GC thread.
void set_par_closure(int i, ObjectClosure* closure);
+ // Apply the registered closure to all entries on each
+ // currently-active buffer and then empty the buffer. It should only
+ // be called serially and at a safepoint.
+ void iterate_closure_all_threads();
+ // Parallel version of the above.
+ void par_iterate_closure_all_threads(uint worker);
+
// If there exists some completed buffer, pop it, then apply the
// registered closure to all its elements, and return true. If no
// completed buffers exist, return false.
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -614,21 +614,18 @@
KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
gch->rem_set()->klass_rem_set());
- CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
- &par_scan_state.to_space_root_closure(),
- false);
+
+ int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
par_scan_state.start_strong_roots();
- gch->gen_process_roots(_gen->level(),
- true, // Process younger gens, if any,
- // as strong roots.
- false, // no scope; this is parallel code
- SharedHeap::SO_ScavengeCodeCache,
- GenCollectedHeap::StrongAndWeakRoots,
- &par_scan_state.to_space_root_closure(),
- &par_scan_state.older_gen_closure(),
- &cld_scan_closure);
-
+ gch->gen_process_strong_roots(_gen->level(),
+ true, // Process younger gens, if any,
+ // as strong roots.
+ false, // no scope; this is parallel code
+ SharedHeap::ScanningOption(so),
+ &par_scan_state.to_space_root_closure(),
+ &par_scan_state.older_gen_closure(),
+ &klass_scan_closure);
par_scan_state.end_strong_roots();
// "evacuate followers".
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -69,7 +69,7 @@
ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier
- // One of these two will be passed to process_roots, which will
+ // One of these two will be passed to process_strong_roots, which will
// set its generation. The first is for two-gen configs where the
// old gen collects the perm gen; the second is for arbitrary configs.
// The second isn't used right now (it used to be used for the train, an
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -59,7 +59,7 @@
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
- MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
+ CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
if (_java_thread != NULL)
_java_thread->oops_do(
@@ -100,7 +100,7 @@
case threads:
{
ResourceMark rm;
- MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
+ CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure);
Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob);
}
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -536,14 +536,14 @@
Universe::oops_do(mark_and_push_closure());
JNIHandles::oops_do(mark_and_push_closure()); // Global (strong) JNI handles
CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
- MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
+ CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
ObjectSynchronizer::oops_do(mark_and_push_closure());
FlatProfiler::oops_do(mark_and_push_closure());
Management::oops_do(mark_and_push_closure());
JvmtiExport::oops_do(mark_and_push_closure());
SystemDictionary::always_strong_oops_do(mark_and_push_closure());
- ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
+ ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
// Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
//CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
}
@@ -633,16 +633,16 @@
FlatProfiler::oops_do(adjust_pointer_closure());
Management::oops_do(adjust_pointer_closure());
JvmtiExport::oops_do(adjust_pointer_closure());
+ // SO_AllClasses
SystemDictionary::oops_do(adjust_pointer_closure());
- ClassLoaderDataGraph::cld_do(adjust_cld_closure());
+ ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
// Global (weak) JNI handles
JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
- CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
- CodeCache::blobs_do(&adjust_from_blobs);
+ CodeCache::oops_do(adjust_pointer_closure());
StringTable::oops_do(adjust_pointer_closure());
ref_processor()->weak_oops_do(adjust_pointer_closure());
PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -40,11 +40,11 @@
static CollectorCounters* _counters;
// Closure accessors
- static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; }
- static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
- static CLDClosure* follow_cld_closure() { return &MarkSweep::follow_cld_closure; }
- static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
- static CLDClosure* adjust_cld_closure() { return &MarkSweep::adjust_cld_closure; }
+ static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; }
+ static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; }
+ static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
+ static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
+ static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; }
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }
debug_only(public:) // Used for PSParallelCompact debugging
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -2474,6 +2474,7 @@
FlatProfiler::oops_do(adjust_pointer_closure());
Management::oops_do(adjust_pointer_closure());
JvmtiExport::oops_do(adjust_pointer_closure());
+ // SO_AllClasses
SystemDictionary::oops_do(adjust_pointer_closure());
ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
@@ -2482,8 +2483,7 @@
// Global (weak) JNI handles
JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
- CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
- CodeCache::blobs_do(&adjust_from_blobs);
+ CodeCache::oops_do(adjust_pointer_closure());
StringTable::oops_do(adjust_pointer_closure());
ref_processor()->weak_oops_do(adjust_pointer_closure());
// Roots were visited so references into the young gen in roots
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -100,7 +100,7 @@
case code_cache:
{
- MarkingCodeBlobClosure each_scavengable_code_blob(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
+ CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true);
CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
}
break;
@@ -123,7 +123,7 @@
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
PSScavengeRootsClosure roots_closure(pm);
CLDClosure* roots_from_clds = NULL; // Not needed. All CLDs are already visited.
- MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);
+ CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
if (_java_thread != NULL)
_java_thread->oops_do(&roots_closure, roots_from_clds, &roots_in_blobs);
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -54,14 +54,21 @@
void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
-CLDToOopClosure MarkSweep::follow_cld_closure(&mark_and_push_closure);
-CLDToOopClosure MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
+MarkSweep::FollowKlassClosure MarkSweep::follow_klass_closure;
+MarkSweep::AdjustKlassClosure MarkSweep::adjust_klass_closure;
void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); }
void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
+void MarkSweep::FollowKlassClosure::do_klass(Klass* klass) {
+ klass->oops_do(&MarkSweep::mark_and_push_closure);
+}
+void MarkSweep::AdjustKlassClosure::do_klass(Klass* klass) {
+ klass->oops_do(&MarkSweep::adjust_pointer_closure);
+}
+
void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
- MarkSweep::follow_cld_closure.do_cld(cld);
+ cld->oops_do(&MarkSweep::mark_and_push_closure, &MarkSweep::follow_klass_closure, true);
}
void MarkSweep::follow_stack() {
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -65,6 +65,17 @@
virtual void do_oop(narrowOop* p);
};
+ // The one and only place to start following the classes.
+ // Should only be applied to the ClassLoaderData klasses list.
+ class FollowKlassClosure : public KlassClosure {
+ public:
+ void do_klass(Klass* klass);
+ };
+ class AdjustKlassClosure : public KlassClosure {
+ public:
+ void do_klass(Klass* klass);
+ };
+
class FollowStackClosure: public VoidClosure {
public:
virtual void do_void();
@@ -133,10 +144,10 @@
static IsAliveClosure is_alive;
static FollowRootClosure follow_root_closure;
static MarkAndPushClosure mark_and_push_closure;
+ static FollowKlassClosure follow_klass_closure;
static FollowStackClosure follow_stack_closure;
- static CLDToOopClosure follow_cld_closure;
static AdjustPointerClosure adjust_pointer_closure;
- static CLDToOopClosure adjust_cld_closure;
+ static AdjustKlassClosure adjust_klass_closure;
// Accessors
static uint total_invocations() { return _total_invocations; }
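These closures are driven per ClassLoaderData, following the pattern restored
in markSweep.cpp above. A hedged sketch (the wrapper name is hypothetical; the
calls mirror MarkSweep::follow_class_loader):

  // Sketch: apply the marking closures to one class loader's data.
  static void follow_cld_sketch(ClassLoaderData* cld) {
    cld->oops_do(&MarkSweep::mark_and_push_closure,  // oops held by the CLD
                 &MarkSweep::follow_klass_closure,   // each Klass in the CLD
                 /*must_claim=*/ true);              // visit each CLD only once
  }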
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -195,43 +195,6 @@
gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}
-bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
-#if INCLUDE_ALL_GCS
- if (UseConcMarkSweepGC || UseG1GC) {
- if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
- MetaspaceGC::set_should_concurrent_collect(true);
- } else if (UseG1GC) {
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- g1h->g1_policy()->set_initiate_conc_mark_if_possible();
-
- GCCauseSetter x(g1h, _gc_cause);
-
- // At this point we are supposed to start a concurrent cycle. We
- // will do so if one is not already in progress.
- bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
-
- if (should_start) {
- double pause_target = g1h->g1_policy()->max_pause_time_ms();
- g1h->do_collection_pause_at_safepoint(pause_target);
- }
- }
-
- return true;
- }
-#endif
- return false;
-}
-
-static void log_metaspace_alloc_failure_for_concurrent_GC() {
- if (Verbose && PrintGCDetails) {
- if (UseConcMarkSweepGC) {
- gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
- } else if (UseG1GC) {
- gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
- }
- }
-}
-
void VM_CollectForMetadataAllocation::doit() {
SvcGCMarker sgcm(SvcGCMarker::FULL);
@@ -243,57 +206,54 @@
// a GC that freed space for the allocation.
if (!MetadataAllocationFailALot) {
_result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
- if (_result != NULL) {
- return;
+ }
+
+ if (_result == NULL) {
+ if (UseConcMarkSweepGC) {
+ if (CMSClassUnloadingEnabled) {
+ MetaspaceGC::set_should_concurrent_collect(true);
+ }
+ // For CMS, expand since the collection is going to be concurrent.
+ _result =
+ _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+ }
+ if (_result == NULL) {
+ // Don't clear the soft refs yet.
+ if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
+ gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
+ }
+ heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
+ // After a GC try to allocate without expanding. Could fail
+ // and expansion will be tried below.
+ _result =
+ _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+ }
+ if (_result == NULL) {
+ // If still failing, allow the Metaspace to expand.
+ // See delta_capacity_until_GC() for explanation of the
+ // amount of the expansion.
+ // This should work unless there really is no more space
+ // or a MaxMetaspaceSize has been specified on the command line.
+ _result =
+ _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+ if (_result == NULL) {
+ // If expansion failed, do a last-ditch collection and try allocating
+ // again. A last-ditch collection will clear softrefs. This
+ // behavior is similar to the last-ditch collection done for perm
+ // gen when it was full and a collection for failed allocation
+ // did not free perm gen space.
+ heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
+ _result =
+ _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+ }
+ }
+ if (Verbose && PrintGCDetails && _result == NULL) {
+ gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
+ SIZE_FORMAT, _size);
}
}
- if (initiate_concurrent_GC()) {
- // For CMS and G1 expand since the collection is going to be concurrent.
- _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
- if (_result != NULL) {
- return;
- }
-
- log_metaspace_alloc_failure_for_concurrent_GC();
- }
-
- // Don't clear the soft refs yet.
- heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
- // After a GC try to allocate without expanding. Could fail
- // and expansion will be tried below.
- _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
- if (_result != NULL) {
- return;
- }
-
- // If still failing, allow the Metaspace to expand.
- // See delta_capacity_until_GC() for explanation of the
- // amount of the expansion.
- // This should work unless there really is no more space
- // or a MaxMetaspaceSize has been specified on the command line.
- _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
- if (_result != NULL) {
- return;
- }
-
- // If expansion failed, do a last-ditch collection and try allocating
- // again. A last-ditch collection will clear softrefs. This
- // behavior is similar to the last-ditch collection done for perm
- // gen when it was full and a collection for failed allocation
- // did not free perm gen space.
- heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
- _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
- if (_result != NULL) {
- return;
- }
-
- if (Verbose && PrintGCDetails) {
- gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
- SIZE_FORMAT, _size);
- }
-
- if (GC_locker::is_active_and_needs_gc()) {
+ if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
set_gc_locked();
}
}
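The rewritten doit() above is a nested fallback ladder. Condensed, with
allocate() and expand_and_allocate() as shorthand for the metaspace calls on
_loader_data->metaspace_non_null() (the helper names are not the actual API):

  _result = allocate(_size, _mdtype);                              // 1. plain attempt
  if (_result == NULL && UseConcMarkSweepGC) {
    _result = expand_and_allocate(_size, _mdtype);                 // 2. expand; CMS collects concurrently
  }
  if (_result == NULL) {
    heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);   // 3. GC, then retry
    _result = allocate(_size, _mdtype);
  }
  if (_result == NULL) {
    _result = expand_and_allocate(_size, _mdtype);                 // 4. grow the metaspace
  }
  if (_result == NULL) {
    heap->collect_as_vm_thread(GCCause::_last_ditch_collection);   // 5. clears soft refs
    _result = allocate(_size, _mdtype);
  }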
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -217,8 +217,6 @@
virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
virtual void doit();
MetaWord* result() const { return _result; }
-
- bool initiate_concurrent_GC();
};
class SvcGCMarker : public StackObj {
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -429,7 +429,7 @@
OopsInGenClosure* cl,
CardTableRS* ct) {
if (!mr.is_empty()) {
- // Caller (process_roots()) claims that all GC threads
+ // Caller (process_strong_roots()) claims that all GC threads
// execute this call. With UseDynamicNumberOfGCThreads now all
// active GC threads execute this call. The number of active GC
// threads needs to be passed to par_non_clean_card_iterate_work()
@@ -438,7 +438,7 @@
// This is an example of where n_par_threads() is used instead
// of workers()->active_workers(). n_par_threads can be set to 0 to
// turn off parallelism. For example when this code is called as
- // part of verification and SharedHeap::process_roots() is being
+ // part of verification and SharedHeap::process_strong_roots() is being
// used, then n_par_threads() may have been set to 0. active_workers
// is not overloaded with the meaning that it is a switch to disable
// parallelism and so keeps the meaning of the number of
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -614,9 +614,6 @@
KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
gch->rem_set()->klass_rem_set());
- CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
- &fsc_with_no_gc_barrier,
- false);
set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
@@ -626,15 +623,16 @@
assert(gch->no_allocs_since_save_marks(0),
"save marks have not been newly set.");
- gch->gen_process_roots(_level,
- true, // Process younger gens, if any,
- // as strong roots.
- true, // activate StrongRootsScope
- SharedHeap::SO_ScavengeCodeCache,
- GenCollectedHeap::StrongAndWeakRoots,
- &fsc_with_no_gc_barrier,
- &fsc_with_gc_barrier,
- &cld_scan_closure);
+ int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
+
+ gch->gen_process_strong_roots(_level,
+ true, // Process younger gens, if any,
+ // as strong roots.
+ true, // activate StrongRootsScope
+ SharedHeap::ScanningOption(so),
+ &fsc_with_no_gc_barrier,
+ &fsc_with_gc_barrier,
+ &klass_scan_closure);
// "evacuate followers".
evacuate_followers.do_void();
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -61,8 +61,8 @@
GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
-// The set of potentially parallel tasks in root scanning.
-enum GCH_strong_roots_tasks {
+// The set of potentially parallel tasks in strong root scanning.
+enum GCH_process_strong_roots_tasks {
// We probably want to parallelize both of these internally, but for now...
GCH_PS_younger_gens,
// Leave this one last.
@@ -72,11 +72,11 @@
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
SharedHeap(policy),
_gen_policy(policy),
- _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
+ _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
_full_collections_completed(0)
{
- if (_gen_process_roots_tasks == NULL ||
- !_gen_process_roots_tasks->valid()) {
+ if (_gen_process_strong_tasks == NULL ||
+ !_gen_process_strong_tasks->valid()) {
vm_exit_during_initialization("Failed necessary allocation.");
}
assert(policy != NULL, "Sanity check");
@@ -584,29 +584,24 @@
void GenCollectedHeap::set_par_threads(uint t) {
SharedHeap::set_par_threads(t);
- _gen_process_roots_tasks->set_n_threads(t);
+ _gen_process_strong_tasks->set_n_threads(t);
}
void GenCollectedHeap::
-gen_process_roots(int level,
- bool younger_gens_as_roots,
- bool activate_scope,
- SharedHeap::ScanningOption so,
- OopsInGenClosure* not_older_gens,
- OopsInGenClosure* weak_roots,
- OopsInGenClosure* older_gens,
- CLDClosure* cld_closure,
- CLDClosure* weak_cld_closure,
- CodeBlobClosure* code_closure) {
+gen_process_strong_roots(int level,
+ bool younger_gens_as_roots,
+ bool activate_scope,
+ SharedHeap::ScanningOption so,
+ OopsInGenClosure* not_older_gens,
+ OopsInGenClosure* older_gens,
+ KlassClosure* klass_closure) {
+ // General strong roots.
- // General roots.
- SharedHeap::process_roots(activate_scope, so,
- not_older_gens, weak_roots,
- cld_closure, weak_cld_closure,
- code_closure);
+ SharedHeap::process_strong_roots(activate_scope, so,
+ not_older_gens, klass_closure);
if (younger_gens_as_roots) {
- if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+ if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
for (int i = 0; i < level; i++) {
not_older_gens->set_generation(_gens[i]);
_gens[i]->oop_iterate(not_older_gens);
@@ -622,38 +617,7 @@
older_gens->reset_generation();
}
- _gen_process_roots_tasks->all_tasks_completed();
-}
-
-void GenCollectedHeap::
-gen_process_roots(int level,
- bool younger_gens_as_roots,
- bool activate_scope,
- SharedHeap::ScanningOption so,
- bool only_strong_roots,
- OopsInGenClosure* not_older_gens,
- OopsInGenClosure* older_gens,
- CLDClosure* cld_closure) {
-
- const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
-
- bool is_moving_collection = false;
- if (level == 0 || is_adjust_phase) {
- // young collections are always moving
- is_moving_collection = true;
- }
-
- MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
- CodeBlobClosure* code_closure = &mark_code_closure;
-
- gen_process_roots(level,
- younger_gens_as_roots,
- activate_scope, so,
- not_older_gens, only_strong_roots ? NULL : not_older_gens,
- older_gens,
- cld_closure, only_strong_roots ? NULL : cld_closure,
- code_closure);
-
+ _gen_process_strong_tasks->all_tasks_completed();
}
void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -78,9 +78,9 @@
unsigned int _full_collections_completed;
// Data structure for claiming the (potentially) parallel tasks in
- // (gen-specific) roots processing.
- SubTasksDone* _gen_process_roots_tasks;
- SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }
+ // (gen-specific) strong roots processing.
+ SubTasksDone* _gen_process_strong_tasks;
+ SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; }
// In block contents verification, the number of header words to skip
NOT_PRODUCT(static size_t _skip_header_HeapWords;)
@@ -403,30 +403,18 @@
// The "so" argument determines which of the roots
// the closure is applied to:
// "SO_None" does none;
- private:
- void gen_process_roots(int level,
- bool younger_gens_as_roots,
- bool activate_scope,
- SharedHeap::ScanningOption so,
- OopsInGenClosure* not_older_gens,
- OopsInGenClosure* weak_roots,
- OopsInGenClosure* older_gens,
- CLDClosure* cld_closure,
- CLDClosure* weak_cld_closure,
- CodeBlobClosure* code_closure);
-
- public:
- static const bool StrongAndWeakRoots = false;
- static const bool StrongRootsOnly = true;
-
- void gen_process_roots(int level,
- bool younger_gens_as_roots,
- bool activate_scope,
- SharedHeap::ScanningOption so,
- bool only_strong_roots,
- OopsInGenClosure* not_older_gens,
- OopsInGenClosure* older_gens,
- CLDClosure* cld_closure);
+ // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
+ // "SO_SystemClasses" to all the "system" classes and loaders;
+ // "SO_Strings" applies the closure to all entries in the StringTable.
+ void gen_process_strong_roots(int level,
+ bool younger_gens_as_roots,
+ // The remaining arguments are in an order
+ // consistent with SharedHeap::process_strong_roots:
+ bool activate_scope,
+ SharedHeap::ScanningOption so,
+ OopsInGenClosure* not_older_gens,
+ OopsInGenClosure* older_gens,
+ KlassClosure* klass_closure);
// Apply "root_closure" to all the weak roots of the system.
// These include JNI weak roots, string table,
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -207,14 +207,13 @@
// Need new claim bits before marking starts.
ClassLoaderDataGraph::clear_claimed_marks();
- gch->gen_process_roots(level,
- false, // Younger gens are not roots.
- true, // activate StrongRootsScope
- SharedHeap::SO_None,
- GenCollectedHeap::StrongRootsOnly,
- &follow_root_closure,
- &follow_root_closure,
- &follow_cld_closure);
+ gch->gen_process_strong_roots(level,
+ false, // Younger gens are not roots.
+ true, // activate StrongRootsScope
+ SharedHeap::SO_SystemClasses,
+ &follow_root_closure,
+ &follow_root_closure,
+ &follow_klass_closure);
// Process reference objects found during marking
{
@@ -292,14 +291,13 @@
// are run.
adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
- gch->gen_process_roots(level,
- false, // Younger gens are not roots.
- true, // activate StrongRootsScope
- SharedHeap::SO_AllCodeCache,
- GenCollectedHeap::StrongAndWeakRoots,
- &adjust_pointer_closure,
- &adjust_pointer_closure,
- &adjust_cld_closure);
+ gch->gen_process_strong_roots(level,
+ false, // Younger gens are not roots.
+ true, // activate StrongRootsScope
+ SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
+ &adjust_pointer_closure,
+ &adjust_pointer_closure,
+ &adjust_klass_closure);
gch->gen_process_weak_roots(&adjust_pointer_closure);
--- a/hotspot/src/share/vm/memory/iterator.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/memory/iterator.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -35,10 +35,6 @@
cld->oops_do(_oop_closure, &_klass_closure, _must_claim_cld);
}
-void CLDToKlassAndOopClosure::do_cld(ClassLoaderData* cld) {
- cld->oops_do(_oop_closure, _klass_closure, _must_claim_cld);
-}
-
void ObjectToOopClosure::do_object(oop obj) {
obj->oop_iterate(_cl);
}
@@ -47,20 +43,6 @@
ShouldNotCallThis();
}
-void CodeBlobToOopClosure::do_nmethod(nmethod* nm) {
- nm->oops_do(_cl);
- if (_fix_relocations) {
- nm->fix_oop_relocations();
- }
-}
-
-void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
- nmethod* nm = cb->as_nmethod_or_null();
- if (nm != NULL) {
- do_nmethod(nm);
- }
-}
-
MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
: _active(activate)
{
@@ -73,7 +55,32 @@
void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
- if (nm != NULL && !nm->test_set_oops_do_mark()) {
- do_nmethod(nm);
+ if (nm == NULL) return;
+ if (!nm->test_set_oops_do_mark()) {
+ NOT_PRODUCT(if (TraceScavenge) nm->print_on(tty, "oops_do, 1st visit\n"));
+ do_newly_marked_nmethod(nm);
+ } else {
+ NOT_PRODUCT(if (TraceScavenge) nm->print_on(tty, "oops_do, skipped on 2nd visit\n"));
}
}
+
+void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
+ nm->oops_do(_cl, /*allow_zombie=*/ false);
+}
+
+void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
+ if (!_do_marking) {
+ nmethod* nm = cb->as_nmethod_or_null();
+ NOT_PRODUCT(if (TraceScavenge && Verbose && nm != NULL) nm->print_on(tty, "oops_do, unmarked visit\n"));
+ // This assert won't work, since there are lots of mini-passes
+ // (mostly in debug mode) that co-exist with marking phases.
+ //assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
+ if (nm != NULL) {
+ nm->oops_do(_cl);
+ }
+ } else {
+ MarkingCodeBlobClosure::do_code_blob(cb);
+ }
+}
--- a/hotspot/src/share/vm/memory/iterator.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/memory/iterator.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -70,8 +70,8 @@
//
// Providing default implementations of the _nv functions unfortunately
// removes the compile-time safeness, but reduces the clutter for the
- // ExtendedOopClosures that don't need to walk the metadata.
- // Currently, only CMS and G1 need these.
+ // ExtendedOopClosures that don't need to walk the metadata. Currently,
+ // only CMS needs these.
virtual bool do_metadata() { return do_metadata_nv(); }
bool do_metadata_v() { return do_metadata(); }
@@ -126,16 +126,15 @@
_oop_closure = oop_closure;
}
- public:
+public:
KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}
-
virtual void do_klass(Klass* k);
};
class CLDToOopClosure : public CLDClosure {
- OopClosure* _oop_closure;
+ OopClosure* _oop_closure;
KlassToOopClosure _klass_closure;
- bool _must_claim_cld;
+ bool _must_claim_cld;
public:
CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
@@ -146,23 +145,6 @@
void do_cld(ClassLoaderData* cld);
};
-class CLDToKlassAndOopClosure : public CLDClosure {
- friend class SharedHeap;
- friend class G1CollectedHeap;
- protected:
- OopClosure* _oop_closure;
- KlassClosure* _klass_closure;
- bool _must_claim_cld;
- public:
- CLDToKlassAndOopClosure(KlassClosure* klass_closure,
- OopClosure* oop_closure,
- bool must_claim_cld) :
- _oop_closure(oop_closure),
- _klass_closure(klass_closure),
- _must_claim_cld(must_claim_cld) {}
- void do_cld(ClassLoaderData* cld);
-};
-
// The base class for all concurrent marking closures,
// that participates in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
@@ -264,26 +246,14 @@
virtual void do_code_blob(CodeBlob* cb) = 0;
};
-// Applies an oop closure to all ref fields in code blobs
-// iterated over in an object iteration.
-class CodeBlobToOopClosure : public CodeBlobClosure {
- OopClosure* _cl;
- bool _fix_relocations;
- protected:
- void do_nmethod(nmethod* nm);
+
+class MarkingCodeBlobClosure : public CodeBlobClosure {
public:
- CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
- virtual void do_code_blob(CodeBlob* cb);
-
- const static bool FixRelocations = true;
-};
-
-class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
- public:
- MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
// Called for each code blob, but at most once per unique blob.
+ virtual void do_newly_marked_nmethod(nmethod* nm) = 0;
virtual void do_code_blob(CodeBlob* cb);
+ // = { if (!nmethod(cb)->test_set_oops_do_mark()) do_newly_marked_nmethod(cb); }
class MarkScope : public StackObj {
protected:
@@ -296,6 +266,23 @@
};
};
+
+// Applies an oop closure to all ref fields in code blobs
+// iterated over in an object iteration.
+class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
+ OopClosure* _cl;
+ bool _do_marking;
+public:
+ virtual void do_newly_marked_nmethod(nmethod* cb);
+ // = { cb->oops_do(_cl); }
+ virtual void do_code_blob(CodeBlob* cb);
+ // = { if (_do_marking) super::do_code_blob(cb); else cb->oops_do(_cl); }
+ CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
+ : _cl(cl), _do_marking(do_marking) {}
+};
+
// MonitorClosure is used for iterating over monitors in the monitors cache
class ObjectMonitor;
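A hedged usage sketch of the restored two-argument CodeBlobToOopClosure
(my_oop_closure is a placeholder): with do_marking true, each nmethod is
visited at most once per pass via test_set_oops_do_mark(); with false, every
blob is visited unconditionally.

  CodeBlobToOopClosure marked_once(&my_oop_closure, /*do_marking=*/ true);
  CodeBlobToOopClosure every_visit(&my_oop_closure, /*do_marking=*/ false);
  CodeCache::scavenge_root_nmethods_do(&marked_once);  // as in psTasks.cpp above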
--- a/hotspot/src/share/vm/memory/metadataFactory.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/memory/metadataFactory.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_MEMORY_METADATAFACTORY_HPP
#define SHARE_VM_MEMORY_METADATAFACTORY_HPP
-#include "classfile/classLoaderData.hpp"
#include "utilities/array.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -29,7 +29,6 @@
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "services/management.hpp"
@@ -40,8 +39,8 @@
SharedHeap* SharedHeap::_sh;
-// The set of potentially parallel tasks in root scanning.
-enum SH_process_roots_tasks {
+// The set of potentially parallel tasks in strong root scanning.
+enum SH_process_strong_roots_tasks {
SH_PS_Universe_oops_do,
SH_PS_JNIHandles_oops_do,
SH_PS_ObjectSynchronizer_oops_do,
@@ -59,7 +58,6 @@
CollectedHeap(),
_collector_policy(policy_),
_rem_set(NULL),
- _strong_roots_scope(NULL),
_strong_roots_parity(0),
_process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
_workers(NULL)
@@ -116,19 +114,6 @@
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif
-SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
- return _strong_roots_scope;
-}
-void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
- assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
- assert(scope != NULL, "Illegal argument");
- _strong_roots_scope = scope;
-}
-void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
- assert(_strong_roots_scope == scope, "Wrong scope unregistered");
- _strong_roots_scope = NULL;
-}
-
void SharedHeap::change_strong_roots_parity() {
// Also set the new collection parity.
assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
@@ -139,161 +124,112 @@
"Not in range.");
}
-SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
- : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
+SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
+ : MarkScope(activate)
{
if (_active) {
- _sh->register_strong_roots_scope(this);
- _sh->change_strong_roots_parity();
+ outer->change_strong_roots_parity();
// Zero the claimed high water mark in the StringTable
StringTable::clear_parallel_claimed_index();
}
}
SharedHeap::StrongRootsScope::~StrongRootsScope() {
- if (_active) {
- _sh->unregister_strong_roots_scope(this);
- }
-}
-
-Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
-
-void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
- // The Thread work barrier is only needed by G1.
- // No need to use the barrier if this is single-threaded code.
- if (UseG1GC && n_workers > 0) {
- uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
- if (new_value == n_workers) {
- // This thread is last. Notify the others.
- MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
- _lock->notify_all();
- }
- }
+ // nothing to do
}
-void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
- // No need to use the barrier if this is single-threaded code.
- if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
- MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
- while ((uint)_n_workers_done_with_threads != n_workers) {
- _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
- }
- }
-}
-
-void SharedHeap::process_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* strong_roots,
- OopClosure* weak_roots,
- CLDClosure* strong_cld_closure,
- CLDClosure* weak_cld_closure,
- CodeBlobClosure* code_roots) {
+void SharedHeap::process_strong_roots(bool activate_scope,
+ ScanningOption so,
+ OopClosure* roots,
+ KlassClosure* klass_closure) {
StrongRootsScope srs(this, activate_scope);
- // General roots.
+ // General strong roots.
assert(_strong_roots_parity != 0, "must have called prologue code");
- assert(code_roots != NULL, "code root closure should always be set");
// _n_termination for _process_strong_tasks should be set upstream
// in a method not running in a GC worker. Otherwise the GC worker
// could be trying to change the termination condition while the task
// is executing in another GC worker.
-
- // Iterating over the CLDG and the Threads are done early to allow G1 to
- // first process the strong CLDs and nmethods and then, after a barrier,
- // let the thread process the weak CLDs and nmethods.
-
- if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
- ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
- }
-
- // Some CLDs contained in the thread frames should be considered strong.
- // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
- CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
- // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
- CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
-
- Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
-
- // This is the point where this worker thread will not find more strong CLDs/nmethods.
- // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
- active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
-
if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
- Universe::oops_do(strong_roots);
+ Universe::oops_do(roots);
}
// Global (strong) JNI handles
if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
- JNIHandles::oops_do(strong_roots);
+ JNIHandles::oops_do(roots);
+
+ CodeBlobToOopClosure code_roots(roots, true);
+
+ CLDToOopClosure roots_from_clds(roots);
+ // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
+ // CLDs which are strongly reachable from the thread stacks.
+ CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
+ // All threads execute this; the individual threads are task groups.
+ if (CollectedHeap::use_parallel_gc_threads()) {
+ Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, &code_roots);
+ } else {
+ Threads::oops_do(roots, roots_from_clds_p, &code_roots);
+ }
if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
- ObjectSynchronizer::oops_do(strong_roots);
+ ObjectSynchronizer::oops_do(roots);
if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
- FlatProfiler::oops_do(strong_roots);
+ FlatProfiler::oops_do(roots);
if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
- Management::oops_do(strong_roots);
+ Management::oops_do(roots);
if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
- JvmtiExport::oops_do(strong_roots);
+ JvmtiExport::oops_do(roots);
if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
- SystemDictionary::roots_oops_do(strong_roots, weak_roots);
+ if (so & SO_AllClasses) {
+ SystemDictionary::oops_do(roots);
+ } else if (so & SO_SystemClasses) {
+ SystemDictionary::always_strong_oops_do(roots);
+ } else {
+ fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
+ }
+ }
+
+ if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
+ if (so & SO_AllClasses) {
+ ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false);
+ } else if (so & SO_SystemClasses) {
+ ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true);
+ }
}
// All threads execute the following. Specific chunks of buckets
// from the StringTable are the individual tasks.
- if (weak_roots != NULL) {
+ if (so & SO_Strings) {
if (CollectedHeap::use_parallel_gc_threads()) {
- StringTable::possibly_parallel_oops_do(weak_roots);
+ StringTable::possibly_parallel_oops_do(roots);
} else {
- StringTable::oops_do(weak_roots);
+ StringTable::oops_do(roots);
}
}
if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
if (so & SO_ScavengeCodeCache) {
- assert(code_roots != NULL, "must supply closure for code cache");
+ // code_roots is a stack-allocated closure here, so a NULL check is unnecessary.
// We only visit parts of the CodeCache when scavenging.
- CodeCache::scavenge_root_nmethods_do(code_roots);
+ CodeCache::scavenge_root_nmethods_do(&code_roots);
}
if (so & SO_AllCodeCache) {
- assert(code_roots != NULL, "must supply closure for code cache");
+ // code_roots is a stack-allocated closure here, so a NULL check is unnecessary.
// CMSCollector uses this to do intermediate-strength collections.
// We scan the entire code cache, since CodeCache::do_unloading is not called.
- CodeCache::blobs_do(code_roots);
+ CodeCache::blobs_do(&code_roots);
}
// Verify that the code cache contents are not subject to
// movement by a scavenging collection.
- DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
+ DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}
_process_strong_tasks->all_tasks_completed();
}
-void SharedHeap::process_all_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* roots,
- CLDClosure* cld_closure,
- CodeBlobClosure* code_closure) {
- process_roots(activate_scope, so,
- roots, roots,
- cld_closure, cld_closure,
- code_closure);
-}
-
-void SharedHeap::process_strong_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* roots,
- CLDClosure* cld_closure,
- CodeBlobClosure* code_closure) {
- process_roots(activate_scope, so,
- roots, NULL,
- cld_closure, NULL,
- code_closure);
-}
-
-
class AlwaysTrueClosure: public BoolObjectClosure {
public:
bool do_object_b(oop p) { return true; }
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/memory/sharedHeap.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -69,10 +69,14 @@
// number of active GC workers. CompactibleFreeListSpace and Space
// have SequentialSubTasksDone's.
// Example of using SubTasksDone and SequentialSubTasksDone
-// G1CollectedHeap::g1_process_roots()
-// to SharedHeap::process_roots() and uses
+// G1CollectedHeap::g1_process_strong_roots() calls
+// process_strong_roots(false, // no scoping; this is parallel code
+// is_scavenging, so,
+// &buf_scan_non_heap_roots,
+// &eager_scan_code_roots);
+// which delegates to SharedHeap::process_strong_roots() and uses
// SubTasksDone* _process_strong_tasks to claim tasks.
-// process_roots() calls
+// process_strong_roots() calls
// rem_set()->younger_refs_iterate()
// to scan the card table and which eventually calls down into
// CardTableModRefBS::par_non_clean_card_iterate_work(). This method
@@ -178,12 +182,12 @@
// task. (This also means that a parallel thread may only call
// process_strong_roots once.)
//
- // For calls to process_roots by sequential code, the parity is
+ // For calls to process_strong_roots by sequential code, the parity is
// updated automatically.
//
// The idea is that objects representing fine-grained tasks, such as
// threads, will contain a "parity" field. A task will be claimed in the
- // current "process_roots" call only if its parity field is the
+ // current "process_strong_roots" call only if its parity field is the
// same as the "strong_roots_parity"; task claiming is accomplished by
// updating the parity field to the strong_roots_parity with a CAS.
//
@@ -194,44 +198,27 @@
// c) to never return a distinguished value (zero) with which such
// task-claiming variables may be initialized, to indicate "never
// claimed".
+ private:
+ void change_strong_roots_parity();
public:
int strong_roots_parity() { return _strong_roots_parity; }
- // Call these in sequential code around process_roots.
+ // Call these in sequential code around process_strong_roots.
// strong_roots_prologue calls change_strong_roots_parity, if
// parallel tasks are enabled.
class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
- // Used to implement the Thread work barrier.
- static Monitor* _lock;
-
- SharedHeap* _sh;
- volatile jint _n_workers_done_with_threads;
-
- public:
- StrongRootsScope(SharedHeap* heap, bool activate = true);
+ public:
+ StrongRootsScope(SharedHeap* outer, bool activate = true);
~StrongRootsScope();
-
- // Mark that this thread is done with the Threads work.
- void mark_worker_done_with_threads(uint n_workers);
- // Wait until all n_workers are done with the Threads work.
- void wait_until_all_workers_done_with_threads(uint n_workers);
};
friend class StrongRootsScope;
- // The current active StrongRootScope
- StrongRootsScope* _strong_roots_scope;
-
- StrongRootsScope* active_strong_roots_scope() const;
-
- private:
- void register_strong_roots_scope(StrongRootsScope* scope);
- void unregister_strong_roots_scope(StrongRootsScope* scope);
- void change_strong_roots_parity();
-
- public:
enum ScanningOption {
- SO_None = 0x0,
- SO_AllCodeCache = 0x8,
+ SO_None = 0x0,
+ SO_AllClasses = 0x1,
+ SO_SystemClasses = 0x2,
+ SO_Strings = 0x4,
+ SO_AllCodeCache = 0x8,
SO_ScavengeCodeCache = 0x10
};
@@ -240,26 +227,15 @@
// Invoke the "do_oop" method the closure "roots" on all root locations.
// The "so" argument determines which roots the closure is applied to:
// "SO_None" does none;
+ // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
+ // "SO_SystemClasses" to all the "system" classes and loaders;
+ // "SO_Strings" applies the closure to all entries in StringTable;
// "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
// "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
- void process_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* strong_roots,
- OopClosure* weak_roots,
- CLDClosure* strong_cld_closure,
- CLDClosure* weak_cld_closure,
- CodeBlobClosure* code_roots);
- void process_all_roots(bool activate_scope,
- ScanningOption so,
- OopClosure* roots,
- CLDClosure* cld_closure,
- CodeBlobClosure* code_roots);
void process_strong_roots(bool activate_scope,
ScanningOption so,
OopClosure* roots,
- CLDClosure* cld_closure,
- CodeBlobClosure* code_roots);
-
+ KlassClosure* klass_closure);
// Apply "root_closure" to the JNI weak roots..
void process_weak_roots(OopClosure* root_closure);
@@ -275,7 +251,7 @@
virtual void gc_epilogue(bool full) = 0;
// Sets the number of parallel threads that will be doing tasks
- // (such as process roots) subsequently.
+ // (such as process strong roots) subsequently.
virtual void set_par_threads(uint t);
int n_termination();
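Since the ScanningOption values are bit flags, callers or together a mask and
cast it back to the enum type, as the defNewGeneration and parNewGeneration
hunks above do. A minimal sketch (root_closure and klass_closure are
placeholders):

  int so = SharedHeap::SO_AllClasses
         | SharedHeap::SO_Strings
         | SharedHeap::SO_ScavengeCodeCache;
  heap->process_strong_roots(true,  // activate StrongRootsScope
                             SharedHeap::ScanningOption(so),
                             &root_closure,
                             &klass_closure);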
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -245,7 +245,6 @@
set_static_oop_field_count(0);
set_nonstatic_field_size(0);
set_is_marked_dependent(false);
- set_has_unloaded_dependent(false);
set_init_state(InstanceKlass::allocated);
set_init_thread(NULL);
set_reference_type(rt);
@@ -1802,9 +1801,6 @@
return id;
}
-int nmethodBucket::decrement() {
- return Atomic::add(-1, (volatile int *)&_count);
-}
//
// Walk the list of dependent nmethods searching for nmethods which
@@ -1819,7 +1815,7 @@
nmethod* nm = b->get_nmethod();
// since dependencies aren't removed until an nmethod becomes a zombie,
// the dependency list may contain nmethods which aren't alive.
- if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
+ if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
if (TraceDependencies) {
ResourceMark rm;
tty->print_cr("Marked for deoptimization");
@@ -1836,43 +1832,6 @@
return found;
}
-void InstanceKlass::clean_dependent_nmethods() {
- assert_locked_or_safepoint(CodeCache_lock);
-
- if (has_unloaded_dependent()) {
- nmethodBucket* b = _dependencies;
- nmethodBucket* last = NULL;
- while (b != NULL) {
- assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
-
- nmethodBucket* next = b->next();
-
- if (b->count() == 0) {
- if (last == NULL) {
- _dependencies = next;
- } else {
- last->set_next(next);
- }
- delete b;
- // last stays the same.
- } else {
- last = b;
- }
-
- b = next;
- }
- set_has_unloaded_dependent(false);
- }
-#ifdef ASSERT
- else {
- // Verification
- for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
- assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
- assert(b->count() != 0, "empty buckets need to be cleaned");
- }
- }
-#endif
-}
//
// Add an nmethodBucket to the list of dependencies for this nmethod.
@@ -1907,10 +1866,13 @@
nmethodBucket* last = NULL;
while (b != NULL) {
if (nm == b->get_nmethod()) {
- int val = b->decrement();
- guarantee(val >= 0, err_msg("Underflow: %d", val));
- if (val == 0) {
- set_has_unloaded_dependent(true);
+ if (b->decrement() == 0) {
+ if (last == NULL) {
+ _dependencies = b->next();
+ } else {
+ last->set_next(b->next());
+ }
+ delete b;
}
return;
}
@@ -1949,11 +1911,6 @@
nmethodBucket* b = _dependencies;
while (b != NULL) {
if (nm == b->get_nmethod()) {
-#ifdef ASSERT
- int count = b->count();
- assert(count >= 0, "Just check if we ever get here 1");
- assert(count > 0, "Just check if we ever get here 2");
-#endif
return true;
}
b = b->next();
@@ -2252,7 +2209,7 @@
#endif // INCLUDE_ALL_GCS
void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
- assert(class_loader_data()->is_alive(is_alive), "this klass should be live");
+ assert(is_loader_alive(is_alive), "this klass should be live");
if (is_interface()) {
if (ClassUnloading) {
Klass* impl = implementor();
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -197,7 +197,6 @@
// _is_marked_dependent can be set concurrently, thus cannot be part of the
// _misc_flags.
bool _is_marked_dependent; // used for marking during flushing and deoptimization
- bool _has_unloaded_dependent;
enum {
_misc_rewritten = 1 << 0, // methods rewritten.
@@ -445,9 +444,6 @@
bool is_marked_dependent() const { return _is_marked_dependent; }
void set_is_marked_dependent(bool value) { _is_marked_dependent = value; }
- bool has_unloaded_dependent() const { return _has_unloaded_dependent; }
- void set_has_unloaded_dependent(bool value) { _has_unloaded_dependent = value; }
-
// initialization (virtuals from Klass)
bool should_be_initialized() const; // means that initialize should be called
void initialize(TRAPS);
@@ -926,7 +922,6 @@
void clean_implementors_list(BoolObjectClosure* is_alive);
void clean_method_data(BoolObjectClosure* is_alive);
- void clean_dependent_nmethods();
// Explicit metaspace deallocation of fields
// For RedefineClasses and class file parsing errors, we need to deallocate
@@ -1215,7 +1210,7 @@
}
int count() { return _count; }
int increment() { _count += 1; return _count; }
- int decrement();
+ int decrement() { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
nmethodBucket* next() { return _next; }
void set_next(nmethodBucket* b) { _next = b; }
nmethod* get_nmethod() { return _nmethod; }
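With decrement() inlined above, remove_dependent_nmethod unlinks a bucket as
soon as its count drops to zero. A sketch of the full loop shape, including the
traversal tail that the hunk context elides (last advances only when nothing is
removed):

  nmethodBucket* b = _dependencies;
  nmethodBucket* last = NULL;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      if (b->decrement() == 0) {
        if (last == NULL) {
          _dependencies = b->next();  // unlink the head bucket
        } else {
          last->set_next(b->next());  // unlink an interior bucket
        }
        delete b;
      }
      return;
    }
    last = b;
    b = b->next();
  }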
--- a/hotspot/src/share/vm/oops/klass.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/oops/klass.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -42,7 +42,6 @@
#include "utilities/stack.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
-#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
@@ -160,12 +159,7 @@
_primary_supers[0] = k;
set_super_check_offset(in_bytes(primary_supers_offset()));
- // The constructor is used from init_self_patching_vtbl_list,
- // which doesn't zero out the memory before calling the constructor.
- // Need to set the field explicitly to not hit an assert that the field
- // should be NULL before setting it.
- _java_mirror = NULL;
-
+ set_java_mirror(NULL);
set_modifier_flags(0);
set_layout_helper(Klass::_lh_neutral_value);
set_name(NULL);
@@ -389,7 +383,7 @@
return mirror_alive;
}
-void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses) {
+void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) {
if (!ClassUnloading) {
return;
}
@@ -434,7 +428,7 @@
}
// Clean the implementors list and method data.
- if (clean_alive_klasses && current->oop_is_instance()) {
+ if (current->oop_is_instance()) {
InstanceKlass* ik = InstanceKlass::cast(current);
ik->clean_implementors_list(is_alive);
ik->clean_method_data(is_alive);
@@ -446,18 +440,12 @@
record_modified_oops();
}
-// This barrier is used by G1 to remember the old oop values, so
-// that we don't forget any objects that were live at the snapshot at
-// the beginning. This function is only used when we write oops into Klasses.
-void Klass::klass_update_barrier_set_pre(oop* p, oop v) {
-#if INCLUDE_ALL_GCS
- if (UseG1GC) {
- oop obj = *p;
- if (obj != NULL) {
- G1SATBCardTableModRefBS::enqueue(obj);
- }
- }
-#endif
+void Klass::klass_update_barrier_set_pre(void* p, oop v) {
+ // This barrier is used by G1 to remember the old oop values, so
+ // that we don't forget any objects that were live at the snapshot
+ // at the beginning. This function is only used when we write oops
+ // into Klasses. Since the Klasses are used as roots in G1, we don't
+ // have to do anything here.
}
void Klass::klass_oop_store(oop* p, oop v) {
@@ -468,7 +456,7 @@
if (always_do_update_barrier) {
klass_oop_store((volatile oop*)p, v);
} else {
- klass_update_barrier_set_pre(p, v);
+ klass_update_barrier_set_pre((void*)p, v);
*p = v;
klass_update_barrier_set(v);
}
@@ -478,7 +466,7 @@
assert(!Universe::heap()->is_in_reserved((void*)p), "Should store pointer into metadata");
assert(v == NULL || Universe::heap()->is_in_reserved((void*)v), "Should store pointer to an object");
- klass_update_barrier_set_pre((oop*)p, v); // Cast away volatile.
+ klass_update_barrier_set_pre((void*)p, v);
OrderAccess::release_store_ptr(p, v);
klass_update_barrier_set(v);
}
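The klass.cpp hunks above reduce the G1 pre-barrier to a no-op while keeping the call site in place, so the store path retains its pre-barrier/store/post-barrier shape. A reduced sketch of that shape, with stand-in barrier functions instead of the real Klass hooks:

    // Stand-ins for the GC barrier hooks; the real ones are
    // Klass::klass_update_barrier_set_pre and klass_update_barrier_set.
    static void barrier_pre(void* slot)   { (void)slot;  /* no-op while Klasses are strong roots */ }
    static void barrier_post(void* value) { (void)value; /* e.g. card-mark for the new value */ }

    // The pre/store/post shape of Klass::klass_oop_store, reduced to plain
    // pointers: remember the old value first if the collector needs it,
    // perform the store, then publish the new value to the collector.
    static void oop_store_like(void** slot, void* value) {
      barrier_pre(slot);
      *slot = value;
      barrier_post(value);
    }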
--- a/hotspot/src/share/vm/oops/klass.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/oops/klass.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -553,10 +553,7 @@
// The is_alive closure passed in depends on the Garbage Collector used.
bool is_loader_alive(BoolObjectClosure* is_alive);
- static void clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses = true);
- static void clean_subklass_tree(BoolObjectClosure* is_alive) {
- clean_weak_klass_links(is_alive, false /* clean_alive_klasses */);
- }
+ static void clean_weak_klass_links(BoolObjectClosure* is_alive);
// iterators
virtual int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) = 0;
@@ -663,7 +660,7 @@
private:
// barriers used by klass_oop_store
void klass_update_barrier_set(oop v);
- void klass_update_barrier_set_pre(oop* p, oop v);
+ void klass_update_barrier_set_pre(void* p, oop v);
};
#endif // SHARE_VM_OOPS_KLASS_HPP
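For context, clean_weak_klass_links() (whose extra clean_alive_klasses parameter the hunk above removes) walks the subklass/sibling links and prunes weak references held by instance klasses. A hedged sketch of that walk, with illustrative types rather than the real Klass hierarchy:

    #include <cstddef>

    struct LiveOracle { virtual bool is_alive(void* k) = 0; };

    struct KlassNode {
      KlassNode* subklass;
      KlassNode* next_sibling;
      bool is_instance_klass;
      void clean_implementors_list(LiveOracle*) { /* prune dead implementors */ }
      void clean_method_data(LiveOracle*)       { /* prune dead profile rows */ }
    };

    static void clean_weak_links(KlassNode* k, LiveOracle* is_alive) {
      for (; k != NULL; k = k->next_sibling) {
        if (k->is_instance_klass) {              // only instance klasses carry these
          k->clean_implementors_list(is_alive);
          k->clean_method_data(is_alive);
        }
        clean_weak_links(k->subklass, is_alive); // recurse into the subklass tree
      }
    }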
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -3019,7 +3019,7 @@
// If there are any non-perm roots in the code cache, visit them.
blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
- CodeBlobToOopClosure look_in_blobs(&blk, !CodeBlobToOopClosure::FixRelocations);
+ CodeBlobToOopClosure look_in_blobs(&blk, false);
CodeCache::scavenge_root_nmethods_do(&look_in_blobs);
return true;
--- a/hotspot/src/share/vm/prims/whitebox.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
-#include "memory/metadataFactory.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
@@ -39,7 +38,6 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
-#include "utilities/array.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#include "utilities/exceptions.hpp"
@@ -728,6 +726,7 @@
return result;
WB_END
+
WB_ENTRY(jlong, WB_GetThreadStackSize(JNIEnv* env, jobject o))
return (jlong) Thread::current()->stack_size();
WB_END
@@ -737,35 +736,6 @@
return (jlong) t->stack_available(os::current_stack_pointer()) - (jlong) StackShadowPages * os::vm_page_size();
WB_END
-int WhiteBox::array_bytes_to_length(size_t bytes) {
- return Array<u1>::bytes_to_length(bytes);
-}
-
-WB_ENTRY(jlong, WB_AllocateMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong size))
- if (size < 0) {
- THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
- err_msg("WB_AllocateMetaspace: size is negative: " JLONG_FORMAT, size));
- }
-
- oop class_loader_oop = JNIHandles::resolve(class_loader);
- ClassLoaderData* cld = class_loader_oop != NULL
- ? java_lang_ClassLoader::loader_data(class_loader_oop)
- : ClassLoaderData::the_null_class_loader_data();
-
- void* metadata = MetadataFactory::new_writeable_array<u1>(cld, WhiteBox::array_bytes_to_length((size_t)size), thread);
-
- return (jlong)(uintptr_t)metadata;
-WB_END
-
-WB_ENTRY(void, WB_FreeMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong addr, jlong size))
- oop class_loader_oop = JNIHandles::resolve(class_loader);
- ClassLoaderData* cld = class_loader_oop != NULL
- ? java_lang_ClassLoader::loader_data(class_loader_oop)
- : ClassLoaderData::the_null_class_loader_data();
-
- MetadataFactory::free_array(cld, (Array<u1>*)(uintptr_t)addr);
-WB_END
-
//Some convenience methods to deal with objects from java
int WhiteBox::offset_for_field(const char* field_name, oop object,
Symbol* signature_symbol) {
@@ -896,10 +866,6 @@
{CC"isInStringTable", CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable },
{CC"fullGC", CC"()V", (void*)&WB_FullGC },
{CC"readReservedMemory", CC"()V", (void*)&WB_ReadReservedMemory },
- {CC"allocateMetaspace",
- CC"(Ljava/lang/ClassLoader;J)J", (void*)&WB_AllocateMetaspace },
- {CC"freeMetaspace",
- CC"(Ljava/lang/ClassLoader;JJ)V", (void*)&WB_FreeMetaspace },
{CC"getCPUFeatures", CC"()Ljava/lang/String;", (void*)&WB_GetCPUFeatures },
{CC"getNMethod", CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
(void*)&WB_GetNMethod },
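The removed entries above follow the usual WhiteBox pattern: a WB_ENTRY function plus a row in the methods[] table binding a Java-visible name and JNI signature to it. A plain-JNI sketch of that pairing, with made-up names (WB_Example and "example" are illustrative, not part of the real API):

    #include <jni.h>

    static jlong JNICALL WB_Example(JNIEnv* env, jobject wb) {
      return 42;  // stand-in for a real WhiteBox query
    }

    // One table row: Java name, JNI signature, native entry point.
    static JNINativeMethod example_methods[] = {
      { (char*)"example", (char*)"()J", (void*)&WB_Example },
    };

    // env->RegisterNatives(whitebox_class, example_methods, 1) would then
    // bind sun.hotspot.WhiteBox.example() to WB_Example.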
--- a/hotspot/src/share/vm/prims/whitebox.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -62,8 +62,6 @@
Symbol* signature_symbol);
static const char* lookup_jstring(const char* field_name, oop object);
static bool lookup_bool(const char* field_name, oop object);
-
- static int array_bytes_to_length(size_t bytes);
};
--- a/hotspot/src/share/vm/runtime/thread.cpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp Mon Jul 07 12:37:11 2014 +0200
@@ -4101,8 +4101,8 @@
SharedHeap* sh = SharedHeap::heap();
// Cannot yet substitute active_workers for n_par_threads
// because of G1CollectedHeap::verify() use of
- // SharedHeap::process_roots(). n_par_threads == 0 will
- // turn off parallelism in process_roots while active_workers
+ // SharedHeap::process_strong_roots(). n_par_threads == 0 will
+ // turn off parallelism in process_strong_roots while active_workers
// is being used for parallelism elsewhere.
bool is_par = sh->n_par_threads() > 0;
assert(!is_par ||
--- a/hotspot/src/share/vm/runtime/thread.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -452,7 +452,7 @@
private:
bool claim_oops_do_par_case(int collection_parity);
public:
- // Requires that "collection_parity" is that of the current roots
+ // Requires that "collection_parity" is that of the current strong roots
// iteration. If "is_par" is false, sets the parity of "this" to
// "collection_parity", and returns "true". If "is_par" is true,
// uses an atomic instruction to set the current threads parity to
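The comment above describes a claim protocol: each thread carries a parity word, and in the parallel case a worker claims the thread for the current root iteration with an atomic compare-and-swap, so only one worker processes it per round. A minimal sketch using std::atomic (HotSpot uses its own Atomic:: primitives; the field name and previous-parity convention here are assumptions):

    #include <atomic>

    struct ClaimableThread {
      std::atomic<int> oops_do_parity;

      bool claim_oops_do(bool is_par, int collection_parity) {
        if (!is_par) {
          // serial case: record the parity and claim unconditionally
          oops_do_parity.store(collection_parity, std::memory_order_relaxed);
          return true;
        }
        // parallel case: only the worker whose CAS flips the parity wins
        int expected = collection_parity - 1;  // assumed previous-round value
        return oops_do_parity.compare_exchange_strong(expected, collection_parity);
      }
    };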
--- a/hotspot/src/share/vm/utilities/array.hpp Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/src/share/vm/utilities/array.hpp Mon Jul 07 12:37:11 2014 +0200
@@ -305,7 +305,6 @@
friend class MetadataFactory;
friend class VMStructs;
friend class MethodHandleCompiler; // special case
- friend class WhiteBox;
protected:
int _length; // the number of array elements
T _data[1]; // the array memory
@@ -327,29 +326,6 @@
static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }
- // WhiteBox API helper.
- static int bytes_to_length(size_t bytes) {
- assert(is_size_aligned(bytes, BytesPerWord), "Must be, for now");
-
- if (sizeof(Array<T>) >= bytes) {
- return 0;
- }
-
- size_t left = bytes - sizeof(Array<T>);
- assert(is_size_aligned(left, sizeof(T)), "Must be");
-
- size_t elements = left / sizeof(T);
- assert(elements <= (size_t)INT_MAX, err_msg("number of elements " SIZE_FORMAT "doesn't fit into an int.", elements));
-
- int length = (int)elements;
-
- assert((size_t)size(length) * BytesPerWord == bytes,
- err_msg("Expected: " SIZE_FORMAT " got: " SIZE_FORMAT,
- bytes, (size_t)size(length) * BytesPerWord));
-
- return length;
- }
-
explicit Array(int length) : _length(length) {
assert(length >= 0, "illegal length");
}
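The removed bytes_to_length() above is the inverse of byte_sizeof(): it recovers an element count from a byte budget. A simplified, self-contained version of that arithmetic (it skips HotSpot's word-alignment asserts and the exact round-trip check against size()):

    #include <cstddef>

    // Simplified stand-in for the Array<T> header layout shown above:
    // an int length followed by the element data.
    template <typename T>
    struct ArrayLike {
      int _length;
      T   _data[1];
    };

    // Byte footprint of `length` elements, mirroring byte_sizeof() above.
    template <typename T>
    size_t byte_sizeof_like(int length) {
      return sizeof(ArrayLike<T>) + (length > 1 ? (size_t)(length - 1) * sizeof(T) : 0);
    }

    // Inverse: recover the element count from a byte budget.
    template <typename T>
    int bytes_to_length_like(size_t bytes) {
      if (bytes < sizeof(ArrayLike<T>)) return 0;          // header alone doesn't fit
      size_t left = bytes - sizeof(ArrayLike<T>);
      return 1 + (int)(left / sizeof(T));                  // first element lives in _data[1]
    }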
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Mon Jul 07 10:12:40 2014 +0200
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Mon Jul 07 12:37:11 2014 +0200
@@ -142,8 +142,6 @@
// Memory
public native void readReservedMemory();
- public native long allocateMetaspace(ClassLoader classLoader, long size);
- public native void freeMetaspace(ClassLoader classLoader, long addr, long size);
// force Full GC
public native void fullGC();