--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -332,27 +332,6 @@
}
}
-#ifdef ASSERT
-class AllAliveClosure : public OopClosure {
- BoolObjectClosure* _is_alive_closure;
- bool _found_dead;
- public:
- AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
- template <typename T> void do_oop_work(T* p) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop)) {
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- if (!_is_alive_closure->do_object_b(obj)) {
- _found_dead = true;
- }
- }
- }
- void do_oop(oop* p) { do_oop_work<oop>(p); }
- void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
- bool found_dead() { return _found_dead; }
-};
-#endif
-
oop ClassLoaderData::keep_alive_object() const {
assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
return is_anonymous() ? _klasses->java_mirror() : class_loader();
@@ -362,15 +341,6 @@
bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
|| is_alive_closure->do_object_b(keep_alive_object());
-#ifdef ASSERT
- if (alive) {
- AllAliveClosure all_alive_closure(is_alive_closure);
- KlassToOopClosure klass_closure(&all_alive_closure);
- const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
- assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
- }
-#endif
-
return alive;
}
--- a/hotspot/src/share/vm/classfile/stringTable.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/classfile/stringTable.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -109,7 +109,7 @@
}
}
// If the bucket size is too deep check if this hash code is insufficient.
- if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
+ if (count >= rehash_count && !needs_rehashing()) {
_needs_rehashing = check_rehash_table(count);
}
return NULL;
--- a/hotspot/src/share/vm/classfile/stringTable.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/classfile/stringTable.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -28,7 +28,7 @@
#include "memory/allocation.inline.hpp"
#include "utilities/hashtable.hpp"
-class StringTable : public Hashtable<oop, mtSymbol> {
+class StringTable : public RehashableHashtable<oop, mtSymbol> {
friend class VMStructs;
friend class Symbol;
@@ -55,11 +55,11 @@
// in the range [start_idx, end_idx).
static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed);
- StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
+ StringTable() : RehashableHashtable<oop, mtSymbol>((int)StringTableSize,
sizeof (HashtableEntry<oop, mtSymbol>)) {}
StringTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
- : Hashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t,
+ : RehashableHashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t,
number_of_entries) {}
public:
// The string table
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -201,7 +201,7 @@
}
}
// If the bucket size is too deep check if this hash code is insufficient.
- if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
+ if (count >= rehash_count && !needs_rehashing()) {
_needs_rehashing = check_rehash_table(count);
}
return NULL;
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -73,7 +73,7 @@
operator Symbol*() { return _temp; }
};
-class SymbolTable : public Hashtable<Symbol*, mtSymbol> {
+class SymbolTable : public RehashableHashtable<Symbol*, mtSymbol> {
friend class VMStructs;
friend class ClassFileParser;
@@ -109,10 +109,10 @@
Symbol* lookup(int index, const char* name, int len, unsigned int hash);
SymbolTable()
- : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
+ : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
- : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
+ : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
number_of_entries) {}
// Arena for permanent symbols (null class loader) that are never unloaded
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -22,372 +22,375 @@
*
*/
-
#include "precompiled.hpp"
+#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/heap.hpp"
#include "memory/iterator.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/hashtable.inline.hpp"
+#include "utilities/stack.inline.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
-G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL), _free(NULL) {
- _top = bottom();
+class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
+ friend class G1CodeRootSetTest;
+ typedef HashtableEntry<nmethod*, mtGC> Entry;
+
+ static CodeRootSetTable* volatile _purge_list;
+
+ CodeRootSetTable* _purge_next;
+
+ unsigned int compute_hash(nmethod* nm) {
+ uintptr_t hash = (uintptr_t)nm;
+ return hash ^ (hash >> 7); // code heap blocks are 128-byte aligned
+ }
+
+ Entry* new_entry(nmethod* nm);
+
+ public:
+ CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
+ ~CodeRootSetTable();
+
+ // Needs to be protected by locks
+ bool add(nmethod* nm);
+ bool remove(nmethod* nm);
+
+ // Can be called without locking
+ bool contains(nmethod* nm);
+
+ int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }
+
+ void copy_to(CodeRootSetTable* new_table);
+ void nmethods_do(CodeBlobClosure* blk);
+
+ template<typename CB>
+ void remove_if(CB& should_remove);
+
+ static void purge_list_append(CodeRootSetTable* tbl);
+ static void purge();
+
+ static size_t static_mem_size() {
+ return sizeof(_purge_list);
+ }
+};
+
+CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;
+
+CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
+ unsigned int hash = compute_hash(nm);
+ Entry* entry = (Entry*) new_entry_free_list();
+ if (entry == NULL) {
+ entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC);
+ }
+ entry->set_next(NULL);
+ entry->set_hash(hash);
+ entry->set_literal(nm);
+ return entry;
}
-void G1CodeRootChunk::reset() {
- _next = _prev = NULL;
- _free = NULL;
- _top = bottom();
-}
-
-void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
- NmethodOrLink* cur = bottom();
- while (cur != _top) {
- if (is_nmethod(cur)) {
- cl->do_code_blob(cur->_nmethod);
+CodeRootSetTable::~CodeRootSetTable() {
+ for (int index = 0; index < table_size(); ++index) {
+ for (Entry* e = bucket(index); e != NULL; ) {
+ Entry* to_remove = e;
+ // read next before freeing.
+ e = e->next();
+ unlink_entry(to_remove);
+ FREE_C_HEAP_ARRAY(char, to_remove, mtGC);
}
- cur++;
+ }
+ assert(number_of_entries() == 0, "should have removed all entries");
+ free_buckets();
+ for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
+ FREE_C_HEAP_ARRAY(char, e, mtGC);
}
}
-bool G1CodeRootChunk::remove_lock_free(nmethod* method) {
- NmethodOrLink* cur = bottom();
-
- for (NmethodOrLink* cur = bottom(); cur != _top; cur++) {
- if (cur->_nmethod == method) {
- bool result = Atomic::cmpxchg_ptr(NULL, &cur->_nmethod, method) == method;
+bool CodeRootSetTable::add(nmethod* nm) {
+ if (!contains(nm)) {
+ Entry* e = new_entry(nm);
+ int index = hash_to_index(e->hash());
+ add_entry(index, e);
+ return true;
+ }
+ return false;
+}
- if (!result) {
- // Someone else cleared out this entry.
- return false;
- }
+bool CodeRootSetTable::contains(nmethod* nm) {
+ int index = hash_to_index(compute_hash(nm));
+ for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+ if (e->literal() == nm) {
+ return true;
+ }
+ }
+ return false;
+}
- // The method was cleared. Time to link it into the free list.
- NmethodOrLink* prev_free;
- do {
- prev_free = (NmethodOrLink*)_free;
- cur->_link = prev_free;
- } while (Atomic::cmpxchg_ptr(cur, &_free, prev_free) != prev_free);
-
+bool CodeRootSetTable::remove(nmethod* nm) {
+ int index = hash_to_index(compute_hash(nm));
+ Entry* previous = NULL;
+ for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {
+ if (e->literal() == nm) {
+ if (previous != NULL) {
+ previous->set_next(e->next());
+ } else {
+ set_entry(index, e->next());
+ }
+ free_entry(e);
return true;
}
}
-
return false;
}
-G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) {
- _free_list.initialize();
- _free_list.set_size(G1CodeRootChunk::word_size());
+void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
+ for (int index = 0; index < table_size(); ++index) {
+ for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+ new_table->add(e->literal());
+ }
+ }
+ new_table->copy_freelist(this);
}
-size_t G1CodeRootChunkManager::fl_mem_size() {
- return _free_list.count() * _free_list.size();
-}
-
-void G1CodeRootChunkManager::free_all_chunks(FreeList<G1CodeRootChunk>* list) {
- _num_chunks_handed_out -= list->count();
- _free_list.prepend(list);
+void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
+ for (int index = 0; index < table_size(); ++index) {
+ for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+ blk->do_code_blob(e->literal());
+ }
+ }
}
-void G1CodeRootChunkManager::free_chunk(G1CodeRootChunk* chunk) {
- _free_list.return_chunk_at_head(chunk);
- _num_chunks_handed_out--;
+template<typename CB>
+void CodeRootSetTable::remove_if(CB& should_remove) {
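+ // Walk all buckets, unlinking and freeing every entry whose nmethod
+ // satisfies should_remove.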
+ for (int index = 0; index < table_size(); ++index) {
+ Entry* previous = NULL;
+ Entry* e = bucket(index);
+ while (e != NULL) {
+ Entry* next = e->next();
+ if (should_remove(e->literal())) {
+ if (previous != NULL) {
+ previous->set_next(next);
+ } else {
+ set_entry(index, next);
+ }
+ free_entry(e);
+ } else {
+ previous = e;
+ }
+ e = next;
+ }
+ }
+}
+
+G1CodeRootSet::~G1CodeRootSet() {
+ delete _table;
}
-void G1CodeRootChunkManager::purge_chunks(size_t keep_ratio) {
- size_t keep = _num_chunks_handed_out * keep_ratio / 100;
- if (keep >= (size_t)_free_list.count()) {
- return;
- }
+CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
+ return (CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
+}
+
+void G1CodeRootSet::allocate_small_table() {
+ _table = new CodeRootSetTable(SmallSize);
+}
- FreeList<G1CodeRootChunk> temp;
- temp.initialize();
- temp.set_size(G1CodeRootChunk::word_size());
+void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) {
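+ // Lock-free prepend: snapshot the current head into _purge_next, then try to
+ // CAS the new table in; retry if another thread changed _purge_list meanwhile.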
+ for (;;) {
+ table->_purge_next = _purge_list;
+ CodeRootSetTable* old = (CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
+ if (old == table->_purge_next) {
+ break;
+ }
+ }
+}
- _free_list.getFirstNChunksFromList((size_t)_free_list.count() - keep, &temp);
-
- G1CodeRootChunk* cur = temp.get_chunk_at_head();
- while (cur != NULL) {
- delete cur;
- cur = temp.get_chunk_at_head();
+void CodeRootSetTable::purge() {
+ CodeRootSetTable* table = _purge_list;
+ _purge_list = NULL;
+ while (table != NULL) {
+ CodeRootSetTable* to_purge = table;
+ table = table->_purge_next;
+ delete to_purge;
}
}
-size_t G1CodeRootChunkManager::static_mem_size() {
- return sizeof(G1CodeRootChunkManager);
+void G1CodeRootSet::move_to_large() {
+ CodeRootSetTable* temp = new CodeRootSetTable(LargeSize);
+
+ _table->copy_to(temp);
+
+ CodeRootSetTable::purge_list_append(_table);
+
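+ // Publish the new table with a release store; this pairs with the acquire load
+ // in load_acquire_table() so concurrent readers see a fully initialized table.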
+ OrderAccess::release_store_ptr(&_table, temp);
}
-G1CodeRootChunk* G1CodeRootChunkManager::new_chunk() {
- G1CodeRootChunk* result = _free_list.get_chunk_at_head();
- if (result == NULL) {
- result = new G1CodeRootChunk();
+void G1CodeRootSet::purge() {
+ CodeRootSetTable::purge();
+}
+
+size_t G1CodeRootSet::static_mem_size() {
+ return CodeRootSetTable::static_mem_size();
+}
+
+void G1CodeRootSet::add(nmethod* method) {
+ bool added = false;
+ if (is_empty()) {
+ allocate_small_table();
+ }
+ added = _table->add(method);
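+ // _length still holds the pre-add count here; the table is migrated once the
+ // set has reached the threshold.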
+ if (_length == Threshold) {
+ move_to_large();
+ }
+ if (added) {
+ ++_length;
+ }
+}
+
+bool G1CodeRootSet::remove(nmethod* method) {
+ bool removed = false;
+ if (_table != NULL) {
+ removed = _table->remove(method);
+ }
+ if (removed) {
+ _length--;
+ if (_length == 0) {
+ clear();
+ }
+ }
+ return removed;
+}
+
+bool G1CodeRootSet::contains(nmethod* method) {
+ CodeRootSetTable* table = load_acquire_table();
+ if (table != NULL) {
+ return table->contains(method);
}
- _num_chunks_handed_out++;
- result->reset();
- return result;
+ return false;
+}
+
+void G1CodeRootSet::clear() {
+ delete _table;
+ _table = NULL;
+ _length = 0;
+}
+
+size_t G1CodeRootSet::mem_size() {
+ return sizeof(*this) +
+ (_table != NULL ? sizeof(CodeRootSetTable) + _table->entry_size() * _length : 0);
+}
+
+void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
+ if (_table != NULL) {
+ _table->nmethods_do(blk);
+ }
+}
+
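+ // Predicate for remove_if(): an nmethod is removed when none of its oops
+ // point into the region being cleaned.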
+class CleanCallback : public StackObj {
+ class PointsIntoHRDetectionClosure : public OopClosure {
+ HeapRegion* _hr;
+ public:
+ bool _points_into;
+ PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}
+
+ void do_oop(narrowOop* o) {
+ do_oop_work(o);
+ }
+
+ void do_oop(oop* o) {
+ do_oop_work(o);
+ }
+
+ template <typename T>
+ void do_oop_work(T* p) {
+ if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) {
+ _points_into = true;
+ }
+ }
+ };
+
+ PointsIntoHRDetectionClosure _detector;
+ CodeBlobToOopClosure _blobs;
+
+ public:
+ CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {}
+
+ bool operator() (nmethod* nm) {
+ _detector._points_into = false;
+ _blobs.do_code_blob(nm);
+ return !_detector._points_into;
+ }
+};
+
+void G1CodeRootSet::clean(HeapRegion* owner) {
+ CleanCallback should_clean(owner);
+ if (_table != NULL) {
+ _table->remove_if(should_clean);
+ }
}
#ifndef PRODUCT
-size_t G1CodeRootChunkManager::num_chunks_handed_out() const {
- return _num_chunks_handed_out;
-}
+class G1CodeRootSetTest {
+ public:
+ static void test() {
+ {
+ G1CodeRootSet set1;
+ assert(set1.is_empty(), "Code root set must be initially empty but is not.");
+
+ assert(G1CodeRootSet::static_mem_size() == sizeof(void*),
+ err_msg("The code root set's static memory usage is incorrect, "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size()));
+
+ set1.add((nmethod*)1);
+ assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
+ SIZE_FORMAT" elements", set1.length()));
+
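+ // One more than Threshold: enough distinct code roots to force the move to the large table.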
+ const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1;
+
+ for (size_t i = 1; i <= num_to_add; i++) {
+ set1.add((nmethod*)1);
+ }
+ assert(set1.length() == 1,
+ err_msg("Duplicate detection should not have increased the set size but "
+ "is "SIZE_FORMAT, set1.length()));
-size_t G1CodeRootChunkManager::num_free_chunks() const {
- return (size_t)_free_list.count();
+ for (size_t i = 2; i <= num_to_add; i++) {
+ set1.add((nmethod*)(uintptr_t)(i));
+ }
+ assert(set1.length() == num_to_add,
+ err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they "
+ "need to be in the set, but there are only "SIZE_FORMAT,
+ num_to_add, set1.length()));
+
+ assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
+
+ size_t num_popped = 0;
+ for (size_t i = 1; i <= num_to_add; i++) {
+ bool removed = set1.remove((nmethod*)i);
+ if (removed) {
+ num_popped += 1;
+ } else {
+ break;
+ }
+ }
+ assert(num_popped == num_to_add,
+ err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" "
+ "were added", num_popped, num_to_add));
+ assert(CodeRootSetTable::_purge_list != NULL, "purge list should still hold the old table after removals");
+
+ G1CodeRootSet::purge();
+
+ assert(CodeRootSetTable::_purge_list == NULL, "should have purged old small tables");
+
+ }
+
+ }
+};
+
+void TestCodeCacheRemSet_test() {
+ G1CodeRootSetTest::test();
}
#endif
-
-G1CodeRootChunkManager G1CodeRootSet::_default_chunk_manager;
-
-void G1CodeRootSet::purge_chunks(size_t keep_ratio) {
- _default_chunk_manager.purge_chunks(keep_ratio);
-}
-
-size_t G1CodeRootSet::free_chunks_static_mem_size() {
- return _default_chunk_manager.static_mem_size();
-}
-
-size_t G1CodeRootSet::free_chunks_mem_size() {
- return _default_chunk_manager.fl_mem_size();
-}
-
-G1CodeRootSet::G1CodeRootSet(G1CodeRootChunkManager* manager) : _manager(manager), _list(), _length(0) {
- if (_manager == NULL) {
- _manager = &_default_chunk_manager;
- }
- _list.initialize();
- _list.set_size(G1CodeRootChunk::word_size());
-}
-
-G1CodeRootSet::~G1CodeRootSet() {
- clear();
-}
-
-void G1CodeRootSet::add(nmethod* method) {
- if (!contains(method)) {
- // Find the first chunk that isn't full.
- G1CodeRootChunk* cur = _list.head();
- while (cur != NULL) {
- if (!cur->is_full()) {
- break;
- }
- cur = cur->next();
- }
-
- // All chunks are full, get a new chunk.
- if (cur == NULL) {
- cur = new_chunk();
- _list.return_chunk_at_head(cur);
- }
-
- // Add the nmethod.
- bool result = cur->add(method);
-
- guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));
-
- _length++;
- }
-}
-
-void G1CodeRootSet::remove_lock_free(nmethod* method) {
- G1CodeRootChunk* found = find(method);
- if (found != NULL) {
- bool result = found->remove_lock_free(method);
- if (result) {
- Atomic::dec_ptr((volatile intptr_t*)&_length);
- }
- }
- assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
-}
-
-nmethod* G1CodeRootSet::pop() {
- while (true) {
- G1CodeRootChunk* cur = _list.head();
- if (cur == NULL) {
- assert(_length == 0, "when there are no chunks, there should be no elements");
- return NULL;
- }
- nmethod* result = cur->pop();
- if (result != NULL) {
- _length--;
- return result;
- } else {
- free(_list.get_chunk_at_head());
- }
- }
-}
-
-G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {
- G1CodeRootChunk* cur = _list.head();
- while (cur != NULL) {
- if (cur->contains(method)) {
- return cur;
- }
- cur = (G1CodeRootChunk*)cur->next();
- }
- return NULL;
-}
-
-void G1CodeRootSet::free(G1CodeRootChunk* chunk) {
- free_chunk(chunk);
-}
-
-bool G1CodeRootSet::contains(nmethod* method) {
- return find(method) != NULL;
-}
-
-void G1CodeRootSet::clear() {
- free_all_chunks(&_list);
- _length = 0;
-}
-
-void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
- G1CodeRootChunk* cur = _list.head();
- while (cur != NULL) {
- cur->nmethods_do(blk);
- cur = (G1CodeRootChunk*)cur->next();
- }
-}
-
-size_t G1CodeRootSet::static_mem_size() {
- return sizeof(G1CodeRootSet);
-}
-
-size_t G1CodeRootSet::mem_size() {
- return G1CodeRootSet::static_mem_size() + _list.count() * _list.size();
-}
-
-#ifndef PRODUCT
-
-void G1CodeRootSet::test() {
- G1CodeRootChunkManager mgr;
-
- assert(mgr.num_chunks_handed_out() == 0, "Must not have handed out chunks yet");
-
- assert(G1CodeRootChunkManager::static_mem_size() > sizeof(void*),
- err_msg("The chunk manager's static memory usage seems too small, is only "SIZE_FORMAT" bytes.", G1CodeRootChunkManager::static_mem_size()));
-
- // The number of chunks that we allocate for purge testing.
- size_t const num_chunks = 10;
-
- {
- G1CodeRootSet set1(&mgr);
- assert(set1.is_empty(), "Code root set must be initially empty but is not.");
-
- assert(G1CodeRootSet::static_mem_size() > sizeof(void*),
- err_msg("The code root set's static memory usage seems too small, is only "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size()));
-
- set1.add((nmethod*)1);
- assert(mgr.num_chunks_handed_out() == 1,
- err_msg("Must have allocated and handed out one chunk, but handed out "
- SIZE_FORMAT" chunks", mgr.num_chunks_handed_out()));
- assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
- SIZE_FORMAT" elements", set1.length()));
-
- // G1CodeRootChunk::word_size() is larger than G1CodeRootChunk::num_entries which
- // we cannot access.
- for (uint i = 0; i < G1CodeRootChunk::word_size() + 1; i++) {
- set1.add((nmethod*)1);
- }
- assert(mgr.num_chunks_handed_out() == 1,
- err_msg("Duplicate detection must have prevented allocation of further "
- "chunks but allocated "SIZE_FORMAT, mgr.num_chunks_handed_out()));
- assert(set1.length() == 1,
- err_msg("Duplicate detection should not have increased the set size but "
- "is "SIZE_FORMAT, set1.length()));
-
- size_t num_total_after_add = G1CodeRootChunk::word_size() + 1;
- for (size_t i = 0; i < num_total_after_add - 1; i++) {
- set1.add((nmethod*)(uintptr_t)(2 + i));
- }
- assert(mgr.num_chunks_handed_out() > 1,
- "After adding more code roots, more than one additional chunk should have been handed out");
- assert(set1.length() == num_total_after_add,
- err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they "
- "need to be in the set, but there are only "SIZE_FORMAT,
- num_total_after_add, set1.length()));
-
- size_t num_popped = 0;
- while (set1.pop() != NULL) {
- num_popped++;
- }
- assert(num_popped == num_total_after_add,
- err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" "
- "were added", num_popped, num_total_after_add));
- assert(mgr.num_chunks_handed_out() == 0,
- err_msg("After popping all elements, all chunks must have been returned "
- "but there are still "SIZE_FORMAT" additional", mgr.num_chunks_handed_out()));
-
- mgr.purge_chunks(0);
- assert(mgr.num_free_chunks() == 0,
- err_msg("After purging everything, the free list must be empty but still "
- "contains "SIZE_FORMAT" chunks", mgr.num_free_chunks()));
-
- // Add some more handed out chunks.
- size_t i = 0;
- while (mgr.num_chunks_handed_out() < num_chunks) {
- set1.add((nmethod*)i);
- i++;
- }
-
- {
- // Generate chunks on the free list.
- G1CodeRootSet set2(&mgr);
- size_t i = 0;
- while (mgr.num_chunks_handed_out() < (num_chunks * 2)) {
- set2.add((nmethod*)i);
- i++;
- }
- // Exit of the scope of the set2 object will call the destructor that generates
- // num_chunks elements on the free list.
- }
-
- assert(mgr.num_chunks_handed_out() == num_chunks,
- err_msg("Deletion of the second set must have resulted in giving back "
- "those, but there are still "SIZE_FORMAT" additional handed out, expecting "
- SIZE_FORMAT, mgr.num_chunks_handed_out(), num_chunks));
- assert(mgr.num_free_chunks() == num_chunks,
- err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
- "but there are only "SIZE_FORMAT, num_chunks, mgr.num_free_chunks()));
-
- size_t const test_percentage = 50;
- mgr.purge_chunks(test_percentage);
- assert(mgr.num_chunks_handed_out() == num_chunks,
- err_msg("Purging must not hand out chunks but there are "SIZE_FORMAT,
- mgr.num_chunks_handed_out()));
- assert(mgr.num_free_chunks() == (size_t)(mgr.num_chunks_handed_out() * test_percentage / 100),
- err_msg("Must have purged "SIZE_FORMAT" percent of "SIZE_FORMAT" chunks"
- "but there are "SIZE_FORMAT, test_percentage, num_chunks,
- mgr.num_free_chunks()));
- // Purge the remainder of the chunks on the free list.
- mgr.purge_chunks(0);
- assert(mgr.num_free_chunks() == 0, "Free List must be empty");
- assert(mgr.num_chunks_handed_out() == num_chunks,
- err_msg("Expected to be "SIZE_FORMAT" chunks handed out from the first set "
- "but there are "SIZE_FORMAT, num_chunks, mgr.num_chunks_handed_out()));
-
- // Exit of the scope of the set1 object will call the destructor that generates
- // num_chunks additional elements on the free list.
- }
-
- assert(mgr.num_chunks_handed_out() == 0,
- err_msg("Deletion of the only set must have resulted in no chunks handed "
- "out, but there is still "SIZE_FORMAT" handed out", mgr.num_chunks_handed_out()));
- assert(mgr.num_free_chunks() == num_chunks,
- err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
- "but there are only "SIZE_FORMAT, num_chunks, mgr.num_free_chunks()));
-
- // Restore initial state.
- mgr.purge_chunks(0);
- assert(mgr.num_free_chunks() == 0, "Free List must be empty");
- assert(mgr.num_chunks_handed_out() == 0, "No additional elements must have been handed out yet");
-}
-
-void TestCodeCacheRemSet_test() {
- G1CodeRootSet::test();
-}
-#endif
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -26,222 +26,64 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
#include "memory/allocation.hpp"
-#include "memory/freeList.hpp"
-#include "runtime/globals.hpp"
class CodeBlobClosure;
-
-// The elements of the G1CodeRootChunk is either:
-// 1) nmethod pointers
-// 2) nodes in an internally chained free list
-typedef union {
- nmethod* _nmethod;
- void* _link;
-} NmethodOrLink;
-
-class G1CodeRootChunk : public CHeapObj<mtGC> {
- private:
- static const int NUM_ENTRIES = 32;
- public:
- G1CodeRootChunk* _next;
- G1CodeRootChunk* _prev;
-
- NmethodOrLink* _top;
- // First free position within the chunk.
- volatile NmethodOrLink* _free;
-
- NmethodOrLink _data[NUM_ENTRIES];
-
- NmethodOrLink* bottom() const {
- return (NmethodOrLink*) &(_data[0]);
- }
-
- NmethodOrLink* end() const {
- return (NmethodOrLink*) &(_data[NUM_ENTRIES]);
- }
-
- bool is_link(NmethodOrLink* nmethod_or_link) {
- return nmethod_or_link->_link == NULL ||
- (bottom() <= nmethod_or_link->_link
- && nmethod_or_link->_link < end());
- }
-
- bool is_nmethod(NmethodOrLink* nmethod_or_link) {
- return !is_link(nmethod_or_link);
- }
-
- public:
- G1CodeRootChunk();
- ~G1CodeRootChunk() {}
-
- static size_t word_size() { return (size_t)(align_size_up_(sizeof(G1CodeRootChunk), HeapWordSize) / HeapWordSize); }
-
- // FreeList "interface" methods
-
- G1CodeRootChunk* next() const { return _next; }
- G1CodeRootChunk* prev() const { return _prev; }
- void set_next(G1CodeRootChunk* v) { _next = v; assert(v != this, "Boom");}
- void set_prev(G1CodeRootChunk* v) { _prev = v; assert(v != this, "Boom");}
- void clear_next() { set_next(NULL); }
- void clear_prev() { set_prev(NULL); }
-
- size_t size() const { return word_size(); }
-
- void link_next(G1CodeRootChunk* ptr) { set_next(ptr); }
- void link_prev(G1CodeRootChunk* ptr) { set_prev(ptr); }
- void link_after(G1CodeRootChunk* ptr) {
- link_next(ptr);
- if (ptr != NULL) ptr->link_prev((G1CodeRootChunk*)this);
- }
-
- bool is_free() { return true; }
-
- // New G1CodeRootChunk routines
-
- void reset();
-
- bool is_empty() const {
- return _top == bottom();
- }
-
- bool is_full() const {
- return _top == end() && _free == NULL;
- }
-
- bool contains(nmethod* method) {
- NmethodOrLink* cur = bottom();
- while (cur != _top) {
- if (cur->_nmethod == method) return true;
- cur++;
- }
- return false;
- }
-
- bool add(nmethod* method) {
- if (is_full()) {
- return false;
- }
-
- if (_free != NULL) {
- // Take from internally chained free list
- NmethodOrLink* first_free = (NmethodOrLink*)_free;
- _free = (NmethodOrLink*)_free->_link;
- first_free->_nmethod = method;
- } else {
- // Take from top.
- _top->_nmethod = method;
- _top++;
- }
-
- return true;
- }
-
- bool remove_lock_free(nmethod* method);
-
- void nmethods_do(CodeBlobClosure* blk);
-
- nmethod* pop() {
- if (_free != NULL) {
- // Kill the free list.
- _free = NULL;
- }
-
- while (!is_empty()) {
- _top--;
- if (is_nmethod(_top)) {
- return _top->_nmethod;
- }
- }
-
- return NULL;
- }
-};
-
-// Manages free chunks.
-class G1CodeRootChunkManager VALUE_OBJ_CLASS_SPEC {
- private:
- // Global free chunk list management
- FreeList<G1CodeRootChunk> _free_list;
- // Total number of chunks handed out
- size_t _num_chunks_handed_out;
-
- public:
- G1CodeRootChunkManager();
-
- G1CodeRootChunk* new_chunk();
- void free_chunk(G1CodeRootChunk* chunk);
- // Free all elements of the given list.
- void free_all_chunks(FreeList<G1CodeRootChunk>* list);
-
- void initialize();
- void purge_chunks(size_t keep_ratio);
-
- static size_t static_mem_size();
- size_t fl_mem_size();
-
-#ifndef PRODUCT
- size_t num_chunks_handed_out() const;
- size_t num_free_chunks() const;
-#endif
-};
+class CodeRootSetTable;
+class HeapRegion;
+class nmethod;
// Implements storage for a set of code roots.
// All methods that modify the set are not thread-safe except if otherwise noted.
class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
+ friend class G1CodeRootSetTest;
private:
- // Global default free chunk manager instance.
- static G1CodeRootChunkManager _default_chunk_manager;
- G1CodeRootChunk* new_chunk() { return _manager->new_chunk(); }
- void free_chunk(G1CodeRootChunk* chunk) { _manager->free_chunk(chunk); }
- // Free all elements of the given list.
- void free_all_chunks(FreeList<G1CodeRootChunk>* list) { _manager->free_all_chunks(list); }
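+ // SmallSize and LargeSize are hash table bucket counts; Threshold is the
+ // number of entries at which the set moves from the small to the large table.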
+ const static size_t SmallSize = 32;
+ const static size_t Threshold = 24;
+ const static size_t LargeSize = 512;
- // Return the chunk that contains the given nmethod, NULL otherwise.
- // Scans the list of chunks backwards, as this method is used to add new
- // entries, which are typically added in bulk for a single nmethod.
- G1CodeRootChunk* find(nmethod* method);
- void free(G1CodeRootChunk* chunk);
+ CodeRootSetTable* _table;
+ CodeRootSetTable* load_acquire_table();
size_t _length;
- FreeList<G1CodeRootChunk> _list;
- G1CodeRootChunkManager* _manager;
+
+ void move_to_large();
+ void allocate_small_table();
public:
- // If an instance is initialized with a chunk manager of NULL, use the global
- // default one.
- G1CodeRootSet(G1CodeRootChunkManager* manager = NULL);
+ G1CodeRootSet() : _table(NULL), _length(0) {}
~G1CodeRootSet();
- static void purge_chunks(size_t keep_ratio);
+ static void purge();
- static size_t free_chunks_static_mem_size();
- static size_t free_chunks_mem_size();
+ static size_t static_mem_size();
- // Search for the code blob from the recently allocated ones to find duplicates more quickly, as this
- // method is likely to be repeatedly called with the same nmethod.
void add(nmethod* method);
- void remove_lock_free(nmethod* method);
- nmethod* pop();
+ bool remove(nmethod* method);
+ // Safe to call without synchronization, but may return false negatives.
bool contains(nmethod* method);
void clear();
void nmethods_do(CodeBlobClosure* blk) const;
- bool is_empty() { return length() == 0; }
+ // Remove all nmethods that no longer contain pointers into the "owner" region.
+ void clean(HeapRegion* owner);
+
+ bool is_empty() {
+ bool empty = length() == 0;
+ assert(empty == (_table == NULL), "is empty only if table is deallocated");
+ return empty;
+ }
// Length in elements
size_t length() const { return _length; }
- // Static data memory size in bytes of this set.
- static size_t static_mem_size();
// Memory size in bytes taken by this set.
size_t mem_size();
- static void test() PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -4670,6 +4670,56 @@
}
};
+class G1CodeBlobClosure : public CodeBlobClosure {
+ class HeapRegionGatheringOopClosure : public OopClosure {
+ G1CollectedHeap* _g1h;
+ OopClosure* _work;
+ nmethod* _nm;
+
+ template <typename T>
+ void do_oop_work(T* p) {
+ _work->do_oop(p);
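+ // The wrapped closure may have evacuated the object and updated *p, so reload
+ // the (possibly forwarded) reference before recording the code root.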
+ T oop_or_narrowoop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(oop_or_narrowoop)) {
+ oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+ HeapRegion* hr = _g1h->heap_region_containing_raw(o);
+ assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
+ hr->add_strong_code_root(_nm);
+ }
+ }
+
+ public:
+ HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
+
+ void do_oop(oop* o) {
+ do_oop_work(o);
+ }
+
+ void do_oop(narrowOop* o) {
+ do_oop_work(o);
+ }
+
+ void set_nm(nmethod* nm) {
+ _nm = nm;
+ }
+ };
+
+ HeapRegionGatheringOopClosure _oc;
+public:
+ G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
+
+ void do_code_blob(CodeBlob* cb) {
+ nmethod* nm = cb->as_nmethod_or_null();
+ if (nm != NULL) {
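+ // test_set_oops_do_mark() atomically claims the nmethod, so it is processed
+ // at most once per GC even if several workers encounter it.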
+ if (!nm->test_set_oops_do_mark()) {
+ _oc.set_nm(nm);
+ nm->oops_do(&_oc);
+ nm->fix_oop_relocations();
+ }
+ }
+ }
+};
+
class G1ParTask : public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
@@ -4738,22 +4788,6 @@
}
};
- class G1CodeBlobClosure: public CodeBlobClosure {
- OopClosure* _f;
-
- public:
- G1CodeBlobClosure(OopClosure* f) : _f(f) {}
- void do_code_blob(CodeBlob* blob) {
- nmethod* that = blob->as_nmethod_or_null();
- if (that != NULL) {
- if (!that->test_set_oops_do_mark()) {
- that->oops_do(_f);
- that->fix_oop_relocations();
- }
- }
- }
- };
-
void work(uint worker_id) {
if (worker_id >= _n_workers) return; // no work needed this round
@@ -4944,7 +4978,7 @@
g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
// Now scan the complement of the collection set.
- MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
+ G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
@@ -5991,12 +6025,6 @@
hot_card_cache->reset_hot_cache();
hot_card_cache->set_use_cache(true);
- // Migrate the strong code roots attached to each region in
- // the collection set. Ideally we would like to do this
- // after we have finished the scanning/evacuation of the
- // strong code roots for a particular heap region.
- migrate_strong_code_roots();
-
purge_code_root_memory();
if (g1_policy()->during_initial_mark_pause()) {
@@ -7049,13 +7077,8 @@
" starting at "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
- // HeapRegion::add_strong_code_root() avoids adding duplicate
- // entries but having duplicates is OK since we "mark" nmethods
- // as visited when we scan the strong code root lists during the GC.
- hr->add_strong_code_root(_nm);
- assert(hr->rem_set()->strong_code_roots_list_contains(_nm),
- err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT,
- _nm, HR_FORMAT_PARAMS(hr)));
+ // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
+ hr->add_strong_code_root_locked(_nm);
}
}
@@ -7082,9 +7105,6 @@
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
hr->remove_strong_code_root(_nm);
- assert(!hr->rem_set()->strong_code_roots_list_contains(_nm),
- err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT,
- _nm, HR_FORMAT_PARAMS(hr)));
}
}
@@ -7112,28 +7132,9 @@
nm->oops_do(®_cl, true);
}
-class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
-public:
- bool doHeapRegion(HeapRegion *hr) {
- assert(!hr->isHumongous(),
- err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
- HR_FORMAT_PARAMS(hr)));
- hr->migrate_strong_code_roots();
- return false;
- }
-};
-
-void G1CollectedHeap::migrate_strong_code_roots() {
- MigrateCodeRootsHeapRegionClosure cl;
- double migrate_start = os::elapsedTime();
- collection_set_iterate(&cl);
- double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
- g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
-}
-
void G1CollectedHeap::purge_code_root_memory() {
double purge_start = os::elapsedTime();
- G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
+ G1CodeRootSet::purge();
double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -1662,12 +1662,6 @@
// Unregister the given nmethod from the G1 heap.
virtual void unregister_nmethod(nmethod* nm);
- // Migrate the nmethods in the code root lists of the regions
- // in the collection set to regions in to-space. In the event
- // of an evacuation failure, nmethods that reference objects
- // that were not successfully evacuated are not migrated.
- void migrate_strong_code_roots();
-
// Free up superfluous code root memory.
void purge_code_root_memory();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -217,6 +217,8 @@
_update_rset_cl->set_region(hr);
hr->object_iterate(&rspc);
+ hr->rem_set()->clean_strong_code_roots(hr);
+
hr->note_self_forwarding_removal_end(during_initial_mark,
during_conc_mark,
rspc.marked_bytes());
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -275,9 +275,6 @@
// Now subtract the time taken to fix up roots in generated code
misc_time_ms += _cur_collection_code_root_fixup_time_ms;
- // Strong code root migration time
- misc_time_ms += _cur_strong_code_root_migration_time_ms;
-
// Strong code root purge time
misc_time_ms += _cur_strong_code_root_purge_time_ms;
@@ -328,7 +325,6 @@
_last_obj_copy_times_ms.print(1, "Object Copy (ms)");
}
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
- print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
if (G1StringDedup::is_enabled()) {
print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -129,7 +129,6 @@
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
- double _cur_strong_code_root_migration_time_ms;
double _cur_strong_code_root_purge_time_ms;
double _cur_evac_fail_recalc_used;
@@ -233,10 +232,6 @@
_cur_collection_code_root_fixup_time_ms = ms;
}
- void record_strong_code_root_migration_time(double ms) {
- _cur_strong_code_root_migration_time_ms = ms;
- }
-
void record_strong_code_root_purge_time(double ms) {
_cur_strong_code_root_purge_time_ms = ms;
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -110,7 +110,7 @@
G1CollectedHeap* _g1h;
OopsInHeapRegionClosure* _oc;
- CodeBlobToOopClosure* _code_root_cl;
+ CodeBlobClosure* _code_root_cl;
G1BlockOffsetSharedArray* _bot_shared;
G1SATBCardTableModRefBS *_ct_bs;
@@ -122,7 +122,7 @@
public:
ScanRSClosure(OopsInHeapRegionClosure* oc,
- CodeBlobToOopClosure* code_root_cl,
+ CodeBlobClosure* code_root_cl,
uint worker_i) :
_oc(oc),
_code_root_cl(code_root_cl),
@@ -242,7 +242,7 @@
};
void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
- CodeBlobToOopClosure* code_root_cl,
+ CodeBlobClosure* code_root_cl,
uint worker_i) {
double rs_time_start = os::elapsedTime();
HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
@@ -321,7 +321,7 @@
}
void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
- CodeBlobToOopClosure* code_root_cl,
+ CodeBlobClosure* code_root_cl,
uint worker_i) {
#if CARD_REPEAT_HISTO
ct_freq_update_histo_and_reset();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -96,7 +96,7 @@
// the "i" passed to the calling thread's work(i) function.
// In the sequential case this param will be ignored.
void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
- CodeBlobToOopClosure* code_root_cl,
+ CodeBlobClosure* code_root_cl,
uint worker_i);
// Prepare for and cleanup after an oops_into_collection_set_do
@@ -108,7 +108,7 @@
void cleanup_after_oops_into_collection_set_do();
void scanRS(OopsInHeapRegionClosure* oc,
- CodeBlobToOopClosure* code_root_cl,
+ CodeBlobClosure* code_root_cl,
uint worker_i);
void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -253,6 +253,7 @@
size_t occupied_cards = hrrs->occupied();
size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
if (code_root_mem_sz > max_code_root_mem_sz()) {
+ _max_code_root_mem_sz = code_root_mem_sz;
_max_code_root_mem_sz_region = r;
}
size_t code_root_elems = hrrs->strong_code_roots_list_length();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -277,10 +277,6 @@
product(uintx, G1MixedGCCountTarget, 8, \
"The target number of mixed GCs after a marking cycle.") \
\
- experimental(uintx, G1CodeRootsChunkCacheKeepPercent, 10, \
- "The amount of code root chunks that should be kept at most " \
- "as percentage of already allocated.") \
- \
experimental(bool, G1ReclaimDeadHumongousObjectsAtYoungGC, true, \
"Try to reclaim dead large objects at every young GC.") \
\
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -540,21 +540,17 @@
hrrs->add_strong_code_root(nm);
}
+void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
+ assert_locked_or_safepoint(CodeCache_lock);
+ HeapRegionRemSet* hrrs = rem_set();
+ hrrs->add_strong_code_root_locked(nm);
+}
+
void HeapRegion::remove_strong_code_root(nmethod* nm) {
HeapRegionRemSet* hrrs = rem_set();
hrrs->remove_strong_code_root(nm);
}
-void HeapRegion::migrate_strong_code_roots() {
- assert(in_collection_set(), "only collection set regions");
- assert(!isHumongous(),
- err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
- HR_FORMAT_PARAMS(this)));
-
- HeapRegionRemSet* hrrs = rem_set();
- hrrs->migrate_strong_code_roots();
-}
-
void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
HeapRegionRemSet* hrrs = rem_set();
hrrs->strong_code_roots_do(blk);
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -756,14 +756,9 @@
// Routines for managing a list of code roots (attached to the
// this region's RSet) that point into this heap region.
void add_strong_code_root(nmethod* nm);
+ void add_strong_code_root_locked(nmethod* nm);
void remove_strong_code_root(nmethod* nm);
- // During a collection, migrate the successfully evacuated
- // strong code roots that referenced into this region to the
- // new regions that they now point into. Unsuccessfully
- // evacuated code roots are not migrated.
- void migrate_strong_code_roots();
-
// Applies blk->do_code_blob() to each of the entries in
// the strong code roots list for this region
void strong_code_roots_do(CodeBlobClosure* blk) const;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -448,10 +448,10 @@
// Note that this may be a continued H region.
HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
- RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrm_index();
+ RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();
// If the region is already coarsened, return.
- if (_coarse_map.at(from_hrs_ind)) {
+ if (_coarse_map.at(from_hrm_ind)) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr(" coarse map hit.");
}
@@ -460,7 +460,7 @@
}
// Otherwise find a per-region table to add it to.
- size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
+ size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
PerRegionTable* prt = find_region_table(ind, from_hr);
if (prt == NULL) {
MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
@@ -475,7 +475,7 @@
assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
"Must be in range.");
if (G1HRRSUseSparseTable &&
- _sparse_table.add_card(from_hrs_ind, card_index)) {
+ _sparse_table.add_card(from_hrm_ind, card_index)) {
if (G1RecordHRRSOops) {
HeapRegionRemSet::record(hr(), from);
if (G1TraceHeapRegionRememberedSet) {
@@ -495,7 +495,7 @@
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr(" [tid %d] sparse table entry "
"overflow(f: %d, t: %u)",
- tid, from_hrs_ind, cur_hrm_ind);
+ tid, from_hrm_ind, cur_hrm_ind);
}
}
@@ -516,7 +516,7 @@
if (G1HRRSUseSparseTable) {
// Transfer from sparse to fine-grain.
- SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
+ SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
assert(sprt_entry != NULL, "There should have been an entry");
for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
CardIdx_t c = sprt_entry->card(i);
@@ -525,7 +525,7 @@
}
}
// Now we can delete the sparse entry.
- bool res = _sparse_table.delete_entry(from_hrs_ind);
+ bool res = _sparse_table.delete_entry(from_hrm_ind);
assert(res, "It should have been there.");
}
}
@@ -926,9 +926,25 @@
}
// Code roots support
+//
+// The code root set is protected by two separate locking schemes:
+// when at a safepoint, the per-hrrs lock must be held during modifications,
+// except when doing a full gc;
+// when not at a safepoint, the CodeCache_lock must be held during modifications.
+// While concurrent readers may access the contains() function
+// (during the evacuation phase), no removals are allowed.
void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
+ // Optimistic unlocked contains-check
+ if (!_code_roots.contains(nm)) {
+ MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
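+ // The add() below re-checks for duplicates under the lock, so losing the
+ // race after the unlocked check above is harmless.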
+ add_strong_code_root_locked(nm);
+ }
+}
+
+void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
+ assert(nm != NULL, "sanity");
_code_roots.add(nm);
}
@@ -936,96 +952,19 @@
assert(nm != NULL, "sanity");
assert_locked_or_safepoint(CodeCache_lock);
- _code_roots.remove_lock_free(nm);
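+ // No extra locking is needed when the caller already holds the CodeCache_lock;
+ // otherwise take the per-hrrs lock.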
+ MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
+ _code_roots.remove(nm);
// Check that there were no duplicates
guarantee(!_code_roots.contains(nm), "duplicate entry found");
}
-class NMethodMigrationOopClosure : public OopClosure {
- G1CollectedHeap* _g1h;
- HeapRegion* _from;
- nmethod* _nm;
-
- uint _num_self_forwarded;
-
- template <class T> void do_oop_work(T* p) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop)) {
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- if (_from->is_in(obj)) {
- // Reference still points into the source region.
- // Since roots are immediately evacuated this means that
- // we must have self forwarded the object
- assert(obj->is_forwarded(),
- err_msg("code roots should be immediately evacuated. "
- "Ref: "PTR_FORMAT", "
- "Obj: "PTR_FORMAT", "
- "Region: "HR_FORMAT,
- p, (void*) obj, HR_FORMAT_PARAMS(_from)));
- assert(obj->forwardee() == obj,
- err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
-
- // The object has been self forwarded.
- // Note, if we're during an initial mark pause, there is
- // no need to explicitly mark object. It will be marked
- // during the regular evacuation failure handling code.
- _num_self_forwarded++;
- } else {
- // The reference points into a promotion or to-space region
- HeapRegion* to = _g1h->heap_region_containing(obj);
- to->rem_set()->add_strong_code_root(_nm);
- }
- }
- }
-
-public:
- NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
- _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
-
- void do_oop(narrowOop* p) { do_oop_work(p); }
- void do_oop(oop* p) { do_oop_work(p); }
-
- uint retain() { return _num_self_forwarded > 0; }
-};
-
-void HeapRegionRemSet::migrate_strong_code_roots() {
- assert(hr()->in_collection_set(), "only collection set regions");
- assert(!hr()->isHumongous(),
- err_msg("humongous region "HR_FORMAT" should not have been added to the collection set",
- HR_FORMAT_PARAMS(hr())));
-
- ResourceMark rm;
-
- // List of code blobs to retain for this region
- GrowableArray<nmethod*> to_be_retained(10);
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
- while (!_code_roots.is_empty()) {
- nmethod *nm = _code_roots.pop();
- if (nm != NULL) {
- NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
- nm->oops_do(&oop_cl);
- if (oop_cl.retain()) {
- to_be_retained.push(nm);
- }
- }
- }
-
- // Now push any code roots we need to retain
- assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
- "Retained nmethod list must be empty or "
- "evacuation of this region failed");
-
- while (to_be_retained.is_nonempty()) {
- nmethod* nm = to_be_retained.pop();
- assert(nm != NULL, "sanity");
- add_strong_code_root(nm);
- }
+void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
+ _code_roots.nmethods_do(blk);
}
-void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
- _code_roots.nmethods_do(blk);
+void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
+ _code_roots.clean(hr);
}
size_t HeapRegionRemSet::strong_code_roots_mem_size() {
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -349,13 +349,13 @@
// Returns the memory occupancy of all static data structures associated
// with remembered sets.
static size_t static_mem_size() {
- return OtherRegionsTable::static_mem_size() + G1CodeRootSet::free_chunks_static_mem_size();
+ return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size();
}
// Returns the memory occupancy of all free_list data structures associated
// with remembered sets.
static size_t fl_mem_size() {
- return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::free_chunks_mem_size();
+ return OtherRegionsTable::fl_mem_size();
}
bool contains_reference(OopOrNarrowOopStar from) const {
@@ -365,18 +365,15 @@
// Routines for managing the list of code roots that point into
// the heap region that owns this RSet.
void add_strong_code_root(nmethod* nm);
+ void add_strong_code_root_locked(nmethod* nm);
void remove_strong_code_root(nmethod* nm);
- // During a collection, migrate the successfully evacuated strong
- // code roots that referenced into the region that owns this RSet
- // to the RSets of the new regions that they now point into.
- // Unsuccessfully evacuated code roots are not migrated.
- void migrate_strong_code_roots();
-
// Applies blk->do_code_blob() to each of the entries in
// the strong code roots list
void strong_code_roots_do(CodeBlobClosure* blk) const;
+ void clean_strong_code_roots(HeapRegion* hr);
+
// Returns the number of elements in the strong code roots list
size_t strong_code_roots_list_length() const {
return _code_roots.length();
--- a/hotspot/src/share/vm/memory/freeList.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/memory/freeList.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -34,7 +34,6 @@
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
#endif // INCLUDE_ALL_GCS
// Free list. A FreeList is used to access a linked list of chunks
@@ -333,5 +332,4 @@
template class FreeList<Metachunk>;
#if INCLUDE_ALL_GCS
template class FreeList<FreeChunk>;
-template class FreeList<G1CodeRootChunk>;
#endif // INCLUDE_ALL_GCS
--- a/hotspot/src/share/vm/utilities/hashtable.cpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/utilities/hashtable.cpp Thu Sep 04 12:25:05 2014 -0700
@@ -37,21 +37,22 @@
#include "utilities/numberSeq.hpp"
-// This is a generic hashtable, designed to be used for the symbol
-// and string tables.
-//
-// It is implemented as an open hash table with a fixed number of buckets.
-//
-// %note:
-// - HashtableEntrys are allocated in blocks to reduce the space overhead.
+// This hashtable is implemented as an open hash table with a fixed number of buckets.
-template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
- BasicHashtableEntry<F>* entry;
-
- if (_free_list) {
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
+ BasicHashtableEntry<F>* entry = NULL;
+ if (_free_list != NULL) {
entry = _free_list;
_free_list = _free_list->next();
- } else {
+ }
+ return entry;
+}
+
+// HashtableEntrys are allocated in blocks to reduce the space overhead.
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
+ BasicHashtableEntry<F>* entry = new_entry_free_list();
+
+ if (entry == NULL) {
if (_first_free_entry + _entry_size >= _end_block) {
int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
int len = _entry_size * block_size;
@@ -84,9 +85,9 @@
// This is somewhat an arbitrary heuristic but if one bucket gets to
// rehash_count which is currently 100, there's probably something wrong.
-template <MEMFLAGS F> bool BasicHashtable<F>::check_rehash_table(int count) {
- assert(table_size() != 0, "underflow");
- if (count > (((double)number_of_entries()/(double)table_size())*rehash_multiple)) {
+template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
+ assert(this->table_size() != 0, "underflow");
+ if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
// Set a flag for the next safepoint, which should be at some guaranteed
// safepoint interval.
return true;
@@ -94,13 +95,13 @@
return false;
}
-template <class T, MEMFLAGS F> juint Hashtable<T, F>::_seed = 0;
+template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;
// Create a new table and using alternate hash code, populate the new table
// with the existing elements. This can be used to change the hash code
// and could in the future change the size of the table.
-template <class T, MEMFLAGS F> void Hashtable<T, F>::move_to(Hashtable<T, F>* new_table) {
+template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {
// Initialize the global seed for hashing.
_seed = AltHashing::compute_seed();
@@ -110,7 +111,7 @@
// Iterate through the table and create a new entry for the new table
for (int i = 0; i < new_table->table_size(); ++i) {
- for (HashtableEntry<T, F>* p = bucket(i); p != NULL; ) {
+ for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
HashtableEntry<T, F>* next = p->next();
T string = p->literal();
// Use alternate hashing algorithm on the symbol in the first table
@@ -239,11 +240,11 @@
}
}
-template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(Symbol *symbol) {
+template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
return symbol->size() * HeapWordSize;
}
-template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(oop oop) {
+template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
// NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
// and the String.value array is shared by several Strings. However, starting from JDK8,
// the String.value array is not shared anymore.
@@ -256,12 +257,12 @@
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function Hashtable<T, F>::literal_size(MyNewType lit)
-template <class T, MEMFLAGS F> void Hashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
+template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
NumberSeq summary;
int literal_bytes = 0;
for (int i = 0; i < this->table_size(); ++i) {
int count = 0;
- for (HashtableEntry<T, F>* e = bucket(i);
+ for (HashtableEntry<T, F>* e = this->bucket(i);
e != NULL; e = e->next()) {
count++;
literal_bytes += literal_size(e->literal());
@@ -271,7 +272,7 @@
double num_buckets = summary.num();
double num_entries = summary.sum();
- int bucket_bytes = (int)num_buckets * sizeof(bucket(0));
+ int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
int entry_bytes = (int)num_entries * sizeof(HashtableEntry<T, F>);
int total_bytes = literal_bytes + bucket_bytes + entry_bytes;
@@ -354,12 +355,20 @@
// Explicitly instantiate these types
+#if INCLUDE_ALL_GCS
+template class Hashtable<nmethod*, mtGC>;
+template class HashtableEntry<nmethod*, mtGC>;
+template class BasicHashtable<mtGC>;
+#endif
template class Hashtable<ConstantPool*, mtClass>;
+template class RehashableHashtable<Symbol*, mtSymbol>;
+template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
+template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
--- a/hotspot/src/share/vm/utilities/hashtable.hpp Tue Sep 02 09:51:24 2014 -0700
+++ b/hotspot/src/share/vm/utilities/hashtable.hpp Thu Sep 04 12:25:05 2014 -0700
@@ -178,11 +178,6 @@
void verify_lookup_length(double load);
#endif
- enum {
- rehash_count = 100,
- rehash_multiple = 60
- };
-
void initialize(int table_size, int entry_size, int number_of_entries);
// Accessor
@@ -194,12 +189,12 @@
// The following method is not MT-safe and must be done under lock.
BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }
+ // Attempt to get an entry from the free list
+ BasicHashtableEntry<F>* new_entry_free_list();
+
// Table entry management
BasicHashtableEntry<F>* new_entry(unsigned int hashValue);
- // Check that the table is unbalanced
- bool check_rehash_table(int count);
-
// Used when moving the entry to another table
// Clean up links, but do not add to free_list
void unlink_entry(BasicHashtableEntry<F>* entry) {
@@ -277,8 +272,30 @@
return (HashtableEntry<T, F>**)BasicHashtable<F>::bucket_addr(i);
}
+};
+
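+// A Hashtable that can detect pathologically long bucket chains and be rebuilt
+// with an alternate hash code; used by the symbol and string tables.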
+template <class T, MEMFLAGS F> class RehashableHashtable : public Hashtable<T, F> {
+ protected:
+
+ enum {
+ rehash_count = 100,
+ rehash_multiple = 60
+ };
+
+ // Check that the table is unbalanced
+ bool check_rehash_table(int count);
+
+ public:
+ RehashableHashtable(int table_size, int entry_size)
+ : Hashtable<T, F>(table_size, entry_size) { }
+
+ RehashableHashtable(int table_size, int entry_size,
+ HashtableBucket<F>* buckets, int number_of_entries)
+ : Hashtable<T, F>(table_size, entry_size, buckets, number_of_entries) { }
+
+
// Function to move these elements into the new table.
- void move_to(Hashtable<T, F>* new_table);
+ void move_to(RehashableHashtable<T, F>* new_table);
static bool use_alternate_hashcode() { return _seed != 0; }
static juint seed() { return _seed; }
@@ -292,7 +309,6 @@
static int literal_size(ConstantPool *cp) {Unimplemented(); return 0;}
static int literal_size(Klass *k) {Unimplemented(); return 0;}
-public:
void dump_table(outputStream* st, const char *table_name);
private: