--- a/src/hotspot/share/gc/z/zNMethodTable.cpp Tue Feb 19 13:47:45 2019 +0100
+++ b/src/hotspot/share/gc/z/zNMethodTable.cpp Wed Feb 20 10:46:39 2019 +0100
@@ -153,7 +153,6 @@
ZNMethodTableEntry* ZNMethodTable::_table = NULL;
size_t ZNMethodTable::_size = 0;
-ZLock ZNMethodTable::_iter_lock;
ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
size_t ZNMethodTable::_iter_table_size = 0;
ZArray<void*> ZNMethodTable::_iter_deferred_deletes;
@@ -162,11 +161,12 @@
volatile size_t ZNMethodTable::_claimed = 0;

void ZNMethodTable::safe_delete(void* data) {
+ assert(CodeCache_lock->owned_by_self(), "Lock must be held");
+
if (data == NULL) {
return;
}

- ZLocker<ZLock> locker(&_iter_lock);
if (_iter_table != NULL) {
// Iteration in progress, defer delete
_iter_deferred_deletes.add(data);
@@ -290,7 +290,8 @@
}

void ZNMethodTable::rebuild(size_t new_size) {
- ZLocker<ZLock> locker(&_iter_lock);
+ assert(CodeCache_lock->owned_by_self(), "Lock must be held");
+
assert(is_power_of_2(new_size), "Invalid size");

log_debug(gc, nmethod)("Rebuilding NMethod Table: "
@@ -475,7 +476,6 @@

void ZNMethodTable::nmethod_entries_do_begin() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- ZLocker<ZLock> locker(&_iter_lock);

// Prepare iteration
_iter_table = _table;
@@ -486,7 +486,6 @@

void ZNMethodTable::nmethod_entries_do_end() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- ZLocker<ZLock> locker(&_iter_lock);

// Finish iteration
if (_iter_table != _table) {
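
Note (illustrative, not part of the patch): on the .cpp side the private _iter_lock is replaced by a precondition on the pre-existing CodeCache_lock. safe_delete() and rebuild() now only assert that the caller already owns that lock, and deletes requested while an iteration snapshot is live are still pushed onto _iter_deferred_deletes. A minimal self-contained sketch of that pattern, with std::mutex standing in for CodeCache_lock and every name below being illustrative:

// Sketch only: one external mutex guards both mutation and iteration, and
// deletes requested while an iteration snapshot is active are deferred until
// the iteration ends. Only the shape mirrors safe_delete() and
// nmethod_entries_do_begin()/nmethod_entries_do_end().
#include <cassert>
#include <mutex>
#include <vector>

class ExampleTable {
public:
  // Caller must hold the external lock, mirroring the new
  // CodeCache_lock->owned_by_self() asserts.
  void safe_delete(std::unique_lock<std::mutex>& lock, int* data) {
    assert(lock.owns_lock() && "Lock must be held");
    if (data == nullptr) {
      return;
    }
    if (_iter_active) {
      _deferred.push_back(data);  // Iteration in progress, defer delete
    } else {
      delete data;                // No iteration, delete immediately
    }
  }

  void iteration_begin(std::unique_lock<std::mutex>& lock) {
    assert(lock.owns_lock() && "Lock must be held");
    _iter_active = true;          // An iteration snapshot is now live
  }

  void iteration_end(std::unique_lock<std::mutex>& lock) {
    assert(lock.owns_lock() && "Lock must be held");
    _iter_active = false;
    for (int* p : _deferred) {    // Flush deletes deferred during iteration
      delete p;
    }
    _deferred.clear();
  }

private:
  bool _iter_active = false;
  std::vector<int*> _deferred;
};

The design point is the assert line: instead of taking a second, table-private lock on top of CodeCache_lock, the table now simply requires the single lock its callers already hold.
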
--- a/src/hotspot/share/gc/z/zNMethodTable.hpp Tue Feb 19 13:47:45 2019 +0100
+++ b/src/hotspot/share/gc/z/zNMethodTable.hpp Wed Feb 20 10:46:39 2019 +0100
@@ -41,7 +41,6 @@
private:
static ZNMethodTableEntry* _table;
static size_t _size;
- static ZLock _iter_lock;
static ZNMethodTableEntry* _iter_table;
static size_t _iter_table_size;
static ZArray<void*> _iter_deferred_deletes;
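
Note (illustrative, not part of the patch): the header change only drops the _iter_lock field. The fields that remain (_iter_table, _iter_table_size, plus the volatile _claimed counter visible in the .cpp hunks) are presumably what lets worker threads walk a published snapshot in parallel between nmethod_entries_do_begin() and nmethod_entries_do_end(). A rough self-contained sketch of that claim-based iteration style, with every name below being illustrative rather than quoted from ZGC:

// Sketch only: workers claim disjoint index ranges from a shared atomic
// counter and walk a snapshot that is published before they start and
// retired after they finish, roughly what _iter_table/_iter_table_size/
// _claimed make possible between the begin()/end() calls above.
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

struct Entry { int value; };

static Entry*              g_iter_table      = nullptr;
static size_t              g_iter_table_size = 0;
static std::atomic<size_t> g_claimed{0};

// Each worker grabs the next unclaimed range until the table is exhausted.
static void worker_iterate(size_t stride) {
  for (;;) {
    const size_t from = g_claimed.fetch_add(stride);
    if (from >= g_iter_table_size) {
      return;  // Everything has been claimed
    }
    const size_t to = std::min(from + stride, g_iter_table_size);
    for (size_t i = from; i < to; i++) {
      std::printf("entry %zu -> %d\n", i, g_iter_table[i].value);
    }
  }
}

int main() {
  std::vector<Entry> table(16);
  for (size_t i = 0; i < table.size(); i++) {
    table[i].value = static_cast<int>(i);
  }

  // "begin": publish the snapshot and reset the claim counter.
  g_iter_table      = table.data();
  g_iter_table_size = table.size();
  g_claimed.store(0);

  std::thread a(worker_iterate, 4);
  std::thread b(worker_iterate, 4);
  a.join();
  b.join();

  // "end": retire the snapshot.
  g_iter_table      = nullptr;
  g_iter_table_size = 0;
  return 0;
}
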