Fix various test issues.
--- a/src/hotspot/share/classfile/classLoaderData.cpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.cpp Thu Sep 19 15:21:27 2019 +0200
@@ -962,7 +962,7 @@
// Verify the integrity of the allocated space.
#ifdef ASSERT
if (metaspace_or_null() != NULL) {
- metaspace_or_null()->verify(false);
+ metaspace_or_null()->verify();
}
#endif
--- a/src/hotspot/share/memory/metaspace/classLoaderMetaspace.cpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/classLoaderMetaspace.cpp Thu Sep 19 15:21:27 2019 +0200
@@ -78,8 +78,12 @@
"class sm");
}
- DEBUG_ONLY(InternalStats::inc_num_metaspace_births();)
-
+#ifdef ASSERT
+ InternalStats::inc_num_metaspace_births();
+ if (_space_type == metaspace::UnsafeAnonymousMetaspaceType) {
+ InternalStats::inc_num_anon_cld_births();
+ }
+#endif
}
ClassLoaderMetaspace::~ClassLoaderMetaspace() {
@@ -88,7 +92,12 @@
delete _non_class_space_manager;
delete _class_space_manager;
- DEBUG_ONLY(InternalStats::inc_num_metaspace_deaths();)
+#ifdef ASSERT
+ InternalStats::inc_num_metaspace_deaths();
+ if (_space_type == metaspace::UnsafeAnonymousMetaspaceType) {
+ InternalStats::inc_num_anon_cld_deaths();
+ }
+#endif
}
@@ -159,13 +168,13 @@
}
#ifdef ASSERT
-void ClassLoaderMetaspace::verify(bool slow) const {
+void ClassLoaderMetaspace::verify() const {
check_valid_spacetype(_space_type);
if (non_class_space_manager() != NULL) {
- non_class_space_manager()->verify(slow);
+ non_class_space_manager()->verify();
}
if (class_space_manager() != NULL) {
- class_space_manager()->verify(slow);
+ class_space_manager()->verify();
}
}
#endif // ASSERT
--- a/src/hotspot/share/memory/metaspace/classLoaderMetaspace.hpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/classLoaderMetaspace.hpp Thu Sep 19 15:21:27 2019 +0200
@@ -76,7 +76,7 @@
// Update statistics. This walks all in-use chunks.
void add_to_statistics(clms_stats_t* out) const;
- DEBUG_ONLY(void verify(bool slow) const;)
+ DEBUG_ONLY(void verify() const;)
// TODO
size_t allocated_blocks_bytes() const { return 0; }
--- a/src/hotspot/share/memory/metaspace/counter.hpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/counter.hpp Thu Sep 19 15:21:27 2019 +0200
@@ -44,19 +44,35 @@
T _c;
+ // Optional name for easier reference
+ const char* const _name;
+
// Only allow unsigned values for now
STATIC_ASSERT(IsSigned<T>::value == false);
public:
- AbstractCounter() : _c(0) {}
+ AbstractCounter(const char* name) : _c(0), _name(name) {}
+ AbstractCounter() : _c(0), _name("") {}
T get() const { return _c; }
- void increment() { assert(_c + 1 > _c, "overflow"); _c ++; }
- void increment_by(T v) { assert(_c + v >= _c, "overflow"); _c += v; }
- void decrement() { assert(_c - 1 < _c, "underflow"); _c --; }
- void decrement_by(T v) { assert(_c - v <= _c, "underflow"); _c -= v; }
+ void increment() { increment_by(1); }
+ void decrement() { decrement_by(1); }
+
+ void increment_by(T v) {
+ assert(_c + v >= _c,
+ "%s overflow (" UINT64_FORMAT "+" UINT64_FORMAT ")",
+ _name, (uint64_t)_c, (uint64_t)v);
+ _c += v;
+ }
+
+ void decrement_by(T v) {
+ assert(_c - v <= _c,
+ "%s underflow (" UINT64_FORMAT "-" UINT64_FORMAT ")",
+ _name, (uint64_t)_c, (uint64_t)v);
+ _c -= v;
+ }
void reset() { _c = 0; }
@@ -78,19 +94,48 @@
volatile T _c;
+ // Optional name for easier reference
+ const char* const _name;
+
// Only allow unsigned values for now
STATIC_ASSERT(IsSigned<T>::value == false);
public:
- AbstractAtomicCounter() : _c(0) {}
+
+ AbstractAtomicCounter(const char* name) : _c(0), _name(name) {}
+ AbstractAtomicCounter() : _c(0), _name("") {}
T get() const { return _c; }
- void increment() { assert(_c + 1 > _c, "overflow"); Atomic::inc(&_c); }
- void increment_by(T v) { assert(_c + v >= _c, "overflow"); Atomic::add(v, &_c); }
- void decrement() { assert(_c - 1 < _c, "underflow"); Atomic::dec(&_c); }
- void decrement_by(T v) { assert(_c - v <= _c, "underflow"); Atomic::sub(v, &_c); }
+ void increment() {
+ assert(_c + 1 != 0,
+ "%s overflow (" UINT64_FORMAT "+1)",
+ _name, (uint64_t)_c);
+ Atomic::inc(&_c);
+ }
+
+ void decrement() {
+ assert(_c >= 1,
+ "%s underflow (" UINT64_FORMAT "-1)",
+ _name, (uint64_t)_c);
+ Atomic::dec(&_c);
+ }
+
+ void increment_by(T v) {
+ T v1 = _c;
+ T v2 = Atomic::add(v, &_c);
+    assert(v2 >= v1,
+ "%s overflow (" UINT64_FORMAT "+" UINT64_FORMAT ")",
+ _name, (uint64_t)v1, (uint64_t)v);
+ }
+
+ void decrement_by(T v) {
+ assert(_c >= v,
+ "%s underflow (" UINT64_FORMAT "-" UINT64_FORMAT ")",
+ _name, (uint64_t)_c, (uint64_t)v);
+ Atomic::sub(v, &_c);
+ }
#ifdef ASSERT
void check(T expected) const {
--- a/src/hotspot/share/memory/metaspace/internStat.hpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/internStat.hpp Thu Sep 19 15:21:27 2019 +0200
@@ -66,6 +66,12 @@
/* ... and died. */ \
x(num_metaspace_deaths) \
\
+ /* Number of times a ClassLoaderMetaspace was */ \
+ /* born for an anonymous class... */ \
+ x(num_anon_cld_births) \
+ /* ... and died. */ \
+ x(num_anon_cld_deaths) \
+ \
/* Number of times VirtualSpaceNode were */ \
/* created... */ \
x(num_vsnodes_created) \
--- a/src/hotspot/share/memory/metaspace/metaDebug.hpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metaDebug.hpp Thu Sep 19 15:21:27 2019 +0200
@@ -50,8 +50,26 @@
counter_ = 0; \
#define END_EVERY_NTH } } }
+
+#define SOMETIMES(code) \
+ EVERY_NTH(VerifyMetaspaceInterval) \
+ { code } \
+ END_EVERY_NTH
+
+#define ASSERT_SOMETIMES(condition, ...) \
+ EVERY_NTH(VerifyMetaspaceInterval) \
+ assert( (condition), __VA_ARGS__); \
+ END_EVERY_NTH
+
+#else
+
+#define SOMETIMES(code)
+#define ASSERT_SOMETIMES(condition, ...)
+
#endif // ASSERT
+
+
} // namespace metaspace
#endif // SHARE_MEMORY_METASPACE_METADEBUG_HPP
--- a/src/hotspot/share/memory/metaspace/metachunk.cpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metachunk.cpp Thu Sep 19 15:21:27 2019 +0200
@@ -30,8 +30,10 @@
#include "logging/log.hpp"
#include "memory/metaspace/chunkLevel.hpp"
#include "memory/metaspace/metachunk.hpp"
+#include "memory/metaspace/metaDebug.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
+#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
@@ -52,6 +54,12 @@
return '?';
}
+#ifdef ASSERT
+void Metachunk::assert_have_expand_lock() {
+ assert_lock_strong(MetaspaceExpand_lock);
+}
+#endif
+
// Commit uncommitted section of the chunk.
// Fails if we hit a commit limit.
bool Metachunk::commit_up_to(size_t new_committed_words) {
@@ -222,6 +230,8 @@
_used_words += request_word_size;
+ SOMETIMES(verify(false);)
+
return p;
}
@@ -275,74 +285,70 @@
}
}
-volatile MetaWord dummy = 0;
-
-void Metachunk::verify(bool slow) const {
-
- assert(!is_dead(), "dead chunk.");
- // Note: only call this on a life Metachunk.
- chklvl::check_valid_level(level());
+// Verifies linking with neighbors in virtual space.
+// Can only be done under expand lock protection.
+void Metachunk::verify_neighborhood() const {
- assert(base() != NULL, "No base ptr");
- assert(committed_words() >= used_words(),
- "mismatch: committed: " SIZE_FORMAT ", used: " SIZE_FORMAT ".",
- committed_words(), used_words());
- assert(word_size() >= committed_words(),
- "mismatch: word_size: " SIZE_FORMAT ", committed: " SIZE_FORMAT ".",
- word_size(), committed_words());
+ assert_lock_strong(MetaspaceExpand_lock);
+ assert(!is_dead(), "Do not call on dead chunks.");
- // Test base pointer
- assert(vsnode() != NULL, "No space");
- vsnode()->check_pointer(base());
- assert(base() != NULL, "Base pointer NULL");
-
- // Neighbors shall be adjacent to us...
- if (prev_in_vs() != NULL) {
- assert(prev_in_vs()->end() == base() &&
- prev_in_vs()->next_in_vs() == this,
- "Chunk " METACHUNK_FORMAT ": broken link to left neighbor: " METACHUNK_FORMAT ".",
- METACHUNK_FORMAT_ARGS(this), METACHUNK_FORMAT_ARGS(prev_in_vs()));
- }
+ if (is_root_chunk()) {
- if (next_in_vs() != NULL) {
- assert(end() == next_in_vs()->base() &&
- next_in_vs()->prev_in_vs() == this,
- "Chunk " METACHUNK_FORMAT ": broken link to right neighbor: " METACHUNK_FORMAT ".",
- METACHUNK_FORMAT_ARGS(this), METACHUNK_FORMAT_ARGS(next_in_vs()));
- }
+ // Root chunks are all alone in the world.
+    assert(next_in_vs() == NULL && prev_in_vs() == NULL, "Root chunks should have no neighbors");
- // Starting address shall be aligned to chunk size.
- const size_t required_alignment = word_size() * sizeof(MetaWord);
- assert_is_aligned(base(), required_alignment);
+ } else {
- if (!is_root_chunk()) {
+ // Non-root chunks have neighbors, at least one, possibly two.
assert(next_in_vs() != NULL || prev_in_vs() != NULL,
"A non-root chunk should have neighbors (chunk @" PTR_FORMAT
", base " PTR_FORMAT ", level " CHKLVL_FORMAT ".",
p2i(this), p2i(base()), level());
- // check buddy. Note: the chunk following us or preceeding us may
- // be our buddy or a splintered part of it.
+ if (prev_in_vs() != NULL) {
+ assert(prev_in_vs()->end() == base(),
+ "Chunk " METACHUNK_FULL_FORMAT ": should be adjacent to predecessor: " METACHUNK_FULL_FORMAT ".",
+ METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(prev_in_vs()));
+ assert(prev_in_vs()->next_in_vs() == this,
+ "Chunk " METACHUNK_FULL_FORMAT ": broken link to left neighbor: " METACHUNK_FULL_FORMAT " (" PTR_FORMAT ").",
+ METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(prev_in_vs()), p2i(prev_in_vs()->next_in_vs()));
+ }
+
+ if (next_in_vs() != NULL) {
+ assert(end() == next_in_vs()->base(),
+ "Chunk " METACHUNK_FULL_FORMAT ": should be adjacent to successor: " METACHUNK_FULL_FORMAT ".",
+ METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(next_in_vs()));
+ assert(next_in_vs()->prev_in_vs() == this,
+ "Chunk " METACHUNK_FULL_FORMAT ": broken link to right neighbor: " METACHUNK_FULL_FORMAT " (" PTR_FORMAT ").",
+ METACHUNK_FULL_FORMAT_ARGS(this), METACHUNK_FULL_FORMAT_ARGS(next_in_vs()), p2i(next_in_vs()->prev_in_vs()));
+ }
+
+ // One of the neighbors must be the buddy. It can be whole or splintered.
+
+    // The chunk following us or preceding us may be our buddy or a splintered part of it.
Metachunk* buddy = is_leader() ? next_in_vs() : prev_in_vs();
assert(buddy != NULL, "Missing neighbor.");
- assert(!buddy->is_dead(), "buddy dead.");
+ assert(!buddy->is_dead(), "Invalid buddy state.");
// This neighbor is either or buddy (same level) or a splinter of our buddy - hence
- // the level can never be smaller (larger chunk size).
+ // the level can never be smaller (aka the chunk size cannot be larger).
assert(buddy->level() >= level(), "Wrong level.");
+
if (buddy->level() == level()) {
- // we have a direct, unsplintered buddy.
- assert(buddy->is_leader() == !is_leader(), "Only one chunk can be leader in a pair");
+ // If the buddy is of the same size as us, it is unsplintered.
+ assert(buddy->is_leader() == !is_leader(),
+ "Only one chunk can be leader in a pair");
// When direct buddies are neighbors, one or both should be in use, otherwise they should
// have been merged.
- // Since we call verify() from internal functions where we are about to merge or just did split,
- // do not test this.
+ // But since we call this verification function from internal functions where we are about to merge or just did split,
+ // do not test this. We have RootChunkArea::verify_area_is_ideally_merged() for testing that.
+
// assert(buddy->is_in_use() || is_in_use(), "incomplete merging?");
if (is_leader()) {
@@ -352,15 +358,53 @@
assert(buddy->end() == base(), "Sanity");
assert(is_aligned(buddy->base(), word_size() * 2 * BytesPerWord), "Sanity");
}
+
} else {
+
// Buddy, but splintered, and this is a part of it.
if (is_leader()) {
assert(buddy->base() == end(), "Sanity");
} else {
assert(buddy->end() > (base() - word_size()), "Sanity");
}
+
}
}
+}
+
+volatile MetaWord dummy = 0;
+
+void Metachunk::verify(bool slow) const {
+
+ // Note. This should be called under CLD lock protection.
+
+ // We can verify everything except the _prev_in_vs/_next_in_vs pair.
+ // This is because neighbor chunks may be added concurrently, so we cannot rely
+ // on the content of _next_in_vs/_prev_in_vs unless we have the expand lock.
+
+ assert(!is_dead(), "Do not call on dead chunks.");
+
+  // Note: only call this on a live Metachunk.
+ chklvl::check_valid_level(level());
+
+ assert(base() != NULL, "No base ptr");
+
+ assert(committed_words() >= used_words(),
+ "mismatch: committed: " SIZE_FORMAT ", used: " SIZE_FORMAT ".",
+ committed_words(), used_words());
+
+ assert(word_size() >= committed_words(),
+ "mismatch: word_size: " SIZE_FORMAT ", committed: " SIZE_FORMAT ".",
+ word_size(), committed_words());
+
+ // Test base pointer
+ assert(base() != NULL, "Base pointer NULL");
+ assert(vsnode() != NULL, "No space");
+ vsnode()->check_pointer(base());
+
+ // Starting address shall be aligned to chunk size.
+ const size_t required_alignment = word_size() * sizeof(MetaWord);
+ assert_is_aligned(base(), required_alignment);
// If slow, test the committed area
if (slow && _committed_words > 0) {
@@ -399,7 +443,7 @@
return false;
}
-void MetachunkList::verify(bool slow) const {
+void MetachunkList::verify() const {
int num = 0;
const Metachunk* last_c = NULL;
for (const Metachunk* c = first(); c != NULL; c = c->next()) {
@@ -407,9 +451,7 @@
assert(c->prev() == last_c,
"Broken link to predecessor. Chunk " METACHUNK_FULL_FORMAT ".",
METACHUNK_FULL_FORMAT_ARGS(c));
- if (slow) {
- c->verify(false);
- }
+ c->verify(false);
last_c = c;
}
_num.check(num);
@@ -458,7 +500,7 @@
}
// Check each list.
- list_for_level(l)->verify(slow);
+ list_for_level(l)->verify();
num += list_for_level(l)->size();
word_size += list_for_level(l)->size() * chklvl::word_size_for_level(l);
--- a/src/hotspot/share/memory/metaspace/metachunk.hpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/metachunk.hpp Thu Sep 19 15:21:27 2019 +0200
@@ -63,11 +63,12 @@
// Used words.
size_t _used_words;
- // Guaranteed-to-be-committed-words, counted from base
+ // Size of the region, starting from base, which is guaranteed to be committed. In words.
+ // The actual size of committed regions may be larger, but it may be fragmented.
+ //
// (This is a performance optimization. The underlying VirtualSpaceNode knows
// which granules are committed; but we want to avoid asking it unnecessarily
- // in Metachunk::allocate(), so we keep a limit until which we are guaranteed
- // to have committed memory under us.)
+ // in Metachunk::allocate().)
size_t _committed_words;
chklvl_t _level; // aka size.
@@ -99,6 +100,10 @@
// Furthermore, we keep, per chunk, information about the neighboring chunks.
// This is needed to split and merge chunks.
+ //
+ // Note: These members can be modified concurrently while a chunk is alive and in use.
+ // This can happen if a neighboring chunk is added or removed.
+ // This means only read or modify these members under expand lock protection.
Metachunk* _prev_in_vs;
Metachunk* _next_in_vs;
@@ -108,6 +113,8 @@
// Fails if we hit a commit limit.
bool commit_up_to(size_t new_committed_words);
+ DEBUG_ONLY(static void assert_have_expand_lock();)
+
public:
Metachunk()
@@ -136,10 +143,10 @@
DEBUG_ONLY(bool in_list() const { return _prev != NULL || _next != NULL; })
// Physical neighbors wiring
- void set_prev_in_vs(Metachunk* c) { _prev_in_vs = c; }
- Metachunk* prev_in_vs() const { return _prev_in_vs; }
- void set_next_in_vs(Metachunk* c) { _next_in_vs = c; }
- Metachunk* next_in_vs() const { return _next_in_vs; }
+ void set_prev_in_vs(Metachunk* c) { DEBUG_ONLY(assert_have_expand_lock()); _prev_in_vs = c; }
+ Metachunk* prev_in_vs() const { DEBUG_ONLY(assert_have_expand_lock()); return _prev_in_vs; }
+ void set_next_in_vs(Metachunk* c) { DEBUG_ONLY(assert_have_expand_lock()); _next_in_vs = c; }
+ Metachunk* next_in_vs() const { DEBUG_ONLY(assert_have_expand_lock()); return _next_in_vs; }
bool is_free() const { return _state == state_free; }
bool is_in_use() const { return _state == state_in_use; }
@@ -226,6 +233,8 @@
//// Debug stuff ////
#ifdef ASSERT
void verify(bool slow) const;
+ // Verifies linking with neighbors in virtual space. Needs expand lock protection.
+ void verify_neighborhood() const;
void zap_header(uint8_t c = 0x17);
void fill_with_pattern(MetaWord pattern, size_t word_size);
void check_pattern(MetaWord pattern, size_t word_size);
@@ -313,7 +322,7 @@
#ifdef ASSERT
bool contains(const Metachunk* c) const;
- void verify(bool slow) const;
+ void verify() const;
#endif
// Returns size, in words, of committed space of all chunks in this list.
--- a/src/hotspot/share/memory/metaspace/rootChunkArea.cpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/rootChunkArea.cpp Thu Sep 19 15:21:27 2019 +0200
@@ -32,6 +32,7 @@
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/rootChunkArea.hpp"
+#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -422,6 +423,8 @@
void RootChunkArea::verify(bool slow) const {
+ assert_lock_strong(MetaspaceExpand_lock);
+
assert_is_aligned(_base, chklvl::MAX_CHUNK_BYTE_SIZE);
// Iterate thru all chunks in this area. They must be ordered correctly,
@@ -433,7 +436,6 @@
assrt_(_first_chunk->prev_in_vs() == NULL, "Sanity");
const Metachunk* c = _first_chunk;
- const Metachunk* c_last = NULL;
const MetaWord* expected_next_base = _base;
const MetaWord* const area_end = _base + word_size();
@@ -450,37 +452,10 @@
assrt_(is_aligned(c->base(), c->word_size()),
"misaligned chunk %d " METACHUNK_FORMAT ".", num_chunk, METACHUNK_FORMAT_ARGS(c));
- const Metachunk* const successor = c->next_in_vs();
- if (successor != NULL) {
- assrt_(successor->prev_in_vs() == c,
- "Chunk No. %d " METACHUNK_FORMAT " - vs link to successor " METACHUNK_FORMAT " broken.", num_chunk,
- METACHUNK_FORMAT_ARGS(c), METACHUNK_FORMAT_ARGS(successor));
- assrt_(c->end() == successor->base(),
- "areas between neighbor chunks do not connect: "
- "this chunk %d " METACHUNK_FORMAT " and successor chunk %d " METACHUNK_FORMAT ".",
- num_chunk, METACHUNK_FORMAT_ARGS(c), num_chunk + 1, METACHUNK_FORMAT_ARGS(successor));
- }
-
- if (c_last != NULL) {
- assrt_(c->prev_in_vs() == c_last,
- "Chunk No. %d " METACHUNK_FORMAT " - vs backlink invalid.", num_chunk, METACHUNK_FORMAT_ARGS(c));
- assrt_(c_last->end() == c->base(),
- "areas between neighbor chunks do not connect: "
- "previous chunk %d " METACHUNK_FORMAT " and this chunk %d " METACHUNK_FORMAT ".",
- num_chunk - 1, METACHUNK_FORMAT_ARGS(c_last), num_chunk, METACHUNK_FORMAT_ARGS(c));
- } else {
- assrt_(c->prev_in_vs() == NULL,
- "unexpected back link: chunk %d " METACHUNK_FORMAT ".",
- num_chunk, METACHUNK_FORMAT_ARGS(c));
- assrt_(c == _first_chunk,
- "should be first: chunk %d " METACHUNK_FORMAT ".",
- num_chunk, METACHUNK_FORMAT_ARGS(c));
- }
-
- c->verify(slow); // <- also checks alignment and level etc
+ c->verify_neighborhood();
+ c->verify(slow);
expected_next_base = c->end();
- c_last = c;
num_chunk ++;
c = c->next_in_vs();
@@ -492,6 +467,9 @@
}
void RootChunkArea::verify_area_is_ideally_merged() const {
+
+ assert_lock_strong(MetaspaceExpand_lock);
+
int num_chunk = 0;
for (const Metachunk* c = _first_chunk; c != NULL; c = c->next_in_vs()) {
if (!c->is_root_chunk() && c->is_free()) {
--- a/src/hotspot/share/memory/metaspace/runningCounters.cpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/runningCounters.cpp Thu Sep 19 15:21:27 2019 +0200
@@ -31,8 +31,8 @@
namespace metaspace {
-SizeAtomicCounter RunningCounters::_used_class_counter;
-SizeAtomicCounter RunningCounters::_used_nonclass_counter;
+SizeAtomicCounter RunningCounters::_used_class_counter("used-words-in-class-space");
+SizeAtomicCounter RunningCounters::_used_nonclass_counter("used-words-in-nonclass-space");
// Return reserved size, in words, for Metaspace
size_t RunningCounters::reserved_words() {
--- a/src/hotspot/share/memory/metaspace/spaceManager.cpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/spaceManager.cpp Thu Sep 19 15:21:27 2019 +0200
@@ -206,6 +206,8 @@
"Chunk retiring did not work (current chunk " METACHUNK_FULL_FORMAT ").",
METACHUNK_FULL_FORMAT_ARGS(current_chunk()));
+ DEBUG_ONLY(verify_locked();)
+
}
// Allocate memory from Metaspace.
@@ -321,6 +323,8 @@
assert(p != NULL || (p == NULL && did_hit_limit), "Sanity");
+ SOMETIMES(verify_locked();)
+
if (p == NULL) {
DEBUG_ONLY(InternalStats::inc_num_allocs_failed_limit();)
} else {
@@ -362,6 +366,8 @@
add_allocation_to_block_freelist(p, raw_word_size);
+ DEBUG_ONLY(verify_locked();)
+
}
// Prematurely returns a metaspace allocation to the _block_freelists because it is not
@@ -395,18 +401,25 @@
out->free_blocks_word_size += block_freelist()->total_size();
}
- DEBUG_ONLY(out->verify();)
+ SOMETIMES(out->verify();)
}
#ifdef ASSERT
-void SpaceManager::verify(bool slow) const {
+void SpaceManager::verify_locked() const {
- MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
+ assert_lock_strong(lock());
assert(_chunk_alloc_sequence != NULL && _chunk_manager != NULL, "Sanity");
- _chunks.verify(true);
+ _chunks.verify();
+
+}
+
+void SpaceManager::verify() const {
+
+ MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
+ verify_locked();
}
--- a/src/hotspot/share/memory/metaspace/spaceManager.hpp Wed Sep 18 07:46:02 2019 +0200
+++ b/src/hotspot/share/memory/metaspace/spaceManager.hpp Thu Sep 19 15:21:27 2019 +0200
@@ -133,8 +133,8 @@
// Update statistics. This walks all in-use chunks.
void add_to_statistics(sm_stats_t* out) const;
- // Run verifications. slow=true: verify chunk-internal integrity too.
- DEBUG_ONLY(void verify(bool slow) const;)
+ DEBUG_ONLY(void verify() const;)
+ DEBUG_ONLY(void verify_locked() const;)
};