8005048: NMT: #loaded classes needs to just show the # defined classes
Summary: Count number of instance classes so that it matches class metadata size
Reviewed-by: coleenp, acorn
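
The change replaces the SystemDictionary-based class count with a dedicated counter of live InstanceKlass instances: incremented when a class is allocated, decremented when its native structures are released. A minimal standalone sketch of the counting pattern (using std::atomic in place of HotSpot's Atomic::inc/Atomic::dec on a volatile int; the class and function names here are illustrative, not HotSpot's):

    #include <atomic>
    #include <cassert>

    // Stand-in for InstanceKlass::_total_instanceKlass_count.
    static std::atomic<int> total_instance_count{0};

    class TrackedKlass {
     public:
      TrackedKlass() {
        // Mirrors the Atomic::inc in allocate_instance_klass().
        total_instance_count.fetch_add(1, std::memory_order_relaxed);
      }
      ~TrackedKlass() {
        // Mirrors the Atomic::dec on the release path, with the same sanity check.
        assert(total_instance_count.load() >= 1 && "Sanity check");
        total_instance_count.fetch_sub(1, std::memory_order_relaxed);
      }
      // Counterpart of InstanceKlass::number_of_instance_classes().
      static int number_of_instances() { return total_instance_count.load(); }
    };
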
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Tue Jan 08 14:04:25 2013 -0500
@@ -160,6 +160,8 @@
#endif // ndef DTRACE_ENABLED
+volatile int InstanceKlass::_total_instanceKlass_count = 0;
+
Klass* InstanceKlass::allocate_instance_klass(ClassLoaderData* loader_data,
int vtable_len,
int itable_len,
@@ -203,6 +205,7 @@
access_flags, !host_klass.is_null());
}
+ Atomic::inc(&_total_instanceKlass_count);
return ik;
}
@@ -2306,6 +2309,9 @@
if (_array_name != NULL) _array_name->decrement_refcount();
if (_source_file_name != NULL) _source_file_name->decrement_refcount();
if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension, mtClass);
+
+ assert(_total_instanceKlass_count >= 1, "Sanity check");
+ Atomic::dec(&_total_instanceKlass_count);
}
void InstanceKlass::set_source_file_name(Symbol* n) {
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Tue Jan 08 14:04:25 2013 -0500
@@ -31,6 +31,7 @@
#include "oops/fieldInfo.hpp"
#include "oops/instanceOop.hpp"
#include "oops/klassVtable.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/handles.hpp"
#include "runtime/os.hpp"
#include "utilities/accessFlags.hpp"
@@ -170,6 +171,11 @@
initialization_error // error happened during initialization
};
+ static int number_of_instance_classes() { return _total_instanceKlass_count; }
+
+ private:
+ static volatile int _total_instanceKlass_count;
+
protected:
// Protection domain.
oop _protection_domain;
--- a/hotspot/src/share/vm/services/memBaseline.cpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/services/memBaseline.cpp Tue Jan 08 14:04:25 2013 -0500
@@ -22,7 +22,6 @@
*
*/
#include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"
@@ -349,7 +348,7 @@
reset();
_baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
baseline_vm_summary(snapshot._vm_ptrs);
- _number_of_classes = SystemDictionary::number_of_classes();
+ _number_of_classes = snapshot.number_of_classes();
if (!summary_only && MemTracker::track_callsite() && _baselined) {
_baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
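
The baseline now reads the class count from the snapshot, where the memTrackWorker changes below stamp it at the same sync point as the memory data, rather than querying the live SystemDictionary, whose count can drift between that sync point and the moment the baseline runs. A sketch of the difference (live_class_count() and the field names are hypothetical stand-ins):

    #include <cstdio>

    struct Snapshot {
      long bytes_used;         // memory data promoted from one generation
      int  number_of_classes;  // class count stamped at the same sync point
    };

    static int live_class_count() { return 42; }  // stands in for the live dictionary

    // Consistent: both values describe the same instant.
    static void baseline_from(const Snapshot& s) {
      std::printf("%ld bytes, %d classes\n", s.bytes_used, s.number_of_classes);
    }

    // Racy (the old behavior): the memory data is from an earlier sync point,
    // but the class count is whatever the live dictionary holds right now.
    static void baseline_racy(const Snapshot& s) {
      std::printf("%ld bytes, %d classes\n", s.bytes_used, live_class_count());
    }
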
--- a/hotspot/src/share/vm/services/memRecorder.cpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/services/memRecorder.cpp Tue Jan 08 14:04:25 2013 -0500
@@ -84,10 +84,13 @@
}
delete _pointer_records;
}
- if (_next != NULL) {
- delete _next;
+ // delete all linked recorders
+ while (_next != NULL) {
+ MemRecorder* tmp = _next;
+ _next = _next->next();
+ tmp->set_next(NULL);
+ delete tmp;
}
-
Atomic::dec(&_instance_count);
}
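
With recorders now queued as a linked chain per generation, the destructor must free a whole chain; the loop above does so iteratively, where the old `delete _next` freed the chain by recursion (each destructor deleting the next node), growing the stack with the chain length. A standalone sketch of the same transformation (Node is a hypothetical stand-in for MemRecorder):

    struct Node {
      Node* next = nullptr;

      // Old shape: `delete next;` here recurses one destructor frame per node.
      // New shape: unlink and delete one node at a time, constant stack depth.
      ~Node() {
        while (next != nullptr) {
          Node* tmp = next;
          next = next->next;    // advance before deleting
          tmp->next = nullptr;  // so tmp's destructor does not walk the chain
          delete tmp;
        }
      }
    };

    int main() {
      Node* head = new Node;
      head->next = new Node;
      head->next->next = new Node;
      delete head;  // frees the entire chain without recursion
      return 0;
    }
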
--- a/hotspot/src/share/vm/services/memRecorder.hpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/services/memRecorder.hpp Tue Jan 08 14:04:25 2013 -0500
@@ -203,6 +203,7 @@
friend class MemSnapshot;
friend class MemTracker;
friend class MemTrackWorker;
+ friend class GenerationData;
protected:
// the array that holds memory records
--- a/hotspot/src/share/vm/services/memSnapshot.cpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/services/memSnapshot.cpp Tue Jan 08 14:04:25 2013 -0500
@@ -384,6 +384,7 @@
_staging_area.init();
_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
NOT_PRODUCT(_untracked_count = 0;)
+ _number_of_classes = 0;
}
MemSnapshot::~MemSnapshot() {
@@ -479,7 +480,7 @@
// promote data to next generation
-bool MemSnapshot::promote() {
+bool MemSnapshot::promote(int number_of_classes) {
assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
"Just check");
@@ -496,6 +497,7 @@
NOT_PRODUCT(check_malloc_pointers();)
_staging_area.clear();
+ _number_of_classes = number_of_classes;
return promoted;
}
--- a/hotspot/src/share/vm/services/memSnapshot.hpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/services/memSnapshot.hpp Tue Jan 08 14:04:25 2013 -0500
@@ -355,6 +355,9 @@
// the lock to protect this snapshot
Monitor* _lock;
+ // the number of instance classes
+ int _number_of_classes;
+
NOT_PRODUCT(size_t _untracked_count;)
friend class MemBaseline;
@@ -375,8 +378,9 @@
// merge a per-thread memory recorder into staging area
bool merge(MemRecorder* rec);
// promote staged data to snapshot
- bool promote();
+ bool promote(int number_of_classes);
+ int number_of_classes() const { return _number_of_classes; }
void wait(long timeout) {
assert(_lock != NULL, "Just check");
--- a/hotspot/src/share/vm/services/memTrackWorker.cpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/services/memTrackWorker.cpp Tue Jan 08 14:04:25 2013 -0500
@@ -29,6 +29,16 @@
#include "utilities/decoder.hpp"
#include "utilities/vmError.hpp"
+
+void GenerationData::reset() {
+ _number_of_classes = 0;
+ while (_recorder_list != NULL) {
+ MemRecorder* tmp = _recorder_list;
+ _recorder_list = _recorder_list->next();
+ MemTracker::release_thread_recorder(tmp);
+ }
+}
+
MemTrackWorker::MemTrackWorker() {
// create thread uses cgc thread type for now. We should revisit
// the option, or create new thread type.
@@ -39,7 +49,7 @@
if (!has_error()) {
_head = _tail = 0;
for(int index = 0; index < MAX_GENERATIONS; index ++) {
- _gen[index] = NULL;
+ ::new ((void*)&_gen[index]) GenerationData();
}
}
NOT_PRODUCT(_sync_point_count = 0;)
@@ -49,10 +59,7 @@
MemTrackWorker::~MemTrackWorker() {
for (int index = 0; index < MAX_GENERATIONS; index ++) {
- MemRecorder* rc = _gen[index];
- if (rc != NULL) {
- delete rc;
- }
+ _gen[index].reset();
}
}
@@ -90,12 +97,7 @@
{
// take a recorder from earliest generation in buffer
ThreadCritical tc;
- rec = _gen[_head];
- if (rec != NULL) {
- _gen[_head] = rec->next();
- }
- assert(count_recorder(_gen[_head]) <= MemRecorder::_instance_count,
- "infinite loop after dequeue");
+ rec = _gen[_head].next_recorder();
}
if (rec != NULL) {
// merge the recorder into staging area
@@ -109,16 +111,20 @@
// no more recorder to merge, promote staging area
// to snapshot
if (_head != _tail) {
+ int number_of_classes;
{
ThreadCritical tc;
- if (_gen[_head] != NULL || _head == _tail) {
+ if (_gen[_head].has_more_recorder() || _head == _tail) {
continue;
}
+ number_of_classes = _gen[_head].number_of_classes();
+ _gen[_head].reset();
+
// done with this generation, increment _head pointer
_head = (_head + 1) % MAX_GENERATIONS;
}
// promote this generation data to snapshot
- if (!snapshot->promote()) {
+ if (!snapshot->promote(number_of_classes)) {
// failed to promote, means out of memory
MemTracker::shutdown(MemTracker::NMT_out_of_memory);
}
@@ -126,8 +132,8 @@
snapshot->wait(1000);
ThreadCritical tc;
// check if more data arrived
- if (_gen[_head] == NULL) {
- _gen[_head] = MemTracker::get_pending_recorders();
+ if (!_gen[_head].has_more_recorder()) {
+ _gen[_head].add_recorders(MemTracker::get_pending_recorders());
}
}
}
@@ -147,7 +153,7 @@
// 1. add all recorders in pending queue to current generation
// 2. increase generation
-void MemTrackWorker::at_sync_point(MemRecorder* rec) {
+void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) {
NOT_PRODUCT(_sync_point_count ++;)
assert(count_recorder(rec) <= MemRecorder::_instance_count,
"pending queue has infinite loop");
@@ -155,23 +161,15 @@
bool out_of_generation_buffer = false;
// check shutdown state inside ThreadCritical
if (MemTracker::shutdown_in_progress()) return;
+
+ _gen[_tail].set_number_of_classes(number_of_classes);
// append the recorders to the end of the generation
- if( rec != NULL) {
- MemRecorder* cur_head = _gen[_tail];
- if (cur_head == NULL) {
- _gen[_tail] = rec;
- } else {
- while (cur_head->next() != NULL) {
- cur_head = cur_head->next();
- }
- cur_head->set_next(rec);
- }
- }
- assert(count_recorder(rec) <= MemRecorder::_instance_count,
+ _gen[_tail].add_recorders(rec);
+ assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count,
"after add to current generation has infinite loop");
// we have collected all recorders for this generation. If there is data,
// we need to increment _tail to start a new generation.
- if (_gen[_tail] != NULL || _head == _tail) {
+ if (_gen[_tail].has_more_recorder() || _head == _tail) {
_tail = (_tail + 1) % MAX_GENERATIONS;
out_of_generation_buffer = (_tail == _head);
}
@@ -194,7 +192,7 @@
int MemTrackWorker::count_pending_recorders() const {
int count = 0;
for (int index = 0; index < MAX_GENERATIONS; index ++) {
- MemRecorder* head = _gen[index];
+ MemRecorder* head = _gen[index].peek();
if (head != NULL) {
count += count_recorder(head);
}
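
Putting the worker changes together: a generation is now a GenerationData pairing a recorder list with the class count captured at its sync point; at_sync_point() fills the tail slot and promote() consumes the head slot together with that count. A simplified single-threaded model of the ring (locking, staging, and error paths omitted; sizes and names are illustrative):

    #include <cstdio>

    const int MAX_GEN = 4;  // the real buffer uses MAX_GENERATIONS (512)

    struct Gen {
      int number_of_classes = 0;
      int pending_records   = 0;  // stands in for the MemRecorder list
    };

    static Gen gen[MAX_GEN];
    static int head = 0, tail = 0;

    // Producer side (sync point): stamp the class count, append the pending
    // recorders, and advance tail to start a new generation.
    static void at_sync_point(int records, int number_of_classes) {
      gen[tail].number_of_classes = number_of_classes;
      gen[tail].pending_records += records;
      if (gen[tail].pending_records > 0 || head == tail) {
        tail = (tail + 1) % MAX_GEN;
      }
    }

    // Consumer side (worker thread): drain the head generation, then promote
    // it to the snapshot along with the class count recorded for it.
    static void worker_step() {
      if (head == tail) return;  // no complete generation to promote
      Gen& g = gen[head];
      // ... merge g.pending_records into the staging area here ...
      std::printf("promote: %d classes\n", g.number_of_classes);
      g = Gen();                 // like GenerationData::reset()
      head = (head + 1) % MAX_GEN;
    }

    int main() {
      at_sync_point(/*records=*/3, /*number_of_classes=*/120);
      at_sync_point(/*records=*/1, /*number_of_classes=*/125);
      worker_step();  // prints "promote: 120 classes"
      worker_step();  // prints "promote: 125 classes"
      return 0;
    }
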
--- a/hotspot/src/share/vm/services/memTrackWorker.hpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/services/memTrackWorker.hpp Tue Jan 08 14:04:25 2013 -0500
@@ -32,17 +32,58 @@
// At most MAX_GENERATIONS generations of data can be tracked.
#define MAX_GENERATIONS 512
+class GenerationData : public _ValueObj {
+ private:
+ int _number_of_classes;
+ MemRecorder* _recorder_list;
+
+ public:
+ GenerationData(): _number_of_classes(0), _recorder_list(NULL) { }
+
+ inline int number_of_classes() const { return _number_of_classes; }
+ inline void set_number_of_classes(int num) { _number_of_classes = num; }
+
+ inline MemRecorder* next_recorder() {
+ if (_recorder_list == NULL) {
+ return NULL;
+ } else {
+ MemRecorder* tmp = _recorder_list;
+ _recorder_list = _recorder_list->next();
+ return tmp;
+ }
+ }
+
+ inline bool has_more_recorder() const {
+ return (_recorder_list != NULL);
+ }
+
+ // add recorders to this generation
+ void add_recorders(MemRecorder* head) {
+ if (head != NULL) {
+ if (_recorder_list == NULL) {
+ _recorder_list = head;
+ } else {
+ MemRecorder* tmp = _recorder_list;
+ for (; tmp->next() != NULL; tmp = tmp->next());
+ tmp->set_next(head);
+ }
+ }
+ }
+
+ void reset();
+
+ NOT_PRODUCT(MemRecorder* peek() const { return _recorder_list; })
+};
class MemTrackWorker : public NamedThread {
private:
- // circular buffer. This buffer contains recorders to be merged into global
+ // circular buffer. This buffer contains generation data to be merged into global
// snapshot.
- // Each slot holds a linked list of memory recorders, that contains one
- // generation of memory data.
- MemRecorder* _gen[MAX_GENERATIONS];
- int _head, _tail; // head and tail pointers to above circular buffer
+ // Each slot holds a generation
+ GenerationData _gen[MAX_GENERATIONS];
+ int _head, _tail; // head and tail pointers to above circular buffer
- bool _has_error;
+ bool _has_error;
public:
MemTrackWorker();
@@ -56,7 +97,7 @@
inline bool has_error() const { return _has_error; }
// task at synchronization point
- void at_sync_point(MemRecorder* pending_recorders);
+ void at_sync_point(MemRecorder* pending_recorders, int number_of_classes);
// for debugging purpose, they are not thread safe.
NOT_PRODUCT(static int count_recorder(const MemRecorder* head);)
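
One consequence of _gen becoming an array of GenerationData values rather than MemRecorder pointers shows up in the constructor hunk earlier: each slot is initialized with placement new instead of being set to NULL. A minimal sketch of that idiom, constructing objects into pre-allocated storage (Slot is hypothetical):

    #include <new>

    struct Slot {
      int count;
      Slot() : count(0) {}
    };

    int main() {
      const int N = 8;
      // Raw, suitably aligned storage for N slots.
      alignas(Slot) unsigned char storage[N * sizeof(Slot)];
      Slot* slots = reinterpret_cast<Slot*>(storage);
      for (int i = 0; i < N; i++) {
        // Default-construct slot i in place, the same form as
        // ::new ((void*)&_gen[index]) GenerationData() in the patch.
        ::new ((void*)&slots[i]) Slot();
      }
      // Slot is trivially destructible, so no explicit destructor calls needed.
      return slots[0].count;
    }
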
--- a/hotspot/src/share/vm/services/memTracker.cpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/services/memTracker.cpp Tue Jan 08 14:04:25 2013 -0500
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "oops/instanceKlass.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
@@ -485,7 +486,7 @@
}
// check _worker_thread with lock to avoid racing condition
if (_worker_thread != NULL) {
- _worker_thread->at_sync_point(pending_recorders);
+ _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
}
assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
--- a/hotspot/src/share/vm/services/memTracker.hpp Fri Dec 07 10:55:16 2012 -0800
+++ b/hotspot/src/share/vm/services/memTracker.hpp Tue Jan 08 14:04:25 2013 -0500
@@ -142,6 +142,7 @@
* MemTracker is the 'gate' class to native memory tracking runtime.
*/
class MemTracker : AllStatic {
+ friend class GenerationData;
friend class MemTrackWorker;
friend class MemSnapshot;
friend class SyncThreadRecorderClosure;