8206183: Possible construct EMPTY_STACK and allocation stack, etc. on first use
Summary: Use the "Construct On First Use" idiom to work around static initialization order issues
Reviewed-by: dholmes, minqi
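
For context, a minimal sketch of the Construct On First Use idiom applied by this change (the class and function names below are illustrative stand-ins, not the actual HotSpot types): instead of reserving static raw storage and constructing the object into it with placement new, the object is declared as a function-local static, so it is constructed exactly once, on the first call, independent of the static initialization order of translation units.

    // Illustrative stand-in for NativeCallStack; not the real HotSpot class.
    class CallStack {
    public:
      explicit CallStack(int /*to_skip*/ = 0, bool /*fill*/ = false) { /* capture frames */ }
      unsigned int hash() const { return 0; }
    };

    // Construct On First Use: the function-local static is constructed the
    // first time empty_stack() is called, not during C runtime static
    // initialization, so early callers (e.g. from os::malloc()) always see a
    // fully constructed object.
    static const CallStack& empty_stack() {
      static const CallStack EMPTY_STACK(0, false);
      return EMPTY_STACK;
    }

Callers reference empty_stack() (or, in the table code, hash_entry_allocation_stack() / hash_entry_allocation_site()) instead of a namespace-scope static object, which is what makes the placement-new buffers in the diff below unnecessary.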
--- a/src/hotspot/share/services/mallocSiteTable.cpp Wed Jul 11 08:24:39 2018 -0700
+++ b/src/hotspot/share/services/mallocSiteTable.cpp Wed Jul 11 13:28:07 2018 -0400
@@ -28,34 +28,10 @@
#include "runtime/atomic.hpp"
#include "services/mallocSiteTable.hpp"
-/*
- * Early os::malloc() calls come from initializations of static variables, long before entering any
- * VM code. Upon the arrival of the first os::malloc() call, malloc site hashtable has to be
- * initialized, along with the allocation site for the hashtable entries.
- * To ensure that malloc site hashtable can be initialized without triggering any additional os::malloc()
- * call, the hashtable bucket array and hashtable entry allocation site have to be static.
- * It is not a problem for hashtable bucket, since it is an array of pointer type, C runtime just
- * allocates a block memory and zero the memory for it.
- * But for hashtable entry allocation site object, things get tricky. C runtime not only allocates
- * memory for it, but also calls its constructor at some later time. If we initialize the allocation site
- * at the first os::malloc() call, the object will be reinitialized when its constructor is called
- * by C runtime.
- * To workaround above issue, we declare a static size_t array with the size of the CallsiteHashtableEntry,
- * the memory is used to instantiate CallsiteHashtableEntry for the hashtable entry allocation site.
- * Given it is a primitive type array, C runtime will do nothing other than assign the memory block for the variable,
- * which is exactly what we want.
- * The same trick is also applied to create NativeCallStack object for CallsiteHashtableEntry memory allocation.
- *
- * Note: C++ object usually aligns to particular alignment, depends on compiler implementation, we declare
- * the memory as size_t arrays, to ensure the memory is aligned to native machine word alignment.
- */
-
-// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects
-size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
-size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
-
// Malloc site hashtable buckets
MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size];
+const NativeCallStack* MallocSiteTable::_hash_entry_allocation_stack = NULL;
+const MallocSiteHashtableEntry* MallocSiteTable::_hash_entry_allocation_site = NULL;
// concurrent access counter
volatile int MallocSiteTable::_access_count = 0;
@@ -73,9 +49,6 @@
* time, it is in single-threaded mode from JVM perspective.
*/
bool MallocSiteTable::initialize() {
- assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check");
- assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry),
- "Sanity Check");
assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");
// Fake the call stack for hashtable entry allocation
@@ -91,17 +64,19 @@
}
pc[0] = (address)MallocSiteTable::new_entry;
- // Instantiate NativeCallStack object, have to use placement new operator. (see comments above)
- NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack)
- NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
+ static const NativeCallStack stack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
+ static const MallocSiteHashtableEntry entry(stack, mtNMT);
- // Instantiate hash entry for hashtable entry allocation callsite
- MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site)
- MallocSiteHashtableEntry(*stack, mtNMT);
+ assert(_hash_entry_allocation_stack == NULL &&
+ _hash_entry_allocation_site == NULL,
+ "Already initailized");
+
+ _hash_entry_allocation_stack = &stack;
+ _hash_entry_allocation_site = &entry;
// Add the allocation site to hashtable.
- int index = hash_to_index(stack->hash());
- _table[index] = entry;
+ int index = hash_to_index(stack.hash());
+ _table[index] = const_cast<MallocSiteHashtableEntry*>(&entry);
return true;
}
@@ -204,6 +179,9 @@
_table[index] = NULL;
delete_linked_list(head);
}
+
+ _hash_entry_allocation_stack = NULL;
+ _hash_entry_allocation_site = NULL;
}
void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
@@ -211,7 +189,7 @@
while (head != NULL) {
p = head;
head = (MallocSiteHashtableEntry*)head->next();
- if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) {
+ if (p != hash_entry_allocation_site()) {
delete p;
}
}
--- a/src/hotspot/share/services/mallocSiteTable.hpp Wed Jul 11 08:24:39 2018 -0700
+++ b/src/hotspot/share/services/mallocSiteTable.hpp Wed Jul 11 13:28:07 2018 -0400
@@ -247,7 +247,13 @@
}
static inline const NativeCallStack* hash_entry_allocation_stack() {
- return (NativeCallStack*)_hash_entry_allocation_stack;
+ assert(_hash_entry_allocation_stack != NULL, "Must be set");
+ return _hash_entry_allocation_stack;
+ }
+
+ static inline const MallocSiteHashtableEntry* hash_entry_allocation_site() {
+ assert(_hash_entry_allocation_site != NULL, "Must be set");
+ return _hash_entry_allocation_site;
}
private:
@@ -256,15 +262,11 @@
// The callsite hashtable. It has to be a static table,
// since malloc call can come from C runtime linker.
- static MallocSiteHashtableEntry* _table[table_size];
+ static MallocSiteHashtableEntry* _table[table_size];
+ static const NativeCallStack* _hash_entry_allocation_stack;
+ static const MallocSiteHashtableEntry* _hash_entry_allocation_site;
- // Reserve enough memory for placing the objects
-
- // The memory for hashtable entry allocation stack object
- static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
- // The memory for hashtable entry allocation callsite object
- static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
NOT_PRODUCT(static int _peak_count;)
};
--- a/src/hotspot/share/services/memTracker.cpp Wed Jul 11 08:24:39 2018 -0700
+++ b/src/hotspot/share/services/memTracker.cpp Wed Jul 11 13:28:07 2018 -0400
@@ -68,10 +68,6 @@
os::unsetenv(buf);
}
- // Construct NativeCallStack::EMPTY_STACK. It may get constructed twice,
- // but it is benign, the results are the same.
- ::new ((void*)&NativeCallStack::EMPTY_STACK) NativeCallStack(0, false);
-
if (!MallocTracker::initialize(level) ||
!VirtualMemoryTracker::initialize(level)) {
level = NMT_off;
--- a/src/hotspot/share/utilities/nativeCallStack.cpp Wed Jul 11 08:24:39 2018 -0700
+++ b/src/hotspot/share/utilities/nativeCallStack.cpp Wed Jul 11 13:28:07 2018 -0400
@@ -28,8 +28,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/nativeCallStack.hpp"
-NativeCallStack NativeCallStack::EMPTY_STACK(0, false);
-
NativeCallStack::NativeCallStack(int toSkip, bool fillStack) :
_hash_value(0) {
--- a/src/hotspot/share/utilities/nativeCallStack.hpp Wed Jul 11 08:24:39 2018 -0700
+++ b/src/hotspot/share/utilities/nativeCallStack.hpp Wed Jul 11 13:28:07 2018 -0400
@@ -54,18 +54,16 @@
class MemTracker;
class NativeCallStack : public StackObj {
- friend class MemTracker;
-
private:
address _stack[NMT_TrackingStackDepth];
unsigned int _hash_value;
- static NativeCallStack EMPTY_STACK;
public:
NativeCallStack(int toSkip = 0, bool fillStack = false);
NativeCallStack(address* pc, int frameCount);
static inline const NativeCallStack& empty_stack() {
+ static const NativeCallStack EMPTY_STACK(0, false);
return EMPTY_STACK;
}