/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory.
class MallocSite : public AllocationSite<MemoryCounter> {
 public:
  MallocSite() :
    AllocationSite<MemoryCounter>(NativeCallStack::EMPTY_STACK) { }

  MallocSite(const NativeCallStack& stack) :
    AllocationSite<MemoryCounter>(stack) { }

  void allocate(size_t size)   { data()->allocate(size);   }
  void deallocate(size_t size) { data()->deallocate(size); }

  // Memory allocated from this code path
  size_t size() const { return peek()->size(); }
  // The number of calls that were made from this code path
  size_t count() const { return peek()->count(); }
};
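// Illustrative only (not part of the original header): a MallocSite ties a
// call stack to a MemoryCounter, so recording activity is just bumping the
// counters of the matching site. Assuming MemoryCounter tracks net size
// (see mallocTracker.hpp for the authoritative semantics):
//
//   MallocSite site(stack);
//   site.allocate(128);    // a 128-byte malloc on this call path
//   site.allocate(64);     // site.size() == 192
//   site.deallocate(128);  // site.size() == 64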

// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
 private:
  MallocSite                _malloc_site;
  MallocSiteHashtableEntry* _next;

 public:
  MallocSiteHashtableEntry() : _next(NULL) { }

  MallocSiteHashtableEntry(NativeCallStack stack):
    _malloc_site(stack), _next(NULL) { }

  inline const MallocSiteHashtableEntry* next() const {
    return _next;
  }

  // Insert an entry atomically.
  // Returns true if the entry is inserted successfully.
  // The operation can fail due to contention from other threads.
  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
      NULL) == NULL);
  }
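  // Sketch of how a caller might chain a new entry into a bucket with
  // atomic_insert() (illustrative only; the real linking logic lives in
  // MallocSiteTable::lookup_or_add in the .cpp file). A failed CAS means
  // another thread already linked a node, so follow the new link and retry:
  //
  //   MallocSiteHashtableEntry* tail = bucket_head;
  //   while (!tail->atomic_insert(new_entry)) {
  //     tail = (MallocSiteHashtableEntry*)tail->next();
  //   }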

  void set_callsite(const MallocSite& site) {
    _malloc_site = site;
  }

  inline const MallocSite* peek() const { return &_malloc_site; }
  inline MallocSite* data()             { return &_malloc_site; }

  inline long hash() const { return _malloc_site.hash(); }
  inline bool equals(const NativeCallStack& stack) const {
    return _malloc_site.equals(stack);
  }

  // Allocation/deallocation on this allocation site
  inline void allocate(size_t size)   { _malloc_site.allocate(size);   }
  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }

  // Memory counters
  inline size_t size()  const { return _malloc_site.size();  }
  inline size_t count() const { return _malloc_site.count(); }
};

// The walker walks every entry in the MallocSiteTable
class MallocSiteWalker : public StackObj {
 public:
  virtual bool do_malloc_site(const MallocSite* e) { return false; }
};
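// Example walker (illustrative only, not in the original header): sums the
// outstanding bytes across all sites, assuming the table keeps walking
// while do_malloc_site() returns true.
//
//   class TotalSizeWalker : public MallocSiteWalker {
//    private:
//     size_t _total;
//    public:
//     TotalSizeWalker() : _total(0) { }
//     bool do_malloc_site(const MallocSite* e) { _total += e->size(); return true; }
//     size_t total() const { return _total; }
//   };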

/*
 * Native memory tracking call site table.
 * The table is only needed when detail tracking is enabled.
 */
class MallocSiteTable : AllStatic {
 private:
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics data can be obtained via Jcmd:
  //   jcmd <pid> VM.native_memory statistics

  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 / 6
  enum {
    table_base_size = 128,   // The base size is calculated from statistics to give
                             // a table ratio of about 1 / 6
    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  };
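  // Illustrative arithmetic (not in the original): '*' binds tighter than
  // '-', so assuming NMT_TrackingStackDepth is 4 (its value in
  // nmtCommon.hpp), table_size works out to 128 * 4 - 1 = 511 buckets.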

  // This is a very special lock that allows multiple shared accesses
  // (sharedLock) but, once exclusive access (exclusiveLock) is requested,
  // rejects all further shared accesses forever.
  class AccessLock : public StackObj {
    enum LockState {
      NoLock,
      SharedLock,
      ExclusiveLock
    };

   private:
    // A very large negative number. The only way to "overflow" this number
    // is to have more than -min_jint threads in this process, which is not
    // going to happen in the foreseeable future.
    const static int _MAGIC_ = min_jint;

    LockState     _lock_state;
    volatile int* _lock;

   public:
    AccessLock(volatile int* lock) :
      _lock(lock), _lock_state(NoLock) {
    }

    ~AccessLock() {
      if (_lock_state == SharedLock) {
        Atomic::dec((volatile jint*)_lock);
      }
    }

    // Acquire shared lock.
    // Returns true if shared access is granted.
    inline bool sharedLock() {
      jint res = Atomic::add(1, _lock);
      if (res < 0) {
        Atomic::add(-1, _lock);
        return false;
      }
      _lock_state = SharedLock;
      return true;
    }

    // Acquire exclusive lock
    void exclusiveLock();
  };
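  // Sketch of the exclusive-lock protocol (illustrative only; the real
  // definition of exclusiveLock() lives in the .cpp file). Adding _MAGIC_
  // drives the counter negative, so every later sharedLock() sees a negative
  // result and backs off; the writer then waits for in-flight readers to
  // decrement out:
  //
  //   jint val, target;
  //   do {
  //     val    = *_lock;
  //     target = _MAGIC_ + val;
  //   } while (Atomic::cmpxchg(target, _lock, val) != val);
  //   while (*_lock != _MAGIC_) { /* spin until readers drain */ }
  //   _lock_state = ExclusiveLock;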

 public:
  static bool initialize();
  static void shutdown();

  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })

  // Number of hash buckets
  static inline int hash_buckets() { return (int)table_size; }

  // Access and copy a call stack from this table. The shared lock must be
  // acquired before accessing the entry.
  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
    size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        stack = *site->call_stack();
        return true;
      }
    }
    return false;
  }

  // Record a new allocation from the specified call path.
  // Returns true if the allocation is recorded successfully; bucket_idx
  // and pos_idx are also updated to indicate the entry where the allocation
  // information was recorded.
  // Returns false only under rare scenarios:
  //   1. out of memory
  //   2. overflow of a hash bucket
  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
      if (site != NULL) site->allocate(size);
      return site != NULL;
    }
    return false;
  }

  // Record a memory deallocation. bucket_idx and pos_idx indicate where the
  // allocation information was originally recorded.
  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        site->deallocate(size);
        return true;
      }
    }
    return false;
  }
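  // How the two halves fit together (illustrative only; the actual call
  // sites live in the malloc tracking code): a tracker records the
  // (bucket_idx, pos_idx) pair returned by allocation_at() alongside the
  // allocated block, then hands the pair back when the block is freed:
  //
  //   size_t bucket_idx, pos_idx;
  //   if (MallocSiteTable::allocation_at(stack, size, &bucket_idx, &pos_idx)) {
  //     // stash bucket_idx/pos_idx, e.g. in the block's malloc header
  //   }
  //   // ... later, on free:
  //   MallocSiteTable::deallocation_at(size, bucket_idx, pos_idx);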

  // Walk this table.
  static bool walk_malloc_site(MallocSiteWalker* walker);

 private:
  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
  static void reset();

  // Delete a bucket linked list
  static void delete_linked_list(MallocSiteHashtableEntry* head);

  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  static bool walk(MallocSiteWalker* walker);

  static inline int hash_to_index(int hash) {
    hash = (hash > 0) ? hash : (-hash);
    return (hash % table_size);
  }
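  // For example (not in the original): the sign is folded away first, so
  // hash_to_index(-5) and hash_to_index(5) land in the same bucket,
  // 5 % table_size.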

  static inline const NativeCallStack* hash_entry_allocation_stack() {
    return (NativeCallStack*)_hash_entry_allocation_stack;
  }

 private:
  // Counter for counting concurrent access
  static volatile int _access_count;

  // The callsite hashtable. It has to be a static table,
  // since malloc calls can come from the C runtime linker.
  static MallocSiteHashtableEntry* _table[table_size];

  // Reserve enough memory for placing the objects below

  // The memory for the hashtable entry allocation stack object
  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
  // The memory for the hashtable entry allocation callsite object
  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];

  NOT_PRODUCT(static int _peak_count;)
};

#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP