25946
|
1 |
/*
|
|
2 |
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 |
*
|
|
5 |
* This code is free software; you can redistribute it and/or modify it
|
|
6 |
* under the terms of the GNU General Public License version 2 only, as
|
|
7 |
* published by the Free Software Foundation.
|
|
8 |
*
|
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that
|
|
13 |
* accompanied this code).
|
|
14 |
*
|
|
15 |
* You should have received a copy of the GNU General Public License version
|
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 |
*
|
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
20 |
* or visit www.oracle.com if you need additional information or have any
|
|
21 |
* questions.
|
|
22 |
*
|
|
23 |
*/
|
|
24 |
|
|
25 |
#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
|
|
26 |
#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
|
|
27 |
|
|
28 |
#if INCLUDE_NMT
|
|
29 |
|
|
30 |
#include "memory/allocation.hpp"
|
|
31 |
#include "runtime/atomic.hpp"
|
|
32 |
#include "services/nmtCommon.hpp"
|
|
33 |
#include "utilities/nativeCallStack.hpp"
|
|
34 |
|
|
35 |
/*
 * This counter class counts memory allocation and deallocation,
 * records total memory allocation size and number of allocations.
 * The counters are updated atomically.
 */
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
 private:
  size_t   _count;   // number of live allocations
  size_t   _size;    // total bytes currently allocated

  // Debug-only high-water marks. Unlike _count/_size, these are updated
  // with plain assignments (no atomics), so under concurrent updates they
  // are best-effort values only.
  DEBUG_ONLY(size_t   _peak_count;)
  DEBUG_ONLY(size_t   _peak_size; )

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size = 0;)
  }

  // Record an allocation of sz bytes. sz may be 0 (e.g. arena creation),
  // in which case only the allocation count is bumped.
  inline void allocate(size_t sz) {
    Atomic::add(1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
    }
    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
  }

  // Record a deallocation of sz bytes; the counters must never go negative.
  inline void deallocate(size_t sz) {
    assert(_count > 0, "Negative counter");
    assert(_size >= sz, "Negative size");
    Atomic::add(-1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
    }
  }

  // Adjust the tracked size by sz bytes (positive or negative) without
  // changing the allocation count, e.g. when an arena grows or shrinks.
  inline void resize(long sz) {
    if (sz != 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
    }
  }

  inline size_t count() const { return _count; }
  inline size_t size()  const { return _size; }
  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size; })

};
|
|
85 |
|
|
86 |
/*
|
|
87 |
* Malloc memory used by a particular subsystem.
|
|
88 |
* It includes the memory acquired through os::malloc()
|
|
89 |
* call and arena's backing memory.
|
|
90 |
*/
|
|
91 |
class MallocMemory VALUE_OBJ_CLASS_SPEC {
|
|
92 |
private:
|
|
93 |
MemoryCounter _malloc;
|
|
94 |
MemoryCounter _arena;
|
|
95 |
|
|
96 |
public:
|
|
97 |
MallocMemory() { }
|
|
98 |
|
|
99 |
inline void record_malloc(size_t sz) {
|
|
100 |
_malloc.allocate(sz);
|
|
101 |
}
|
|
102 |
|
|
103 |
inline void record_free(size_t sz) {
|
|
104 |
_malloc.deallocate(sz);
|
|
105 |
}
|
|
106 |
|
|
107 |
inline void record_new_arena() {
|
|
108 |
_arena.allocate(0);
|
|
109 |
}
|
|
110 |
|
|
111 |
inline void record_arena_free() {
|
|
112 |
_arena.deallocate(0);
|
|
113 |
}
|
|
114 |
|
|
115 |
inline void record_arena_size_change(long sz) {
|
|
116 |
_arena.resize(sz);
|
|
117 |
}
|
|
118 |
|
|
119 |
inline size_t malloc_size() const { return _malloc.size(); }
|
|
120 |
inline size_t malloc_count() const { return _malloc.count();}
|
|
121 |
inline size_t arena_size() const { return _arena.size(); }
|
|
122 |
inline size_t arena_count() const { return _arena.count(); }
|
|
123 |
|
|
124 |
DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
|
|
125 |
DEBUG_ONLY(inline const MemoryCounter& arena_counter() const { return _arena; })
|
|
126 |
};
|
|
127 |
|
|
128 |
class MallocMemorySummary;
|
|
129 |
|
|
130 |
// A snapshot of malloc'd memory, includes malloc memory
|
|
131 |
// usage by types and memory used by tracking itself.
|
|
132 |
class MallocMemorySnapshot : public ResourceObj {
|
|
133 |
friend class MallocMemorySummary;
|
|
134 |
|
|
135 |
private:
|
|
136 |
MallocMemory _malloc[mt_number_of_types];
|
|
137 |
MemoryCounter _tracking_header;
|
|
138 |
|
|
139 |
|
|
140 |
public:
|
|
141 |
inline MallocMemory* by_type(MEMFLAGS flags) {
|
|
142 |
int index = NMTUtil::flag_to_index(flags);
|
|
143 |
return &_malloc[index];
|
|
144 |
}
|
|
145 |
|
|
146 |
inline MallocMemory* by_index(int index) {
|
|
147 |
assert(index >= 0, "Index out of bound");
|
|
148 |
assert(index < mt_number_of_types, "Index out of bound");
|
|
149 |
return &_malloc[index];
|
|
150 |
}
|
|
151 |
|
|
152 |
inline MemoryCounter* malloc_overhead() {
|
|
153 |
return &_tracking_header;
|
|
154 |
}
|
|
155 |
|
|
156 |
// Total malloc'd memory amount
|
|
157 |
size_t total() const;
|
|
158 |
// Total malloc'd memory used by arenas
|
|
159 |
size_t total_arena() const;
|
|
160 |
|
26288
|
161 |
inline size_t thread_count() const {
|
|
162 |
MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
|
|
163 |
return s->by_type(mtThreadStack)->malloc_count();
|
25946
|
164 |
}
|
|
165 |
|
|
166 |
void copy_to(MallocMemorySnapshot* s) {
|
|
167 |
s->_tracking_header = _tracking_header;
|
|
168 |
for (int index = 0; index < mt_number_of_types; index ++) {
|
|
169 |
s->_malloc[index] = _malloc[index];
|
|
170 |
}
|
|
171 |
}
|
|
172 |
|
|
173 |
// Make adjustment by subtracting chunks used by arenas
|
|
174 |
// from total chunks to get total free chunk size
|
|
175 |
void make_adjustment();
|
|
176 |
};
|
|
177 |
|
|
178 |
/*
|
|
179 |
* This class is for collecting malloc statistics at summary level
|
|
180 |
*/
|
|
181 |
class MallocMemorySummary : AllStatic {
|
|
182 |
private:
|
|
183 |
// Reserve memory for placement of MallocMemorySnapshot object
|
|
184 |
static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
|
|
185 |
|
|
186 |
public:
|
|
187 |
static void initialize();
|
|
188 |
|
|
189 |
static inline void record_malloc(size_t size, MEMFLAGS flag) {
|
|
190 |
as_snapshot()->by_type(flag)->record_malloc(size);
|
|
191 |
}
|
|
192 |
|
|
193 |
static inline void record_free(size_t size, MEMFLAGS flag) {
|
|
194 |
as_snapshot()->by_type(flag)->record_free(size);
|
|
195 |
}
|
|
196 |
|
|
197 |
static inline void record_new_arena(MEMFLAGS flag) {
|
|
198 |
as_snapshot()->by_type(flag)->record_new_arena();
|
|
199 |
}
|
|
200 |
|
|
201 |
static inline void record_arena_free(MEMFLAGS flag) {
|
|
202 |
as_snapshot()->by_type(flag)->record_arena_free();
|
|
203 |
}
|
|
204 |
|
|
205 |
static inline void record_arena_size_change(long size, MEMFLAGS flag) {
|
|
206 |
as_snapshot()->by_type(flag)->record_arena_size_change(size);
|
|
207 |
}
|
|
208 |
|
|
209 |
static void snapshot(MallocMemorySnapshot* s) {
|
|
210 |
as_snapshot()->copy_to(s);
|
|
211 |
s->make_adjustment();
|
|
212 |
}
|
|
213 |
|
|
214 |
// Record memory used by malloc tracking header
|
|
215 |
static inline void record_new_malloc_header(size_t sz) {
|
|
216 |
as_snapshot()->malloc_overhead()->allocate(sz);
|
|
217 |
}
|
|
218 |
|
|
219 |
static inline void record_free_malloc_header(size_t sz) {
|
|
220 |
as_snapshot()->malloc_overhead()->deallocate(sz);
|
|
221 |
}
|
|
222 |
|
|
223 |
// The memory used by malloc tracking headers
|
|
224 |
static inline size_t tracking_overhead() {
|
|
225 |
return as_snapshot()->malloc_overhead()->size();
|
|
226 |
}
|
|
227 |
|
|
228 |
static MallocMemorySnapshot* as_snapshot() {
|
|
229 |
return (MallocMemorySnapshot*)_snapshot;
|
|
230 |
}
|
|
231 |
};
|
|
232 |
|
|
233 |
|
|
234 |
/*
|
|
235 |
* Malloc tracking header.
|
|
236 |
* To satisfy malloc alignment requirement, NMT uses 2 machine words for tracking purpose,
|
|
237 |
* which ensures 8-bytes alignment on 32-bit systems and 16-bytes on 64-bit systems (Product build).
|
|
238 |
*/
|
|
239 |
|
|
240 |
class MallocHeader VALUE_OBJ_CLASS_SPEC {
|
|
241 |
#ifdef _LP64
|
|
242 |
size_t _size : 62;
|
|
243 |
size_t _level : 2;
|
|
244 |
size_t _flags : 8;
|
|
245 |
size_t _pos_idx : 16;
|
|
246 |
size_t _bucket_idx: 40;
|
|
247 |
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
|
|
248 |
#define MAX_BUCKET_LENGTH ((size_t)(1 << 16))
|
|
249 |
#define MAX_MALLOC_SIZE (((size_t)1 << 62) - 1)
|
|
250 |
#else
|
|
251 |
size_t _size : 30;
|
|
252 |
size_t _level : 2;
|
|
253 |
size_t _flags : 8;
|
|
254 |
size_t _pos_idx : 8;
|
|
255 |
size_t _bucket_idx: 16;
|
|
256 |
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)(1 << 16))
|
|
257 |
#define MAX_BUCKET_LENGTH ((size_t)(1 << 8))
|
|
258 |
// Max malloc size = 1GB - 1 on 32 bit system, such has total 4GB memory
|
|
259 |
#define MAX_MALLOC_SIZE ((size_t)(1 << 30) - 1)
|
|
260 |
#endif // _LP64
|
|
261 |
|
|
262 |
public:
|
|
263 |
// Summary tracking header
|
|
264 |
MallocHeader(size_t size, MEMFLAGS flags) {
|
|
265 |
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
|
266 |
"Wrong header size");
|
|
267 |
|
|
268 |
_level = NMT_summary;
|
|
269 |
_flags = flags;
|
|
270 |
set_size(size);
|
|
271 |
MallocMemorySummary::record_malloc(size, flags);
|
|
272 |
MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
|
|
273 |
}
|
|
274 |
// Detail tracking header
|
|
275 |
MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
|
|
276 |
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
|
277 |
"Wrong header size");
|
|
278 |
|
|
279 |
_level = NMT_detail;
|
|
280 |
_flags = flags;
|
|
281 |
set_size(size);
|
|
282 |
size_t bucket_idx;
|
|
283 |
size_t pos_idx;
|
|
284 |
if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
|
|
285 |
assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
|
|
286 |
assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
|
|
287 |
_bucket_idx = bucket_idx;
|
|
288 |
_pos_idx = pos_idx;
|
|
289 |
}
|
|
290 |
MallocMemorySummary::record_malloc(size, flags);
|
|
291 |
MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
|
|
292 |
}
|
|
293 |
// Minimal tracking header
|
|
294 |
MallocHeader() {
|
|
295 |
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
|
|
296 |
"Wrong header size");
|
|
297 |
|
|
298 |
_level = (unsigned short)NMT_minimal;
|
|
299 |
}
|
|
300 |
|
|
301 |
inline NMT_TrackingLevel tracking_level() const {
|
|
302 |
return (NMT_TrackingLevel)_level;
|
|
303 |
}
|
|
304 |
|
|
305 |
inline size_t size() const { return _size; }
|
|
306 |
inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
|
|
307 |
bool get_stack(NativeCallStack& stack) const;
|
|
308 |
|
|
309 |
// Cleanup tracking information before the memory is released.
|
|
310 |
void release() const;
|
|
311 |
|
|
312 |
private:
|
|
313 |
inline void set_size(size_t size) {
|
|
314 |
assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
|
|
315 |
_size = size;
|
|
316 |
}
|
|
317 |
bool record_malloc_site(const NativeCallStack& stack, size_t size,
|
|
318 |
size_t* bucket_idx, size_t* pos_idx) const;
|
|
319 |
};
|
|
320 |
|
|
321 |
|
|
322 |
// Main class called from MemTracker to track malloc activities
|
|
323 |
class MallocTracker : AllStatic {
|
|
324 |
public:
|
|
325 |
// Initialize malloc tracker for specific tracking level
|
|
326 |
static bool initialize(NMT_TrackingLevel level);
|
|
327 |
|
|
328 |
static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
|
|
329 |
|
|
330 |
// malloc tracking header size for specific tracking level
|
|
331 |
static inline size_t malloc_header_size(NMT_TrackingLevel level) {
|
|
332 |
return (level == NMT_off) ? 0 : sizeof(MallocHeader);
|
|
333 |
}
|
|
334 |
|
|
335 |
// Parameter name convention:
|
|
336 |
// memblock : the beginning address for user data
|
|
337 |
// malloc_base: the beginning address that includes malloc tracking header
|
|
338 |
//
|
|
339 |
// The relationship:
|
|
340 |
// memblock = (char*)malloc_base + sizeof(nmt header)
|
|
341 |
//
|
|
342 |
|
|
343 |
// Record malloc on specified memory block
|
|
344 |
static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
|
|
345 |
const NativeCallStack& stack, NMT_TrackingLevel level);
|
|
346 |
|
|
347 |
// Record free on specified memory block
|
|
348 |
static void* record_free(void* memblock);
|
|
349 |
|
|
350 |
// Get tracking level of specified memory block
|
|
351 |
static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);
|
|
352 |
|
|
353 |
|
|
354 |
// Offset memory address to header address
|
|
355 |
static inline void* get_base(void* memblock);
|
|
356 |
static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
|
|
357 |
if (memblock == NULL || level == NMT_off) return memblock;
|
|
358 |
return (char*)memblock - malloc_header_size(level);
|
|
359 |
}
|
|
360 |
|
|
361 |
// Get memory size
|
|
362 |
static inline size_t get_size(void* memblock) {
|
|
363 |
MallocHeader* header = malloc_header(memblock);
|
|
364 |
assert(header->tracking_level() >= NMT_summary,
|
|
365 |
"Wrong tracking level");
|
|
366 |
return header->size();
|
|
367 |
}
|
|
368 |
|
|
369 |
// Get memory type
|
|
370 |
static inline MEMFLAGS get_flags(void* memblock) {
|
|
371 |
MallocHeader* header = malloc_header(memblock);
|
|
372 |
assert(header->tracking_level() >= NMT_summary,
|
|
373 |
"Wrong tracking level");
|
|
374 |
return header->flags();
|
|
375 |
}
|
|
376 |
|
|
377 |
// Get header size
|
|
378 |
static inline size_t get_header_size(void* memblock) {
|
|
379 |
return (memblock == NULL) ? 0 : sizeof(MallocHeader);
|
|
380 |
}
|
|
381 |
|
|
382 |
static inline void record_new_arena(MEMFLAGS flags) {
|
|
383 |
MallocMemorySummary::record_new_arena(flags);
|
|
384 |
}
|
|
385 |
|
|
386 |
static inline void record_arena_free(MEMFLAGS flags) {
|
|
387 |
MallocMemorySummary::record_arena_free(flags);
|
|
388 |
}
|
|
389 |
|
|
390 |
static inline void record_arena_size_change(int size, MEMFLAGS flags) {
|
|
391 |
MallocMemorySummary::record_arena_size_change(size, flags);
|
|
392 |
}
|
|
393 |
private:
|
|
394 |
static inline MallocHeader* malloc_header(void *memblock) {
|
|
395 |
assert(memblock != NULL, "NULL pointer");
|
|
396 |
MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
|
|
397 |
assert(header->tracking_level() >= NMT_minimal, "Bad header");
|
|
398 |
return header;
|
|
399 |
}
|
|
400 |
};
|
|
401 |
|
|
402 |
#endif // INCLUDE_NMT
|
|
403 |
|
|
404 |
|
|
405 |
#endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
|