author      stefank
date        Fri, 13 Feb 2015 14:37:35 +0100
changeset   29081 c61eb4914428
parent      27162 0a4a7276949b
child       38935 f7427b0e0d7c
permissions -rw-r--r--
/*
 * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "services/allocationSite.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"


/*
 * Virtual memory counter
 */
class VirtualMemory VALUE_OBJ_CLASS_SPEC {
 private:
  size_t _reserved;
  size_t _committed;

 public:
  VirtualMemory() : _reserved(0), _committed(0) { }

  inline void reserve_memory(size_t sz) { _reserved += sz; }
  inline void commit_memory (size_t sz) {
    _committed += sz;
    assert(_committed <= _reserved, "Sanity check");
  }

  inline void release_memory (size_t sz) {
    assert(_reserved >= sz, "Negative amount");
    _reserved -= sz;
  }

  inline void uncommit_memory(size_t sz) {
    assert(_committed >= sz, "Negative amount");
    _committed -= sz;
  }

  inline size_t reserved()  const { return _reserved;  }
  inline size_t committed() const { return _committed; }
};

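// A minimal usage sketch (illustrative, not part of the original header):
// reserve/commit grow the counters, uncommit/release shrink them, and
// committed memory may never exceed reserved memory.
//
//   VirtualMemory vm;
//   vm.reserve_memory(4096);
//   vm.commit_memory(1024);        // OK: 1024 <= 4096
//   assert(vm.reserved() == 4096 && vm.committed() == 1024, "counters track both");
//   vm.uncommit_memory(1024);
//   vm.release_memory(4096);
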
// Virtual memory allocation site; tracks where virtual memory is reserved.
class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
 public:
  VirtualMemoryAllocationSite(const NativeCallStack& stack) :
    AllocationSite<VirtualMemory>(stack) { }

  inline void reserve_memory (size_t sz) { data()->reserve_memory(sz); }
  inline void commit_memory  (size_t sz) { data()->commit_memory(sz); }
  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
  inline void release_memory (size_t sz) { data()->release_memory(sz); }
  inline size_t reserved()  const { return peek()->reserved(); }
  inline size_t committed() const { return peek()->committed(); }
};

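// Illustrative sketch (assumed call site, not part of the original header):
// an allocation site pairs one native call stack with one VirtualMemory
// counter, so all reservations made from the same stack aggregate in one place.
//
//   NativeCallStack stack;                    // stack captured at the reserve call
//   VirtualMemoryAllocationSite site(stack);
//   site.reserve_memory(2 * M);
//   site.commit_memory(1 * M);
//   // site.reserved() == 2 * M, site.committed() == 1 * M
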
class VirtualMemorySummary;

// This class represents a snapshot of virtual memory at a given time.
// The latest snapshot is saved in a static area.
class VirtualMemorySnapshot : public ResourceObj {
  friend class VirtualMemorySummary;

 private:
  VirtualMemory _virtual_memory[mt_number_of_types];

 public:
  inline VirtualMemory* by_type(MEMFLAGS flag) {
    int index = NMTUtil::flag_to_index(flag);
    return &_virtual_memory[index];
  }

  inline VirtualMemory* by_index(int index) {
    assert(index >= 0, "Index out of bounds");
    assert(index < mt_number_of_types, "Index out of bounds");
    return &_virtual_memory[index];
  }

  inline size_t total_reserved() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index++) {
      amount += _virtual_memory[index].reserved();
    }
    return amount;
  }

  inline size_t total_committed() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index++) {
      amount += _virtual_memory[index].committed();
    }
    return amount;
  }

  void copy_to(VirtualMemorySnapshot* s) {
    for (int index = 0; index < mt_number_of_types; index++) {
      s->_virtual_memory[index] = _virtual_memory[index];
    }
  }
};

class VirtualMemorySummary : AllStatic {
 public:
  static void initialize();

  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->reserve_memory(size);
  }

  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->commit_memory(size);
  }

  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->uncommit_memory(size);
  }

  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->release_memory(size);
  }

  // Move virtual memory from one memory type to another.
  // Virtual memory can be reserved before it is associated with a memory type,
  // in which case it is tagged as 'unknown'. Once the memory is tagged, it is
  // moved from the 'unknown' type to the specified memory type.
  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->release_memory(size);
    as_snapshot()->by_type(to)->reserve_memory(size);
  }

  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->uncommit_memory(size);
    as_snapshot()->by_type(to)->commit_memory(size);
  }

  static inline void snapshot(VirtualMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
  }

  static VirtualMemorySnapshot* as_snapshot() {
    return (VirtualMemorySnapshot*)_snapshot;
  }

 private:
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
};

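// Illustrative sketch (not part of the original header): copying the live
// counters into a local snapshot and reading per-type and total figures.
// mtThread is one of the standard MEMFLAGS values.
//
//   VirtualMemorySnapshot snap;
//   VirtualMemorySummary::snapshot(&snap);
//   size_t thread_reserved = snap.by_type(mtThread)->reserved();
//   size_t total_reserved  = snap.total_reserved();
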
/*
 * A virtual memory region
 */
class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
 private:
  address _base_address;
  size_t  _size;

 public:
  VirtualMemoryRegion(address addr, size_t size) :
    _base_address(addr), _size(size) {
    assert(addr != NULL, "Invalid address");
    assert(size > 0, "Invalid size");
  }

  inline address base() const { return _base_address; }
  inline address end()  const { return base() + size(); }
  inline size_t  size() const { return _size; }

  inline bool is_empty() const { return size() == 0; }

  inline bool contain_address(address addr) const {
    return (addr >= base() && addr < end());
  }

  inline bool contain_region(address addr, size_t size) const {
    return contain_address(addr) && contain_address(addr + size - 1);
  }

  inline bool same_region(address addr, size_t sz) const {
    return (addr == base() && sz == size());
  }

  inline bool overlap_region(address addr, size_t sz) const {
    VirtualMemoryRegion rgn(addr, sz);
    return contain_address(addr) ||
           contain_address(addr + sz - 1) ||
           rgn.contain_address(base()) ||
           rgn.contain_address(end() - 1);
  }

  inline bool adjacent_to(address addr, size_t sz) const {
    return (addr == end() || (addr + sz) == base());
  }

  void exclude_region(address addr, size_t sz) {
    assert(contain_region(addr, sz), "Region not contained");
    assert(addr == base() || addr + sz == end(), "Can not exclude from middle");
    size_t new_size = size() - sz;

    if (addr == base()) {
      set_base(addr + sz);
    }
    set_size(new_size);
  }

  void expand_region(address addr, size_t sz) {
    assert(adjacent_to(addr, sz), "Not adjacent regions");
    if (base() == addr + sz) {
      set_base(addr);
    }
    set_size(size() + sz);
  }

 protected:
  void set_base(address base) {
    assert(base != NULL, "Sanity check");
    _base_address = base;
  }

  void set_size(size_t size) {
    assert(size > 0, "Sanity check");
    _size = size;
  }
};

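// Illustrative sketch (hypothetical addresses, not part of the original
// header): base() is inclusive, end() is exclusive, and a region can only be
// trimmed at either end, never split from the middle.
//
//   VirtualMemoryRegion rgn((address)0x1000, 0x2000);   // covers [0x1000, 0x3000)
//   rgn.contain_address((address)0x2fff);               // true
//   rgn.contain_address((address)0x3000);               // false, end is exclusive
//   rgn.adjacent_to((address)0x3000, 0x1000);           // true, starts at end()
//   rgn.exclude_region((address)0x1000, 0x1000);        // now covers [0x2000, 0x3000)
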
class CommittedMemoryRegion : public VirtualMemoryRegion {
 private:
  NativeCallStack _stack;

 public:
  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
    VirtualMemoryRegion(addr, size), _stack(stack) { }

  // Overlapping or adjacent regions compare as equal (0); otherwise regions
  // are ordered by base address.
  inline int compare(const CommittedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size()) ||
        adjacent_to   (rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const CommittedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }
};

typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;

int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);

class ReservedMemoryRegion : public VirtualMemoryRegion {
 private:
  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
                  _committed_regions;

  NativeCallStack _stack;
  MEMFLAGS        _flag;

  bool            _all_committed;

 public:
  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
                       MEMFLAGS flag = mtNone) :
    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
    _all_committed(false) { }

  ReservedMemoryRegion(address base, size_t size) :
    VirtualMemoryRegion(base, size), _stack(NativeCallStack::EMPTY_STACK), _flag(mtNone),
    _all_committed(false) { }

  // Copy constructor
  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
    VirtualMemoryRegion(rr.base(), rr.size()) {
    *this = rr;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }

  void set_flag(MEMFLAGS flag);
  inline MEMFLAGS flag() const { return _flag; }

  // Overlapping reserved regions compare as equal (0); otherwise regions are
  // ordered by base address.
  inline int compare(const ReservedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const ReservedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
  bool remove_uncommitted_region(address addr, size_t size);

  size_t committed_size() const;

  // Move committed regions that are higher than the specified address to
  // the new region.
  void move_committed_regions(address addr, ReservedMemoryRegion& rgn);

  inline bool all_committed() const { return _all_committed; }
  void set_all_committed(bool b);

  CommittedRegionIterator iterate_committed_regions() const {
    return CommittedRegionIterator(_committed_regions.head());
  }

  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
    set_base(other.base());
    set_size(other.size());

    _stack = *other.call_stack();
    _flag  = other.flag();
    _all_committed = other.all_committed();
    if (other.all_committed()) {
      set_all_committed(true);
    } else {
      CommittedRegionIterator itr = other.iterate_committed_regions();
      const CommittedMemoryRegion* rgn = itr.next();
      while (rgn != NULL) {
        _committed_regions.add(*rgn);
        rgn = itr.next();
      }
    }
    return *this;
  }

 private:
  // The committed region contains the uncommitted region; subtract the
  // uncommitted region from this committed region.
  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
                                 address addr, size_t sz);

  bool add_committed_region(const CommittedMemoryRegion& rgn) {
    assert(rgn.base() != NULL, "Invalid base address");
    assert(size() > 0, "Invalid size");
    return _committed_regions.add(rgn) != NULL;
  }
};

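// Illustrative sketch (hypothetical values, not part of the original header):
// a reserved region tracks its committed sub-regions in a sorted list unless
// it was registered as all-committed.
//
//   ReservedMemoryRegion rr(base, 64 * K, stack, mtThread);
//   rr.add_committed_region(base,         4 * K, stack);   // first committed range
//   rr.add_committed_region(base + 8 * K, 4 * K, stack);   // disjoint, kept separate
//   // rr.committed_size() == 8 * K
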
int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);

class VirtualMemoryWalker : public StackObj {
 public:
   virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
};

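// Illustrative sketch (hypothetical subclass, not part of the original
// header): a walker that sums reserved sizes across all regions. Returning
// true continues the walk; returning false stops it.
//
//   class ReservedSizeWalker : public VirtualMemoryWalker {
//    public:
//     size_t _total;
//     ReservedSizeWalker() : _total(0) { }
//     virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       _total += rgn->size();
//       return true;
//     }
//   };
//   // ReservedSizeWalker w; VirtualMemoryTracker::walk_virtual_memory(&w);
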
// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
 public:
  static bool initialize(NMT_TrackingLevel level);

  // Late phase initialization
  static bool late_initialize(NMT_TrackingLevel level);

  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
                                   MEMFLAGS flag = mtNone, bool all_committed = false);

  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
  static bool remove_uncommitted_region (address base_addr, size_t size);
  static bool remove_released_region    (address base_addr, size_t size);
  static void set_reserved_region_type  (address addr, MEMFLAGS flag);

  // Walk virtual memory data structure for creating baseline, etc.
  static bool walk_virtual_memory(VirtualMemoryWalker* walker);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

 private:
  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
};

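// Illustrative sketch (hypothetical values, not part of the original header):
// a typical call sequence driven by MemTracker for one virtual memory region,
// covering reserve, commit, uncommit and release.
//
//   VirtualMemoryTracker::add_reserved_region(addr, size, stack, mtThread);
//   VirtualMemoryTracker::add_committed_region(addr, page_sz, stack);
//   VirtualMemoryTracker::remove_uncommitted_region(addr, page_sz);
//   VirtualMemoryTracker::remove_released_region(addr, size);
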
#endif // INCLUDE_NMT

#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP