/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

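// MemSnapshot maintains the state of native memory tracking: per-thread
// recorders are merged into a sorted staging area, which promote() then
// consolidates into the malloc'd pointer and virtual memory arrays that
// make up the snapshot. When call sites are tracked, the *Ex record
// variants (which also carry a call-site pc) are used throughout.
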
// Staging data groups the records belonging to one VM memory range, so
// they can be consolidated into a single record during the walk.
bool StagingWalker::consolidate_vm_records(VMMemRegionEx* vm_rec) {
  MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
  assert(cur != NULL && cur->is_vm_pointer(), "not a virtual memory pointer");

  jint cur_seq;
  jint next_seq;

  bool trackCallsite = MemTracker::track_callsite();

  if (trackCallsite) {
    vm_rec->init((MemPointerRecordEx*)cur);
    cur_seq = ((SeqMemPointerRecordEx*)cur)->seq();
  } else {
    vm_rec->init((MemPointerRecord*)cur);
    cur_seq = ((SeqMemPointerRecord*)cur)->seq();
  }

  // we can only consolidate when we have an allocation record,
  // which contains the virtual memory range
  if (!cur->is_allocation_record()) {
    _itr.next();
    return true;
  }

  // allocation range
  address base = cur->addr();
  address end = base + cur->size();

  MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
  // whether the memory range is still alive
  bool live_vm_rec = true;
  while (next != NULL && next->is_vm_pointer()) {
    if (next->is_allocation_record()) {
      assert(next->addr() >= base, "sorting order or overlapping");
      break;
    }

    if (trackCallsite) {
      next_seq = ((SeqMemPointerRecordEx*)next)->seq();
    } else {
      next_seq = ((SeqMemPointerRecord*)next)->seq();
    }

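    // a record sequenced before the current allocation record belongs to an
    // earlier use of this address range; skip it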
    if (next_seq < cur_seq) {
      _itr.next();
      next = (MemPointerRecord*)_itr.peek_next();
      continue;
    }

    if (next->is_deallocation_record()) {
      if (next->addr() == base && next->size() == cur->size()) {
        // the virtual memory range has been released
        _itr.next();
        live_vm_rec = false;
        break;
      } else if (next->addr() < end) { // partial release
        vm_rec->partial_release(next->addr(), next->size());
        _itr.next();
      } else {
        break;
      }
    } else if (next->is_commit_record()) {
      if (next->addr() >= base && next->addr() + next->size() <= end) {
        vm_rec->commit(next->size());
        _itr.next();
      } else {
        assert(next->addr() >= base, "sorting order or overlapping");
        break;
      }
    } else if (next->is_uncommit_record()) {
      if (next->addr() >= base && next->addr() + next->size() <= end) {
        vm_rec->uncommit(next->size());
        _itr.next();
      } else {
        assert(next->addr() >= end, "sorting order or overlapping");
        break;
      }
    } else if (next->is_type_tagging_record()) {
      if (next->addr() >= base && next->addr() < end) {
        vm_rec->tag(next->flags());
        _itr.next();
      } else {
        break;
      }
    } else {
      assert(false, "unknown record type");
    }
    next = (MemPointerRecord*)_itr.peek_next();
  }
  _itr.next();
  return live_vm_rec;
}

MemPointer* StagingWalker::next() {
  MemPointerRecord* cur_p = (MemPointerRecord*)_itr.current();
  if (cur_p == NULL) {
    _end_of_array = true;
    return NULL;
  }

  MemPointerRecord* next_p;
  if (cur_p->is_vm_pointer()) {
    _is_vm_record = true;
    if (!consolidate_vm_records(&_vm_record)) {
      return next();
    }
  } else { // malloc-ed pointer
    _is_vm_record = false;
    next_p = (MemPointerRecord*)_itr.peek_next();
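    // a record that shares this address is a later operation on the same
    // block; keep whichever record carries the higher sequence number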
    if (next_p != NULL && next_p->addr() == cur_p->addr()) {
      assert(cur_p->is_allocation_record(), "sorting order");
      assert(!next_p->is_allocation_record(), "sorting order");
      _itr.next();
      if (cur_p->seq() < next_p->seq()) {
        cur_p = next_p;
      }
    }
    if (MemTracker::track_callsite()) {
      _malloc_record.init((MemPointerRecordEx*)cur_p);
    } else {
      _malloc_record.init((MemPointerRecord*)cur_p);
    }

    _itr.next();
  }
  return current();
}

MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
    _staging_area = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
    _staging_area = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  _lock = new (std::nothrow) Mutex(Monitor::native, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_staging_area != NULL) {
      delete _staging_area;
      _staging_area = NULL;
    }

    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *dest = *src;
  }
}


// merge a per-thread memory recorder into the staging area
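// Both the recorder and the staging area are sorted by address (and, for
// records at the same address, by tag and sequence number), so the merge
// is a single linear pass with in-place inserts.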
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  // out of memory
  if (_staging_area == NULL || _staging_area->out_of_memory()) {
    return false;
  }

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator staging_itr(_staging_area);
  MemPointerRecord *p1, *p2;
  p1 = (MemPointerRecord*) itr.current();
  while (p1 != NULL) {
    p2 = (MemPointerRecord*)staging_itr.locate(p1->addr());
    // we have not seen this memory block yet, so just add it to the staging area
    if (p2 == NULL) {
      if (!staging_itr.insert(p1)) {
        return false;
      }
    } else if (p1->addr() == p2->addr()) {
      MemPointerRecord* staging_next = (MemPointerRecord*)staging_itr.peek_next();
      // a memory block can have many tagging records; find the right one to
      // replace, or the right position to insert
      while (staging_next != NULL && staging_next->addr() == p1->addr()) {
        if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
            (p1->flags() & MemPointerRecord::tag_masks)) {
          p2 = (MemPointerRecord*)staging_itr.next();
          staging_next = (MemPointerRecord*)staging_itr.peek_next();
        } else {
          break;
        }
      }
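      // equal tags mean the same kind of record, and the newer one (higher
      // sequence number) wins; otherwise keep records sorted by tag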
      int df = (p1->flags() & MemPointerRecord::tag_masks) -
               (p2->flags() & MemPointerRecord::tag_masks);
      if (df == 0) {
        assert(p1->seq() > 0, "not sequenced");
        assert(p2->seq() > 0, "not sequenced");
        if (p1->seq() > p2->seq()) {
          copy_pointer(p2, p1);
        }
      } else if (df < 0) {
        if (!staging_itr.insert(p1)) {
          return false;
        }
      } else {
        if (!staging_itr.insert_after(p1)) {
          return false;
        }
      }
    } else if (p1->addr() < p2->addr()) {
      if (!staging_itr.insert(p1)) {
        return false;
      }
    } else {
      if (!staging_itr.insert_after(p1)) {
        return false;
      }
    }
    p1 = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(check_staging_data();)
  return true;
}


// promote data to the next generation: consolidate the staging records
// into the malloc'd pointer and virtual memory snapshot arrays
void MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _staging_area != NULL && _vm_ptrs != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);
  StagingWalker walker(_staging_area);
  MemPointerIterator malloc_itr(_alloc_ptrs);
  VMMemPointerIterator vm_itr(_vm_ptrs);
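  // a single pass over the consolidated staging records: vm records are
  // merged into _vm_ptrs, malloc records into _alloc_ptrs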
  MemPointer* cur = walker.current();
  while (cur != NULL) {
    if (walker.is_vm_record()) {
      VMMemRegion* cur_vm = (VMMemRegion*)cur;
      VMMemRegion* p = (VMMemRegion*)vm_itr.locate(cur_vm->addr());
      if (p != NULL && (p->contains(cur_vm) || p->base() == cur_vm->base())) {
        assert(p->is_reserve_record() ||
               p->is_commit_record(), "wrong vm record type");
        // resize existing reserved range
        if (cur_vm->is_reserve_record() && p->base() == cur_vm->base()) {
          assert(cur_vm->size() >= p->committed_size(), "incorrect resizing");
          p->set_reserved_size(cur_vm->size());
        } else if (cur_vm->is_commit_record()) {
          p->commit(cur_vm->committed_size());
        } else if (cur_vm->is_uncommit_record()) {
          p->uncommit(cur_vm->committed_size());
          if (!p->is_reserve_record() && p->committed_size() == 0) {
            vm_itr.remove();
          }
        } else if (cur_vm->is_type_tagging_record()) {
          p->tag(cur_vm->flags());
        } else if (cur_vm->is_release_record()) {
          if (cur_vm->base() == p->base() && cur_vm->size() == p->size()) {
            // release the whole range
            vm_itr.remove();
          } else {
            // partial release
            p->partial_release(cur_vm->base(), cur_vm->size());
          }
        } else {
          // we do see multiple reserve records on the same vm range
          assert((cur_vm->is_commit_record() || cur_vm->is_reserve_record()) &&
                 cur_vm->base() == p->base() && cur_vm->size() == p->size(), "bad record");
          p->tag(cur_vm->flags());
        }
      } else {
        if (cur_vm->is_reserve_record()) {
          if (p == NULL || p->base() > cur_vm->base()) {
            vm_itr.insert(cur_vm);
          } else {
            vm_itr.insert_after(cur_vm);
          }
        } else {
#ifdef ASSERT
          // In theory, we should assert unconditionally. However, NMT
          // explicitly releases native thread stacks in Thread's destructor,
          // and the behavior is platform dependent: on some platforms we see
          // uncommit/release records for the thread stack, on others we don't.
          if (!cur_vm->is_uncommit_record() && !cur_vm->is_deallocation_record()) {
            ShouldNotReachHere();
          }
#endif
        }
      }
    } else {
      MemPointerRecord* cur_p = (MemPointerRecord*)cur;
      MemPointerRecord* p = (MemPointerRecord*)malloc_itr.locate(cur->addr());
      if (p != NULL && cur_p->addr() == p->addr()) {
        assert(p->is_allocation_record() || p->is_arena_size_record(), "untracked");
        if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
          copy_pointer(p, cur_p);
        } else { // deallocation record
          assert(cur_p->is_deallocation_record(), "wrong record type");

          // when we remove an arena record, we also need to remove the
          // arena's 'size' record that follows it
          if (p->is_arena_record()) {
            MemPointerRecord* next_p = (MemPointerRecord*)malloc_itr.peek_next();
            if (next_p != NULL && next_p->is_arena_size_record()) {
              assert(next_p->is_size_record_of_arena(p), "arena records don't match");
              malloc_itr.remove();
            }
          }
          malloc_itr.remove();
        }
      } else {
        if (cur_p->is_arena_size_record()) {
          MemPointerRecord* prev_p = (MemPointerRecord*)malloc_itr.peek_prev();
          if (prev_p != NULL &&
              (!prev_p->is_arena_record() || !cur_p->is_size_record_of_arena(prev_p))) {
            // arena already deallocated
            cur_p = NULL;
          }
        }
        if (cur_p != NULL) {
          if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
            if (p != NULL && cur_p->addr() > p->addr()) {
              malloc_itr.insert_after(cur);
            } else {
              malloc_itr.insert(cur);
            }
          }
#ifndef PRODUCT
          else if (!has_allocation_record(cur_p->addr())) {
            // NMT cannot track some startup memory, which is allocated
            // before NMT is enabled
            _untracked_count++;
          }
#endif
        }
      }
    }

    cur = walker.next();
  }
  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area->shrink();
  _staging_area->clear();
}
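
// A typical tracking cycle, as a sketch (the actual driver lives elsewhere
// in NMT):
//   snapshot->merge(recorder);   // fold a per-thread recorder into staging
//   ...
//   snapshot->promote();         // consolidate staging into the snapshot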


#ifdef ASSERT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tStaging: %d/%d [%5.2f%%] %dKB", _staging_area->length(), _staging_area->capacity(),
    (100.0 * (float)_staging_area->length()) / (float)_staging_area->capacity(), _staging_area->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area);
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
           ((next->flags() & MemPointerRecord::tag_masks) >
            (cur->flags() & MemPointerRecord::tag_masks)),
           "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area);
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}

#endif