/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
    _pagetable(),
    _mark(&_workers, &_pagetable),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}
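
// Note: ZHeap is a singleton; the rest of the VM reaches the instance
// installed by the constructor above through the static ZHeap::heap()
// accessor, which is why the constructor asserts that it runs only once.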

size_t ZHeap::heap_min_size() const {
  const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);
  return MIN2(aligned_min_size, heap_max_size());
}

size_t ZHeap::heap_max_size() const {
  const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);
  return MIN2(aligned_max_size, ZAddressOffsetMax);
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
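  // As an illustration: assuming the default 2M small page and 32M medium
  // page sizes, a setup with 8 workers reserves 8 * 2M + 32M = 48M (capped
  // at the max heap size below).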
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return heap_min_size();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
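    // For example (illustrative numbers): with 1K remaining on the current
    // page and MinTLABSize at 2K, we report max_tlab_size() rather than 1K,
    // since the next TLAB allocation will be satisfied from a fresh page.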
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
    return false;
  }

  const ZPage* const page = _pagetable.get(addr);
  if (page != NULL) {
    return page->is_in(addr);
  }

  return false;
}

uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_start(addr);
}

size_t ZHeap::block_size(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_size(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_is_obj(addr);
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Update pagetable
    _pagetable.insert(page);
  }

  return page;
}
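
// Illustrative use of alloc_page() (hypothetical call site; the real callers
// are the object allocation and relocation paths, e.g. ZObjectAllocator):
//
//   ZAllocationFlags flags;
//   ZPage* const page = ZHeap::heap()->alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
//   if (page == NULL) {
//     // Out of memory
//   }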

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  release_page(page, false /* reclaimed */);
}

bool ZHeap::retain_page(ZPage* page) {
  return page->inc_refcount();
}

void ZHeap::release_page(ZPage* page, bool reclaimed) {
  if (page->dec_refcount()) {
    _page_allocator.free_page(page, reclaimed);
  }
}
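
// retain_page()/release_page() implement a per-page reference count
// (inc_refcount()/dec_refcount()); the page is only handed back to the page
// allocator once the count drops to zero. The typical pattern, as used by
// relocate_object() below:
//
//   const bool retained = retain_page(page);
//   const uintptr_t new_addr = page->relocate_object(addr);
//   if (retained) {
//     release_page(page, true /* reclaimed */);
//   }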

void ZHeap::flip_views() {
  // For debugging only
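  // (Added note, our reading of the flag: ZGC multi-maps the heap, so the
  // same physical memory is visible through several address views. With
  // -XX:+ZUnmapBadViews the views that are invalid for the current phase
  // are unmapped, so a stray access through a stale view faults immediately
  // instead of silently succeeding.)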
  if (ZUnmapBadViews) {
    // Flip pages
    ZPageTableIterator iter(&_pagetable);
    for (ZPage* page; iter.next(&page);) {
      if (!page->is_detached()) {
        _page_allocator.flip_page(page);
      }
    }

    // Flip pre-mapped memory
    _page_allocator.flip_pre_mapped();
  }
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Retire TLABs
  _object_allocator.retire_tlabs();

  // Flip address view
  ZAddressMasks::flip_to_marked();
  flip_views();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}

void ZHeap::mark() {
  _mark.mark();
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZMarkRootOopClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Verification
  if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
    Universe::verify();
  }

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

void ZHeap::destroy_detached_pages() {
  ZList<ZPage> list;

  _page_allocator.flush_detached_pages(&list);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    // Remove pagetable entry
    _pagetable.remove(page);

    // Delete the page
    _page_allocator.destroy_page(page);
  }
}

void ZHeap::select_relocation_set() {
  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      release_page(page, true /* reclaimed */);
    }
  }

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::prepare_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Prepare for relocation
    page->set_forwarding();

    // Update pagetable
    _pagetable.set_relocating(page);
  }
}

void ZHeap::reset_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Reset relocation information
    page->reset_forwarding();

    // Update pagetable
    _pagetable.clear_relocating(page);
  }
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());

  // Flip address view
  ZAddressMasks::flip_to_remapped();
  flip_views();

  // Remap TLABs
  _object_allocator.remap_tlabs();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

uintptr_t ZHeap::relocate_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
  ZPage* const page = _pagetable.get(addr);
  const bool retained = retain_page(page);
  const uintptr_t new_addr = page->relocate_object(addr);
  if (retained) {
    release_page(page, true /* reclaimed */);
  }

  return new_addr;
}
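
// Note: unlike relocate_object() above, forward_object() below never copies
// an object. It is only legal during the mark phases (see the asserts) and
// simply looks up the new address of an already relocated object in the
// page's forwarding table (see ZPage::forward_object()).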

uintptr_t ZHeap::forward_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseMark ||
         ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed");
  ZPage* const page = _pagetable.get(addr);
  return page->forward_object(addr);
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl) {
  // Should only be called at a safepoint after mark end.
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter;
  iter.objects_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}
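
// print_on() below emits a one-line heap summary, for example (illustrative
// values):
//
//   ZHeap used 128M, capacity 512M, max capacity 4096M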
void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  st->cr();
}

class ZVerifyRootsTask : public ZTask {
private:
  ZRootsIterator     _strong_roots;
  ZWeakRootsIterator _weak_roots;

public:
  ZVerifyRootsTask() :
      ZTask("ZVerifyRootsTask"),
      _strong_roots(),
      _weak_roots() {}

  virtual void work() {
    ZVerifyRootOopClosure cl;
    _strong_roots.oops_do(&cl);
    _weak_roots.oops_do(&cl);
  }
};

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  {
    ZVerifyRootsTask task;
    _workers.run_parallel(&task);
  }

  {
    ZVerifyObjectClosure cl;
    object_iterate(&cl);
  }
}