@@ -30,14 +30,14 @@
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
-#include "gc_implementation/parallelScavenge/psPermGen.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
+#include "gc_implementation/shared/markSweep.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_interface/gcCause.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/referenceProcessor.hpp"
@@ -50,11 +50,10 @@
 #include "services/memoryService.hpp"
 #include "utilities/events.hpp"
 #include "utilities/stack.inline.hpp"
 
 elapsedTimer PSMarkSweep::_accumulated_time;
-unsigned int PSMarkSweep::_total_invocations = 0;
 jlong PSMarkSweep::_time_of_last_gc = 0;
 CollectorCounters* PSMarkSweep::_counters = NULL;
 
 void PSMarkSweep::initialize() {
   MemRegion mr = Universe::heap()->reserved_region();
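Everything in this change moves in one direction: the permanent generation disappears from the parallel full collector (PSMarkSweep). Above, the psPermGen.hpp include and the PSMarkSweep::_total_invocations counter are dropped and the shared markSweep.hpp header is pulled in directly; the hunks below repeat the pattern at each use site, swapping PSPermGen bookkeeping for the Metaspace equivalents (MetaspaceAux, MetaspaceGC, ClassLoaderDataGraph).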
@@ -117,11 +116,10 @@
   // CollectorPolicy::_should_clear_all_soft_refs.
   ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
 
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
-  PSPermGen* perm_gen = heap->perm_gen();
 
   // Increment the invocation count
   heap->increment_total_collections(true /* full */);
 
   // Save information needed to minimize mangling
@@ -146,11 +144,10 @@
 
   // Verify object start arrays
   if (VerifyObjectStartArray &&
       VerifyBeforeGC) {
     old_gen->verify_object_start_array();
-    perm_gen->verify_object_start_array();
   }
 
   heap->pre_full_gc_dump();
 
   // Filled in below to track the state of the young gen after the collection.
@@ -170,21 +167,19 @@
     if (TraceGen1Time) accumulated_time()->start();
 
     // Let the size policy know we're starting
     size_policy->major_collection_begin();
 
-    // When collecting the permanent generation methodOops may be moving,
-    // so we either have to flush all bcp data or convert it into bci.
     CodeCache::gc_prologue();
     Threads::gc_prologue();
     BiasedLocking::preserve_marks();
 
     // Capture heap size before collection for printing.
     size_t prev_used = heap->used();
 
-    // Capture perm gen size before collection for sizing.
-    size_t perm_gen_prev_used = perm_gen->used_in_bytes();
+    // Capture metadata size before collection for sizing.
+    size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
 
     // For PrintGCDetails
     size_t old_gen_prev_used = old_gen->used_in_bytes();
     size_t young_gen_prev_used = young_gen->used_in_bytes();
 
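With no perm gen to measure, the before-collection snapshot used for sizing and reporting now comes from the metaspace: MetaspaceAux::used_in_bytes() replaces perm_gen->used_in_bytes(), and the saved metadata_prev_used feeds the print_metaspace_change() call in a later hunk. The deleted prologue comment about methodOops moving is likewise obsolete once method metadata no longer lives in a collected Java-heap generation.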
@@ -232,19 +227,19 @@
 
     BarrierSet* bs = heap->barrier_set();
     if (bs->is_a(BarrierSet::ModRef)) {
       ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
       MemRegion old_mr = heap->old_gen()->reserved();
-      MemRegion perm_mr = heap->perm_gen()->reserved();
-      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
-
       if (young_gen_empty) {
-        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
+        modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
       } else {
-        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
+        modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
       }
     }
+
+    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
+    ClassLoaderDataGraph::purge();
 
     BiasedLocking::restore_marks();
     Threads::gc_epilogue();
     CodeCache::gc_epilogue();
     JvmtiExport::gc_epilogue();
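The barrier-set reset no longer spans a perm region sitting below the old gen (hence the deleted assert about generation order); only the old gen's reserved range is touched. The clear-versus-invalidate choice is worth spelling out: if the young gen ended up empty after the full GC, no old-to-young pointers can exist and the card table for the range can simply be wiped clean; otherwise compaction may have created such pointers anywhere, so the whole range is conservatively dirtied. The added ClassLoaderDataGraph::purge() then frees the metaspaces of class loaders found dead during this collection. A toy sketch of the clear/invalidate distinction, using a hypothetical card table rather than HotSpot's real ModRefBarrierSet:

    #include <cstdint>
    #include <cstring>

    // Toy card table (hypothetical; names and values only mirror the idea).
    struct ToyCardTable {
      static const uint8_t clean_card = 0xff;   // "nothing interesting here"
      static const uint8_t dirty_card = 0x00;   // "re-scan this card"
      uint8_t cards[1024];

      // Young gen empty after the full GC: no old->young pointers can
      // exist, so every card in the old range can be marked clean.
      void clear(size_t from, size_t to) {
        memset(cards + from, clean_card, to - from);
      }

      // Young gen still populated: compaction moved objects, so stale
      // card state is meaningless; conservatively dirty the whole range
      // and let the next scavenge re-scan it.
      void invalidate(size_t from, size_t to) {
        memset(cards + from, dirty_card, to - from);
      }
    };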
@@ -265,14 +260,12 @@
       gclog_or_tty->print("AdaptiveSizeStart: ");
       gclog_or_tty->stamp();
       gclog_or_tty->print_cr(" collection: %d ",
                              heap->total_collections());
       if (Verbose) {
-        gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
-                            " perm_gen_capacity: %d ",
-                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
-                            perm_gen->capacity_in_bytes());
+        gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
+                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
       }
     }
 
     // Don't check if the size_policy is ready here. Let
     // the size_policy check that internally.
@@ -288,11 +281,10 @@
       young_gen->from_space()->capacity_in_bytes() -
       young_gen->to_space()->capacity_in_bytes();
     size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                young_gen->eden_space()->used_in_bytes(),
                                                old_gen->used_in_bytes(),
-                                               perm_gen->used_in_bytes(),
                                                young_gen->eden_space()->capacity_in_bytes(),
                                                old_gen->max_gen_size(),
                                                max_eden_size,
                                                true /* full gc*/,
                                                gc_cause,
@@ -321,12 +313,12 @@
                             young_gen->capacity_in_bytes());
     }
 
     heap->resize_all_tlabs();
 
-    // We collected the perm gen, so we'll resize it here.
-    perm_gen->compute_new_size(perm_gen_prev_used);
+    // We collected the heap, recalculate the metaspace capacity
+    MetaspaceGC::compute_new_size();
 
     if (TraceGen1Time) accumulated_time()->stop();
 
     if (PrintGC) {
       if (PrintGCDetails) {
@@ -334,15 +326,12 @@
         // would be confusing.
         young_gen->print_used_change(young_gen_prev_used);
         old_gen->print_used_change(old_gen_prev_used);
       }
       heap->print_heap_change(prev_used);
-      // Do perm gen after heap becase prev_used does
-      // not include the perm gen (done this way in the other
-      // collectors).
       if (PrintGCDetails) {
-        perm_gen->print_used_change(perm_gen_prev_used);
+        MetaspaceAux::print_metaspace_change(metadata_prev_used);
       }
     }
 
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
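Resizing and reporting follow the same substitution: MetaspaceGC::compute_new_size() takes over from perm_gen->compute_new_size(perm_gen_prev_used), the PrintGCDetails block prints the metaspace delta through MetaspaceAux::print_metaspace_change(metadata_prev_used), and the comment about printing the perm gen after the heap (whose prev_used excluded it) goes away with the call. The adaptive policy's compute_generation_free_space() call in the preceding hunk loses its perm_gen->used_in_bytes() argument for the same reason.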
@@ -357,16 +346,14 @@
 
   // Re-verify object start arrays
   if (VerifyObjectStartArray &&
       VerifyAfterGC) {
     old_gen->verify_object_start_array();
-    perm_gen->verify_object_start_array();
   }
 
   if (ZapUnusedHeapArea) {
     old_gen->object_space()->check_mangled_unused_area_complete();
-    perm_gen->object_space()->check_mangled_unused_area_complete();
   }
 
   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
 
   heap->print_heap_after_gc();
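The post-GC verification passes shrink accordingly: the object-start-array checks and the ZapUnusedHeapArea mangling checks now cover only the old gen, since there is no perm gen object space left to verify.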
@@ -488,21 +475,22 @@
 void PSMarkSweep::deallocate_stacks() {
   _preserved_mark_stack.clear(true);
   _preserved_oop_stack.clear(true);
   _marking_stack.clear();
   _objarray_stack.clear(true);
-  _revisit_klass_stack.clear(true);
-  _revisit_mdo_stack.clear(true);
 }
 
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
   TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
   trace(" 1");
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+
+  // Need to clear claim bits before the tracing starts.
+  ClassLoaderDataGraph::clear_claimed_marks();
 
   // General strong roots.
   {
     ParallelScavengeHeap::ParStrongRootsScope psrs;
     Universe::oops_do(mark_and_push_closure());
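Phase 1 gains a pre-step: ClassLoaderDataGraph::clear_claimed_marks(). Class loader data nodes carry a claim flag so that when several closures walk the graph during root scanning, each node is processed exactly once; the flags must be reset before each new traversal, or the next walk would skip every node it sees as already claimed. A minimal sketch of that claim protocol with hypothetical types (the real one lives in ClassLoaderData):

    #include <atomic>

    struct ToyLoaderData {
      std::atomic<int> claimed{0};

      // The first visitor to claim a node processes it; later visitors
      // in the same traversal see claimed != 0 and skip it.
      bool try_claim() {
        int expected = 0;
        return claimed.compare_exchange_strong(expected, 1);
      }

      // Reset once per GC, before tracing starts (what
      // clear_claimed_marks() does for the whole graph).
      void reset_claim() { claimed.store(0); }
    };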
@@ -512,10 +500,11 @@
     ObjectSynchronizer::oops_do(mark_and_push_closure());
     FlatProfiler::oops_do(mark_and_push_closure());
     Management::oops_do(mark_and_push_closure());
     JvmtiExport::oops_do(mark_and_push_closure());
     SystemDictionary::always_strong_oops_do(mark_and_push_closure());
+    ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
     // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
     //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
   }
 
   // Flush marking stack.
@@ -535,15 +524,11 @@
   CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                           purged_class);
   follow_stack(); // Flush marking stack
 
   // Update subklass/sibling/implementor links of live klasses
-  follow_weak_klass_links();
-  assert(_marking_stack.is_empty(), "just drained");
-
-  // Visit memoized mdo's and clear unmarked weak refs
-  follow_mdo_weak_refs();
+  Klass::clean_weak_klass_links(&is_alive);
   assert(_marking_stack.is_empty(), "just drained");
 
   // Visit interned string tables and delete unmarked oops
   StringTable::unlink(is_alive_closure());
   // Clean up unreferenced symbols in symbol table.
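The weak-klass fixup changes shape as well: follow_weak_klass_links() and follow_mdo_weak_refs(), together with the revisit stacks dropped from deallocate_stacks() in an earlier hunk, collapse into a single Klass::clean_weak_klass_links(&is_alive) that prunes subklass/sibling/implementor links whose targets died. At heart this is a dead-node unlink over intrusive lists; a self-contained sketch with a hypothetical node type:

    // Prune dead nodes from an intrusive sibling list, the shape of
    // work clean_weak_klass_links does under an is-alive predicate
    // (ToyKlass is hypothetical, not HotSpot's Klass).
    struct ToyKlass {
      ToyKlass* next_sibling;
      bool      alive;
    };

    ToyKlass* clean_siblings(ToyKlass* head) {
      while (head != nullptr && !head->alive) {
        head = head->next_sibling;               // drop dead heads
      }
      for (ToyKlass* k = head; k != nullptr; k = k->next_sibling) {
        ToyKlass* next = k->next_sibling;
        while (next != nullptr && !next->alive) {
          next = next->next_sibling;             // unlink dead successors
        }
        k->next_sibling = next;
      }
      return head;
    }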
@@ -557,37 +542,24 @@
   TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
   trace("2");
 
   // Now all live objects are marked, compute the new object addresses.
 
-  // It is imperative that we traverse perm_gen LAST. If dead space is
-  // allowed a range of dead object may get overwritten by a dead int
-  // array. If perm_gen is not traversed last a klassOop may get
-  // overwritten. This is fine since it is dead, but if the class has dead
-  // instances we have to skip them, and in order to find their size we
-  // need the klassOop!
-  //
   // It is not required that we traverse spaces in the same order in
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   PSOldGen* old_gen = heap->old_gen();
-  PSPermGen* perm_gen = heap->perm_gen();
 
   // Begin compacting into the old gen
   PSMarkSweepDecorator::set_destination_decorator_tenured();
 
   // This will also compact the young gen spaces.
   old_gen->precompact();
-
-  // Compact the perm gen into the perm gen
-  PSMarkSweepDecorator::set_destination_decorator_perm_gen();
-
-  perm_gen->precompact();
 }
 
 // This should be moved to the shared markSweep code!
 class PSAlwaysTrueClosure: public BoolObjectClosure {
 public:
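The long deleted comment in phase 2 documented an invariant that no longer exists: with klassOops gone from the Java heap, there is no perm gen that must be traversed last to keep dead instances' headers resolvable, so only the generic note about traversal order for ValidateMarkSweep survives, and the perm-gen precompaction (set_destination_decorator_perm_gen() plus perm_gen->precompact()) is deleted outright.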
@@ -604,11 +576,13 @@
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
-  PSPermGen* perm_gen = heap->perm_gen();
+
+  // Need to clear claim bits before the tracing starts.
+  ClassLoaderDataGraph::clear_claimed_marks();
 
   // General strong roots.
   Universe::oops_do(adjust_root_pointer_closure());
   JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
   Threads::oops_do(adjust_root_pointer_closure(), NULL);
@@ -616,10 +590,11 @@
   FlatProfiler::oops_do(adjust_root_pointer_closure());
   Management::oops_do(adjust_root_pointer_closure());
   JvmtiExport::oops_do(adjust_root_pointer_closure());
   // SO_AllClasses
   SystemDictionary::oops_do(adjust_root_pointer_closure());
+  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);
   //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());
 
   // Now adjust pointers in remaining weak roots. (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
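Phase 3 mirrors phase 1: the PSPermGen lookup gives way to another ClassLoaderDataGraph::clear_claimed_marks() before the adjust pass, and ClassLoaderDataGraph::oops_do(...) joins the strong roots so that oops reachable from class loader data get their pointers adjusted along with everything else.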
@@ -632,32 +607,25 @@
 
   adjust_marks();
 
   young_gen->adjust_pointers();
   old_gen->adjust_pointers();
-  perm_gen->adjust_pointers();
 }
 
 void PSMarkSweep::mark_sweep_phase4() {
   EventMark m("4 compact heap");
   TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
   trace("4");
 
   // All pointers are now adjusted, move objects accordingly
 
-  // It is imperative that we traverse perm_gen first in phase4. All
-  // classes must be allocated earlier than their instances, and traversing
-  // perm_gen first makes sure that all klassOops have moved to their new
-  // location before any instance does a dispatch through it's klass!
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
-  PSPermGen* perm_gen = heap->perm_gen();
 
-  perm_gen->compact();
   old_gen->compact();
   young_gen->compact();
 }
 
 jlong PSMarkSweep::millis_since_last_gc() {
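Phase 4 loses its ordering invariant for the same reason phase 2 did: classes no longer live in a heap generation that must be compacted before instances can dispatch through their klass, so perm_gen->compact() disappears and the old and young generations are compacted exactly as before.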