570 \ |
573 \ |
571 /* save the compaction_top of the compaction space. */ \ |
574 /* save the compaction_top of the compaction space. */ \ |
572 cp->space->set_compaction_top(compact_top); \ |
575 cp->space->set_compaction_top(compact_top); \ |
573 } |
576 } |
574 |
577 |
// Phase-3 helper for serial mark-compact: walks the space from bottom() to
// _end_of_live and rewrites every interior oop to its post-compaction address
// (via adjust_pointers()). Expanded as a macro so each Space subtype can plug
// in its own adjust_obj_size(). Relies on state established by
// prepare_for_compaction(): _end_of_live, _first_dead, and forwarding info in
// object mark words.
#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
  /* adjust all the interior pointers to point at the new locations of objects  \
   * Used by MarkSweep::mark_sweep_phase3() */                                  \
                                                                                \
  HeapWord* q = bottom();                                                       \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
                                                                                \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
                                                                                \
  if (q < t && _first_dead > q &&                                               \
      !oop(q)->is_gc_marked()) {                                                \
    /* we have a chunk of the space which hasn't moved and we've                \
     * reinitialized the mark word during the previous pass, so we can't       \
     * use is_gc_marked for the traversal. */                                   \
    HeapWord* end = _first_dead;                                                \
                                                                                \
    while (q < end) {                                                           \
      /* I originally tried to conjoin "block_start(q) == q" to the            \
       * assertion below, but that doesn't work, because you can't             \
       * accurately traverse previous objects to get to the current one        \
       * after their pointers (including pointers into permGen) have been      \
       * updated, until the actual compaction is done.  dld, 4/00 */           \
      assert(block_is_obj(q),                                                   \
             "should be at block boundaries, and should be looking at objs");   \
                                                                                \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
                                                                                \
      /* point all the oops to the new location */                              \
      size_t size = oop(q)->adjust_pointers();                                  \
      size = adjust_obj_size(size);                                             \
                                                                                \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
                                                                                \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
                                                                                \
      q += size;                                                                \
    }                                                                           \
                                                                                \
    if (_first_dead == t) {                                                     \
      q = t;                                                                    \
    } else {                                                                    \
      /* $$$ This is funky.  Using this to read the previously written          \
       * LiveRange.  See also use below. */                                     \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
    }                                                                           \
  }                                                                             \
                                                                                \
  const intx interval = PrefetchScanIntervalInBytes;                            \
                                                                                \
  debug_only(HeapWord* prev_q = NULL);                                          \
  while (q < t) {                                                               \
    /* prefetch beyond q */                                                     \
    Prefetch::write(q, interval);                                               \
    if (oop(q)->is_gc_marked()) {                                               \
      /* q is alive */                                                          \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
      /* point all the oops to the new location */                              \
      size_t size = oop(q)->adjust_pointers();                                  \
      size = adjust_obj_size(size);                                             \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
      debug_only(prev_q = q);                                                   \
      q += size;                                                                \
    } else {                                                                    \
      /* q is not a live object, so its mark should point at the next          \
       * live object */                                                         \
      debug_only(prev_q = q);                                                   \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
      assert(q > prev_q, "we should be moving forward through memory");         \
    }                                                                           \
  }                                                                             \
                                                                                \
  assert(q == t, "just checking");                                              \
}
649 |
652 |
650 #define SCAN_AND_COMPACT(obj_size) { \ |
653 #define SCAN_AND_COMPACT(obj_size) { \ |
651 /* Copy all live objects to their new location \ |
654 /* Copy all live objects to their new location \ |
652 * Used by MarkSweep::mark_sweep_phase4() */ \ |
655 * Used by MarkSweep::mark_sweep_phase4() */ \ |
653 \ |
656 \ |
654 HeapWord* q = bottom(); \ |
657 HeapWord* q = bottom(); \ |
655 HeapWord* const t = _end_of_live; \ |
658 HeapWord* const t = _end_of_live; \ |
656 debug_only(HeapWord* prev_q = NULL); \ |
659 debug_only(HeapWord* prev_q = NULL); \ |
657 \ |
660 \ |
658 if (q < t && _first_dead > q && \ |
661 if (q < t && _first_dead > q && \ |
659 !oop(q)->is_gc_marked()) { \ |
662 !oop(q)->is_gc_marked()) { \ |
660 debug_only( \ |
663 debug_only( \ |
661 /* we have a chunk of the space which hasn't moved and we've reinitialized the \ |
664 /* we have a chunk of the space which hasn't moved and we've reinitialized \ |
662 * mark word during the previous pass, so we can't use is_gc_marked for the \ |
665 * the mark word during the previous pass, so we can't use is_gc_marked for \ |
663 * traversal. */ \ |
666 * the traversal. */ \ |
664 HeapWord* const end = _first_dead; \ |
667 HeapWord* const end = _first_dead; \ |
665 \ |
668 \ |
666 while (q < end) { \ |
669 while (q < end) { \ |
667 size_t size = obj_size(q); \ |
670 size_t size = obj_size(q); \ |
668 assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)"); \ |
671 assert(!oop(q)->is_gc_marked(), \ |
669 debug_only(MarkSweep::live_oop_moved_to(q, size, q)); \ |
672 "should be unmarked (special dense prefix handling)"); \ |
670 debug_only(prev_q = q); \ |
673 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \ |
|
674 debug_only(prev_q = q); \ |
671 q += size; \ |
675 q += size; \ |
672 } \ |
676 } \ |
673 ) /* debug_only */ \ |
677 ) /* debug_only */ \ |
674 \ |
678 \ |
675 if (_first_dead == t) { \ |
679 if (_first_dead == t) { \ |
676 q = t; \ |
680 q = t; \ |
677 } else { \ |
681 } else { \ |
678 /* $$$ Funky */ \ |
682 /* $$$ Funky */ \ |
679 q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ |
683 q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ |
680 } \ |
684 } \ |
681 } \ |
685 } \ |
682 \ |
686 \ |
683 const intx scan_interval = PrefetchScanIntervalInBytes; \ |
687 const intx scan_interval = PrefetchScanIntervalInBytes; \ |
684 const intx copy_interval = PrefetchCopyIntervalInBytes; \ |
688 const intx copy_interval = PrefetchCopyIntervalInBytes; \ |
685 while (q < t) { \ |
689 while (q < t) { \ |
686 if (!oop(q)->is_gc_marked()) { \ |
690 if (!oop(q)->is_gc_marked()) { \ |
687 /* mark is pointer to next marked oop */ \ |
691 /* mark is pointer to next marked oop */ \ |
688 debug_only(prev_q = q); \ |
692 debug_only(prev_q = q); \ |
689 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ |
693 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ |
690 assert(q > prev_q, "we should be moving forward through memory"); \ |
694 assert(q > prev_q, "we should be moving forward through memory"); \ |
691 } else { \ |
695 } else { \ |
692 /* prefetch beyond q */ \ |
696 /* prefetch beyond q */ \ |
693 Prefetch::read(q, scan_interval); \ |
697 Prefetch::read(q, scan_interval); \ |
694 \ |
698 \ |
695 /* size and destination */ \ |
699 /* size and destination */ \ |
696 size_t size = obj_size(q); \ |
700 size_t size = obj_size(q); \ |
697 HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ |
701 HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ |
698 \ |
702 \ |
699 /* prefetch beyond compaction_top */ \ |
703 /* prefetch beyond compaction_top */ \ |
700 Prefetch::write(compaction_top, copy_interval); \ |
704 Prefetch::write(compaction_top, copy_interval); \ |
701 \ |
705 \ |
702 /* copy object and reinit its mark */ \ |
706 /* copy object and reinit its mark */ \ |
703 debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top)); \ |
707 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \ |
704 assert(q != compaction_top, "everything in this pass should be moving"); \ |
708 compaction_top)); \ |
705 Copy::aligned_conjoint_words(q, compaction_top, size); \ |
709 assert(q != compaction_top, "everything in this pass should be moving"); \ |
706 oop(compaction_top)->init_mark(); \ |
710 Copy::aligned_conjoint_words(q, compaction_top, size); \ |
707 assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ |
711 oop(compaction_top)->init_mark(); \ |
708 \ |
712 assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ |
709 debug_only(prev_q = q); \ |
713 \ |
|
714 debug_only(prev_q = q); \ |
710 q += size; \ |
715 q += size; \ |
711 } \ |
716 } \ |
712 } \ |
717 } \ |
713 \ |
718 \ |
714 /* Reset space after compaction is complete */ \ |
719 /* Reset space after compaction is complete */ \ |
715 reset_after_compaction(); \ |
720 reset_after_compaction(); \ |
716 /* We do this clear, below, since it has overloaded meanings for some */ \ |
721 /* We do this clear, below, since it has overloaded meanings for some */ \ |
717 /* space subtypes. For example, OffsetTableContigSpace's that were */ \ |
722 /* space subtypes. For example, OffsetTableContigSpace's that were */ \ |
718 /* compacted into will have had their offset table thresholds updated */ \ |
723 /* compacted into will have had their offset table thresholds updated */ \ |
719 /* continuously, but those that weren't need to have their thresholds */ \ |
724 /* continuously, but those that weren't need to have their thresholds */ \ |
720 /* re-initialized. Also mangles unused area for debugging. */ \ |
725 /* re-initialized. Also mangles unused area for debugging. */ \ |