210 |
210 |
211 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end), |
211 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end), |
212 use_adaptive_freelists, |
212 use_adaptive_freelists, |
213 dictionaryChoice); |
213 dictionaryChoice); |
214 NOT_PRODUCT(debug_cms_space = _cmsSpace;) |
214 NOT_PRODUCT(debug_cms_space = _cmsSpace;) |
215 _cmsSpace->_gen = this; |
215 _cmsSpace->_old_gen = this; |
216 |
216 |
217 _gc_stats = new CMSGCStats(); |
217 _gc_stats = new CMSGCStats(); |
218 |
218 |
219 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass |
219 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass |
220 // offsets match. The ability to tell free chunks from objects |
220 // offsets match. The ability to tell free chunks from objects |
357 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
357 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
358 size_t expected_promotion = MIN2(gch->young_gen()->capacity(), |
358 size_t expected_promotion = MIN2(gch->young_gen()->capacity(), |
359 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average()); |
359 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average()); |
360 if (cms_free > expected_promotion) { |
360 if (cms_free > expected_promotion) { |
361 // Start a cms collection if there isn't enough space to promote |
361 // Start a cms collection if there isn't enough space to promote |
362 // for the next minor collection. Use the padded average as |
362 // for the next young collection. Use the padded average as |
363 // a safety factor. |
363 // a safety factor. |
364 cms_free -= expected_promotion; |
364 cms_free -= expected_promotion; |
365 |
365 |
366 // Adjust by the safety factor. |
366 // Adjust by the safety factor. |
367 double cms_free_dbl = (double)cms_free; |
367 double cms_free_dbl = (double)cms_free; |
368 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0; |
368 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0; |
369 // Apply a further correction factor which tries to adjust |
369 // Apply a further correction factor which tries to adjust |
370 // for recent occurrence of concurrent mode failures. |
370 // for recent occurrence of concurrent mode failures. |
371 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free); |
371 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free); |
372 cms_free_dbl = cms_free_dbl * cms_adjustment; |
372 cms_free_dbl = cms_free_dbl * cms_adjustment; |
373 |
373 |
529 |
529 |
530 // Support for multi-threaded concurrent phases |
530 // Support for multi-threaded concurrent phases |
531 if (CMSConcurrentMTEnabled) { |
531 if (CMSConcurrentMTEnabled) { |
532 if (FLAG_IS_DEFAULT(ConcGCThreads)) { |
532 if (FLAG_IS_DEFAULT(ConcGCThreads)) { |
533 // just for now |
533 // just for now |
534 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4); |
534 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4); |
535 } |
535 } |
536 if (ConcGCThreads > 1) { |
536 if (ConcGCThreads > 1) { |
537 _conc_workers = new YieldingFlexibleWorkGang("CMS Thread", |
537 _conc_workers = new YieldingFlexibleWorkGang("CMS Thread", |
538 ConcGCThreads, true); |
538 ConcGCThreads, true); |
539 if (_conc_workers == NULL) { |
539 if (_conc_workers == NULL) { |
611 if (gch->supports_inline_contig_alloc()) { |
611 if (gch->supports_inline_contig_alloc()) { |
612 _top_addr = gch->top_addr(); |
612 _top_addr = gch->top_addr(); |
613 _end_addr = gch->end_addr(); |
613 _end_addr = gch->end_addr(); |
614 assert(_young_gen != NULL, "no _young_gen"); |
614 assert(_young_gen != NULL, "no _young_gen"); |
615 _eden_chunk_index = 0; |
615 _eden_chunk_index = 0; |
616 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain; |
616 _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain; |
617 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC); |
617 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC); |
618 } |
618 } |
619 |
619 |
620 // Support for parallelizing survivor space rescan |
620 // Support for parallelizing survivor space rescan |
621 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) { |
621 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) { |
793 size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes); |
793 size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes); |
794 if (PrintGCDetails && Verbose) { |
794 if (PrintGCDetails && Verbose) { |
795 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); |
795 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); |
796 gclog_or_tty->print_cr("\nFrom compute_new_size: "); |
796 gclog_or_tty->print_cr("\nFrom compute_new_size: "); |
797 gclog_or_tty->print_cr(" Free fraction %f", free_percentage); |
797 gclog_or_tty->print_cr(" Free fraction %f", free_percentage); |
798 gclog_or_tty->print_cr(" Desired free fraction %f", |
798 gclog_or_tty->print_cr(" Desired free fraction %f", desired_free_percentage); |
799 desired_free_percentage); |
799 gclog_or_tty->print_cr(" Maximum free fraction %f", maximum_free_percentage); |
800 gclog_or_tty->print_cr(" Maximum free fraction %f", |
800 gclog_or_tty->print_cr(" Capacity " SIZE_FORMAT, capacity() / 1000); |
801 maximum_free_percentage); |
801 gclog_or_tty->print_cr(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000); |
802 gclog_or_tty->print_cr(" Capacity " SIZE_FORMAT, capacity()/1000); |
|
803 gclog_or_tty->print_cr(" Desired capacity " SIZE_FORMAT, |
|
804 desired_capacity/1000); |
|
805 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
802 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
806 assert(gch->is_old_gen(this), "The CMS generation should always be the old generation"); |
803 assert(gch->is_old_gen(this), "The CMS generation should always be the old generation"); |
807 size_t young_size = gch->young_gen()->capacity(); |
804 size_t young_size = gch->young_gen()->capacity(); |
808 gclog_or_tty->print_cr(" Young gen size " SIZE_FORMAT, young_size / 1000); |
805 gclog_or_tty->print_cr(" Young gen size " SIZE_FORMAT, young_size / 1000); |
809 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc " SIZE_FORMAT, |
806 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000); |
810 unsafe_max_alloc_nogc()/1000); |
807 gclog_or_tty->print_cr(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000); |
811 gclog_or_tty->print_cr(" contiguous available " SIZE_FORMAT, |
808 gclog_or_tty->print_cr(" Expand by " SIZE_FORMAT " (bytes)", expand_bytes); |
812 contiguous_available()/1000); |
|
813 gclog_or_tty->print_cr(" Expand by " SIZE_FORMAT " (bytes)", |
|
814 expand_bytes); |
|
815 } |
809 } |
816 // safe if expansion fails |
810 // safe if expansion fails |
817 expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); |
811 expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); |
818 if (PrintGCDetails && Verbose) { |
812 if (PrintGCDetails && Verbose) { |
819 gclog_or_tty->print_cr(" Expanded free fraction %f", |
813 gclog_or_tty->print_cr(" Expanded free fraction %f", ((double) free()) / capacity()); |
820 ((double) free()) / capacity()); |
|
821 } |
814 } |
822 } else { |
815 } else { |
823 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); |
816 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); |
824 assert(desired_capacity <= capacity(), "invalid expansion size"); |
817 assert(desired_capacity <= capacity(), "invalid expansion size"); |
825 size_t shrink_bytes = capacity() - desired_capacity; |
818 size_t shrink_bytes = capacity() - desired_capacity; |
832 |
825 |
833 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const { |
826 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const { |
834 return cmsSpace()->freelistLock(); |
827 return cmsSpace()->freelistLock(); |
835 } |
828 } |
836 |
829 |
837 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, |
830 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) { |
838 bool tlab) { |
|
839 CMSSynchronousYieldRequest yr; |
831 CMSSynchronousYieldRequest yr; |
840 MutexLockerEx x(freelistLock(), |
832 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); |
841 Mutex::_no_safepoint_check_flag); |
|
842 return have_lock_and_allocate(size, tlab); |
833 return have_lock_and_allocate(size, tlab); |
843 } |
834 } |
844 |
835 |
845 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size, |
836 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size, |
846 bool tlab /* ignored */) { |
837 bool tlab /* ignored */) { |
847 assert_lock_strong(freelistLock()); |
838 assert_lock_strong(freelistLock()); |
848 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size); |
839 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size); |
849 HeapWord* res = cmsSpace()->allocate(adjustedSize); |
840 HeapWord* res = cmsSpace()->allocate(adjustedSize); |
850 // Allocate the object live (grey) if the background collector has |
841 // Allocate the object live (grey) if the background collector has |
851 // started marking. This is necessary because the marker may |
842 // started marking. This is necessary because the marker may |
2424 { |
2415 { |
2425 StrongRootsScope srs(1); |
2416 StrongRootsScope srs(1); |
2426 |
2417 |
2427 gch->gen_process_roots(&srs, |
2418 gch->gen_process_roots(&srs, |
2428 GenCollectedHeap::OldGen, |
2419 GenCollectedHeap::OldGen, |
2429 true, // younger gens are roots |
2420 true, // young gen as roots |
2430 GenCollectedHeap::ScanningOption(roots_scanning_options()), |
2421 GenCollectedHeap::ScanningOption(roots_scanning_options()), |
2431 should_unload_classes(), |
2422 should_unload_classes(), |
2432 ¬Older, |
2423 ¬Older, |
2433 NULL, |
2424 NULL, |
2434 NULL); |
2425 NULL); |
2496 { |
2487 { |
2497 StrongRootsScope srs(1); |
2488 StrongRootsScope srs(1); |
2498 |
2489 |
2499 gch->gen_process_roots(&srs, |
2490 gch->gen_process_roots(&srs, |
2500 GenCollectedHeap::OldGen, |
2491 GenCollectedHeap::OldGen, |
2501 true, // younger gens are roots |
2492 true, // young gen as roots |
2502 GenCollectedHeap::ScanningOption(roots_scanning_options()), |
2493 GenCollectedHeap::ScanningOption(roots_scanning_options()), |
2503 should_unload_classes(), |
2494 should_unload_classes(), |
2504 ¬Older, |
2495 ¬Older, |
2505 NULL, |
2496 NULL, |
2506 &cld_closure); |
2497 &cld_closure); |
2950 |
2941 |
2951 void CMSCollector::checkpointRootsInitialWork() { |
2942 void CMSCollector::checkpointRootsInitialWork() { |
2952 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); |
2943 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); |
2953 assert(_collectorState == InitialMarking, "just checking"); |
2944 assert(_collectorState == InitialMarking, "just checking"); |
2954 |
2945 |
2955 // If there has not been a GC[n-1] since last GC[n] cycle completed, |
2946 // Already have locks. |
2956 // precede our marking with a collection of all |
|
2957 // younger generations to keep floating garbage to a minimum. |
|
2958 // XXX: we won't do this for now -- it's an optimization to be done later. |
|
2959 |
|
2960 // already have locks |
|
2961 assert_lock_strong(bitMapLock()); |
2947 assert_lock_strong(bitMapLock()); |
2962 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle"); |
2948 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle"); |
2963 |
2949 |
2964 // Setup the verification and class unloading state for this |
2950 // Setup the verification and class unloading state for this |
2965 // CMS collection cycle. |
2951 // CMS collection cycle. |
3025 |
3011 |
3026 StrongRootsScope srs(1); |
3012 StrongRootsScope srs(1); |
3027 |
3013 |
3028 gch->gen_process_roots(&srs, |
3014 gch->gen_process_roots(&srs, |
3029 GenCollectedHeap::OldGen, |
3015 GenCollectedHeap::OldGen, |
3030 true, // younger gens are roots |
3016 true, // young gen as roots |
3031 GenCollectedHeap::ScanningOption(roots_scanning_options()), |
3017 GenCollectedHeap::ScanningOption(roots_scanning_options()), |
3032 should_unload_classes(), |
3018 should_unload_classes(), |
3033 ¬Older, |
3019 ¬Older, |
3034 NULL, |
3020 NULL, |
3035 &cld_closure); |
3021 &cld_closure); |
3036 } |
3022 } |
3037 } |
3023 } |
3038 |
3024 |
3039 // Clear mod-union table; it will be dirtied in the prologue of |
3025 // Clear mod-union table; it will be dirtied in the prologue of |
3040 // CMS generation per each younger generation collection. |
3026 // CMS generation per each young generation collection. |
3041 |
3027 |
3042 assert(_modUnionTable.isAllClear(), |
3028 assert(_modUnionTable.isAllClear(), |
3043 "Was cleared in most recent final checkpoint phase" |
3029 "Was cleared in most recent final checkpoint phase" |
3044 " or no bits are set in the gc_prologue before the start of the next " |
3030 " or no bits are set in the gc_prologue before the start of the next " |
3045 "subsequent marking phase."); |
3031 "subsequent marking phase."); |
3055 bool CMSCollector::markFromRoots() { |
3041 bool CMSCollector::markFromRoots() { |
3056 // we might be tempted to assert that: |
3042 // we might be tempted to assert that: |
3057 // assert(!SafepointSynchronize::is_at_safepoint(), |
3043 // assert(!SafepointSynchronize::is_at_safepoint(), |
3058 // "inconsistent argument?"); |
3044 // "inconsistent argument?"); |
3059 // However that wouldn't be right, because it's possible that |
3045 // However that wouldn't be right, because it's possible that |
3060 // a safepoint is indeed in progress as a younger generation |
3046 // a safepoint is indeed in progress as a young generation |
3061 // stop-the-world GC happens even as we mark in this generation. |
3047 // stop-the-world GC happens even as we mark in this generation. |
3062 assert(_collectorState == Marking, "inconsistent state?"); |
3048 assert(_collectorState == Marking, "inconsistent state?"); |
3063 check_correct_thread_executing(); |
3049 check_correct_thread_executing(); |
3064 verify_overflow_empty(); |
3050 verify_overflow_empty(); |
3065 |
3051 |
3066 // Weak ref discovery note: We may be discovering weak |
3052 // Weak ref discovery note: We may be discovering weak |
3067 // refs in this generation concurrent (but interleaved) with |
3053 // refs in this generation concurrent (but interleaved) with |
3068 // weak ref discovery by a younger generation collector. |
3054 // weak ref discovery by the young generation collector. |
3069 |
3055 |
3070 CMSTokenSyncWithLocks ts(true, bitMapLock()); |
3056 CMSTokenSyncWithLocks ts(true, bitMapLock()); |
3071 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
3057 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
3072 CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails); |
3058 CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails); |
3073 bool res = markFromRootsWork(); |
3059 bool res = markFromRootsWork(); |
3093 // push oop on marking stack |
3079 // push oop on marking stack |
3094 // . drain the marking stack |
3080 // . drain the marking stack |
3095 |
3081 |
3096 // Note that when we do a marking step we need to hold the |
3082 // Note that when we do a marking step we need to hold the |
3097 // bit map lock -- recall that direct allocation (by mutators) |
3083 // bit map lock -- recall that direct allocation (by mutators) |
3098 // and promotion (by younger generation collectors) is also |
3084 // and promotion (by the young generation collector) is also |
3099 // marking the bit map. [the so-called allocate live policy.] |
3085 // marking the bit map. [the so-called allocate live policy.] |
3100 // Because the implementation of bit map marking is not |
3086 // Because the implementation of bit map marking is not |
3101 // robust wrt simultaneous marking of bits in the same word, |
3087 // robust wrt simultaneous marking of bits in the same word, |
3102 // we need to make sure that there is no such interference |
3088 // we need to make sure that there is no such interference |
3103 // between concurrent such updates. |
3089 // between concurrent such updates. |
4047 // NOTE: preclean_mod_union_table() and preclean_card_table() |
4033 // NOTE: preclean_mod_union_table() and preclean_card_table() |
4048 // further below are largely identical; if you need to modify |
4034 // further below are largely identical; if you need to modify |
4049 // one of these methods, please check the other method too. |
4035 // one of these methods, please check the other method too. |
4050 |
4036 |
4051 size_t CMSCollector::preclean_mod_union_table( |
4037 size_t CMSCollector::preclean_mod_union_table( |
4052 ConcurrentMarkSweepGeneration* gen, |
4038 ConcurrentMarkSweepGeneration* old_gen, |
4053 ScanMarkedObjectsAgainCarefullyClosure* cl) { |
4039 ScanMarkedObjectsAgainCarefullyClosure* cl) { |
4054 verify_work_stacks_empty(); |
4040 verify_work_stacks_empty(); |
4055 verify_overflow_empty(); |
4041 verify_overflow_empty(); |
4056 |
4042 |
4057 // strategy: starting with the first card, accumulate contiguous |
4043 // strategy: starting with the first card, accumulate contiguous |
4062 // that, in case the generations expand while we are precleaning. |
4048 // that, in case the generations expand while we are precleaning. |
4063 // It might also be fine to just use the committed part of the |
4049 // It might also be fine to just use the committed part of the |
4064 // generation, but we might potentially miss cards when the |
4050 // generation, but we might potentially miss cards when the |
4065 // generation is rapidly expanding while we are in the midst |
4051 // generation is rapidly expanding while we are in the midst |
4066 // of precleaning. |
4052 // of precleaning. |
4067 HeapWord* startAddr = gen->reserved().start(); |
4053 HeapWord* startAddr = old_gen->reserved().start(); |
4068 HeapWord* endAddr = gen->reserved().end(); |
4054 HeapWord* endAddr = old_gen->reserved().end(); |
4069 |
4055 |
4070 cl->setFreelistLock(gen->freelistLock()); // needed for yielding |
4056 cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding |
4071 |
4057 |
4072 size_t numDirtyCards, cumNumDirtyCards; |
4058 size_t numDirtyCards, cumNumDirtyCards; |
4073 HeapWord *nextAddr, *lastAddr; |
4059 HeapWord *nextAddr, *lastAddr; |
4074 for (cumNumDirtyCards = numDirtyCards = 0, |
4060 for (cumNumDirtyCards = numDirtyCards = 0, |
4075 nextAddr = lastAddr = startAddr; |
4061 nextAddr = lastAddr = startAddr; |
4107 if (!dirtyRegion.is_empty()) { |
4093 if (!dirtyRegion.is_empty()) { |
4108 assert(numDirtyCards > 0, "consistency check"); |
4094 assert(numDirtyCards > 0, "consistency check"); |
4109 HeapWord* stop_point = NULL; |
4095 HeapWord* stop_point = NULL; |
4110 stopTimer(); |
4096 stopTimer(); |
4111 // Potential yield point |
4097 // Potential yield point |
4112 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), |
4098 CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), |
4113 bitMapLock()); |
4099 bitMapLock()); |
4114 startTimer(); |
4100 startTimer(); |
4115 { |
4101 { |
4116 verify_work_stacks_empty(); |
4102 verify_work_stacks_empty(); |
4117 verify_overflow_empty(); |
4103 verify_overflow_empty(); |
4118 sample_eden(); |
4104 sample_eden(); |
4119 stop_point = |
4105 stop_point = |
4120 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); |
4106 old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); |
4121 } |
4107 } |
4122 if (stop_point != NULL) { |
4108 if (stop_point != NULL) { |
4123 // The careful iteration stopped early either because it found an |
4109 // The careful iteration stopped early either because it found an |
4124 // uninitialized object, or because we were in the midst of an |
4110 // uninitialized object, or because we were in the midst of an |
4125 // "abortable preclean", which should now be aborted. Redirty |
4111 // "abortable preclean", which should now be aborted. Redirty |
4150 |
4136 |
4151 // NOTE: preclean_mod_union_table() above and preclean_card_table() |
4137 // NOTE: preclean_mod_union_table() above and preclean_card_table() |
4152 // below are largely identical; if you need to modify |
4138 // below are largely identical; if you need to modify |
4153 // one of these methods, please check the other method too. |
4139 // one of these methods, please check the other method too. |
4154 |
4140 |
4155 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen, |
4141 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen, |
4156 ScanMarkedObjectsAgainCarefullyClosure* cl) { |
4142 ScanMarkedObjectsAgainCarefullyClosure* cl) { |
4157 // strategy: it's similar to precleanModUnionTable above, in that |
4143 // strategy: it's similar to precleanModUnionTable above, in that |
4158 // we accumulate contiguous ranges of dirty cards, mark these cards |
4144 // we accumulate contiguous ranges of dirty cards, mark these cards |
4159 // precleaned, then scan the region covered by these cards. |
4145 // precleaned, then scan the region covered by these cards. |
4160 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high()); |
4146 HeapWord* endAddr = (HeapWord*)(old_gen->_virtual_space.high()); |
4161 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low()); |
4147 HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low()); |
4162 |
4148 |
4163 cl->setFreelistLock(gen->freelistLock()); // needed for yielding |
4149 cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding |
4164 |
4150 |
4165 size_t numDirtyCards, cumNumDirtyCards; |
4151 size_t numDirtyCards, cumNumDirtyCards; |
4166 HeapWord *lastAddr, *nextAddr; |
4152 HeapWord *lastAddr, *nextAddr; |
4167 |
4153 |
4168 for (cumNumDirtyCards = numDirtyCards = 0, |
4154 for (cumNumDirtyCards = numDirtyCards = 0, |
4195 numDirtyCards = |
4181 numDirtyCards = |
4196 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words; |
4182 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words; |
4197 |
4183 |
4198 if (!dirtyRegion.is_empty()) { |
4184 if (!dirtyRegion.is_empty()) { |
4199 stopTimer(); |
4185 stopTimer(); |
4200 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock()); |
4186 CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock()); |
4201 startTimer(); |
4187 startTimer(); |
4202 sample_eden(); |
4188 sample_eden(); |
4203 verify_work_stacks_empty(); |
4189 verify_work_stacks_empty(); |
4204 verify_overflow_empty(); |
4190 verify_overflow_empty(); |
4205 HeapWord* stop_point = |
4191 HeapWord* stop_point = |
4206 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); |
4192 old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); |
4207 if (stop_point != NULL) { |
4193 if (stop_point != NULL) { |
4208 assert((_collectorState == AbortablePreclean && should_abort_preclean()), |
4194 assert((_collectorState == AbortablePreclean && should_abort_preclean()), |
4209 "Should only be AbortablePreclean."); |
4195 "Should only be AbortablePreclean."); |
4210 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end())); |
4196 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end())); |
4211 if (should_abort_preclean()) { |
4197 if (should_abort_preclean()) { |
5084 // parallelize such a scan), but rather will be broken up into |
5070 // parallelize such a scan), but rather will be broken up into |
5085 // a set of parallel tasks (via the sampling that the [abortable] |
5071 // a set of parallel tasks (via the sampling that the [abortable] |
5086 // preclean phase did of eden, plus the [two] tasks of |
5072 // preclean phase did of eden, plus the [two] tasks of |
5087 // scanning the [two] survivor spaces. Further fine-grain |
5073 // scanning the [two] survivor spaces. Further fine-grain |
5088 // parallelization of the scanning of the survivor spaces |
5074 // parallelization of the scanning of the survivor spaces |
5089 // themselves, and of precleaning of the younger gen itself |
5075 // themselves, and of precleaning of the young gen itself |
5090 // is deferred to the future. |
5076 // is deferred to the future. |
5091 initialize_sequential_subtasks_for_young_gen_rescan(n_workers); |
5077 initialize_sequential_subtasks_for_young_gen_rescan(n_workers); |
5092 |
5078 |
5093 // The dirty card rescan work is broken up into a "sequence" |
5079 // The dirty card rescan work is broken up into a "sequence" |
5094 // of parallel tasks (per constituent space) that are dynamically |
5080 // of parallel tasks (per constituent space) that are dynamically |
5175 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. |
5161 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. |
5176 StrongRootsScope srs(1); |
5162 StrongRootsScope srs(1); |
5177 |
5163 |
5178 gch->gen_process_roots(&srs, |
5164 gch->gen_process_roots(&srs, |
5179 GenCollectedHeap::OldGen, |
5165 GenCollectedHeap::OldGen, |
5180 true, // younger gens as roots |
5166 true, // young gen as roots |
5181 GenCollectedHeap::ScanningOption(roots_scanning_options()), |
5167 GenCollectedHeap::ScanningOption(roots_scanning_options()), |
5182 should_unload_classes(), |
5168 should_unload_classes(), |
5183 &mrias_cl, |
5169 &mrias_cl, |
5184 NULL, |
5170 NULL, |
5185 NULL); // The dirty klasses will be handled below |
5171 NULL); // The dirty klasses will be handled below |
5659 // Gather statistics on the young generation collection. |
5645 // Gather statistics on the young generation collection. |
5660 collector()->stats().record_gc0_end(used()); |
5646 collector()->stats().record_gc0_end(used()); |
5661 } |
5647 } |
5662 } |
5648 } |
5663 |
5649 |
5664 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) { |
5650 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) { |
5665 // We iterate over the space(s) underlying this generation, |
5651 // We iterate over the space(s) underlying this generation, |
5666 // checking the mark bit map to see if the bits corresponding |
5652 // checking the mark bit map to see if the bits corresponding |
5667 // to specific blocks are marked or not. Blocks that are |
5653 // to specific blocks are marked or not. Blocks that are |
5668 // marked are live and are not swept up. All remaining blocks |
5654 // marked are live and are not swept up. All remaining blocks |
5669 // are swept up, with coalescing on-the-fly as we sweep up |
5655 // are swept up, with coalescing on-the-fly as we sweep up |
5688 // as well take the bit map lock for the entire duration |
5674 // as well take the bit map lock for the entire duration |
5689 |
5675 |
5690 // check that we hold the requisite locks |
5676 // check that we hold the requisite locks |
5691 assert(have_cms_token(), "Should hold cms token"); |
5677 assert(have_cms_token(), "Should hold cms token"); |
5692 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep"); |
5678 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep"); |
5693 assert_lock_strong(gen->freelistLock()); |
5679 assert_lock_strong(old_gen->freelistLock()); |
5694 assert_lock_strong(bitMapLock()); |
5680 assert_lock_strong(bitMapLock()); |
5695 |
5681 |
5696 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context"); |
5682 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context"); |
5697 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context"); |
5683 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context"); |
5698 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()), |
5684 old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()), |
5699 _inter_sweep_estimate.padded_average(), |
5685 _inter_sweep_estimate.padded_average(), |
5700 _intra_sweep_estimate.padded_average()); |
5686 _intra_sweep_estimate.padded_average()); |
5701 gen->setNearLargestChunk(); |
5687 old_gen->setNearLargestChunk(); |
5702 |
5688 |
5703 { |
5689 { |
5704 SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield); |
5690 SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield); |
5705 gen->cmsSpace()->blk_iterate_careful(&sweepClosure); |
5691 old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure); |
5706 // We need to free-up/coalesce garbage/blocks from a |
5692 // We need to free-up/coalesce garbage/blocks from a |
5707 // co-terminal free run. This is done in the SweepClosure |
5693 // co-terminal free run. This is done in the SweepClosure |
5708 // destructor; so, do not remove this scope, else the |
5694 // destructor; so, do not remove this scope, else the |
5709 // end-of-sweep-census below will be off by a little bit. |
5695 // end-of-sweep-census below will be off by a little bit. |
5710 } |
5696 } |
5711 gen->cmsSpace()->sweep_completed(); |
5697 old_gen->cmsSpace()->sweep_completed(); |
5712 gen->cmsSpace()->endSweepFLCensus(sweep_count()); |
5698 old_gen->cmsSpace()->endSweepFLCensus(sweep_count()); |
5713 if (should_unload_classes()) { // unloaded classes this cycle, |
5699 if (should_unload_classes()) { // unloaded classes this cycle, |
5714 _concurrent_cycles_since_last_unload = 0; // ... reset count |
5700 _concurrent_cycles_since_last_unload = 0; // ... reset count |
5715 } else { // did not unload classes, |
5701 } else { // did not unload classes, |
5716 _concurrent_cycles_since_last_unload++; // ... increment count |
5702 _concurrent_cycles_since_last_unload++; // ... increment count |
5717 } |
5703 } |