@@ -190,11 +190,11 @@
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
-  _debug_collection_type(Concurrent_collection_type),
+  _debug_concurrent_cycle(true),
   _did_compact(false)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
   HeapWord* end    = (HeapWord*) _virtual_space.high();
 
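Reviewer note: with the foreground mark-sweep path gone, only two debug collection modes remain, so the rotating _debug_collection_type enum collapses into the boolean _debug_concurrent_cycle (its rotation function is rewritten near the end of this patch). A standalone sketch of the resulting toggle, assuming nothing beyond what the patch itself shows:

    #include <cstdio>

    // Standalone sketch (not HotSpot code): two modes left, so a bool suffices.
    struct DebugModeToggle {
      bool _concurrent_cycle;           // mirrors _debug_concurrent_cycle(true)
      DebugModeToggle() : _concurrent_cycle(true) {}

      void rotate() {
        // With exactly two states, "rotation" degenerates to negation.
        if (_concurrent_cycle) {
          std::printf("Rotate from concurrent to STW collections\n");
        } else {
          std::printf("Rotate from STW to concurrent collections\n");
        }
        _concurrent_cycle = !_concurrent_cycle;
      }
    };

    int main() {
      DebugModeToggle t;
      t.rotate();  // concurrent -> STW
      t.rotate();  // STW -> concurrent
      return 0;
    }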
@@ -1439,20 +1431,10 @@
 void CMSCollector::collect(bool   full,
                            bool   clear_all_soft_refs,
                            size_t size,
                            bool   tlab)
 {
-  if (!UseCMSCollectionPassing && _collectorState > Idling) {
-    // For debugging purposes skip the collection if the state
-    // is not currently idle
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
-                             Thread::current(), full, _collectorState);
-    }
-    return;
-  }
-
   // The following "if" branch is present for defensive reasons.
   // In the current uses of this interface, it can be replaced with:
   // assert(!GC_locker.is_active(), "Can't be called otherwise");
   // But I am not placing that assert here to allow future
   // generality in invoking this interface.
1634 gclog_or_tty->print_cr("CMS foreground collector has asked for control " |
1615 gclog_or_tty->print_cr("CMS foreground collector has asked for control " |
1635 INTPTR_FORMAT " with first state %d", Thread::current(), first_state); |
1616 INTPTR_FORMAT " with first state %d", Thread::current(), first_state); |
1636 gclog_or_tty->print_cr(" gets control with state %d", _collectorState); |
1617 gclog_or_tty->print_cr(" gets control with state %d", _collectorState); |
1637 } |
1618 } |
1638 |
1619 |
1639 // Check if we need to do a compaction, or if not, whether |
1620 // Inform cms gen if this was due to partial collection failing. |
1640 // we need to start the mark-sweep from scratch. |
1621 // The CMS gen may use this fact to determine its expansion policy. |
1641 bool should_compact = false; |
1622 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
1642 bool should_start_over = false; |
1623 if (gch->incremental_collection_will_fail(false /* don't consult_young */)) { |
1643 decide_foreground_collection_type(clear_all_soft_refs, |
1624 assert(!_cmsGen->incremental_collection_failed(), |
1644 &should_compact, &should_start_over); |
1625 "Should have been noticed, reacted to and cleared"); |
1645 |
1626 _cmsGen->set_incremental_collection_failed(); |
1646 NOT_PRODUCT( |
1627 } |
1647 if (RotateCMSCollectionTypes) { |
|
1648 if (_cmsGen->debug_collection_type() == |
|
1649 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) { |
|
1650 should_compact = true; |
|
1651 } else if (_cmsGen->debug_collection_type() == |
|
1652 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) { |
|
1653 should_compact = false; |
|
1654 } |
|
1655 } |
|
1656 ) |
|
1657 |
1628 |
1658 if (first_state > Idling) { |
1629 if (first_state > Idling) { |
1659 report_concurrent_mode_interruption(); |
1630 report_concurrent_mode_interruption(); |
1660 } |
1631 } |
1661 |
1632 |
1662 set_did_compact(should_compact); |
1633 set_did_compact(true); |
1663 if (should_compact) { |
1634 |
1664 // If the collection is being acquired from the background |
1635 // If the collection is being acquired from the background |
1665 // collector, there may be references on the discovered |
1636 // collector, there may be references on the discovered |
1666 // references lists that have NULL referents (being those |
1637 // references lists that have NULL referents (being those |
1667 // that were concurrently cleared by a mutator) or |
1638 // that were concurrently cleared by a mutator) or |
1668 // that are no longer active (having been enqueued concurrently |
1639 // that are no longer active (having been enqueued concurrently |
1669 // by the mutator). |
1640 // by the mutator). |
1670 // Scrub the list of those references because Mark-Sweep-Compact |
1641 // Scrub the list of those references because Mark-Sweep-Compact |
1671 // code assumes referents are not NULL and that all discovered |
1642 // code assumes referents are not NULL and that all discovered |
1672 // Reference objects are active. |
1643 // Reference objects are active. |
1673 ref_processor()->clean_up_discovered_references(); |
1644 ref_processor()->clean_up_discovered_references(); |
1674 |
1645 |
1675 if (first_state > Idling) { |
1646 if (first_state > Idling) { |
1676 save_heap_summary(); |
1647 save_heap_summary(); |
1677 } |
1648 } |
1678 |
1649 |
1679 do_compaction_work(clear_all_soft_refs); |
1650 do_compaction_work(clear_all_soft_refs); |
1680 |
1651 |
1681 // Has the GC time limit been exceeded? |
1652 // Has the GC time limit been exceeded? |
1682 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration(); |
1653 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration(); |
1683 size_t max_eden_size = young_gen->max_capacity() - |
1654 size_t max_eden_size = young_gen->max_capacity() - |
1684 young_gen->to()->capacity() - |
1655 young_gen->to()->capacity() - |
1685 young_gen->from()->capacity(); |
1656 young_gen->from()->capacity(); |
1686 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
1657 GCCause::Cause gc_cause = gch->gc_cause(); |
1687 GCCause::Cause gc_cause = gch->gc_cause(); |
1658 size_policy()->check_gc_overhead_limit(_young_gen->used(), |
1688 size_policy()->check_gc_overhead_limit(_young_gen->used(), |
1659 young_gen->eden()->used(), |
1689 young_gen->eden()->used(), |
1660 _cmsGen->max_capacity(), |
1690 _cmsGen->max_capacity(), |
1661 max_eden_size, |
1691 max_eden_size, |
1662 full, |
1692 full, |
1663 gc_cause, |
1693 gc_cause, |
1664 gch->collector_policy()); |
1694 gch->collector_policy()); |
1665 |
1695 } else { |
|
1696 do_mark_sweep_work(clear_all_soft_refs, first_state, |
|
1697 should_start_over); |
|
1698 } |
|
1699 // Reset the expansion cause, now that we just completed |
1666 // Reset the expansion cause, now that we just completed |
1700 // a collection cycle. |
1667 // a collection cycle. |
1701 clear_expansion_cause(); |
1668 clear_expansion_cause(); |
1702 _foregroundGCIsActive = false; |
1669 _foregroundGCIsActive = false; |
1703 return; |
1670 return; |
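The net effect in collect(): the foreground collector no longer chooses between plain mark-sweep and mark-sweep-compact; it always compacts (set_did_compact(true), followed unconditionally by do_compaction_work()). A minimal control-flow sketch of before versus after, with illustrative stand-in functions rather than the HotSpot ones:

    #include <cstdio>

    static void do_compaction_work(bool clear_all_soft_refs) {
      std::printf("mark-sweep-compact (clear_all_soft_refs=%d)\n", clear_all_soft_refs);
    }
    static void do_mark_sweep_work() {
      std::printf("mark-sweep, no compaction\n");
    }

    // Before: two outcomes, chosen by decide_foreground_collection_type().
    static void foreground_before(bool should_compact, bool clear_all_soft_refs) {
      if (should_compact) {
        do_compaction_work(clear_all_soft_refs);
      } else {
        do_mark_sweep_work();
      }
    }

    // After: a single outcome; the choice and its heuristic are gone.
    static void foreground_after(bool clear_all_soft_refs) {
      do_compaction_work(clear_all_soft_refs);
    }

    int main() {
      foreground_before(false, true);
      foreground_after(true);
      return 0;
    }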
@@ -1711,72 +1678,10 @@
   FreelistLocker z(this);
   MetaspaceGC::compute_new_size();
   _cmsGen->compute_new_size_free_list();
 }
 
-// A work method used by foreground collection to determine
-// what type of collection (compacting or not, continuing or fresh)
-// it should do.
-// NOTE: the intent is to make UseCMSCompactAtFullCollection
-// and CMSCompactWhenClearAllSoftRefs the default in the future
-// and do away with the flags after a suitable period.
-void CMSCollector::decide_foreground_collection_type(
-  bool clear_all_soft_refs, bool* should_compact,
-  bool* should_start_over) {
-  // Normally, we'll compact only if the UseCMSCompactAtFullCollection
-  // flag is set, and we have either requested a System.gc() or
-  // the number of full gc's since the last concurrent cycle
-  // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
-  // or if an incremental collection has failed
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_generation_policy(),
-         "You may want to check the correctness of the following");
-  // Inform cms gen if this was due to partial collection failing.
-  // The CMS gen may use this fact to determine its expansion policy.
-  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
-    assert(!_cmsGen->incremental_collection_failed(),
-           "Should have been noticed, reacted to and cleared");
-    _cmsGen->set_incremental_collection_failed();
-  }
-  *should_compact =
-    UseCMSCompactAtFullCollection &&
-    ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
-     GCCause::is_user_requested_gc(gch->gc_cause()) ||
-     gch->incremental_collection_will_fail(true /* consult_young */));
-  *should_start_over = false;
-  if (clear_all_soft_refs && !*should_compact) {
-    // We are about to do a last ditch collection attempt
-    // so it would normally make sense to do a compaction
-    // to reclaim as much space as possible.
-    if (CMSCompactWhenClearAllSoftRefs) {
-      // Default: The rationale is that in this case either
-      // we are past the final marking phase, in which case
-      // we'd have to start over, or so little has been done
-      // that there's little point in saving that work. Compaction
-      // appears to be the sensible choice in either case.
-      *should_compact = true;
-    } else {
-      // We have been asked to clear all soft refs, but not to
-      // compact. Make sure that we aren't past the final checkpoint
-      // phase, for that is where we process soft refs. If we are already
-      // past that phase, we'll need to redo the refs discovery phase and
-      // if necessary clear soft refs that weren't previously
-      // cleared. We do so by remembering the phase in which
-      // we came in, and if we are past the refs processing
-      // phase, we'll choose to just redo the mark-sweep
-      // collection from scratch.
-      if (_collectorState > FinalMarking) {
-        // We are past the refs processing phase;
-        // start over and do a fresh synchronous CMS cycle
-        _collectorState = Resetting; // skip to reset to start new cycle
-        reset(false /* == !asynch */);
-        *should_start_over = true;
-      } // else we can continue a possibly ongoing current cycle
-    }
-  }
-}
-
 // A work method used by the foreground collector to do
 // a mark-sweep-compact.
 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
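For reference, the compaction predicate that decide_foreground_collection_type() used to compute can be restated as a standalone boolean function. Flag names follow the deleted code; the parameters here are plain values standing in for the HotSpot globals:

    #include <cstdio>

    // Restatement of the deleted *should_compact computation.
    static bool should_compact(bool use_cms_compact_at_full_collection,
                               unsigned full_gcs_since_conc_gc,
                               unsigned cms_full_gcs_before_compaction,
                               bool user_requested_gc,
                               bool incremental_collection_will_fail) {
      return use_cms_compact_at_full_collection &&
             (full_gcs_since_conc_gc >= cms_full_gcs_before_compaction ||
              user_requested_gc ||
              incremental_collection_will_fail);
    }

    int main() {
      // 1: threshold reached, so the old heuristic would have compacted.
      std::printf("%d\n", should_compact(true, 1, 0, false, false));
      return 0;
    }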
@@ -1872,44 +1773,10 @@
   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
 
   // For a mark-sweep-compact, compute_new_size() will be called
   // in the heap's do_collection() method.
 }
 
-// A work method used by the foreground collector to do
-// a mark-sweep, after taking over from a possibly on-going
-// concurrent mark-sweep collection.
-void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
-  CollectorState first_state, bool should_start_over) {
-  if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
-                           "collector with count %d",
-                           _full_gcs_since_conc_gc);
-  }
-  switch (_collectorState) {
-    case Idling:
-      if (first_state == Idling || should_start_over) {
-        // The background GC was not active, or should
-        // restarted from scratch; start the cycle.
-        _collectorState = InitialMarking;
-      }
-      // If first_state was not Idling, then a background GC
-      // was in progress and has now finished. No need to do it
-      // again. Leave the state as Idling.
-      break;
-    case Precleaning:
-      // In the foreground case don't do the precleaning since
-      // it is not done concurrently and there is extra work
-      // required.
-      _collectorState = FinalMarking;
-  }
-  collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
-
-  // For a mark-sweep, compute_new_size() will be called
-  // in the heap's do_collection() method.
-}
-
-
 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
   ContiguousSpace* eden_space = dng->eden();
   ContiguousSpace* from_space = dng->from();
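The interesting part of the deleted do_mark_sweep_work() is the entry-state fix-up it performed before handing off to collect_in_foreground(). A compilable restatement of just that switch, with the state set trimmed to the states the switch touched (a sketch, not the HotSpot CollectorState type):

    #include <cstdio>

    enum State { Idling, InitialMarking, Precleaning, FinalMarking };

    // Nudge the state machine so the foreground walk starts at the right phase.
    static State entry_state(State current, State first_state, bool should_start_over) {
      switch (current) {
        case Idling:
          // Background GC was not active, or we must restart from scratch.
          if (first_state == Idling || should_start_over) {
            return InitialMarking;
          }
          return Idling; // background cycle already finished; nothing to redo
        case Precleaning:
          // Precleaning is skipped in the foreground: extra work, no concurrency win.
          return FinalMarking;
        default:
          return current;
      }
    }

    int main() {
      std::printf("%d\n", (int)entry_state(Idling, Idling, false)); // 1 == InitialMarking
      return 0;
    }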
@@ -2251,124 +2105,10 @@
 }
 
 void CMSCollector::report_heap_summary(GCWhen::Type when) {
   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
-}
-
-void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
-  assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
-         "Foreground collector should be waiting, not executing");
-  assert(Thread::current()->is_VM_thread(), "A foreground collection"
-         "may only be done by the VM Thread with the world stopped");
-  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-         "VM thread should have CMS token");
-
-  // The gc id is created in register_foreground_gc_start if this collection is synchronous
-  const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
-  NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
-                            true, NULL, gc_id);)
-  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-
-  HandleMark hm; // Discard invalid handles created during verification
-
-  if (VerifyBeforeGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify();
-  }
-
-  // Snapshot the soft reference policy to be used in this collection cycle.
-  ref_processor()->setup_policy(clear_all_soft_refs);
-
-  // Decide if class unloading should be done
-  update_should_unload_classes();
-
-  bool init_mark_was_synchronous = false; // until proven otherwise
-  while (_collectorState != Idling) {
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
-                             Thread::current(), _collectorState);
-    }
-    switch (_collectorState) {
-      case InitialMarking:
-        register_foreground_gc_start(cause);
-        init_mark_was_synchronous = true; // fact to be exploited in re-mark
-        checkpointRootsInitial(false);
-        assert(_collectorState == Marking, "Collector state should have changed"
-               " within checkpointRootsInitial()");
-        break;
-      case Marking:
-        // initial marking in checkpointRootsInitialWork has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before initial mark: ");
-        }
-        {
-          bool res = markFromRoots(false);
-          assert(res && _collectorState == FinalMarking, "Collector state should "
-                 "have changed");
-          break;
-        }
-      case FinalMarking:
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before re-mark: ");
-        }
-        checkpointRootsFinal(false, clear_all_soft_refs,
-                             init_mark_was_synchronous);
-        assert(_collectorState == Sweeping, "Collector state should not "
-               "have changed within checkpointRootsFinal()");
-        break;
-      case Sweeping:
-        // final marking in checkpointRootsFinal has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before sweep: ");
-        }
-        sweep(false);
-        assert(_collectorState == Resizing, "Incorrect state");
-        break;
-      case Resizing: {
-        // Sweeping has been completed; the actual resize in this case
-        // is done separately; nothing to be done in this state.
-        _collectorState = Resetting;
-        break;
-      }
-      case Resetting:
-        // The heap has been resized.
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before reset: ");
-        }
-        save_heap_summary();
-        reset(false);
-        assert(_collectorState == Idling, "Collector state should "
-               "have changed");
-        break;
-      case Precleaning:
-      case AbortablePreclean:
-        // Elide the preclean phase
-        _collectorState = FinalMarking;
-        break;
-      default:
-        ShouldNotReachHere();
-    }
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
-                             Thread::current(), _collectorState);
-    }
-  }
-
-  if (VerifyAfterGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify();
-  }
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
-                           " exiting collection CMS state %d",
-                           Thread::current(), _collectorState);
-  }
 }
 
 bool CMSCollector::waitForForegroundGC() {
   bool res = false;
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
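The deleted collect_in_foreground() was a synchronous walk of the same state machine the background CMS thread runs. A minimal sketch of just its state transitions (state names from the patch; verification, locking, tracing, and the real phase work are elided):

    #include <cstdio>

    enum CollectorState { Idling, InitialMarking, Marking, Precleaning,
                          AbortablePreclean, FinalMarking, Sweeping,
                          Resizing, Resetting };

    int main() {
      CollectorState state = InitialMarking;
      while (state != Idling) {
        switch (state) {
          case InitialMarking:    state = Marking;      break; // checkpointRootsInitial
          case Marking:           state = FinalMarking; break; // markFromRoots
          case Precleaning:
          case AbortablePreclean: state = FinalMarking; break; // preclean elided in foreground
          case FinalMarking:      state = Sweeping;     break; // checkpointRootsFinal
          case Sweeping:          state = Resizing;     break; // sweep
          case Resizing:          state = Resetting;    break; // resize done separately
          case Resetting:         state = Idling;       break; // reset
          default:                return 1;             // ShouldNotReachHere
        }
        std::printf("next CMS state %d\n", (int)state);
      }
      return 0;
    }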
@@ -3343,47 +3083,34 @@
 };
 
 // Checkpoint the roots into this generation from outside
 // this generation. [Note this initial checkpoint need only
 // be approximate -- we'll do a catch up phase subsequently.]
-void CMSCollector::checkpointRootsInitial(bool asynch) {
+void CMSCollector::checkpointRootsInitial() {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
 
   save_heap_summary();
   report_heap_summary(GCWhen::BeforeGC);
 
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
-  if (asynch) {
+  {
     // acquire locks for subsequent manipulations
     MutexLockerEx x(bitMapLock(),
                     Mutex::_no_safepoint_check_flag);
-    checkpointRootsInitialWork(asynch);
+    checkpointRootsInitialWork();
     // enable ("weak") refs discovery
     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     _collectorState = Marking;
-  } else {
-    // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
-    // which recognizes if we are a CMS generation, and doesn't try to turn on
-    // discovery; verify that they aren't meddling.
-    assert(!rp->discovery_is_atomic(),
-           "incorrect setting of discovery predicate");
-    assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
-           "ref discovery for this generation kind");
-    // already have locks
-    checkpointRootsInitialWork(asynch);
-    // now enable ("weak") refs discovery
-    rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
-    _collectorState = Marking;
   }
   SpecializationStats::print();
 }
 
-void CMSCollector::checkpointRootsInitialWork(bool asynch) {
+void CMSCollector::checkpointRootsInitialWork() {
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   assert(_collectorState == InitialMarking, "just checking");
 
   // If there has not been a GC[n-1] since last GC[n] cycle completed,
   // precede our marking with a collection of all
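From this point the patch is mostly one refactoring applied over and over: every asynch parameter is now passed true by all surviving callers (the false callers lived in the foreground path deleted above), so the parameter and its stop-world branch fold away. Where the condition guarded a lock or timer scope, the braces survive as a bare block, as with the MutexLockerEx above. The pattern in miniature, as a toy example (not HotSpot code):

    #include <cstdio>

    // Before: a mode flag with only one live value left at the call sites.
    static void checkpoint_before(bool asynch) {
      if (asynch) {
        std::printf("concurrent path\n");
      } else {
        std::printf("stop-world path\n");  // no caller reaches this anymore
      }
    }

    // After: flag and dead branch removed.
    static void checkpoint_after() {
      std::printf("concurrent path\n");
    }

    int main() {
      checkpoint_before(true);
      checkpoint_after();
      return 0;
    }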
@@ -3481,52 +3208,43 @@
   // to be used to limit the extent of sweep in each generation.
   save_sweep_limits();
   verify_overflow_empty();
 }
 
-bool CMSCollector::markFromRoots(bool asynch) {
+bool CMSCollector::markFromRoots() {
   // we might be tempted to assert that:
-  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
+  // assert(!SafepointSynchronize::is_at_safepoint(),
   //        "inconsistent argument?");
   // However that wouldn't be right, because it's possible that
   // a safepoint is indeed in progress as a younger generation
   // stop-the-world GC happens even as we mark in this generation.
   assert(_collectorState == Marking, "inconsistent state?");
   check_correct_thread_executing();
   verify_overflow_empty();
 
-  bool res;
-  if (asynch) {
-    // Weak ref discovery note: We may be discovering weak
-    // refs in this generation concurrent (but interleaved) with
-    // weak ref discovery by a younger generation collector.
-
-    CMSTokenSyncWithLocks ts(true, bitMapLock());
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
-    res = markFromRootsWork(asynch);
-    if (res) {
-      _collectorState = Precleaning;
-    } else { // We failed and a foreground collection wants to take over
-      assert(_foregroundGCIsActive, "internal state inconsistency");
-      assert(_restart_addr == NULL, "foreground will restart from scratch");
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("bailing out to foreground collection");
-      }
-    }
-  } else {
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "inconsistent with asynch == false");
-    // already have locks
-    res = markFromRootsWork(asynch);
-    _collectorState = FinalMarking;
+  // Weak ref discovery note: We may be discovering weak
+  // refs in this generation concurrent (but interleaved) with
+  // weak ref discovery by a younger generation collector.
+
+  CMSTokenSyncWithLocks ts(true, bitMapLock());
+  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+  CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+  bool res = markFromRootsWork();
+  if (res) {
+    _collectorState = Precleaning;
+  } else { // We failed and a foreground collection wants to take over
+    assert(_foregroundGCIsActive, "internal state inconsistency");
+    assert(_restart_addr == NULL, "foreground will restart from scratch");
+    if (PrintGCDetails) {
+      gclog_or_tty->print_cr("bailing out to foreground collection");
+    }
   }
   verify_overflow_empty();
   return res;
 }
 
-bool CMSCollector::markFromRootsWork(bool asynch) {
+bool CMSCollector::markFromRootsWork() {
   // iterate over marked bits in bit map, doing a full scan and mark
   // from these roots using the following algorithm:
   //   . if oop is to the right of the current scan pointer,
   //     mark corresponding bit (we'll process it later)
   //   . else (oop is to left of current scan pointer)
@@ -3610,17 +3327,16 @@
   CMSConcMarkingTerminatorTerminator _term_term;
 
  public:
   CMSConcMarkingTask(CMSCollector* collector,
                      CompactibleFreeListSpace* cms_space,
-                     bool asynch,
                      YieldingFlexibleWorkGang* workers,
                      OopTaskQueueSet* task_queues):
     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
     _collector(collector),
     _cms_space(cms_space),
-    _asynch(asynch), _n_workers(0), _result(true),
+    _n_workers(0), _result(true),
     _task_queues(task_queues),
     _term(_n_workers, task_queues, _collector),
     _bit_map_lock(collector->bitMapLock())
   {
     _requested_size = _n_workers;
@@ -3876,12 +3591,11 @@
       // the last argument to the constructor indicates whether the
       // iteration should be incremental with periodic yields.
       Par_MarkFromRootsClosure cl(this, _collector, my_span,
                                   &_collector->_markBitMap,
                                   work_queue(i),
-                                  &_collector->_markStack,
-                                  _asynch);
+                                  &_collector->_markStack);
       _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
     } // else nothing to do for this task
   } // else nothing to do for this task
 }
 // We'd be tempted to assert here that since there are no
@@ -4123,11 +3836,11 @@
   // and is deferred for now; see CR# TBF. 07252005YSR. XXX
   assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
   // If _restart_addr is non-NULL, a marking stack overflow
   // occurred; we need to do a fresh marking iteration from the
   // indicated restart address.
-  if (_foregroundGCIsActive && asynch) {
+  if (_foregroundGCIsActive) {
     // We may be running into repeated stack overflows, having
     // reached the limit of the stack size, while making very
     // slow forward progress. It may be best to bail out and
     // let the foreground collector do its job.
     // Clear _restart_addr, so that foreground GC
@@ -4152,26 +3865,26 @@
   assert(tsk.completed(), "Inconsistency");
   assert(tsk.result() == true, "Inconsistency");
   return true;
 }
 
-bool CMSCollector::do_marking_st(bool asynch) {
+bool CMSCollector::do_marking_st() {
   ResourceMark rm;
   HandleMark hm;
 
   // Temporarily make refs discovery single threaded (non-MT)
   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
-    &_markStack, CMSYield && asynch);
+    &_markStack, CMSYield);
   // the last argument to iterate indicates whether the iteration
   // should be incremental with periodic yields.
   _markBitMap.iterate(&markFromRootsClosure);
   // If _restart_addr is non-NULL, a marking stack overflow
   // occurred; we need to do a fresh iteration from the
   // indicated restart address.
   while (_restart_addr != NULL) {
-    if (_foregroundGCIsActive && asynch) {
+    if (_foregroundGCIsActive) {
       // We may be running into repeated stack overflows, having
       // reached the limit of the stack size, while making very
       // slow forward progress. It may be best to bail out and
       // let the foreground collector do its job.
       // Clear _restart_addr, so that foreground GC
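The same substitution constant-folds data as well as control flow: with asynch always true, the yield argument CMSYield && asynch handed to the marking closures reduces to CMSYield alone (the same reduction appears later for SweepClosure). A sketch of that folding:

    #include <cstdio>

    // Yield predicate before and after substituting asynch == true.
    static bool should_yield_before(bool cms_yield, bool asynch) {
      return cms_yield && asynch;
    }
    static bool should_yield_after(bool cms_yield) {
      return cms_yield;
    }

    int main() {
      // Identical for every reachable call now that asynch is always true.
      std::printf("%d %d\n", should_yield_before(true, true), should_yield_after(true));
      return 0;
    }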
@@ -4740,25 +4452,18 @@
     }
   }
   FreelistLocker x(this);
   MutexLockerEx y(bitMapLock(),
                   Mutex::_no_safepoint_check_flag);
-    assert(!init_mark_was_synchronous, "but that's impossible!");
-    checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
-  } else {
-    // already have all the locks
-    checkpointRootsFinalWork(asynch, clear_all_soft_refs,
-                             init_mark_was_synchronous);
-  }
+  checkpointRootsFinalWork();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
   SpecializationStats::print();
 }
 
-void CMSCollector::checkpointRootsFinalWork(bool asynch,
-  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
-
+void CMSCollector::checkpointRootsFinalWork() {
   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
 
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
@@ -4771,64 +4476,58 @@
     CodeCache::gc_prologue();
   }
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
-  if (!init_mark_was_synchronous) {
   // We might assume that we need not fill TLAB's when
   // CMSScavengeBeforeRemark is set, because we may have just done
   // a scavenge which would have filled all TLAB's -- and besides
   // Eden would be empty. This however may not always be the case --
   // for instance although we asked for a scavenge, it may not have
   // happened because of a JNI critical section. We probably need
   // a policy for deciding whether we can in that case wait until
   // the critical section releases and then do the remark following
   // the scavenge, and skip it here. In the absence of that policy,
   // or of an indication of whether the scavenge did indeed occur,
   // we cannot rely on TLAB's having been filled and must do
   // so here just in case a scavenge did not happen.
   gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
   // Update the saved marks which may affect the root scans.
   gch->save_marks();
 
   if (CMSPrintEdenSurvivorChunks) {
     print_eden_and_survivor_chunk_arrays();
   }
 
   {
     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
 
     // Note on the role of the mod union table:
     // Since the marker in "markFromRoots" marks concurrently with
     // mutators, it is possible for some reachable objects not to have been
     // scanned. For instance, an only reference to an object A was
     // placed in object B after the marker scanned B. Unless B is rescanned,
     // A would be collected. Such updates to references in marked objects
     // are detected via the mod union table which is the set of all cards
     // dirtied since the first checkpoint in this GC cycle and prior to
     // the most recent young generation GC, minus those cleaned up by the
     // concurrent precleaning.
     if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
       GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
       do_remark_parallel();
     } else {
       GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
                     _gc_timer_cm, _gc_tracer_cm->gc_id());
       do_remark_non_parallel();
     }
   }
-  } else {
-    assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
-    // The initial mark was stop-world, so there's no rescanning to
-    // do; go straight on to the next step below.
-  }
   verify_work_stacks_empty();
   verify_overflow_empty();
 
   {
     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
-    refProcessingWork(asynch, clear_all_soft_refs);
+    refProcessingWork();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
 
   if (should_unload_classes()) {
@@ -5870,20 +5569,19 @@
   assert(workers != NULL, "Need parallel worker threads.");
   CMSRefEnqueueTaskProxy enq_task(task);
   workers->run_task(&enq_task);
 }
 
-void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
-
+void CMSCollector::refProcessingWork() {
   ResourceMark rm;
   HandleMark hm;
 
   ReferenceProcessor* rp = ref_processor();
   assert(rp->span().equals(_span), "Spans should be equal");
   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
   // Process weak references.
-  rp->setup_policy(clear_all_soft_refs);
+  rp->setup_policy(false);
   verify_work_stacks_empty();
 
   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                           &_markStack, false /* !preclean */);
   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
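refProcessingWork() now hard-codes setup_policy(false). The diff suggests this is safe because its only remaining caller is the remark step of the concurrent cycle, which never asked to clear all soft references; the deleted foreground path was the caller that forwarded clear_all_soft_refs, and a full collection still clears soft refs via do_compaction_work(clear_all_soft_refs). A sketch of the constant folding, with a plain struct standing in for the HotSpot ReferenceProcessor policy:

    #include <cstdio>

    struct RefDiscoveryPolicy { bool clear_all_soft_refs; };

    // Before: the policy was forwarded from callers, one of which
    // (the foreground collector) could pass true.
    static RefDiscoveryPolicy setup_policy_before(bool clear_all) {
      RefDiscoveryPolicy p = { clear_all };
      return p;
    }

    // After: the single remaining caller always passed false,
    // so the parameter becomes the constant.
    static RefDiscoveryPolicy setup_policy_after() {
      RefDiscoveryPolicy p = { false };
      return p;
    }

    int main() {
      std::printf("%d %d\n",
                  setup_policy_before(false).clear_all_soft_refs,
                  setup_policy_after().clear_all_soft_refs);
      return 0;
    }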
@@ -6017,18 +5715,18 @@
   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
 
   assert(!_intra_sweep_timer.is_active(), "Should not be active");
   _intra_sweep_timer.reset();
   _intra_sweep_timer.start();
-  if (asynch) {
+  {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
     // First sweep the old gen
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                                bitMapLock());
-      sweepWork(_cmsGen, asynch);
+      sweepWork(_cmsGen);
     }
 
     // Update Universe::_heap_*_at_gc figures.
     // We need all the free list locks to make the abstract state
     // transition from Sweeping to Resetting. See detailed note
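Note why "if (asynch) {" becomes a bare "{" here rather than disappearing: the braces bound the lifetime of the RAII objects (TraceCPUTime, CMSPhaseAccounting), whose destructors must fire exactly when the sweep phase ends. A self-contained illustration of the idiom:

    #include <cstdio>

    // Toy RAII object standing in for TraceCPUTime / CMSPhaseAccounting.
    struct PhaseAccounting {
      const char* name;
      explicit PhaseAccounting(const char* n) : name(n) {
        std::printf("[%s begin]\n", name);
      }
      ~PhaseAccounting() {
        std::printf("[%s end]\n", name); // must run exactly when the phase ends
      }
    };

    int main() {
      { // was: if (asynch) { ... } -- the braces stay to bound the RAII scope
        PhaseAccounting pa("sweep");
        std::printf("sweep work\n");
      } // pa destroyed here; accounting closes with the phase
      return 0;
    }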
@@ -6139,22 +5830,20 @@
   }
 }
 
 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
   if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
-  }
-  _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
-  _debug_collection_type =
-    (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("to %d ", _debug_collection_type);
+    if (_debug_concurrent_cycle) {
+      gclog_or_tty->print_cr("Rotate from concurrent to STW collections");
+    } else {
+      gclog_or_tty->print_cr("Rotate from STW to concurrent collections");
+    }
   }
+  _debug_concurrent_cycle = !_debug_concurrent_cycle;
 }
 
-void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
-                             bool asynch) {
+void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
   // We iterate over the space(s) underlying this generation,
   // checking the mark bit map to see if the bits corresponding
   // to specific blocks are marked or not. Blocks that are
   // marked are live and are not swept up. All remaining blocks
   // are swept up, with coalescing on-the-fly as we sweep up
@@ -6178,13 +5867,11 @@
   // GC's while we do a sweeping step. For the same reason, we might
   // as well take the bit map lock for the entire duration
 
   // check that we hold the requisite locks
   assert(have_cms_token(), "Should hold cms token");
-  assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
-         || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
-         "Should possess CMS token to sweep");
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
   assert_lock_strong(gen->freelistLock());
   assert_lock_strong(bitMapLock());
 
   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
   assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
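The sweep-token assertion simplifies the same way: substituting asynch == true makes the second disjunct unreachable, leaving only the CMS-thread check. Restated standalone, with the two booleans standing in for the real thread-token predicates:

    #include <cassert>

    int main() {
      const bool cms_thread_has_token = true;  // stand-in for the real predicate
      const bool vm_thread_has_token  = false; // ditto
      const bool asynch = true;                // the only mode left after the patch

      // Before:
      assert((asynch && cms_thread_has_token) ||
             (!asynch && vm_thread_has_token));
      // After substituting asynch == true, the disjunct drops out:
      assert(cms_thread_has_token);
      return 0;
    }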
@@ -6192,12 +5879,11 @@
                                         _inter_sweep_estimate.padded_average(),
                                         _intra_sweep_estimate.padded_average());
   gen->setNearLargestChunk();
 
   {
-    SweepClosure sweepClosure(this, gen, &_markBitMap,
-                              CMSYield && asynch);
+    SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
     // We need to free-up/coalesce garbage/blocks from a
     // co-terminal free run. This is done in the SweepClosure
     // destructor; so, do not remove this scope, else the
     // end-of-sweep-census below will be off by a little bit.
@@ -7191,20 +6875,18 @@
 
 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
                        CMSCollector* collector, MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
-                       CMSMarkStack* overflow_stack,
-                       bool should_yield):
+                       CMSMarkStack* overflow_stack):
   _collector(collector),
   _whole_span(collector->_span),
   _span(span),
   _bit_map(bit_map),
   _mut(&collector->_modUnionTable),
   _work_queue(work_queue),
   _overflow_stack(overflow_stack),
-  _yield(should_yield),
   _skip_bits(0),
   _task(task)
 {
   assert(_work_queue->size() == 0, "work_queue should be empty");
   _finger = span.start();