hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
changeset 27686 f91c482793e6
parent 27625 07829380b8cd
child 27689 660857fcef8c
comparing 27685:26a697375de3 with 27686:f91c482793e6
@@ -190 +190 @@
      ReservedSpace rs, size_t initial_byte_size, int level,
      CardTableRS* ct, bool use_adaptive_freelists,
      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
-  _debug_collection_type(Concurrent_collection_type),
+  _debug_concurrent_cycle(true),
   _did_compact(false)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
   HeapWord* end    = (HeapWord*) _virtual_space.high();
 
@@ -610 +610 @@
   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 
   // Clip CMSBootstrapOccupancy between 0 and 100.
   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
 
-  _full_gcs_since_conc_gc = 0;
-
   // Now tell CMS generations the identity of their collector
   ConcurrentMarkSweepGeneration::set_collector(this);
 
   // Create & start a CMS thread for this CMS collector
   _cmsThread = ConcurrentMarkSweepThread::start(this);
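
The constructor above converts percentage flags into fractions once, at initialization. A minimal standalone sketch of that conversion, including a defaulting rule for when no explicit percentage is supplied (the defaulting formula is an assumption modeled on init_initiating_occupancy, not a quote of it):

    // Sketch only: turning a percentage flag such as CMSBootstrapOccupancy
    // into a fraction, and deriving an initiating occupancy from
    // MinHeapFreeRatio plus a trigger ratio when the flag is unset (io < 0).
    #include <cassert>

    static double occupancy_fraction(long io /* explicit %, or -1 */,
                                     unsigned long tr /* trigger ratio % */,
                                     unsigned long min_heap_free_ratio) {
      assert(io <= 100 && (long)tr <= 100 && "Check the arguments");
      if (io >= 0) {
        return (double)io / 100.0;   // e.g. CMSBootstrapOccupancy = 50 -> 0.5
      }
      // Otherwise start from the permitted occupancy (100 - MinHeapFreeRatio)
      // and move toward 100% by the trigger ratio.
      return ((100 - min_heap_free_ratio) +
              (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
    }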
@@ -1246 +1244 @@
     }
     return true;
   }
 
   // For debugging purposes, change the type of collection.
-  // If the rotation is not on the concurrent collection
-  // type, don't start a concurrent collection.
+  // Rotate between concurrent and stop-the-world full GCs.
   NOT_PRODUCT(
-    if (RotateCMSCollectionTypes &&
-        (_cmsGen->debug_collection_type() !=
-          ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
-      assert(_cmsGen->debug_collection_type() !=
-        ConcurrentMarkSweepGeneration::Unknown_collection_type,
-        "Bad cms collection type");
-      return false;
+    if (RotateCMSCollectionTypes) {
+      return _cmsGen->debug_concurrent_cycle();
     }
   )
 
   FreelistLocker x(this);
   // ------------------------------------------------------------------
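
The rotation debug hook is reduced from a three-way collection-type enum to a single boolean that the generation flips between cycles. A sketch of the simplified mechanism (rotate_debug_collection_type is a hypothetical name for the flip):

    // Sketch of the simplified debug rotation under RotateCMSCollectionTypes.
    class ConcurrentMarkSweepGenerationSketch {
      bool _debug_concurrent_cycle;  // true: the next cycle runs concurrently
     public:
      ConcurrentMarkSweepGenerationSketch() : _debug_concurrent_cycle(true) {}
      bool debug_concurrent_cycle() const { return _debug_concurrent_cycle; }
      // Flip after every full collection so concurrent and stop-the-world
      // full collections alternate.
      void rotate_debug_collection_type() {
        _debug_concurrent_cycle = !_debug_concurrent_cycle;
      }
    };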
@@ -1439 +1431 @@
 void CMSCollector::collect(bool   full,
                            bool   clear_all_soft_refs,
                            size_t size,
                            bool   tlab)
 {
-  if (!UseCMSCollectionPassing && _collectorState > Idling) {
-    // For debugging purposes skip the collection if the state
-    // is not currently idle
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
-        Thread::current(), full, _collectorState);
-    }
-    return;
-  }
-
   // The following "if" branch is present for defensive reasons.
   // In the current uses of this interface, it can be replaced with:
   // assert(!GC_locker.is_active(), "Can't be called otherwise");
   // But I am not placing that assert here to allow future
   // generality in invoking this interface.
@@ -1464 +1446 @@
     // Need the free list locks for the call to free() in compute_new_size()
     compute_new_size();
     return;
   }
   acquire_control_and_collect(full, clear_all_soft_refs);
-  _full_gcs_since_conc_gc++;
 }
 
 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   unsigned int gc_count = gch->total_full_collections();
@@ -1634 +1615 @@
     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
   }
 
-  // Check if we need to do a compaction, or if not, whether
-  // we need to start the mark-sweep from scratch.
-  bool should_compact    = false;
-  bool should_start_over = false;
-  decide_foreground_collection_type(clear_all_soft_refs,
-    &should_compact, &should_start_over);
-
-NOT_PRODUCT(
-  if (RotateCMSCollectionTypes) {
-    if (_cmsGen->debug_collection_type() ==
-        ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
-      should_compact = true;
-    } else if (_cmsGen->debug_collection_type() ==
-               ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
-      should_compact = false;
-    }
-  }
-)
+  // Inform cms gen if this was due to partial collection failing.
+  // The CMS gen may use this fact to determine its expansion policy.
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
+    assert(!_cmsGen->incremental_collection_failed(),
+           "Should have been noticed, reacted to and cleared");
+    _cmsGen->set_incremental_collection_failed();
+  }
 
   if (first_state > Idling) {
     report_concurrent_mode_interruption();
   }
 
-  set_did_compact(should_compact);
-  if (should_compact) {
-    // If the collection is being acquired from the background
-    // collector, there may be references on the discovered
-    // references lists that have NULL referents (being those
-    // that were concurrently cleared by a mutator) or
-    // that are no longer active (having been enqueued concurrently
-    // by the mutator).
-    // Scrub the list of those references because Mark-Sweep-Compact
-    // code assumes referents are not NULL and that all discovered
-    // Reference objects are active.
-    ref_processor()->clean_up_discovered_references();
-
-    if (first_state > Idling) {
-      save_heap_summary();
-    }
-
-    do_compaction_work(clear_all_soft_refs);
-
-    // Has the GC time limit been exceeded?
-    DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
-    size_t max_eden_size = young_gen->max_capacity() -
-                           young_gen->to()->capacity() -
-                           young_gen->from()->capacity();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    GCCause::Cause gc_cause = gch->gc_cause();
-    size_policy()->check_gc_overhead_limit(_young_gen->used(),
-                                           young_gen->eden()->used(),
-                                           _cmsGen->max_capacity(),
-                                           max_eden_size,
-                                           full,
-                                           gc_cause,
-                                           gch->collector_policy());
-  } else {
-    do_mark_sweep_work(clear_all_soft_refs, first_state,
-      should_start_over);
-  }
+  set_did_compact(true);
+
+  // If the collection is being acquired from the background
+  // collector, there may be references on the discovered
+  // references lists that have NULL referents (being those
+  // that were concurrently cleared by a mutator) or
+  // that are no longer active (having been enqueued concurrently
+  // by the mutator).
+  // Scrub the list of those references because Mark-Sweep-Compact
+  // code assumes referents are not NULL and that all discovered
+  // Reference objects are active.
+  ref_processor()->clean_up_discovered_references();
+
+  if (first_state > Idling) {
+    save_heap_summary();
+  }
+
+  do_compaction_work(clear_all_soft_refs);
+
+  // Has the GC time limit been exceeded?
+  DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
+  size_t max_eden_size = young_gen->max_capacity() -
+                         young_gen->to()->capacity() -
+                         young_gen->from()->capacity();
+  GCCause::Cause gc_cause = gch->gc_cause();
+  size_policy()->check_gc_overhead_limit(_young_gen->used(),
+                                         young_gen->eden()->used(),
+                                         _cmsGen->max_capacity(),
+                                         max_eden_size,
+                                         full,
+                                         gc_cause,
+                                         gch->collector_policy());
+
   // Reset the expansion cause, now that we just completed
   // a collection cycle.
   clear_expansion_cause();
   _foregroundGCIsActive = false;
   return;
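
The overhead-limit check above sizes the maximum eden as the young generation's maximum capacity minus both survivor spaces, since the two survivor spaces are not available to eden. A worked example with illustrative numbers (not values taken from the JDK):

    // Worked example of the max_eden_size computation above.
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      size_t young_max_capacity = 64 * M;
      size_t to_capacity        = 8 * M;   // "to" survivor space
      size_t from_capacity      = 8 * M;   // "from" survivor space
      size_t max_eden_size = young_max_capacity - to_capacity - from_capacity;
      printf("max_eden_size = %zu MB\n", max_eden_size / M);  // prints 48 MB
      return 0;
    }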
@@ -1711 +1678 @@
   FreelistLocker z(this);
   MetaspaceGC::compute_new_size();
   _cmsGen->compute_new_size_free_list();
 }
 
-// A work method used by foreground collection to determine
-// what type of collection (compacting or not, continuing or fresh)
-// it should do.
-// NOTE: the intent is to make UseCMSCompactAtFullCollection
-// and CMSCompactWhenClearAllSoftRefs the default in the future
-// and do away with the flags after a suitable period.
-void CMSCollector::decide_foreground_collection_type(
-  bool clear_all_soft_refs, bool* should_compact,
-  bool* should_start_over) {
-  // Normally, we'll compact only if the UseCMSCompactAtFullCollection
-  // flag is set, and we have either requested a System.gc() or
-  // the number of full gc's since the last concurrent cycle
-  // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
-  // or if an incremental collection has failed
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_generation_policy(),
-         "You may want to check the correctness of the following");
-  // Inform cms gen if this was due to partial collection failing.
-  // The CMS gen may use this fact to determine its expansion policy.
-  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
-    assert(!_cmsGen->incremental_collection_failed(),
-           "Should have been noticed, reacted to and cleared");
-    _cmsGen->set_incremental_collection_failed();
-  }
-  *should_compact =
-    UseCMSCompactAtFullCollection &&
-    ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
-     GCCause::is_user_requested_gc(gch->gc_cause()) ||
-     gch->incremental_collection_will_fail(true /* consult_young */));
-  *should_start_over = false;
-  if (clear_all_soft_refs && !*should_compact) {
-    // We are about to do a last ditch collection attempt
-    // so it would normally make sense to do a compaction
-    // to reclaim as much space as possible.
-    if (CMSCompactWhenClearAllSoftRefs) {
-      // Default: The rationale is that in this case either
-      // we are past the final marking phase, in which case
-      // we'd have to start over, or so little has been done
-      // that there's little point in saving that work. Compaction
-      // appears to be the sensible choice in either case.
-      *should_compact = true;
-    } else {
-      // We have been asked to clear all soft refs, but not to
-      // compact. Make sure that we aren't past the final checkpoint
-      // phase, for that is where we process soft refs. If we are already
-      // past that phase, we'll need to redo the refs discovery phase and
-      // if necessary clear soft refs that weren't previously
-      // cleared. We do so by remembering the phase in which
-      // we came in, and if we are past the refs processing
-      // phase, we'll choose to just redo the mark-sweep
-      // collection from scratch.
-      if (_collectorState > FinalMarking) {
-        // We are past the refs processing phase;
-        // start over and do a fresh synchronous CMS cycle
-        _collectorState = Resetting; // skip to reset to start new cycle
-        reset(false /* == !asynch */);
-        *should_start_over = true;
-      } // else we can continue a possibly ongoing current cycle
-    }
-  }
-}
-
 // A work method used by the foreground collector to do
 // a mark-sweep-compact.
 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
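
With the foreground mark-sweep path deleted, acquire_control_and_collect() always compacts (set_did_compact(true)) instead of consulting the policy removed above. For reference, that removed policy reduced to roughly the predicate below (a sketch, with the inputs passed explicitly rather than read from flags and heap state):

    // Sketch of the deleted compaction decision; names mirror the old code.
    static bool should_compact(bool use_cms_compact_at_full_collection,
                               unsigned full_gcs_since_conc_gc,
                               unsigned cms_full_gcs_before_compaction,
                               bool user_requested_gc,
                               bool incremental_collection_will_fail) {
      return use_cms_compact_at_full_collection &&
             (full_gcs_since_conc_gc >= cms_full_gcs_before_compaction ||
              user_requested_gc ||
              incremental_collection_will_fail);
    }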
@@ -1785 +1690 @@
 
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
 
   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
-  if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
-    gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
-      "collections passed to foreground collector", _full_gcs_since_conc_gc);
-  }
 
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
@@ -1850 +1751 @@
          "There should be at most 2 free chunks after compaction");
   #endif // ASSERT
   _collectorState = Resetting;
   assert(_restart_addr == NULL,
          "Should have been NULL'd before baton was passed");
-  reset(false /* == !asynch */);
+  reset(false /* == !concurrent */);
   _cmsGen->reset_after_compaction();
   _concurrent_cycles_since_last_unload = 0;
 
   // Clear any data recorded in the PLAB chunk arrays.
   if (_survivor_plab_array != NULL) {
@@ -1872 +1773 @@
   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
 
   // For a mark-sweep-compact, compute_new_size() will be called
   // in the heap's do_collection() method.
 }
 
-// A work method used by the foreground collector to do
-// a mark-sweep, after taking over from a possibly on-going
-// concurrent mark-sweep collection.
-void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
-  CollectorState first_state, bool should_start_over) {
-  if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
-      "collector with count %d",
-      _full_gcs_since_conc_gc);
-  }
-  switch (_collectorState) {
-    case Idling:
-      if (first_state == Idling || should_start_over) {
-        // The background GC was not active, or should
-        // restarted from scratch;  start the cycle.
-        _collectorState = InitialMarking;
-      }
-      // If first_state was not Idling, then a background GC
-      // was in progress and has now finished.  No need to do it
-      // again.  Leave the state as Idling.
-      break;
-    case Precleaning:
-      // In the foreground case don't do the precleaning since
-      // it is not done concurrently and there is extra work
-      // required.
-      _collectorState = FinalMarking;
-  }
-  collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
-
-  // For a mark-sweep, compute_new_size() will be called
-  // in the heap's do_collection() method.
-}
-
 
 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
   ContiguousSpace* eden_space = dng->eden();
   ContiguousSpace* from_space = dng->from();
@@ -1987 +1854 @@
     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
     _c->_foregroundGCShouldWait = true;
   }
 };
 
-// There are separate collect_in_background and collect_in_foreground because of
-// the different locking requirements of the background collector and the
-// foreground collector.  There was originally an attempt to share
-// one "collect" method between the background collector and the foreground
-// collector but the if-then-else required made it cleaner to have
-// separate methods.
-void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
+void CMSCollector::collect_in_background(GCCause::Cause cause) {
   assert(Thread::current()->is_ConcurrentGC_thread(),
     "A CMS asynchronous collection is only allowed on a CMS thread.");
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   {
@@ -2034 +1895 @@
   }
 
   // Used for PrintGC
   size_t prev_used;
   if (PrintGC && Verbose) {
-    prev_used = _cmsGen->used(); // XXXPERM
+    prev_used = _cmsGen->used();
   }
 
   // The change of the collection state is normally done at this level;
   // the exceptions are phases that are executed while the world is
   // stopped.  For those phases the change of state is done while the
@@ -2114 +1975 @@
         // since the background collector may have yielded to the
         // foreground collector.
         break;
       case Marking:
         // initial marking in checkpointRootsInitialWork has been completed
-        if (markFromRoots(true)) { // we were successful
+        if (markFromRoots()) { // we were successful
           assert(_collectorState == Precleaning, "Collector state should "
             "have changed");
         } else {
           assert(_foregroundGCIsActive, "Internal state inconsistency");
         }
@@ -2144 +2005 @@
         }
         assert(_foregroundGCShouldWait, "block post-condition");
         break;
       case Sweeping:
         // final marking in checkpointRootsFinal has been completed
-        sweep(true);
+        sweep();
         assert(_collectorState == Resizing, "Collector state change "
           "to Resizing must be done under the free_list_lock");
-        _full_gcs_since_conc_gc = 0;
 
       case Resizing: {
         // Sweeping has been completed...
         // At this point the background collection has completed.
         // Don't move the call to compute_new_size() down
@@ -2220 +2080 @@
   if (PrintGC && Verbose) {
     _cmsGen->print_heap_change(prev_used);
   }
 }
 
-void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
-  if (!_cms_start_registered) {
-    register_gc_start(cause);
-  }
-}
-
 void CMSCollector::register_gc_start(GCCause::Cause cause) {
   _cms_start_registered = true;
   _gc_timer_cm->register_gc_start();
   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
 }
@@ -2251 +2105 @@
 }
 
 void CMSCollector::report_heap_summary(GCWhen::Type when) {
   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
-}
-
-void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
-  assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
-         "Foreground collector should be waiting, not executing");
-  assert(Thread::current()->is_VM_thread(), "A foreground collection"
-    "may only be done by the VM Thread with the world stopped");
-  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-         "VM thread should have CMS token");
-
-  // The gc id is created in register_foreground_gc_start if this collection is synchronous
-  const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
-  NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
-    true, NULL, gc_id);)
-  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-
-  HandleMark hm;  // Discard invalid handles created during verification
-
-  if (VerifyBeforeGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify();
-  }
-
-  // Snapshot the soft reference policy to be used in this collection cycle.
-  ref_processor()->setup_policy(clear_all_soft_refs);
-
-  // Decide if class unloading should be done
-  update_should_unload_classes();
-
-  bool init_mark_was_synchronous = false; // until proven otherwise
-  while (_collectorState != Idling) {
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
-        Thread::current(), _collectorState);
-    }
-    switch (_collectorState) {
-      case InitialMarking:
-        register_foreground_gc_start(cause);
-        init_mark_was_synchronous = true;  // fact to be exploited in re-mark
-        checkpointRootsInitial(false);
-        assert(_collectorState == Marking, "Collector state should have changed"
-          " within checkpointRootsInitial()");
-        break;
-      case Marking:
-        // initial marking in checkpointRootsInitialWork has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before initial mark: ");
-        }
-        {
-          bool res = markFromRoots(false);
-          assert(res && _collectorState == FinalMarking, "Collector state should "
-            "have changed");
-          break;
-        }
-      case FinalMarking:
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before re-mark: ");
-        }
-        checkpointRootsFinal(false, clear_all_soft_refs,
-                             init_mark_was_synchronous);
-        assert(_collectorState == Sweeping, "Collector state should not "
-          "have changed within checkpointRootsFinal()");
-        break;
-      case Sweeping:
-        // final marking in checkpointRootsFinal has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before sweep: ");
-        }
-        sweep(false);
-        assert(_collectorState == Resizing, "Incorrect state");
-        break;
-      case Resizing: {
-        // Sweeping has been completed; the actual resize in this case
-        // is done separately; nothing to be done in this state.
-        _collectorState = Resetting;
-        break;
-      }
-      case Resetting:
-        // The heap has been resized.
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before reset: ");
-        }
-        save_heap_summary();
-        reset(false);
-        assert(_collectorState == Idling, "Collector state should "
-          "have changed");
-        break;
-      case Precleaning:
-      case AbortablePreclean:
-        // Elide the preclean phase
-        _collectorState = FinalMarking;
-        break;
-      default:
-        ShouldNotReachHere();
-    }
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
-        Thread::current(), _collectorState);
-    }
-  }
-
-  if (VerifyAfterGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify();
-  }
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
-      " exiting collection CMS state %d",
-      Thread::current(), _collectorState);
-  }
 }
 
 bool CMSCollector::waitForForegroundGC() {
   bool res = false;
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
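
The deleted collect_in_foreground() drove the same phase machine the background collector uses, except that it elided the preclean phases. A compressed sketch of the happy-path progression between the states named in this code (transitions simplified; bail-out and error paths omitted):

    // Sketch of the CMS phase progression; states copied from the code above.
    enum CollectorStateSketch {
      Idling, InitialMarking, Marking, Precleaning, AbortablePreclean,
      FinalMarking, Sweeping, Resizing, Resetting
    };

    static CollectorStateSketch next_state(CollectorStateSketch s) {
      switch (s) {
        case Idling:            return InitialMarking;
        case InitialMarking:    return Marking;        // STW initial mark done
        case Marking:           return Precleaning;    // concurrent mark done
        case Precleaning:       return AbortablePreclean;
        case AbortablePreclean: return FinalMarking;   // STW remark next
        case FinalMarking:      return Sweeping;
        case Sweeping:          return Resizing;
        case Resizing:          return Resetting;
        case Resetting:         return Idling;         // cycle complete
      }
      return Idling;
    }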
@@ -3343 +3083 @@
 };
 
 // Checkpoint the roots into this generation from outside
 // this generation. [Note this initial checkpoint need only
 // be approximate -- we'll do a catch up phase subsequently.]
-void CMSCollector::checkpointRootsInitial(bool asynch) {
+void CMSCollector::checkpointRootsInitial() {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
 
   save_heap_summary();
   report_heap_summary(GCWhen::BeforeGC);
 
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
-  if (asynch) {
+  {
     // acquire locks for subsequent manipulations
     MutexLockerEx x(bitMapLock(),
                     Mutex::_no_safepoint_check_flag);
-    checkpointRootsInitialWork(asynch);
+    checkpointRootsInitialWork();
     // enable ("weak") refs discovery
     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     _collectorState = Marking;
-  } else {
-    // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
-    // which recognizes if we are a CMS generation, and doesn't try to turn on
-    // discovery; verify that they aren't meddling.
-    assert(!rp->discovery_is_atomic(),
-           "incorrect setting of discovery predicate");
-    assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
-           "ref discovery for this generation kind");
-    // already have locks
-    checkpointRootsInitialWork(asynch);
-    // now enable ("weak") refs discovery
-    rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
-    _collectorState = Marking;
-  }
   }
   SpecializationStats::print();
 }
 
-void CMSCollector::checkpointRootsInitialWork(bool asynch) {
+void CMSCollector::checkpointRootsInitialWork() {
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   assert(_collectorState == InitialMarking, "just checking");
 
   // If there has not been a GC[n-1] since last GC[n] cycle completed,
   // precede our marking with a collection of all
@@ -3481 +3208 @@
   // to be used to limit the extent of sweep in each generation.
   save_sweep_limits();
   verify_overflow_empty();
 }
 
-bool CMSCollector::markFromRoots(bool asynch) {
+bool CMSCollector::markFromRoots() {
   // we might be tempted to assert that:
-  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
+  // assert(!SafepointSynchronize::is_at_safepoint(),
   //        "inconsistent argument?");
   // However that wouldn't be right, because it's possible that
   // a safepoint is indeed in progress as a younger generation
   // stop-the-world GC happens even as we mark in this generation.
   assert(_collectorState == Marking, "inconsistent state?");
   check_correct_thread_executing();
   verify_overflow_empty();
 
-  bool res;
-  if (asynch) {
-    // Weak ref discovery note: We may be discovering weak
-    // refs in this generation concurrent (but interleaved) with
-    // weak ref discovery by a younger generation collector.
-
-    CMSTokenSyncWithLocks ts(true, bitMapLock());
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
-    res = markFromRootsWork(asynch);
-    if (res) {
-      _collectorState = Precleaning;
-    } else { // We failed and a foreground collection wants to take over
-      assert(_foregroundGCIsActive, "internal state inconsistency");
-      assert(_restart_addr == NULL,  "foreground will restart from scratch");
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("bailing out to foreground collection");
-      }
-    }
-  } else {
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "inconsistent with asynch == false");
-    // already have locks
-    res = markFromRootsWork(asynch);
-    _collectorState = FinalMarking;
-  }
+  // Weak ref discovery note: We may be discovering weak
+  // refs in this generation concurrent (but interleaved) with
+  // weak ref discovery by a younger generation collector.
+
+  CMSTokenSyncWithLocks ts(true, bitMapLock());
+  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+  CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+  bool res = markFromRootsWork();
+  if (res) {
+    _collectorState = Precleaning;
+  } else { // We failed and a foreground collection wants to take over
+    assert(_foregroundGCIsActive, "internal state inconsistency");
+    assert(_restart_addr == NULL,  "foreground will restart from scratch");
+    if (PrintGCDetails) {
+      gclog_or_tty->print_cr("bailing out to foreground collection");
+    }
+  }
   verify_overflow_empty();
   return res;
 }
 
-bool CMSCollector::markFromRootsWork(bool asynch) {
+bool CMSCollector::markFromRootsWork() {
   // iterate over marked bits in bit map, doing a full scan and mark
   // from these roots using the following algorithm:
   // . if oop is to the right of the current scan pointer,
   //   mark corresponding bit (we'll process it later)
   // . else (oop is to left of current scan pointer)
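
The comment in markFromRootsWork() describes finger-style marking over the bit map: objects discovered at or ahead of the scan pointer only need their bit set (the sweep of the bitmap will reach them), while objects behind it must be pushed and drained explicitly. A minimal sketch of that discipline with stand-in types:

    // Sketch of finger-based marking; std::set/vector stand in for the
    // mark bit map and mark stack of the real collector.
    #include <set>
    #include <vector>

    typedef const void* HeapWordSketch;

    struct MarkSketch {
      std::set<HeapWordSketch> bitmap;    // stands in for the mark bit map
      std::vector<HeapWordSketch> stack;  // stands in for the mark stack
      HeapWordSketch finger;              // current scan pointer

      void discover(HeapWordSketch obj) {
        if (bitmap.insert(obj).second) {  // newly marked
          if (obj < finger) {
            stack.push_back(obj);         // behind the finger: revisit explicitly
          }                               // ahead of the finger: the scan gets it
        }
      }
    };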
@@ -3547 +3265 @@
 
   verify_work_stacks_empty();
   verify_overflow_empty();
   bool result = false;
   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
-    result = do_marking_mt(asynch);
+    result = do_marking_mt();
   } else {
-    result = do_marking_st(asynch);
+    result = do_marking_st();
   }
   return result;
 }
 
 // Forward decl
@@ -3589 +3307 @@
 
 // MT Concurrent Marking Task
 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
   CMSCollector* _collector;
   int           _n_workers;                  // requested/desired # workers
-  bool          _asynch;
   bool          _result;
   CompactibleFreeListSpace*  _cms_space;
   char          _pad_front[64];   // padding to ...
   HeapWord*     _global_finger;   // ... avoid sharing cache line
   char          _pad_back[64];
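
_pad_front and _pad_back keep _global_finger on its own cache line, so workers that update the finger do not false-share with neighboring fields of the task. The same idea in isolation:

    // Sketch: isolate a hot, contended field on its own cache line.
    struct PaddedFingerSketch {
      char  pad_front[64];   // one cache line before...
      void* global_finger;   // ...the contended field...
      char  pad_back[64];    // ...and one after
    };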
@@ -3610 +3327 @@
   CMSConcMarkingTerminatorTerminator _term_term;
 
  public:
   CMSConcMarkingTask(CMSCollector* collector,
                  CompactibleFreeListSpace* cms_space,
-                 bool asynch,
                  YieldingFlexibleWorkGang* workers,
                  OopTaskQueueSet* task_queues):
     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
     _collector(collector),
     _cms_space(cms_space),
-    _asynch(asynch), _n_workers(0), _result(true),
+    _n_workers(0), _result(true),
     _task_queues(task_queues),
     _term(_n_workers, task_queues, _collector),
     _bit_map_lock(collector->bitMapLock())
   {
     _requested_size = _n_workers;
@@ -3643 +3359 @@
   }
 
   void work(uint worker_id);
   bool should_yield() {
     return    ConcurrentMarkSweepThread::should_yield()
-           && !_collector->foregroundGCIsActive()
-           && _asynch;
+           && !_collector->foregroundGCIsActive();
   }
 
   virtual void coordinator_yield();  // stuff done by coordinator
   bool result() { return _result; }
 
@@ -3876 +3591 @@
         // the last argument to the constructor indicates whether the
         // iteration should be incremental with periodic yields.
         Par_MarkFromRootsClosure cl(this, _collector, my_span,
                                     &_collector->_markBitMap,
                                     work_queue(i),
-                                    &_collector->_markStack,
-                                    _asynch);
+                                    &_collector->_markStack);
         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
       } // else nothing to do for this task
     }   // else nothing to do for this task
   }
   // We'd be tempted to assert here that since there are no
@@ -4082 +3796 @@
   ConcurrentMarkSweepThread::synchronize(true);
   _bit_map_lock->lock_without_safepoint_check();
   _collector->startTimer();
 }
 
-bool CMSCollector::do_marking_mt(bool asynch) {
+bool CMSCollector::do_marking_mt() {
   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
                                        conc_workers()->total_workers(),
                                        conc_workers()->active_workers(),
                                        Threads::number_of_non_daemon_threads());
@@ -4094 +3808 @@
 
   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
 
   CMSConcMarkingTask tsk(this,
                          cms_space,
-                         asynch,
                          conc_workers(),
                          task_queues());
 
   // Since the actual number of workers we get may be different
   // from the number we requested above, do we need to do anything different
@@ -4123 +3836 @@
     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
     // If _restart_addr is non-NULL, a marking stack overflow
     // occurred; we need to do a fresh marking iteration from the
     // indicated restart address.
-    if (_foregroundGCIsActive && asynch) {
+    if (_foregroundGCIsActive) {
       // We may be running into repeated stack overflows, having
       // reached the limit of the stack size, while making very
       // slow forward progress. It may be best to bail out and
       // let the foreground collector do its job.
       // Clear _restart_addr, so that foreground GC
@@ -4152 +3865 @@
   assert(tsk.completed(), "Inconsistency");
   assert(tsk.result() == true, "Inconsistency");
   return true;
 }
 
-bool CMSCollector::do_marking_st(bool asynch) {
+bool CMSCollector::do_marking_st() {
   ResourceMark rm;
   HandleMark   hm;
 
   // Temporarily make refs discovery single threaded (non-MT)
   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
-    &_markStack, CMSYield && asynch);
+    &_markStack, CMSYield);
   // the last argument to iterate indicates whether the iteration
   // should be incremental with periodic yields.
   _markBitMap.iterate(&markFromRootsClosure);
   // If _restart_addr is non-NULL, a marking stack overflow
   // occurred; we need to do a fresh iteration from the
   // indicated restart address.
   while (_restart_addr != NULL) {
-    if (_foregroundGCIsActive && asynch) {
+    if (_foregroundGCIsActive) {
       // We may be running into repeated stack overflows, having
       // reached the limit of the stack size, while making very
       // slow forward progress. It may be best to bail out and
       // let the foreground collector do its job.
       // Clear _restart_addr, so that foreground GC
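
Both do_marking_mt() and do_marking_st() share the restart discipline visible here: when the marking stack overflows, the overflow point is recorded in _restart_addr and marking re-runs from that address, unless a waiting foreground collection makes bailing out cheaper. The control shape, as a sketch with placeholder callbacks:

    // Sketch of the overflow-restart loop; restart_addr plays the role of
    // _restart_addr, and the callables are placeholders for bitmap
    // iteration and the bail-out test.
    typedef const void* AddrSketch;

    template <typename Iterate, typename ShouldBailOut>
    static bool mark_with_restart(AddrSketch& restart_addr,
                                  Iterate iterate_from,
                                  ShouldBailOut should_bail_out) {
      iterate_from(nullptr);               // initial full pass
      while (restart_addr != nullptr) {    // the stack overflowed somewhere
        if (should_bail_out()) {           // e.g. foreground GC is waiting
          restart_addr = nullptr;          // let the foreground collector redo it
          return false;
        }
        AddrSketch from = restart_addr;
        restart_addr = nullptr;            // clear before the fresh pass
        iterate_from(from);                // re-scan from the recorded address
      }
      return true;
    }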
@@ -4701 +4414 @@
 
   verify_work_stacks_empty();
   verify_overflow_empty();
 }
 
-void CMSCollector::checkpointRootsFinal(bool asynch,
-  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
+void CMSCollector::checkpointRootsFinal() {
   assert(_collectorState == FinalMarking, "incorrect state transition?");
   check_correct_thread_executing();
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
@@ -4719 +4431 @@
   if (PrintGCDetails) {
     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
                         _young_gen->used() / K,
                         _young_gen->capacity() / K);
   }
-  if (asynch) {
+  {
     if (CMSScavengeBeforeRemark) {
       GenCollectedHeap* gch = GenCollectedHeap::heap();
       // Temporarily set flag to false, GCH->do_collection will
       // expect it to be false and set to true
       FlagSetting fl(gch->_is_gc_active, false);
@@ -4740 +4452 @@
       }
     }
     FreelistLocker x(this);
     MutexLockerEx y(bitMapLock(),
                     Mutex::_no_safepoint_check_flag);
-    assert(!init_mark_was_synchronous, "but that's impossible!");
-    checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
-  } else {
-    // already have all the locks
-    checkpointRootsFinalWork(asynch, clear_all_soft_refs,
-                             init_mark_was_synchronous);
+    checkpointRootsFinalWork();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
   SpecializationStats::print();
 }
 
-void CMSCollector::checkpointRootsFinalWork(bool asynch,
-  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
-
+void CMSCollector::checkpointRootsFinalWork() {
   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
 
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
  4771     CodeCache::gc_prologue();
  4476     CodeCache::gc_prologue();
  4772   }
  4477   }
  4773   assert(haveFreelistLocks(), "must have free list locks");
  4478   assert(haveFreelistLocks(), "must have free list locks");
  4774   assert_lock_strong(bitMapLock());
  4479   assert_lock_strong(bitMapLock());
  4775 
  4480 
  4776   if (!init_mark_was_synchronous) {
  4481   // We might assume that we need not fill TLABs when
  4777     // We might assume that we need not fill TLABs when
  4482   // CMSScavengeBeforeRemark is set, because we may have just done
  4778     // CMSScavengeBeforeRemark is set, because we may have just done
  4483   // a scavenge which would have filled all TLABs -- and besides
  4779     // a scavenge which would have filled all TLABs -- and besides
  4484   // Eden would be empty. This, however, may not always be the case --
  4780     // Eden would be empty. This, however, may not always be the case --
  4485   // for instance although we asked for a scavenge, it may not have
  4781     // for instance although we asked for a scavenge, it may not have
  4486   // happened because of a JNI critical section. We probably need
  4782     // happened because of a JNI critical section. We probably need
  4487   // a policy for deciding whether we can in that case wait until
  4783     // a policy for deciding whether we can in that case wait until
  4488   // the critical section releases and then do the remark following
  4784     // the critical section releases and then do the remark following
  4489   // the scavenge, and skip it here. In the absence of that policy,
  4785     // the scavenge, and skip it here. In the absence of that policy,
  4490   // or of an indication of whether the scavenge did indeed occur,
  4786     // or of an indication of whether the scavenge did indeed occur,
  4491   // we cannot rely on TLABs having been filled and must do
  4787     // we cannot rely on TLABs having been filled and must do
  4492   // so here just in case a scavenge did not happen.
  4788     // so here just in case a scavenge did not happen.
  4493   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  4789     gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  4494   // Update the saved marks which may affect the root scans.
  4790     // Update the saved marks which may affect the root scans.
  4495   gch->save_marks();
  4791     gch->save_marks();
  4496 
  4792 
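
"Parsability" here means that a linear scan can step through the space object by object; the unused tail of a TLAB would otherwise be an unwalkable gap. A toy model of plugging such a gap with a filler block follows -- the heap layout and names are invented for illustration, not the HotSpot representation.

    #include <cstdio>

    // Toy heap of "words": word 0 of each block holds its size, so a scan
    // can step block to block. An unused TLAB tail is made walkable by
    // writing a filler header over it ("fill, but no need to retire").
    int main() {
      int heap[10] = {};
      heap[0] = 3;                            // an allocated 3-word object
      int tlab_top = 3, tlab_end = 10;        // unused TLAB tail: words [3,10)
      heap[tlab_top] = tlab_end - tlab_top;   // filler header makes it walkable
      for (int i = 0; i < 10; i += heap[i])   // linear parse of the space
        std::printf("block at %d, size %d\n", i, heap[i]);
      return 0;
    }
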
  4497   if (CMSPrintEdenSurvivorChunks) {
  4793     if (CMSPrintEdenSurvivorChunks) {
  4498     print_eden_and_survivor_chunk_arrays();
  4794       print_eden_and_survivor_chunk_arrays();
  4499   }
  4795     }
  4500 
  4796 
  4501   {
  4797     {
  4502     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
  4798       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
  4503 
  4799 
  4504     // Note on the role of the mod union table:
  4800       // Note on the role of the mod union table:
  4505     // Since the marker in "markFromRoots" marks concurrently with
  4801       // Since the marker in "markFromRoots" marks concurrently with
  4506     // mutators, it is possible for some reachable objects not to have been
  4802       // mutators, it is possible for some reachable objects not to have been
  4507     // scanned. For instance, the only reference to an object A may be
  4803       // scanned. For instance, the only reference to an object A may be
  4508     // stored into object B after the marker has scanned B. Unless B is rescanned,
  4804       // stored into object B after the marker has scanned B. Unless B is rescanned,
  4509     // A would be collected. Such updates to references in marked objects
  4805       // A would be collected. Such updates to references in marked objects
  4510     // are detected via the mod union table which is the set of all cards
  4806       // are detected via the mod union table which is the set of all cards
  4511     // dirtied since the first checkpoint in this GC cycle and prior to
  4807       // dirtied since the first checkpoint in this GC cycle and prior to
  4512     // the most recent young generation GC, minus those cleaned up by the
  4808       // the most recent young generation GC, minus those cleaned up by the
  4513     // concurrent precleaning.
  4809       // concurrent precleaning.
  4514     if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
  4810       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
  4515       GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
  4811         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
  4516       do_remark_parallel();
  4812         do_remark_parallel();
  4517     } else {
  4813       } else {
  4518       GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
  4814         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
  4519                   _gc_timer_cm, _gc_tracer_cm->gc_id());
  4815                     _gc_timer_cm, _gc_tracer_cm->gc_id());
  4520       do_remark_non_parallel();
  4816         do_remark_non_parallel();
  4521     }
  4817       }
       
  4818     }
       
  4819   } else {
       
  4820     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
       
  4821     // The initial mark was stop-world, so there's no rescanning to
       
  4822     // do; go straight on to the next step below.
       
  4823   }
  4522   }
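
The mod union table described in the note above is, in essence, a card-granularity dirty set maintained across the concurrent phase: mutator writes set bits, precleaning clears the cards it rescans, and remark revisits whatever remains set. A self-contained sketch with invented sizes (512-byte cards) follows.

    #include <bitset>
    #include <cstdint>
    #include <cstdio>

    // One bit per 512-byte card. Writes since the initial checkpoint set
    // bits; concurrent precleaning clears the ones it rescans; remark
    // revisits whatever is still set.
    constexpr uintptr_t kCardShift = 9;        // 512-byte cards (invented)
    constexpr size_t    kCards     = 64;

    struct ModUnionTable {
      std::bitset<kCards> dirty;
      uintptr_t base = 0;
      void record_write(uintptr_t addr) { dirty.set((addr - base) >> kCardShift); }
      void preclean(size_t card)        { dirty.reset(card); }
    };

    int main() {
      ModUnionTable mut;
      mut.record_write(0x200);                 // mutator dirtied card 1
      mut.record_write(0x2400);                // and card 18
      mut.preclean(1);                         // precleaning handled card 1
      for (size_t c = 0; c < kCards; ++c)
        if (mut.dirty.test(c)) std::printf("remark rescans card %zu\n", c);
      return 0;
    }
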
  4824   verify_work_stacks_empty();
  4523   verify_work_stacks_empty();
  4825   verify_overflow_empty();
  4524   verify_overflow_empty();
  4826 
  4525 
  4827   {
  4526   {
  4828     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
  4527     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
  4829     refProcessingWork(asynch, clear_all_soft_refs);
  4528     refProcessingWork();
  4830   }
  4529   }
  4831   verify_work_stacks_empty();
  4530   verify_work_stacks_empty();
  4832   verify_overflow_empty();
  4531   verify_overflow_empty();
  4833 
  4532 
  4834   if (should_unload_classes()) {
  4533   if (should_unload_classes()) {
  5870   assert(workers != NULL, "Need parallel worker threads.");
  5569   assert(workers != NULL, "Need parallel worker threads.");
  5871   CMSRefEnqueueTaskProxy enq_task(task);
  5570   CMSRefEnqueueTaskProxy enq_task(task);
  5872   workers->run_task(&enq_task);
  5571   workers->run_task(&enq_task);
  5873 }
  5572 }
  5874 
  5573 
  5875 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
  5574 void CMSCollector::refProcessingWork() {
  5876 
       
  5877   ResourceMark rm;
  5575   ResourceMark rm;
  5878   HandleMark   hm;
  5576   HandleMark   hm;
  5879 
  5577 
  5880   ReferenceProcessor* rp = ref_processor();
  5578   ReferenceProcessor* rp = ref_processor();
  5881   assert(rp->span().equals(_span), "Spans should be equal");
  5579   assert(rp->span().equals(_span), "Spans should be equal");
  5882   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
  5580   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
  5883   // Process weak references.
  5581   // Process weak references.
  5884   rp->setup_policy(clear_all_soft_refs);
  5582   rp->setup_policy(false);
  5885   verify_work_stacks_empty();
  5583   verify_work_stacks_empty();
  5886 
  5584 
  5887   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
  5585   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
  5888                                           &_markStack, false /* !preclean */);
  5586                                           &_markStack, false /* !preclean */);
  5889   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
  5587   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
  6003     }
  5701     }
  6004   }
  5702   }
  6005 }
  5703 }
  6006 #endif
  5704 #endif
  6007 
  5705 
  6008 void CMSCollector::sweep(bool asynch) {
  5706 void CMSCollector::sweep() {
  6009   assert(_collectorState == Sweeping, "just checking");
  5707   assert(_collectorState == Sweeping, "just checking");
  6010   check_correct_thread_executing();
  5708   check_correct_thread_executing();
  6011   verify_work_stacks_empty();
  5709   verify_work_stacks_empty();
  6012   verify_overflow_empty();
  5710   verify_overflow_empty();
  6013   increment_sweep_count();
  5711   increment_sweep_count();
  6017   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
  5715   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
  6018 
  5716 
  6019   assert(!_intra_sweep_timer.is_active(), "Should not be active");
  5717   assert(!_intra_sweep_timer.is_active(), "Should not be active");
  6020   _intra_sweep_timer.reset();
  5718   _intra_sweep_timer.reset();
  6021   _intra_sweep_timer.start();
  5719   _intra_sweep_timer.start();
  6022   if (asynch) {
  5720   {
  6023     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  5721     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  6024     CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
  5722     CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
  6025     // First sweep the old gen
  5723     // First sweep the old gen
  6026     {
  5724     {
  6027       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
  5725       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
  6028                                bitMapLock());
  5726                                bitMapLock());
  6029       sweepWork(_cmsGen, asynch);
  5727       sweepWork(_cmsGen);
  6030     }
  5728     }
  6031 
  5729 
  6032     // Update Universe::_heap_*_at_gc figures.
  5730     // Update Universe::_heap_*_at_gc figures.
  6033     // We need all the free list locks to make the abstract state
  5731     // We need all the free list locks to make the abstract state
  6034     // transition from Sweeping to Resetting. See detailed note
  5732     // transition from Sweeping to Resetting. See detailed note
  6038       // Update heap occupancy information which is used as
  5736       // Update heap occupancy information which is used as
  6039       // input to soft ref clearing policy at the next gc.
  5737       // input to soft ref clearing policy at the next gc.
  6040       Universe::update_heap_info_at_gc();
  5738       Universe::update_heap_info_at_gc();
  6041       _collectorState = Resizing;
  5739       _collectorState = Resizing;
  6042     }
  5740     }
  6043   } else {
       
  6044     // already have needed locks
       
  6045     sweepWork(_cmsGen,  asynch);
       
  6046     // Update heap occupancy information which is used as
       
  6047     // input to soft ref clearing policy at the next gc.
       
  6048     Universe::update_heap_info_at_gc();
       
  6049     _collectorState = Resizing;
       
  6050   }
  5741   }
  6051   verify_work_stacks_empty();
  5742   verify_work_stacks_empty();
  6052   verify_overflow_empty();
  5743   verify_overflow_empty();
  6053 
  5744 
  6054   if (should_unload_classes()) {
  5745   if (should_unload_classes()) {
  6139   }
  5830   }
  6140 }
  5831 }
  6141 
  5832 
  6142 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
  5833 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
  6143   if (PrintGCDetails && Verbose) {
  5834   if (PrintGCDetails && Verbose) {
  6144     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
  5835     if (_debug_concurrent_cycle) {
  6145   }
  5836       gclog_or_tty->print_cr("Rotate from concurrent to STW collections");
  6146   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
  5837     } else {
  6147   _debug_collection_type =
  5838       gclog_or_tty->print_cr("Rotate from STW to concurrent collections");
  6148     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
  5839     }
  6149   if (PrintGCDetails && Verbose) {
  5840   }
  6150     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
  5841   _debug_concurrent_cycle = !_debug_concurrent_cycle;
  6151   }
  5842 }
  6152 }
  5843 
  6153 
  5844 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
  6154 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
       
  6155   bool asynch) {
       
  6156   // We iterate over the space(s) underlying this generation,
  5845   // We iterate over the space(s) underlying this generation,
  6157   // checking the mark bit map to see if the bits corresponding
  5846   // checking the mark bit map to see if the bits corresponding
  6158   // to specific blocks are marked or not. Blocks that are
  5847   // to specific blocks are marked or not. Blocks that are
  6159   // marked are live and are not swept up. All remaining blocks
  5848   // marked are live and are not swept up. All remaining blocks
  6160   // are swept up, with coalescing on-the-fly as we sweep up
  5849   // are swept up, with coalescing on-the-fly as we sweep up
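
The sweep described above -- marked blocks are skipped as live, everything between them is coalesced on the fly into single free runs -- can be sketched in a few lines over a toy block layout. The representation is invented for illustration; it is not the CMS free-list space.

    #include <cstdio>

    // Toy sweep: size[] gives each block's length in words, live[] its
    // mark bit. Unmarked neighbors coalesce on the fly into one free run.
    int main() {
      const int n = 5;
      int  size[n] = {2, 3, 1, 4, 2};
      bool live[n] = {true, false, false, true, false};
      int free_run = 0;
      for (int b = 0; b < n; ++b) {
        if (live[b]) {
          if (free_run > 0) {                  // a live block closes the run
            std::printf("free run of %d words\n", free_run);
            free_run = 0;
          }
        } else {
          free_run += size[b];                 // coalesce into the run
        }
      }
      if (free_run > 0) std::printf("co-terminal free run of %d words\n", free_run);
      return 0;
    }
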
  6178   // GCs while we do a sweeping step. For the same reason, we might
  5867   // GCs while we do a sweeping step. For the same reason, we might
  6179   // as well take the bit map lock for the entire duration
  5868   // as well take the bit map lock for the entire duration
  6180 
  5869 
  6181   // check that we hold the requisite locks
  5870   // check that we hold the requisite locks
  6182   assert(have_cms_token(), "Should hold cms token");
  5871   assert(have_cms_token(), "Should hold cms token");
  6183   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
  5872   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
  6184          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
       
  6185         "Should possess CMS token to sweep");
       
  6186   assert_lock_strong(gen->freelistLock());
  5873   assert_lock_strong(gen->freelistLock());
  6187   assert_lock_strong(bitMapLock());
  5874   assert_lock_strong(bitMapLock());
  6188 
  5875 
  6189   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
  5876   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
  6190   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
  5877   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
  6192                                       _inter_sweep_estimate.padded_average(),
  5879                                       _inter_sweep_estimate.padded_average(),
  6193                                       _intra_sweep_estimate.padded_average());
  5880                                       _intra_sweep_estimate.padded_average());
  6194   gen->setNearLargestChunk();
  5881   gen->setNearLargestChunk();
  6195 
  5882 
  6196   {
  5883   {
  6197     SweepClosure sweepClosure(this, gen, &_markBitMap,
  5884     SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
  6198                             CMSYield && asynch);
       
  6199     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
  5885     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
  6200     // We need to free up and coalesce garbage blocks from a
  5886     // We need to free up and coalesce garbage blocks from a
  6201     // co-terminal free run. This is done in the SweepClosure
  5887     // co-terminal free run. This is done in the SweepClosure
  6202     // destructor; so, do not remove this scope, else the
  5888     // destructor; so, do not remove this scope, else the
  6203     // end-of-sweep-census below will be off by a little bit.
  5889     // end-of-sweep-census below will be off by a little bit.
  6211   }
  5897   }
  6212 }
  5898 }
  6213 
  5899 
  6214 // Reset CMS data structures (for now just the marking bit map)
  5900 // Reset CMS data structures (for now just the marking bit map)
  6215 // in preparation for the next cycle.
  5901 // in preparation for the next cycle.
  6216 void CMSCollector::reset(bool asynch) {
  5902 void CMSCollector::reset(bool concurrent) {
  6217   if (asynch) {
  5903   if (concurrent) {
  6218     CMSTokenSyncWithLocks ts(true, bitMapLock());
  5904     CMSTokenSyncWithLocks ts(true, bitMapLock());
  6219 
  5905 
  6220     // If the state is not "Resetting", the foreground thread
  5906     // If the state is not "Resetting", the foreground thread
  6221     // has already done the collection and the resetting.
  5907     // has already done the collection and the resetting.
  6222     if (_collectorState != Resetting) {
  5908     if (_collectorState != Resetting) {
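
In the concurrent case, the reset clears the marking bit map in bounded chunks so the thread can yield between chunks. A schematic version follows -- the chunking and names are invented, not the CMSBitMap API.

    #include <bitset>
    #include <cstdio>

    // Clear a mark bitmap in fixed-size chunks, with a yield point between
    // chunks -- the shape of a concurrent "Resetting" phase.
    int main() {
      std::bitset<1024> mark_bits;
      mark_bits.set();                          // pretend a cycle marked bits
      const size_t chunk = 256;
      for (size_t i = 0; i < mark_bits.size(); i += chunk) {
        for (size_t j = i; j < i + chunk; ++j) mark_bits.reset(j);
        // A real collector would check for yield/abort requests here.
        std::printf("cleared bits [%zu, %zu)\n", i, i + chunk);
      }
      return 0;
    }
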
  6291   TraceCollectorStats tcs(counters());
  5977   TraceCollectorStats tcs(counters());
  6292 
  5978 
  6293   switch (op) {
  5979   switch (op) {
  6294     case CMS_op_checkpointRootsInitial: {
  5980     case CMS_op_checkpointRootsInitial: {
  6295       SvcGCMarker sgcm(SvcGCMarker::OTHER);
  5981       SvcGCMarker sgcm(SvcGCMarker::OTHER);
  6296       checkpointRootsInitial(true);       // asynch
  5982       checkpointRootsInitial();
  6297       if (PrintGC) {
  5983       if (PrintGC) {
  6298         _cmsGen->printOccupancy("initial-mark");
  5984         _cmsGen->printOccupancy("initial-mark");
  6299       }
  5985       }
  6300       break;
  5986       break;
  6301     }
  5987     }
  6302     case CMS_op_checkpointRootsFinal: {
  5988     case CMS_op_checkpointRootsFinal: {
  6303       SvcGCMarker sgcm(SvcGCMarker::OTHER);
  5989       SvcGCMarker sgcm(SvcGCMarker::OTHER);
  6304       checkpointRootsFinal(true,    // asynch
  5990       checkpointRootsFinal();
  6305                            false,   // !clear_all_soft_refs
       
  6306                            false);  // !init_mark_was_synchronous
       
  6307       if (PrintGC) {
  5991       if (PrintGC) {
  6308         _cmsGen->printOccupancy("remark");
  5992         _cmsGen->printOccupancy("remark");
  6309       }
  5993       }
  6310       break;
  5994       break;
  6311     }
  5995     }
  7191 
  6875 
  7192 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
  6876 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
  7193                        CMSCollector* collector, MemRegion span,
  6877                        CMSCollector* collector, MemRegion span,
  7194                        CMSBitMap* bit_map,
  6878                        CMSBitMap* bit_map,
  7195                        OopTaskQueue* work_queue,
  6879                        OopTaskQueue* work_queue,
  7196                        CMSMarkStack*  overflow_stack,
  6880                        CMSMarkStack*  overflow_stack):
  7197                        bool should_yield):
       
  7198   _collector(collector),
  6881   _collector(collector),
  7199   _whole_span(collector->_span),
  6882   _whole_span(collector->_span),
  7200   _span(span),
  6883   _span(span),
  7201   _bit_map(bit_map),
  6884   _bit_map(bit_map),
  7202   _mut(&collector->_modUnionTable),
  6885   _mut(&collector->_modUnionTable),
  7203   _work_queue(work_queue),
  6886   _work_queue(work_queue),
  7204   _overflow_stack(overflow_stack),
  6887   _overflow_stack(overflow_stack),
  7205   _yield(should_yield),
       
  7206   _skip_bits(0),
  6888   _skip_bits(0),
  7207   _task(task)
  6889   _task(task)
  7208 {
  6890 {
  7209   assert(_work_queue->size() == 0, "work_queue should be empty");
  6891   assert(_work_queue->size() == 0, "work_queue should be empty");
  7210   _finger = span.start();
  6892   _finger = span.start();