src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
branchdatagramsocketimpl-branch
changeset 58678 9cf78a70fa4f
parent 54786 ebf733a324d4
child 58679 9c3209ff7550
equal deleted inserted replaced
58677:13588c901957 58678:9cf78a70fa4f
   690 size_t
   690 size_t
   691 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
   691 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
   692   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
   692   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
   693 }
   693 }
   694 
   694 
       
   695 size_t ConcurrentMarkSweepGeneration::used_stable() const {
       
   696   return cmsSpace()->used_stable();
       
   697 }
       
   698 
   695 size_t ConcurrentMarkSweepGeneration::max_available() const {
   699 size_t ConcurrentMarkSweepGeneration::max_available() const {
   696   return free() + _virtual_space.uncommitted_size();
   700   return free() + _virtual_space.uncommitted_size();
   697 }
   701 }
   698 
   702 
   699 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
   703 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  1008 // size of an object that may not yet have been initialized.
  1012 // size of an object that may not yet have been initialized.
  1009 
  1013 
  1010 // Things to support parallel young-gen collection.
  1014 // Things to support parallel young-gen collection.
  1011 oop
  1015 oop
  1012 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
  1016 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
  1013                                            oop old, markOop m,
  1017                                            oop old, markWord m,
  1014                                            size_t word_sz) {
  1018                                            size_t word_sz) {
  1015 #ifndef PRODUCT
  1019 #ifndef PRODUCT
  1016   if (CMSHeap::heap()->promotion_should_fail()) {
  1020   if (CMSHeap::heap()->promotion_should_fail()) {
  1017     return NULL;
  1021     return NULL;
  1018   }
  1022   }
  1521 void CMSCollector::compute_new_size() {
  1525 void CMSCollector::compute_new_size() {
  1522   assert_locked_or_safepoint(Heap_lock);
  1526   assert_locked_or_safepoint(Heap_lock);
  1523   FreelistLocker z(this);
  1527   FreelistLocker z(this);
  1524   MetaspaceGC::compute_new_size();
  1528   MetaspaceGC::compute_new_size();
  1525   _cmsGen->compute_new_size_free_list();
  1529   _cmsGen->compute_new_size_free_list();
       
  1530   // recalculate CMS used space after CMS collection
       
  1531   _cmsGen->cmsSpace()->recalculate_used_stable();
  1526 }
  1532 }
  1527 
  1533 
  1528 // A work method used by the foreground collector to do
  1534 // A work method used by the foreground collector to do
  1529 // a mark-sweep-compact.
  1535 // a mark-sweep-compact.
  1530 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  1536 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  2049 
  2055 
  2050 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
  2056 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
  2051 
  2057 
  2052   _capacity_at_prologue = capacity();
  2058   _capacity_at_prologue = capacity();
  2053   _used_at_prologue = used();
  2059   _used_at_prologue = used();
       
  2060   _cmsSpace->recalculate_used_stable();
  2054 
  2061 
  2055   // We enable promotion tracking so that card-scanning can recognize
  2062   // We enable promotion tracking so that card-scanning can recognize
  2056   // which objects have been promoted during this GC and skip them.
  2063   // which objects have been promoted during this GC and skip them.
  2057   for (uint i = 0; i < ParallelGCThreads; i++) {
  2064   for (uint i = 0; i < ParallelGCThreads; i++) {
  2058     _par_gc_thread_states[i]->promo.startTrackingPromotions();
  2065     _par_gc_thread_states[i]->promo.startTrackingPromotions();
  2121   }
  2128   }
  2122   // reset _eden_chunk_array so sampling starts afresh
  2129   // reset _eden_chunk_array so sampling starts afresh
  2123   _eden_chunk_index = 0;
  2130   _eden_chunk_index = 0;
  2124 
  2131 
  2125   size_t cms_used   = _cmsGen->cmsSpace()->used();
  2132   size_t cms_used   = _cmsGen->cmsSpace()->used();
       
  2133   _cmsGen->cmsSpace()->recalculate_used_stable();
  2126 
  2134 
  2127   // update performance counters - this uses a special version of
  2135   // update performance counters - this uses a special version of
  2128   // update_counters() that allows the utilization to be passed as a
  2136   // update_counters() that allows the utilization to be passed as a
  2129   // parameter, avoiding multiple calls to used().
  2137   // parameter, avoiding multiple calls to used().
  2130   //
  2138   //
  2635   CMSSynchronousYieldRequest yr;
  2643   CMSSynchronousYieldRequest yr;
  2636   assert(!tlab, "Can't deal with TLAB allocation");
  2644   assert(!tlab, "Can't deal with TLAB allocation");
  2637   MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
  2645   MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
  2638   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
  2646   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
  2639   if (GCExpandToAllocateDelayMillis > 0) {
  2647   if (GCExpandToAllocateDelayMillis > 0) {
  2640     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  2648     os::naked_sleep(GCExpandToAllocateDelayMillis);
  2641   }
  2649   }
  2642   return have_lock_and_allocate(word_size, tlab);
  2650   return have_lock_and_allocate(word_size, tlab);
  2643 }
  2651 }
  2644 
  2652 
  2645 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
  2653 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
  2674     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
  2682     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
  2675     // Now go around the loop and try alloc again;
  2683     // Now go around the loop and try alloc again;
  2676     // A competing par_promote might beat us to the expansion space,
  2684     // A competing par_promote might beat us to the expansion space,
  2677     // so we may go around the loop again if promotion fails again.
  2685     // so we may go around the loop again if promotion fails again.
  2678     if (GCExpandToAllocateDelayMillis > 0) {
  2686     if (GCExpandToAllocateDelayMillis > 0) {
  2679       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  2687       os::naked_sleep(GCExpandToAllocateDelayMillis);
  2680     }
  2688     }
  2681   }
  2689   }
  2682 }
  2690 }
  2683 
  2691 
  2684 
  2692 
  2701     expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
  2709     expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
  2702     // Now go around the loop and try alloc again;
  2710     // Now go around the loop and try alloc again;
  2703     // A competing allocation might beat us to the expansion space,
  2711     // A competing allocation might beat us to the expansion space,
  2704     // so we may go around the loop again if allocation fails again.
  2712     // so we may go around the loop again if allocation fails again.
  2705     if (GCExpandToAllocateDelayMillis > 0) {
  2713     if (GCExpandToAllocateDelayMillis > 0) {
  2706       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  2714       os::naked_sleep(GCExpandToAllocateDelayMillis);
  2707     }
  2715     }
  2708   }
  2716   }
  2709 }
  2717 }
  2710 
  2718 
  2711 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
  2719 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
  2814     checkpointRootsInitialWork();
  2822     checkpointRootsInitialWork();
  2815     // enable ("weak") refs discovery
  2823     // enable ("weak") refs discovery
  2816     rp->enable_discovery();
  2824     rp->enable_discovery();
  2817     _collectorState = Marking;
  2825     _collectorState = Marking;
  2818   }
  2826   }
       
  2827 
       
  2828   _cmsGen->cmsSpace()->recalculate_used_stable();
  2819 }
  2829 }
  2820 
  2830 
  2821 void CMSCollector::checkpointRootsInitialWork() {
  2831 void CMSCollector::checkpointRootsInitialWork() {
  2822   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  2832   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  2823   assert(_collectorState == InitialMarking, "just checking");
  2833   assert(_collectorState == InitialMarking, "just checking");
  3532   //
  3542   //
  3533   // Tony 2006.06.29
  3543   // Tony 2006.06.29
  3534   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
  3544   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
  3535                    ConcurrentMarkSweepThread::should_yield() &&
  3545                    ConcurrentMarkSweepThread::should_yield() &&
  3536                    !CMSCollector::foregroundGCIsActive(); ++i) {
  3546                    !CMSCollector::foregroundGCIsActive(); ++i) {
  3537     os::sleep(Thread::current(), 1, false);
  3547     os::naked_short_sleep(1);
  3538   }
  3548   }
  3539 
  3549 
  3540   ConcurrentMarkSweepThread::synchronize(true);
  3550   ConcurrentMarkSweepThread::synchronize(true);
  3541   _bit_map_lock->lock_without_safepoint_check();
  3551   _bit_map_lock->lock_without_safepoint_check();
  3542   _collector->startTimer();
  3552   _collector->startTimer();
  4175     }
  4185     }
  4176     FreelistLocker x(this);
  4186     FreelistLocker x(this);
  4177     MutexLocker y(bitMapLock(),
  4187     MutexLocker y(bitMapLock(),
  4178                   Mutex::_no_safepoint_check_flag);
  4188                   Mutex::_no_safepoint_check_flag);
  4179     checkpointRootsFinalWork();
  4189     checkpointRootsFinalWork();
       
  4190     _cmsGen->cmsSpace()->recalculate_used_stable();
  4180   }
  4191   }
  4181   verify_work_stacks_empty();
  4192   verify_work_stacks_empty();
  4182   verify_overflow_empty();
  4193   verify_overflow_empty();
  4183 }
  4194 }
  4184 
  4195 
  4248   verify_overflow_empty();
  4259   verify_overflow_empty();
  4249 
  4260 
  4250   if (should_unload_classes()) {
  4261   if (should_unload_classes()) {
  4251     heap->prune_scavengable_nmethods();
  4262     heap->prune_scavengable_nmethods();
  4252   }
  4263   }
  4253   JvmtiExport::gc_epilogue();
       
  4254 
  4264 
  4255   // If we encountered any (marking stack / work queue) overflow
  4265   // If we encountered any (marking stack / work queue) overflow
  4256   // events during the current CMS cycle, take appropriate
  4266   // events during the current CMS cycle, take appropriate
  4257   // remedial measures, where possible, so as to try and avoid
  4267   // remedial measures, where possible, so as to try and avoid
  4258   // recurrence of that condition.
  4268   // recurrence of that condition.
  5335     // We need all the free list locks to make the abstract state
  5345     // We need all the free list locks to make the abstract state
  5336     // transition from Sweeping to Resetting. See detailed note
  5346     // transition from Sweeping to Resetting. See detailed note
  5337     // further below.
  5347     // further below.
  5338     {
  5348     {
  5339       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
  5349       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
       
  5350 
  5340       // Update heap occupancy information which is used as
  5351       // Update heap occupancy information which is used as
  5341       // input to soft ref clearing policy at the next gc.
  5352       // input to soft ref clearing policy at the next gc.
  5342       Universe::update_heap_info_at_gc();
  5353       Universe::update_heap_info_at_gc();
       
  5354 
       
  5355       // recalculate CMS used space after CMS collection
       
  5356       _cmsGen->cmsSpace()->recalculate_used_stable();
       
  5357 
  5343       _collectorState = Resizing;
  5358       _collectorState = Resizing;
  5344     }
  5359     }
  5345   }
  5360   }
  5346   verify_work_stacks_empty();
  5361   verify_work_stacks_empty();
  5347   verify_overflow_empty();
  5362   verify_overflow_empty();
  5426   bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
  5441   bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
  5427   if (!full && current_is_young) {
  5442   if (!full && current_is_young) {
  5428     // Gather statistics on the young generation collection.
  5443     // Gather statistics on the young generation collection.
  5429     collector()->stats().record_gc0_end(used());
  5444     collector()->stats().record_gc0_end(used());
  5430   }
  5445   }
       
  5446   _cmsSpace->recalculate_used_stable();
  5431 }
  5447 }
  5432 
  5448 
  5433 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
  5449 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
  5434   // We iterate over the space(s) underlying this generation,
  5450   // We iterate over the space(s) underlying this generation,
  5435   // checking the mark bit map to see if the bits corresponding
  5451   // checking the mark bit map to see if the bits corresponding
  5523 
  5539 
  5524         // See the comment in coordinator_yield()
  5540         // See the comment in coordinator_yield()
  5525         for (unsigned i = 0; i < CMSYieldSleepCount &&
  5541         for (unsigned i = 0; i < CMSYieldSleepCount &&
  5526                          ConcurrentMarkSweepThread::should_yield() &&
  5542                          ConcurrentMarkSweepThread::should_yield() &&
  5527                          !CMSCollector::foregroundGCIsActive(); ++i) {
  5543                          !CMSCollector::foregroundGCIsActive(); ++i) {
  5528           os::sleep(Thread::current(), 1, false);
  5544           os::naked_short_sleep(1);
  5529         }
  5545         }
  5530 
  5546 
  5531         ConcurrentMarkSweepThread::synchronize(true);
  5547         ConcurrentMarkSweepThread::synchronize(true);
  5532         bitMapLock()->lock_without_safepoint_check();
  5548         bitMapLock()->lock_without_safepoint_check();
  5533         startTimer();
  5549         startTimer();
  5977   for (unsigned i = 0;
  5993   for (unsigned i = 0;
  5978        i < CMSYieldSleepCount &&
  5994        i < CMSYieldSleepCount &&
  5979        ConcurrentMarkSweepThread::should_yield() &&
  5995        ConcurrentMarkSweepThread::should_yield() &&
  5980        !CMSCollector::foregroundGCIsActive();
  5996        !CMSCollector::foregroundGCIsActive();
  5981        ++i) {
  5997        ++i) {
  5982     os::sleep(Thread::current(), 1, false);
  5998     os::naked_short_sleep(1);
  5983   }
  5999   }
  5984 
  6000 
  5985   ConcurrentMarkSweepThread::synchronize(true);
  6001   ConcurrentMarkSweepThread::synchronize(true);
  5986   _freelistLock->lock_without_safepoint_check();
  6002   _freelistLock->lock_without_safepoint_check();
  5987   _bit_map->lock()->lock_without_safepoint_check();
  6003   _bit_map->lock()->lock_without_safepoint_check();
  6132 
  6148 
  6133   // See the comment in coordinator_yield()
  6149   // See the comment in coordinator_yield()
  6134   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6150   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6135                    ConcurrentMarkSweepThread::should_yield() &&
  6151                    ConcurrentMarkSweepThread::should_yield() &&
  6136                    !CMSCollector::foregroundGCIsActive(); ++i) {
  6152                    !CMSCollector::foregroundGCIsActive(); ++i) {
  6137     os::sleep(Thread::current(), 1, false);
  6153     os::naked_short_sleep(1);
  6138   }
  6154   }
  6139 
  6155 
  6140   ConcurrentMarkSweepThread::synchronize(true);
  6156   ConcurrentMarkSweepThread::synchronize(true);
  6141   _freelistLock->lock_without_safepoint_check();
  6157   _freelistLock->lock_without_safepoint_check();
  6142   _bitMap->lock()->lock_without_safepoint_check();
  6158   _bitMap->lock()->lock_without_safepoint_check();
  6199 
  6215 
  6200   // See the comment in coordinator_yield()
  6216   // See the comment in coordinator_yield()
  6201   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6217   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6202                        ConcurrentMarkSweepThread::should_yield() &&
  6218                        ConcurrentMarkSweepThread::should_yield() &&
  6203                        !CMSCollector::foregroundGCIsActive(); ++i) {
  6219                        !CMSCollector::foregroundGCIsActive(); ++i) {
  6204     os::sleep(Thread::current(), 1, false);
  6220     os::naked_short_sleep(1);
  6205   }
  6221   }
  6206 
  6222 
  6207   ConcurrentMarkSweepThread::synchronize(true);
  6223   ConcurrentMarkSweepThread::synchronize(true);
  6208   _bit_map->lock()->lock_without_safepoint_check();
  6224   _bit_map->lock()->lock_without_safepoint_check();
  6209   _collector->startTimer();
  6225   _collector->startTimer();
  6350 
  6366 
  6351   // See the comment in coordinator_yield()
  6367   // See the comment in coordinator_yield()
  6352   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6368   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6353                        ConcurrentMarkSweepThread::should_yield() &&
  6369                        ConcurrentMarkSweepThread::should_yield() &&
  6354                        !CMSCollector::foregroundGCIsActive(); ++i) {
  6370                        !CMSCollector::foregroundGCIsActive(); ++i) {
  6355     os::sleep(Thread::current(), 1, false);
  6371     os::naked_short_sleep(1);
  6356   }
  6372   }
  6357 
  6373 
  6358   ConcurrentMarkSweepThread::synchronize(true);
  6374   ConcurrentMarkSweepThread::synchronize(true);
  6359   _bitMap->lock()->lock_without_safepoint_check();
  6375   _bitMap->lock()->lock_without_safepoint_check();
  6360   _collector->startTimer();
  6376   _collector->startTimer();
  6964 
  6980 
  6965   // See the comment in coordinator_yield()
  6981   // See the comment in coordinator_yield()
  6966   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6982   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6967                        ConcurrentMarkSweepThread::should_yield() &&
  6983                        ConcurrentMarkSweepThread::should_yield() &&
  6968                        !CMSCollector::foregroundGCIsActive(); ++i) {
  6984                        !CMSCollector::foregroundGCIsActive(); ++i) {
  6969     os::sleep(Thread::current(), 1, false);
  6985     os::naked_short_sleep(1);
  6970   }
  6986   }
  6971 
  6987 
  6972   ConcurrentMarkSweepThread::synchronize(true);
  6988   ConcurrentMarkSweepThread::synchronize(true);
  6973   bml->lock();
  6989   bml->lock_without_safepoint_check();
  6974 
  6990 
  6975   _collector->startTimer();
  6991   _collector->startTimer();
  6976 }
  6992 }
  6977 
  6993 
  6978 bool CMSPrecleanRefsYieldClosure::should_return() {
  6994 bool CMSPrecleanRefsYieldClosure::should_return() {
  7529 
  7545 
  7530   // See the comment in coordinator_yield()
  7546   // See the comment in coordinator_yield()
  7531   for (unsigned i = 0; i < CMSYieldSleepCount &&
  7547   for (unsigned i = 0; i < CMSYieldSleepCount &&
  7532                        ConcurrentMarkSweepThread::should_yield() &&
  7548                        ConcurrentMarkSweepThread::should_yield() &&
  7533                        !CMSCollector::foregroundGCIsActive(); ++i) {
  7549                        !CMSCollector::foregroundGCIsActive(); ++i) {
  7534     os::sleep(Thread::current(), 1, false);
  7550     os::naked_short_sleep(1);
  7535   }
  7551   }
  7536 
  7552 
  7537   ConcurrentMarkSweepThread::synchronize(true);
  7553   ConcurrentMarkSweepThread::synchronize(true);
  7538   _freelistLock->lock_without_safepoint_check();
  7554   _freelistLock->lock_without_safepoint_check();
  7539   _bitMap->lock()->lock_without_safepoint_check();
  7555   _bitMap->lock()->lock_without_safepoint_check();
  7775 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
  7791 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
  7776   assert(stack->isEmpty(), "Expected precondition");
  7792   assert(stack->isEmpty(), "Expected precondition");
  7777   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
  7793   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
  7778   size_t i = num;
  7794   size_t i = num;
  7779   oop  cur = _overflow_list;
  7795   oop  cur = _overflow_list;
  7780   const markOop proto = markOopDesc::prototype();
  7796   const markWord proto = markWord::prototype();
  7781   NOT_PRODUCT(ssize_t n = 0;)
  7797   NOT_PRODUCT(ssize_t n = 0;)
  7782   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
  7798   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
  7783     next = oop(cur->mark_raw());
  7799     next = oop(cur->mark_raw().to_pointer());
  7784     cur->set_mark_raw(proto);   // until proven otherwise
  7800     cur->set_mark_raw(proto);   // until proven otherwise
  7785     assert(oopDesc::is_oop(cur), "Should be an oop");
  7801     assert(oopDesc::is_oop(cur), "Should be an oop");
  7786     bool res = stack->push(cur);
  7802     bool res = stack->push(cur);
  7787     assert(res, "Bit off more than can chew?");
  7803     assert(res, "Bit off more than can chew?");
  7788     NOT_PRODUCT(n++;)
  7804     NOT_PRODUCT(n++;)
  7827   if (_overflow_list == NULL) {
  7843   if (_overflow_list == NULL) {
  7828     return false;
  7844     return false;
  7829   }
  7845   }
  7830   // Grab the entire list; we'll put back a suffix
  7846   // Grab the entire list; we'll put back a suffix
  7831   oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
  7847   oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
  7832   Thread* tid = Thread::current();
       
  7833   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
  7848   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
  7834   // set to ParallelGCThreads.
  7849   // set to ParallelGCThreads.
  7835   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
  7850   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
  7836   size_t sleep_time_millis = MAX2((size_t)1, num/100);
  7851   size_t sleep_time_millis = MAX2((size_t)1, num/100);
  7837   // If the list is busy, we spin for a short while,
  7852   // If the list is busy, we spin for a short while,
  7838   // sleeping between attempts to get the list.
  7853   // sleeping between attempts to get the list.
  7839   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
  7854   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
  7840     os::sleep(tid, sleep_time_millis, false);
  7855     os::naked_sleep(sleep_time_millis);
  7841     if (_overflow_list == NULL) {
  7856     if (_overflow_list == NULL) {
  7842       // Nothing left to take
  7857       // Nothing left to take
  7843       return false;
  7858       return false;
  7844     } else if (_overflow_list != BUSY) {
  7859     } else if (_overflow_list != BUSY) {
  7845       // Try and grab the prefix
  7860       // Try and grab the prefix
  7862   }
  7877   }
  7863   assert(prefix != NULL && prefix != BUSY, "Error");
  7878   assert(prefix != NULL && prefix != BUSY, "Error");
  7864   size_t i = num;
  7879   size_t i = num;
  7865   oop cur = prefix;
  7880   oop cur = prefix;
  7866   // Walk down the first "num" objects, unless we reach the end.
  7881   // Walk down the first "num" objects, unless we reach the end.
  7867   for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--);
  7882   for (; i > 1 && cur->mark_raw().to_pointer() != NULL; cur = oop(cur->mark_raw().to_pointer()), i--);
  7868   if (cur->mark_raw() == NULL) {
  7883   if (cur->mark_raw().to_pointer() == NULL) {
  7869     // We have "num" or fewer elements in the list, so there
  7884     // We have "num" or fewer elements in the list, so there
  7870     // is nothing to return to the global list.
  7885     // is nothing to return to the global list.
  7871     // Write back the NULL in lieu of the BUSY we wrote
  7886     // Write back the NULL in lieu of the BUSY we wrote
  7872     // above, if it is still the same value.
  7887     // above, if it is still the same value.
  7873     if (_overflow_list == BUSY) {
  7888     if (_overflow_list == BUSY) {
  7874       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
  7889       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
  7875     }
  7890     }
  7876   } else {
  7891   } else {
  7877     // Chop off the suffix and return it to the global list.
  7892     // Chop off the suffix and return it to the global list.
  7878     assert(cur->mark_raw() != BUSY, "Error");
  7893     assert(cur->mark_raw().to_pointer() != (void*)BUSY, "Error");
  7879     oop suffix_head = cur->mark_raw(); // suffix will be put back on global list
  7894     oop suffix_head = oop(cur->mark_raw().to_pointer()); // suffix will be put back on global list
  7880     cur->set_mark_raw(NULL);           // break off suffix
  7895     cur->set_mark_raw(markWord::from_pointer(NULL));     // break off suffix
  7881     // It's possible that the list is still in the empty(busy) state
  7896     // It's possible that the list is still in the empty(busy) state
  7882     // we left it in a short while ago; in that case we may be
  7897     // we left it in a short while ago; in that case we may be
  7883     // able to place back the suffix without incurring the cost
  7898     // able to place back the suffix without incurring the cost
  7884     // of a walk down the list.
  7899     // of a walk down the list.
  7885     oop observed_overflow_list = _overflow_list;
  7900     oop observed_overflow_list = _overflow_list;
  7895     }
  7910     }
  7896     if (!attached) {
  7911     if (!attached) {
  7897       // Too bad, someone else sneaked in (at least) an element; we'll need
  7912       // Too bad, someone else sneaked in (at least) an element; we'll need
  7898       // to do a splice. Find tail of suffix so we can prepend suffix to global
  7913       // to do a splice. Find tail of suffix so we can prepend suffix to global
  7899       // list.
  7914       // list.
  7900       for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw()));
  7915       for (cur = suffix_head; cur->mark_raw().to_pointer() != NULL; cur = (oop)(cur->mark_raw().to_pointer()));
  7901       oop suffix_tail = cur;
  7916       oop suffix_tail = cur;
  7902       assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL,
  7917       assert(suffix_tail != NULL && suffix_tail->mark_raw().to_pointer() == NULL,
  7903              "Tautology");
  7918              "Tautology");
  7904       observed_overflow_list = _overflow_list;
  7919       observed_overflow_list = _overflow_list;
  7905       do {
  7920       do {
  7906         cur_overflow_list = observed_overflow_list;
  7921         cur_overflow_list = observed_overflow_list;
  7907         if (cur_overflow_list != BUSY) {
  7922         if (cur_overflow_list != BUSY) {
  7908           // Do the splice ...
  7923           // Do the splice ...
  7909           suffix_tail->set_mark_raw(markOop(cur_overflow_list));
  7924           suffix_tail->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
  7910         } else { // cur_overflow_list == BUSY
  7925         } else { // cur_overflow_list == BUSY
  7911           suffix_tail->set_mark_raw(NULL);
  7926           suffix_tail->set_mark_raw(markWord::from_pointer(NULL));
  7912         }
  7927         }
  7913         // ... and try to place spliced list back on overflow_list ...
  7928         // ... and try to place spliced list back on overflow_list ...
  7914         observed_overflow_list =
  7929         observed_overflow_list =
  7915           Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
  7930           Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
  7916       } while (cur_overflow_list != observed_overflow_list);
  7931       } while (cur_overflow_list != observed_overflow_list);
  7918     }
  7933     }
  7919   }
  7934   }
  7920 
  7935 
  7921   // Push the prefix elements on work_q
  7936   // Push the prefix elements on work_q
  7922   assert(prefix != NULL, "control point invariant");
  7937   assert(prefix != NULL, "control point invariant");
  7923   const markOop proto = markOopDesc::prototype();
  7938   const markWord proto = markWord::prototype();
  7924   oop next;
  7939   oop next;
  7925   NOT_PRODUCT(ssize_t n = 0;)
  7940   NOT_PRODUCT(ssize_t n = 0;)
  7926   for (cur = prefix; cur != NULL; cur = next) {
  7941   for (cur = prefix; cur != NULL; cur = next) {
  7927     next = oop(cur->mark_raw());
  7942     next = oop(cur->mark_raw().to_pointer());
  7928     cur->set_mark_raw(proto);   // until proven otherwise
  7943     cur->set_mark_raw(proto);   // until proven otherwise
  7929     assert(oopDesc::is_oop(cur), "Should be an oop");
  7944     assert(oopDesc::is_oop(cur), "Should be an oop");
  7930     bool res = work_q->push(cur);
  7945     bool res = work_q->push(cur);
  7931     assert(res, "Bit off more than we can chew?");
  7946     assert(res, "Bit off more than we can chew?");
  7932     NOT_PRODUCT(n++;)
  7947     NOT_PRODUCT(n++;)
  7941 // Single-threaded
  7956 // Single-threaded
  7942 void CMSCollector::push_on_overflow_list(oop p) {
  7957 void CMSCollector::push_on_overflow_list(oop p) {
  7943   NOT_PRODUCT(_num_par_pushes++;)
  7958   NOT_PRODUCT(_num_par_pushes++;)
  7944   assert(oopDesc::is_oop(p), "Not an oop");
  7959   assert(oopDesc::is_oop(p), "Not an oop");
  7945   preserve_mark_if_necessary(p);
  7960   preserve_mark_if_necessary(p);
  7946   p->set_mark_raw((markOop)_overflow_list);
  7961   p->set_mark_raw(markWord::from_pointer(_overflow_list));
  7947   _overflow_list = p;
  7962   _overflow_list = p;
  7948 }
  7963 }
  7949 
  7964 
// NOTE(review): two-revision comparison rendering; each source line appears
// in its old- and new-revision form. Lines 7952-7954 (old) / 7967-7969 (new)
// are elided diff context -- presumably the usual asserts/preserve step; they
// are not visible here, so confirm against the full file before relying on
// this span.
//
// Multi-threaded prepend to the overflow list via a classic CAS retry loop:
// read the observed head, link p to it through p's mark word, then
// Atomic::cmpxchg p in as the new head; retry if another thread won the race.
// BUSY is a sentinel head value (a claimed/in-transition list); when the head
// is BUSY, p's mark is set to NULL instead of linking to the sentinel.
  7950 // Multi-threaded; use CAS to prepend to overflow list
  7965 // Multi-threaded; use CAS to prepend to overflow list
  7951 void CMSCollector::par_push_on_overflow_list(oop p) {
  7966 void CMSCollector::par_push_on_overflow_list(oop p) {
  7955   oop observed_overflow_list = _overflow_list;
  7970   oop observed_overflow_list = _overflow_list;
  7956   oop cur_overflow_list;
  7971   oop cur_overflow_list;
  7957   do {
  7972   do {
  7958     cur_overflow_list = observed_overflow_list;
  7973     cur_overflow_list = observed_overflow_list;
  7959     if (cur_overflow_list != BUSY) {
  7974     if (cur_overflow_list != BUSY) {
  7960       p->set_mark_raw(markOop(cur_overflow_list));
  7975       p->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
  7961     } else {
  7976     } else {
  7962       p->set_mark_raw(NULL);
  7977       p->set_mark_raw(markWord::from_pointer(NULL));
  7963     }
  7978     }
  7964     observed_overflow_list =
  7979     observed_overflow_list =
  7965       Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
  7980       Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
  7966   } while (cur_overflow_list != observed_overflow_list);
  7981   } while (cur_overflow_list != observed_overflow_list);
  7967 }
  7982 }
  7979 // an indication of success or failure with the assumption that
  7994 // an indication of success or failure with the assumption that
  7980 // the caller may be able to recover from a failure; code in
  7995 // the caller may be able to recover from a failure; code in
  7981 // the VM can then be changed, incrementally, to deal with such
  7996 // the VM can then be changed, incrementally, to deal with such
  7982 // failures where possible, thus, incrementally hardening the VM
  7997 // failures where possible, thus, incrementally hardening the VM
  7983 // in such low resource situations.
  7998 // in such low resource situations.
// NOTE(review): two-revision comparison rendering (old line number, then new);
// the signature change is the markOop -> markWord parameter type.
//
// Records (p, m) on the parallel preserved-oop / preserved-mark stacks so the
// displaced mark can be reinstated later (the two stacks form a bijection,
// as the trailing assert checks). The first assert verifies m is still p's
// current raw mark, i.e. the mark has not changed between the caller's read
// and this record.
  7984 void CMSCollector::preserve_mark_work(oop p, markOop m) {
  7999 void CMSCollector::preserve_mark_work(oop p, markWord m) {
  7985   _preserved_oop_stack.push(p);
  8000   _preserved_oop_stack.push(p);
  7986   _preserved_mark_stack.push(m);
  8001   _preserved_mark_stack.push(m);
  7987   assert(m == p->mark_raw(), "Mark word changed");
  8002   assert(m == p->mark_raw(), "Mark word changed");
  7988   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
  8003   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
  7989          "bijection");
  8004          "bijection");
  7990 }
  8005 }
  7991 
  8006 
// NOTE(review): two-revision comparison rendering (old line number, then new).
// The API shift is visible here: the must-be-preserved query moves from the
// mark (m->must_be_preserved(p)) to the oop (p->mark_must_be_preserved(m)).
//
// Single-threaded check-and-preserve: reads p's raw mark and, only if that
// mark carries state that must survive being overwritten (the mark word is
// about to be reused as an overflow-list link -- see push_on_overflow_list),
// records it via preserve_mark_work.
  7992 // Single threaded
  8007 // Single threaded
  7993 void CMSCollector::preserve_mark_if_necessary(oop p) {
  8008 void CMSCollector::preserve_mark_if_necessary(oop p) {
  7994   markOop m = p->mark_raw();
  8009   markWord m = p->mark_raw();
  7995   if (m->must_be_preserved(p)) {
  8010   if (p->mark_must_be_preserved(m)) {
  7996     preserve_mark_work(p, m);
  8011     preserve_mark_work(p, m);
  7997   }
  8012   }
  7998 }
  8013 }
  7999 
  8014 
  8000 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
  8015 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
  8001   markOop m = p->mark_raw();
  8016   markWord m = p->mark_raw();
  8002   if (m->must_be_preserved(p)) {
  8017   if (p->mark_must_be_preserved(m)) {
  8003     MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  8018     MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  8004     // Even though we read the mark word without holding
  8019     // Even though we read the mark word without holding
  8005     // the lock, we are assured that it will not change
  8020     // the lock, we are assured that it will not change
  8006     // because we "own" this oop, so no other thread can
  8021     // because we "own" this oop, so no other thread can
  8007     // be trying to push it on the overflow list; see
  8022     // be trying to push it on the overflow list; see
  8037 
  8052 
  8038   while (!_preserved_oop_stack.is_empty()) {
  8053   while (!_preserved_oop_stack.is_empty()) {
  8039     oop p = _preserved_oop_stack.pop();
  8054     oop p = _preserved_oop_stack.pop();
  8040     assert(oopDesc::is_oop(p), "Should be an oop");
  8055     assert(oopDesc::is_oop(p), "Should be an oop");
  8041     assert(_span.contains(p), "oop should be in _span");
  8056     assert(_span.contains(p), "oop should be in _span");
  8042     assert(p->mark_raw() == markOopDesc::prototype(),
  8057     assert(p->mark_raw() == markWord::prototype(),
  8043            "Set when taken from overflow list");
  8058            "Set when taken from overflow list");
  8044     markOop m = _preserved_mark_stack.pop();
  8059     markWord m = _preserved_mark_stack.pop();
  8045     p->set_mark_raw(m);
  8060     p->set_mark_raw(m);
  8046   }
  8061   }
  8047   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
  8062   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
  8048          "stacks were cleared above");
  8063          "stacks were cleared above");
  8049 }
  8064 }