hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp
changeset: 33608:7afc768e4d62
parent:    33152:6ad7fe735042
child:     35061:be6025ebffea

--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	(33607:2db29ded3865)
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	(33608:7afc768e4d62)
@@ -90,19 +90,35 @@
     gclog_or_tty->vprint_cr(fmt, args);
     va_end(args);
   }
 }
 
+// Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
+void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
+  if (g1_policy->adaptive_young_list_length()) {
+    double now = os::elapsedTime();
+    double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
+                                  : g1_policy->predict_cleanup_time_ms();
+    G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
+    jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
+    os::sleep(this, sleep_time_ms, false);
+  }
+}
 void ConcurrentMarkThread::run() {
   initialize_in_thread();
+  wait_for_universe_init();
+
+  run_service();
+
+  terminate();
+}
+
+void ConcurrentMarkThread::run_service() {
   _vtime_start = os::elapsedVTime();
-  wait_for_universe_init();
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1_policy = g1h->g1_policy();
-  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
-  Thread *current_thread = Thread::current();
 
   while (!_should_terminate) {
     // wait until started is set.
     sleepBeforeNextCycle();
     if (_should_terminate) {
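Note: the new delay_to_keep_mmu() helper folds the two near-identical MMU delay blocks (remark and cleanup paths) into one place. G1MMUTracker::when_ms() answers how long the pause must wait so that, together with the pauses already recorded in the current time slice, it still fits the pause-time budget. The standalone sketch below illustrates that question with made-up numbers; it is not the G1MMUTracker algorithm, and the 100 ms slice / 20 ms budget merely stand in for what -XX:GCPauseIntervalMillis and -XX:MaxGCPauseMillis express.

// Illustration only -- not HotSpot code. Answers the delay_to_keep_mmu() question:
// how long to sleep before starting a pause of prediction_s seconds so that every
// trailing window of time_slice_s seconds contains at most max_gc_time_s of GC time.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Pause { double start_s, end_s; };   // one completed stop-the-world pause

// GC time already spent inside the window [win_start, win_end].
static double gc_time_in_window(const std::vector<Pause>& pauses,
                                double win_start, double win_end) {
  double sum = 0.0;
  for (const Pause& p : pauses) {
    double s = std::max(p.start_s, win_start);
    double e = std::min(p.end_s, win_end);
    if (e > s) sum += e - s;
  }
  return sum;
}

// Smallest delay (seconds) such that a pause of prediction_s starting at
// now_s + delay keeps the trailing window within budget. Brute-force 1 ms
// steps are fine for an illustration.
static double when_s(const std::vector<Pause>& pauses, double now_s,
                     double prediction_s, double time_slice_s, double max_gc_time_s) {
  if (prediction_s >= max_gc_time_s) {
    return 0.0;                            // the pause alone blows the budget; no point waiting
  }
  for (double delay = 0.0; ; delay += 0.001) {
    double pause_end = now_s + delay + prediction_s;
    double used = gc_time_in_window(pauses, pause_end - time_slice_s, pause_end);
    if (used + prediction_s <= max_gc_time_s) {
      return delay;
    }
  }
}

int main() {
  // A 15 ms pause ended at t = 9.965 s; a 10 ms remark pause is predicted.
  // With a 100 ms slice and a 20 ms budget this asks for roughly 75 ms of delay.
  std::vector<Pause> pauses = { {9.950, 9.965} };
  double delay_s = when_s(pauses, 9.970, 0.010, 0.100, 0.020);
  std::printf("sleep for about %.0f ms before the pause\n", delay_s * 1000.0);
  return 0;
}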
@@ -139,16 +155,11 @@
 
         double mark_end_time = os::elapsedVTime();
         double mark_end_sec = os::elapsedTime();
         _vtime_mark_accum += (mark_end_time - cycle_start);
         if (!cm()->has_aborted()) {
-          if (g1_policy->adaptive_young_list_length()) {
-            double now = os::elapsedTime();
-            double remark_prediction_ms = g1_policy->predict_remark_time_ms();
-            jlong sleep_time_ms = mmu_tracker->when_ms(now, remark_prediction_ms);
-            os::sleep(current_thread, sleep_time_ms, false);
-          }
+          delay_to_keep_mmu(g1_policy, true /* remark */);
 
           cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec);
 
           CMCheckpointRootsFinalClosure final_cl(_cm);
           VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
@@ -165,16 +176,11 @@
       // to measure it to get the vtime for this marking.  We purposely
       // neglect the presumably-short "completeCleanup" phase here.
       _vtime_accum = (end_time - _vtime_start);
 
       if (!cm()->has_aborted()) {
-        if (g1_policy->adaptive_young_list_length()) {
-          double now = os::elapsedTime();
-          double cleanup_prediction_ms = g1_policy->predict_cleanup_time_ms();
-          jlong sleep_time_ms = mmu_tracker->when_ms(now, cleanup_prediction_ms);
-          os::sleep(current_thread, sleep_time_ms, false);
-        }
+        delay_to_keep_mmu(g1_policy, false /* cleanup */);
 
         CMCleanUp cl_cl(_cm);
         VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
         VMThread::execute(&op);
       } else {
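The vtime bookkeeping around both call sites is untouched: values such as mark_end_time and _vtime_accum come from os::elapsedVTime(), which reports the thread's CPU time, while the durations printed by cm_log() come from os::elapsedTime(), the wall clock. A minimal POSIX sketch of that distinction, assuming clock_gettime() rather than the HotSpot os:: wrappers:

// Illustration only: wall-clock time advances while a thread sleeps,
// per-thread CPU time does not. Requires POSIX clock_gettime().
#include <cstdio>
#include <time.h>

static double seconds(clockid_t id) {
  timespec ts;
  clock_gettime(id, &ts);
  return ts.tv_sec + ts.tv_nsec / 1e9;
}

int main() {
  double wall0 = seconds(CLOCK_MONOTONIC);          // roughly os::elapsedTime()
  double cpu0  = seconds(CLOCK_THREAD_CPUTIME_ID);  // roughly os::elapsedVTime()

  volatile double sink = 0.0;
  for (long i = 0; i < 20 * 1000 * 1000; i++) sink += i * 0.5;   // burn some CPU

  timespec nap = {0, 100 * 1000 * 1000};                         // then sleep 100 ms
  nanosleep(&nap, nullptr);

  std::printf("wall: %.3f s  thread-cpu: %.3f s\n",
              seconds(CLOCK_MONOTONIC) - wall0,
              seconds(CLOCK_THREAD_CPUTIME_ID) - cpu0);
  return 0;
}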
@@ -270,32 +276,31 @@
       SuspendibleThreadSetJoiner sts_join;
       g1h->increment_old_marking_cycles_completed(true /* concurrent */);
       g1h->register_concurrent_cycle_end();
     }
   }
-  assert(_should_terminate, "just checking");
-
-  terminate();
 }
 
 void ConcurrentMarkThread::stop() {
   {
     MutexLockerEx ml(Terminator_lock);
     _should_terminate = true;
   }
 
-  {
-    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
-    CGC_lock->notify_all();
-  }
+  stop_service();
 
   {
     MutexLockerEx ml(Terminator_lock);
     while (!_has_terminated) {
       Terminator_lock->wait();
     }
   }
+}
+
+void ConcurrentMarkThread::stop_service() {
+  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
+  CGC_lock->notify_all();
 }
 
 void ConcurrentMarkThread::sleepBeforeNextCycle() {
   // We join here because we don't want to do the "shouldConcurrentMark()"
   // below while the world is otherwise stopped.
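Note on the stop() split: stop_service() now owns only the CGC_lock notification that wakes the marking thread out of its idle wait, while stop() still sets _should_terminate under Terminator_lock and then waits there for _has_terminated. The sketch below reproduces that handshake with std::mutex / std::condition_variable; it is an analogue, not HotSpot's MutexLockerEx / Monitor classes, and the timed wait stands in for the real "started" predicate used by sleepBeforeNextCycle().

// Illustration only: the stop()/stop_service()/run_service() shutdown handshake.
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct MarkThreadAnalogue {
  std::mutex terminator_lock;                 // guards the two flags below
  std::condition_variable terminator_cv;
  bool should_terminate = false;
  bool has_terminated = false;

  std::mutex cgc_lock;                        // the lock the worker idles on
  std::condition_variable cgc_cv;

  void run_service() {
    for (;;) {
      {                                       // sleepBeforeNextCycle() analogue
        std::unique_lock<std::mutex> ml(cgc_lock);
        cgc_cv.wait_for(ml, std::chrono::milliseconds(50));
      }
      {
        std::lock_guard<std::mutex> ml(terminator_lock);
        if (should_terminate) break;
      }
      // ... one concurrent marking cycle would run here ...
    }
    {                                         // terminate() analogue: report back
      std::lock_guard<std::mutex> ml(terminator_lock);
      has_terminated = true;
    }
    terminator_cv.notify_all();
  }

  void stop() {
    {
      std::lock_guard<std::mutex> ml(terminator_lock);
      should_terminate = true;
    }
    stop_service();                           // wake the worker if it is idle
    std::unique_lock<std::mutex> ml(terminator_lock);
    terminator_cv.wait(ml, [this] { return has_terminated; });
  }

  void stop_service() {
    std::lock_guard<std::mutex> ml(cgc_lock);
    cgc_cv.notify_all();
  }
};

int main() {
  MarkThreadAnalogue t;
  std::thread worker(&MarkThreadAnalogue::run_service, &t);
  std::this_thread::sleep_for(std::chrono::milliseconds(120));
  t.stop();                                   // returns only after the worker has terminated
  worker.join();
  std::puts("concurrent mark thread analogue stopped");
  return 0;
}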