hotspot/src/share/vm/runtime/safepoint.cpp
changeset 37176 663bdc7d0b86
parent 37161 e881f320966e
child 37242 91e5f98fff6f
comparing 37175:ac6850d71f72 with 37176:663bdc7d0b86
@@ -54,10 +54,12 @@
 #include "runtime/sweeper.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timerTrace.hpp"
 #include "services/runtimeService.hpp"
+#include "trace/tracing.hpp"
+#include "trace/traceMacros.hpp"
 #include "utilities/events.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
@@ -78,11 +80,11 @@
 static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
 static bool timeout_error_printed = false;

 // Roll all threads forward to a safepoint and suspend them all
 void SafepointSynchronize::begin() {
-
+  EventSafepointBegin begin_event;
   Thread* myThread = Thread::current();
   assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");

   if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
     _safepoint_begin_time = os::javaTimeNanos();
@@ -168,195 +170,222 @@
   //  5. In VM or Transitioning between states
   //     If a Java thread is currently running in the VM or transitioning
   //     between states, the safepointing code will wait for the thread to
   //     block itself when it attempts transitions to a new state.
   //
-  _state            = _synchronizing;
-  OrderAccess::fence();
-
-  // Flush all thread states to memory
-  if (!UseMembar) {
-    os::serialize_thread_states();
-  }
-
-  // Make interpreter safepoint aware
-  Interpreter::notice_safepoints();
-
-  if (DeferPollingPageLoopCount < 0) {
-    // Make polling safepoint aware
-    guarantee (PageArmed == 0, "invariant") ;
-    PageArmed = 1 ;
-    os::make_polling_page_unreadable();
-  }
-
-  // Consider using active_processor_count() ... but that call is expensive.
-  int ncpus = os::processor_count() ;
+  {
+    EventSafepointStateSync sync_event;
+    int initial_running = 0;
+
+    _state            = _synchronizing;
+    OrderAccess::fence();
+
+    // Flush all thread states to memory
+    if (!UseMembar) {
+      os::serialize_thread_states();
+    }
+
+    // Make interpreter safepoint aware
+    Interpreter::notice_safepoints();
+
+    if (DeferPollingPageLoopCount < 0) {
+      // Make polling safepoint aware
+      guarantee (PageArmed == 0, "invariant") ;
+      PageArmed = 1 ;
+      os::make_polling_page_unreadable();
+    }
+
+    // Consider using active_processor_count() ... but that call is expensive.
+    int ncpus = os::processor_count() ;

 #ifdef ASSERT
-  for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
-    assert(cur->safepoint_state()->is_running(), "Illegal initial state");
-    // Clear the visited flag to ensure that the critical counts are collected properly.
-    cur->set_visited_for_critical_count(false);
-  }
+    for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
+      assert(cur->safepoint_state()->is_running(), "Illegal initial state");
+      // Clear the visited flag to ensure that the critical counts are collected properly.
+      cur->set_visited_for_critical_count(false);
+    }
 #endif // ASSERT

-  if (SafepointTimeout)
-    safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;
-
-  // Iterate through all threads until it has been determined how to stop them all at a safepoint
-  unsigned int iterations = 0;
-  int steps = 0 ;
-  while(still_running > 0) {
-    for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
-      assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
-      ThreadSafepointState *cur_state = cur->safepoint_state();
-      if (cur_state->is_running()) {
-        cur_state->examine_state_of_thread();
-        if (!cur_state->is_running()) {
-         still_running--;
-         // consider adjusting steps downward:
-         //   steps = 0
-         //   steps -= NNN
-         //   steps >>= 1
-         //   steps = MIN(steps, 2000-100)
-         //   if (iterations != 0) steps -= NNN
-        }
-        if (log_is_enabled(Trace, safepoint)) {
-          ResourceMark rm;
-          cur_state->print_on(LogHandle(safepoint)::trace_stream());
-        }
-      }
-    }
-
-    if (PrintSafepointStatistics && iterations == 0) {
-      begin_statistics(nof_threads, still_running);
-    }
-
-    if (still_running > 0) {
-      // Check if it is taking too long
-      if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
-        print_safepoint_timeout(_spinning_timeout);
-      }
-
-      // Spin to avoid context switching.
-      // There's a tension between allowing the mutators to run (and rendezvous)
-      // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
-      // a mutator might otherwise use profitably to reach a safepoint.  Excessive
-      // spinning by the VM thread on a saturated system can increase rendezvous latency.
-      // Blocking or yielding incur their own penalties in the form of context switching
-      // and the resultant loss of $ residency.
-      //
-      // Further complicating matters is that yield() does not work as naively expected
-      // on many platforms -- yield() does not guarantee that any other ready threads
-      // will run.   As such we revert to naked_short_sleep() after some number of iterations.
-      // naked_short_sleep() is implemented as a short unconditional sleep.
-      // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
-      // can actually increase the time it takes the VM thread to detect that a system-wide
-      // stop-the-world safepoint has been reached.  In a pathological scenario such as that
-      // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
-      // In that case the mutators will be stalled waiting for the safepoint to complete and
-      // the VMthread will be sleeping, waiting for the mutators to rendezvous.  The VMthread
-      // will eventually wake up and detect that all mutators are safe, at which point
-      // we'll again make progress.
-      //
-      // Beware too that the VMThread typically runs at elevated priority.
-      // Its default priority is higher than the default mutator priority.
-      // Obviously, this complicates spinning.
-      //
-      // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
-      // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will.
-      //
-      // See the comments in synchronizer.cpp for additional remarks on spinning.
-      //
-      // In the future we might:
-      // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
-      //    This is tricky as the path used by a thread exiting the JVM (say
-      //    on JNI call-out) simply stores into its state field.  The burden
-      //    is placed on the VM thread, which must poll (spin).
-      // 2. Find something useful to do while spinning.  If the safepoint is GC-related
-      //    we might aggressively scan the stacks of threads that are already safe.
-      // 3. Use Solaris schedctl to examine the state of the still-running mutators.
-      //    If all the mutators are ONPROC there's no reason to sleep or yield.
-      // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
-      // 5. Check system saturation.  If the system is not fully saturated then
-      //    simply spin and avoid sleep/yield.
-      // 6. As still-running mutators rendezvous they could unpark the sleeping
-      //    VMthread.  This works well for still-running mutators that become
-      //    safe.  The VMthread must still poll for mutators that call-out.
-      // 7. Drive the policy on time-since-begin instead of iterations.
-      // 8. Consider making the spin duration a function of the # of CPUs:
-      //    Spin = (((ncpus-1) * M) + K) + F(still_running)
-      //    Alternately, instead of counting iterations of the outer loop
-      //    we could count the # of threads visited in the inner loop, above.
-      // 9. On windows consider using the return value from SwitchThreadTo()
-      //    to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
-
-      if (int(iterations) == DeferPollingPageLoopCount) {
-         guarantee (PageArmed == 0, "invariant") ;
-         PageArmed = 1 ;
-         os::make_polling_page_unreadable();
-      }
-
-      // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
-      // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus)
-      ++steps ;
-      if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
-        SpinPause() ;     // MP-Polite spin
-      } else
-      if (steps < DeferThrSuspendLoopCount) {
-        os::naked_yield() ;
-      } else {
-        os::naked_short_sleep(1);
-      }
-
-      iterations ++ ;
-    }
-    assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
-  }
-  assert(still_running == 0, "sanity check");
-
-  if (PrintSafepointStatistics) {
-    update_statistics_on_spin_end();
-  }
-
-  // wait until all threads are stopped
-  while (_waiting_to_block > 0) {
-    log_debug(safepoint)("Waiting for %d thread(s) to block", _waiting_to_block);
-    if (!SafepointTimeout || timeout_error_printed) {
-      Safepoint_lock->wait(true);  // true, means with no safepoint checks
-    } else {
-      // Compute remaining time
-      jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
-
-      // If there is no remaining time, then there is an error
-      if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
-        print_safepoint_timeout(_blocking_timeout);
-      }
-    }
-  }
-  assert(_waiting_to_block == 0, "sanity check");
-
-#ifndef PRODUCT
-  if (SafepointTimeout) {
-    jlong current_time = os::javaTimeNanos();
-    if (safepoint_limit_time < current_time) {
-      tty->print_cr("# SafepointSynchronize: Finished after "
-                    INT64_FORMAT_W(6) " ms",
-                    ((current_time - safepoint_limit_time) / MICROUNITS +
-                     (jlong)SafepointTimeoutDelay));
-    }
-  }
-#endif
-
-  assert((_safepoint_counter & 0x1) == 0, "must be even");
-  assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
-  _safepoint_counter ++;
-
-  // Record state
-  _state = _synchronized;
-
-  OrderAccess::fence();
+    if (SafepointTimeout)
+      safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;
+
+    // Iterate through all threads until it has been determined how to stop them all at a safepoint
+    unsigned int iterations = 0;
+    int steps = 0 ;
+    while(still_running > 0) {
+      for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
+        assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
+        ThreadSafepointState *cur_state = cur->safepoint_state();
+        if (cur_state->is_running()) {
+          cur_state->examine_state_of_thread();
+          if (!cur_state->is_running()) {
+            still_running--;
+            // consider adjusting steps downward:
+            //   steps = 0
+            //   steps -= NNN
+            //   steps >>= 1
+            //   steps = MIN(steps, 2000-100)
+            //   if (iterations != 0) steps -= NNN
+          }
+          if (log_is_enabled(Trace, safepoint)) {
+            ResourceMark rm;
+            cur_state->print_on(LogHandle(safepoint)::trace_stream());
+          }
+        }
+      }

+      if (iterations == 0) {
+        initial_running = still_running;
+        if (PrintSafepointStatistics) {
+          begin_statistics(nof_threads, still_running);
+        }
+      }
+
+      if (still_running > 0) {
+        // Check if it is taking too long
+        if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
+          print_safepoint_timeout(_spinning_timeout);
+        }
+
+        // Spin to avoid context switching.
+        // There's a tension between allowing the mutators to run (and rendezvous)
+        // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
+        // a mutator might otherwise use profitably to reach a safepoint.  Excessive
+        // spinning by the VM thread on a saturated system can increase rendezvous latency.
+        // Blocking or yielding incur their own penalties in the form of context switching
+        // and the resultant loss of $ residency.
+        //
+        // Further complicating matters is that yield() does not work as naively expected
+        // on many platforms -- yield() does not guarantee that any other ready threads
+        // will run.   As such we revert to naked_short_sleep() after some number of iterations.
+        // naked_short_sleep() is implemented as a short unconditional sleep.
+        // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
+        // can actually increase the time it takes the VM thread to detect that a system-wide
+        // stop-the-world safepoint has been reached.  In a pathological scenario such as that
+        // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
+        // In that case the mutators will be stalled waiting for the safepoint to complete and
+        // the VMthread will be sleeping, waiting for the mutators to rendezvous.  The VMthread
+        // will eventually wake up and detect that all mutators are safe, at which point
+        // we'll again make progress.
+        //
+        // Beware too that the VMThread typically runs at elevated priority.
+        // Its default priority is higher than the default mutator priority.
+        // Obviously, this complicates spinning.
+        //
+        // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
+        // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will.
+        //
+        // See the comments in synchronizer.cpp for additional remarks on spinning.
+        //
+        // In the future we might:
+        // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
+        //    This is tricky as the path used by a thread exiting the JVM (say
+        //    on JNI call-out) simply stores into its state field.  The burden
+        //    is placed on the VM thread, which must poll (spin).
+        // 2. Find something useful to do while spinning.  If the safepoint is GC-related
+        //    we might aggressively scan the stacks of threads that are already safe.
+        // 3. Use Solaris schedctl to examine the state of the still-running mutators.
+        //    If all the mutators are ONPROC there's no reason to sleep or yield.
+        // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
+        // 5. Check system saturation.  If the system is not fully saturated then
+        //    simply spin and avoid sleep/yield.
+        // 6. As still-running mutators rendezvous they could unpark the sleeping
+        //    VMthread.  This works well for still-running mutators that become
+        //    safe.  The VMthread must still poll for mutators that call-out.
+        // 7. Drive the policy on time-since-begin instead of iterations.
+        // 8. Consider making the spin duration a function of the # of CPUs:
+        //    Spin = (((ncpus-1) * M) + K) + F(still_running)
+        //    Alternately, instead of counting iterations of the outer loop
+        //    we could count the # of threads visited in the inner loop, above.
+        // 9. On windows consider using the return value from SwitchThreadTo()
+        //    to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
+
+        if (int(iterations) == DeferPollingPageLoopCount) {
+          guarantee (PageArmed == 0, "invariant") ;
+          PageArmed = 1 ;
+          os::make_polling_page_unreadable();
+        }
+
+        // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
+        // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus)
+        ++steps ;
+        if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
+          SpinPause() ;     // MP-Polite spin
+        } else
+          if (steps < DeferThrSuspendLoopCount) {
+            os::naked_yield() ;
+          } else {
+            os::naked_short_sleep(1);
+          }
+
+        iterations ++ ;
+      }
+      assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
+    }
+    assert(still_running == 0, "sanity check");
+
+    if (PrintSafepointStatistics) {
+      update_statistics_on_spin_end();
+    }
+
+    if (sync_event.should_commit()) {
+      sync_event.set_safepointId(safepoint_counter());
+      sync_event.set_initialThreadCount(initial_running);
+      sync_event.set_runningThreadCount(_waiting_to_block);
+      sync_event.set_iterations(iterations);
+      sync_event.commit();
+    }
+  } //EventSafepointStateSync
+
+  // wait until all threads are stopped
+  {
+    EventSafepointWaitBlocked wait_blocked_event;
+    int initial_waiting_to_block = _waiting_to_block;
+
+    while (_waiting_to_block > 0) {
+      log_debug(safepoint)("Waiting for %d thread(s) to block", _waiting_to_block);
+      if (!SafepointTimeout || timeout_error_printed) {
+        Safepoint_lock->wait(true);  // true, means with no safepoint checks
+      } else {
+        // Compute remaining time
+        jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
+
+        // If there is no remaining time, then there is an error
+        if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
+          print_safepoint_timeout(_blocking_timeout);
+        }
+      }
+    }
+    assert(_waiting_to_block == 0, "sanity check");
+
+#ifndef PRODUCT
+    if (SafepointTimeout) {
+      jlong current_time = os::javaTimeNanos();
+      if (safepoint_limit_time < current_time) {
+        tty->print_cr("# SafepointSynchronize: Finished after "
+                      INT64_FORMAT_W(6) " ms",
+                      ((current_time - safepoint_limit_time) / MICROUNITS +
+                       (jlong)SafepointTimeoutDelay));
+      }
+    }
+#endif
+
+    assert((_safepoint_counter & 0x1) == 0, "must be even");
+    assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
+    _safepoint_counter ++;
+
+    // Record state
+    _state = _synchronized;
+
+    OrderAccess::fence();
+
+    if (wait_blocked_event.should_commit()) {
+      wait_blocked_event.set_safepointId(safepoint_counter());
+      wait_blocked_event.set_runningThreadCount(initial_waiting_to_block);
+      wait_blocked_event.commit();
+    }
+  } // EventSafepointWaitBlocked

 #ifdef ASSERT
   for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
     // make sure all the threads were visited
     assert(cur->was_visited_for_critical_count(), "missed a thread");
@@ -376,21 +405,36 @@
   if (PrintSafepointStatistics) {
     update_statistics_on_sync_end(os::javaTimeNanos());
   }

   // Call stuff that needs to be run when a safepoint is just about to be completed
-  do_cleanup_tasks();
+  {
+    EventSafepointCleanup cleanup_event;
+    do_cleanup_tasks();
+    if (cleanup_event.should_commit()) {
+      cleanup_event.set_safepointId(safepoint_counter());
+      cleanup_event.commit();
+    }
+  }

   if (PrintSafepointStatistics) {
     // Record how much time was spent on the above cleanup tasks
     update_statistics_on_cleanup_end(os::javaTimeNanos());
   }
+  if (begin_event.should_commit()) {
+    begin_event.set_safepointId(safepoint_counter());
+    begin_event.set_totalThreadCount(nof_threads);
+    begin_event.set_jniCriticalThreadCount(_current_jni_active_count);
+    begin_event.commit();
+  }
 }

 // Wake up all threads, so they are ready to resume execution after the safepoint
 // operation has been carried out
 void SafepointSynchronize::end() {
+  EventSafepointEnd event;
+  int safepoint_id = safepoint_counter(); // Keep the odd counter as "id"

   assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
   assert((_safepoint_counter & 0x1) == 1, "must be odd");
   _safepoint_counter ++;
   // memory fence isn't required here since an odd _safepoint_counter
@@ -473,56 +517,88 @@
   }
 #endif // INCLUDE_ALL_GCS
   // record this time so VMThread can keep track of how much time has elapsed
   // since last safepoint.
   _end_of_last_safepoint = os::javaTimeMillis();
+
+  if (event.should_commit()) {
+    event.set_safepointId(safepoint_id);
+    event.commit();
+  }
 }

 bool SafepointSynchronize::is_cleanup_needed() {
   // Need a safepoint if the inline cache buffer is non-empty
   if (!InlineCacheBuffer::is_empty()) return true;
   return false;
 }

-
+static void event_safepoint_cleanup_task_commit(EventSafepointCleanupTask& event, const char* name) {
+  if (event.should_commit()) {
+    event.set_safepointId(SafepointSynchronize::safepoint_counter());
+    event.set_name(name);
+    event.commit();
+  }
+}

 // Various cleaning tasks that should be done periodically at safepoints
 void SafepointSynchronize::do_cleanup_tasks() {
   {
-    TraceTime timer("deflating idle monitors", TRACETIME_LOG(Info, safepointcleanup));
+    const char* name = "deflating idle monitors";
+    EventSafepointCleanupTask event;
+    TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup));
     ObjectSynchronizer::deflate_idle_monitors();
+    event_safepoint_cleanup_task_commit(event, name);
   }

   {
-    TraceTime timer("updating inline caches", TRACETIME_LOG(Info, safepointcleanup));
+    const char* name = "updating inline caches";
+    EventSafepointCleanupTask event;
+    TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup));
     InlineCacheBuffer::update_inline_caches();
+    event_safepoint_cleanup_task_commit(event, name);
   }
   {
+    const char* name = "compilation policy safepoint handler";
+    EventSafepointCleanupTask event;
     TraceTime timer("compilation policy safepoint handler", TRACETIME_LOG(Info, safepointcleanup));
     CompilationPolicy::policy()->do_safepoint_work();
+    event_safepoint_cleanup_task_commit(event, name);
   }

   {
-    TraceTime timer("mark nmethods", TRACETIME_LOG(Info, safepointcleanup));
+    const char* name = "mark nmethods";
+    EventSafepointCleanupTask event;
+    TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup));
     NMethodSweeper::mark_active_nmethods();
+    event_safepoint_cleanup_task_commit(event, name);
   }

   if (SymbolTable::needs_rehashing()) {
-    TraceTime timer("rehashing symbol table", TRACETIME_LOG(Info, safepointcleanup));
+    const char* name = "rehashing symbol table";
+    EventSafepointCleanupTask event;
+    TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup));
     SymbolTable::rehash_table();
+    event_safepoint_cleanup_task_commit(event, name);
   }

   if (StringTable::needs_rehashing()) {
-    TraceTime timer("rehashing string table", TRACETIME_LOG(Info, safepointcleanup));
+    const char* name = "rehashing string table";
+    EventSafepointCleanupTask event;
+    TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup));
     StringTable::rehash_table();
+    event_safepoint_cleanup_task_commit(event, name);
   }

   {
     // CMS delays purging the CLDG until the beginning of the next safepoint and to
     // make sure concurrent sweep is done
-    TraceTime timer("purging class loader data graph", TRACETIME_LOG(Info, safepointcleanup));
+    const char* name = "purging class loader data graph";
+    EventSafepointCleanupTask event;
+    TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup));
     ClassLoaderDataGraph::purge_if_needed();
+    event_safepoint_cleanup_task_commit(event, name);
   }
 }


 bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {