src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
branch:    datagramsocketimpl-branch
changeset: 58678:9cf78a70fa4f
parent:    54983:81becad91321
child:     58679:9c3209ff7550
diff:      58677:13588c901957 -> 58678:9cf78a70fa4f

--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -35,16 +35,16 @@
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
+#include "gc/g1/g1Trace.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/gcVMOperations.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
@@ -255,43 +255,51 @@
   _hwm = 0;
   _chunk_list = NULL;
   _free_list = NULL;
 }
 
-G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
-  _root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
-  _max_regions(max_regions),
-  _num_root_regions(0),
-  _claimed_root_regions(0),
-  _scan_in_progress(false),
-  _should_abort(false) { }
+G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
+    _root_regions(NULL),
+    _max_regions(max_regions),
+    _num_root_regions(0),
+    _claimed_root_regions(0),
+    _scan_in_progress(false),
+    _should_abort(false) {
+  _root_regions = new MemRegion[_max_regions];
+  if (_root_regions == NULL) {
+    vm_exit_during_initialization("Could not allocate root MemRegion set.");
+  }
+}
 
-G1CMRootRegions::~G1CMRootRegions() {
-  FREE_C_HEAP_ARRAY(HeapRegion*, _max_regions);
+G1CMRootMemRegions::~G1CMRootMemRegions() {
+  delete[] _root_regions;
 }
 
-void G1CMRootRegions::reset() {
+void G1CMRootMemRegions::reset() {
   _num_root_regions = 0;
 }
 
-void G1CMRootRegions::add(HeapRegion* hr) {
+void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
   assert_at_safepoint();
   size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
-  assert(idx < _max_regions, "Trying to add more root regions than there is space " SIZE_FORMAT, _max_regions);
-  _root_regions[idx] = hr;
+  assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
+  assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
+         "end (" PTR_FORMAT ")", p2i(start), p2i(end));
+  _root_regions[idx].set_start(start);
+  _root_regions[idx].set_end(end);
 }
 
-void G1CMRootRegions::prepare_for_scan() {
+void G1CMRootMemRegions::prepare_for_scan() {
   assert(!scan_in_progress(), "pre-condition");
 
   _scan_in_progress = _num_root_regions > 0;
 
   _claimed_root_regions = 0;
   _should_abort = false;
 }
 
-HeapRegion* G1CMRootRegions::claim_next() {
+const MemRegion* G1CMRootMemRegions::claim_next() {
   if (_should_abort) {
     // If someone has set the should_abort flag, we return NULL to
     // force the caller to bail out of their loop.
     return NULL;
   }
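
For context: a `MemRegion` (declared in memory/memRegion.hpp) is a plain half-open [start, end) interval of `HeapWord`s, which is what lets the constructor above swap the old `HeapRegion*` array for a flat `new MemRegion[...]`. The sketch below models only the members this hunk touches; it is an illustration, not HotSpot's definition (the real class stores a start pointer plus a word count and offers many more helpers):

#include <cstddef>

typedef char* HeapWord;  // stand-in for HotSpot's opaque HeapWord*

// Simplified model of MemRegion: a half-open [start, end) interval.
// Only the members used by this changeset are sketched.
class MemRegionSketch {
  HeapWord _start;
  std::size_t _size;     // the real class counts HeapWords, not bytes
public:
  MemRegionSketch() : _start(NULL), _size(0) {}
  HeapWord start() const { return _start; }
  HeapWord end()   const { return _start + _size; }
  HeapWord last()  const { return end() - 1; }  // last addressable unit
  void set_start(HeapWord s) { _start = s; }
  void set_end(HeapWord e)   { _size = (std::size_t)(e - _start); }
};

Under this layout set_end() measures from the current start, so add() must call set_start() before set_end(), which is exactly the order in the hunk above.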
@@ -300,30 +308,30 @@
     return NULL;
   }
 
   size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
   if (claimed_index < _num_root_regions) {
-    return _root_regions[claimed_index];
+    return &_root_regions[claimed_index];
   }
   return NULL;
 }
 
-uint G1CMRootRegions::num_root_regions() const {
+uint G1CMRootMemRegions::num_root_regions() const {
   return (uint)_num_root_regions;
 }
 
-void G1CMRootRegions::notify_scan_done() {
+void G1CMRootMemRegions::notify_scan_done() {
   MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
   _scan_in_progress = false;
   RootRegionScan_lock->notify_all();
 }
 
-void G1CMRootRegions::cancel_scan() {
+void G1CMRootMemRegions::cancel_scan() {
   notify_scan_done();
 }
 
-void G1CMRootRegions::scan_finished() {
+void G1CMRootMemRegions::scan_finished() {
   assert(scan_in_progress(), "pre-condition");
 
   if (!_should_abort) {
     assert(_claimed_root_regions >= num_root_regions(),
            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
@@ -331,11 +339,11 @@
   }
 
   notify_scan_done();
 }
 
-bool G1CMRootRegions::wait_until_scan_finished() {
+bool G1CMRootMemRegions::wait_until_scan_finished() {
   if (!scan_in_progress()) {
     return false;
   }
 
   {
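
A note on the claiming protocol in add() and claim_next() above: `Atomic::add((size_t)1, &counter) - 1` is a fetch-and-add that hands every caller a distinct index, so concurrent workers partition the array without taking a lock, and the counter doubles as the element count. A minimal standalone model of the same idiom with `std::atomic` (the `ClaimList` name and shape are illustrative, not HotSpot code):

#include <atomic>
#include <cstddef>
#include <utility>
#include <vector>

// Illustrative model of G1CMRootMemRegions::claim_next(): each worker
// atomically bumps a shared counter and uses the pre-increment value
// as its private index, so no element is handed out twice.
template <typename T>
class ClaimList {
  std::vector<T> _items;
  std::atomic<std::size_t> _claimed;
public:
  explicit ClaimList(std::vector<T> items)
    : _items(std::move(items)), _claimed(0) {}

  // Returns NULL once every element has been claimed.
  const T* claim_next() {
    std::size_t idx = _claimed.fetch_add(1, std::memory_order_relaxed);
    return idx < _items.size() ? &_items[idx] : NULL;
  }
};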
@@ -873,18 +881,25 @@
          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
          _max_concurrent_workers, result);
   return result;
 }
 
-void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
-  assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
-         "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
+void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
+#ifdef ASSERT
+  HeapWord* last = region->last();
+  HeapRegion* hr = _g1h->heap_region_containing(last);
+  assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
+         "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
+  assert(hr->next_top_at_mark_start() == region->start(),
+         "MemRegion start should be equal to nTAMS");
+#endif
+
   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 
   const uintx interval = PrefetchScanIntervalInBytes;
-  HeapWord* curr = hr->next_top_at_mark_start();
-  const HeapWord* end = hr->top();
+  HeapWord* curr = region->start();
+  const HeapWord* end = region->end();
   while (curr < end) {
     Prefetch::read(curr, interval);
     oop obj = oop(curr);
     int size = obj->oop_iterate_size(&cl);
     assert(size == obj->size(), "sanity");
@@ -900,15 +915,15 @@
 
   void work(uint worker_id) {
     assert(Thread::current()->is_ConcurrentGC_thread(),
            "this should only be done by a conc GC thread");
 
-    G1CMRootRegions* root_regions = _cm->root_regions();
-    HeapRegion* hr = root_regions->claim_next();
-    while (hr != NULL) {
-      _cm->scan_root_region(hr, worker_id);
-      hr = root_regions->claim_next();
+    G1CMRootMemRegions* root_regions = _cm->root_regions();
+    const MemRegion* region = root_regions->claim_next();
+    while (region != NULL) {
+      _cm->scan_root_region(region, worker_id);
+      region = root_regions->claim_next();
     }
   }
 };
 
 void G1ConcurrentMark::scan_root_regions() {
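
The scan loop in scan_root_region() relies on the range between nTAMS and top being a contiguous run of parsable objects: the second new assert pins `region->start()` to nTAMS, and each object's reported size is exactly the cursor advance. A standalone sketch of that walk over length-prefixed records (all names here are illustrative, not HotSpot's):

#include <cstddef>

// Illustrative record type: a length-prefixed blob standing in for a
// heap object that can report its own size (obj->size() in HotSpot).
struct Record {
  std::size_t size_in_bytes;  // total size, header included
  // payload follows
};

// Walk a contiguous [curr, end) range the way scan_root_region() does:
// visit the object at the cursor, then advance by exactly its size.
inline void walk_range(char* curr, const char* end,
                       void (*visit)(const Record*)) {
  while (curr < end) {
    const Record* r = reinterpret_cast<const Record*>(curr);
    visit(r);                   // oop_iterate_size() in the real loop
    curr += r->size_in_bytes;   // visited size == cursor advance
  }
}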
@@ -2402,15 +2417,16 @@
   while (!has_aborted() &&
          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
     abort_marking_if_regular_check_fail();
   }
 
+  // Can't assert qset is empty here, even if not aborted.  If concurrent,
+  // some other thread might be adding to the queue.  If not concurrent,
+  // some other thread might have won the race for the last buffer, but
+  // has not yet decremented the count.
+
   _draining_satb_buffers = false;
-
-  assert(has_aborted() ||
-         _cm->concurrent() ||
-         satb_mq_set.completed_buffers_num() == 0, "invariant");
 
   // again, this was a potentially expensive operation, decrease the
   // limits to get the regular clock call early
   decrease_limits();
 }
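
The comment added above documents a benign race rather than a bug: a worker can detach the last completed buffer (so this drain loop exits) before it gets around to decrementing the shared buffer count, which is why the old `completed_buffers_num() == 0` assert had to go even in the non-concurrent case. A toy model of that window (illustrative types, not the SATB queue set API):

#include <atomic>
#include <cstddef>
#include <deque>
#include <mutex>

// Toy model of the window: buffers are claimed under a lock, but the
// published count is decremented only after processing, so a second
// thread can observe "no buffers left to claim" while count still
// reads non-zero.
struct BufferSet {
  std::mutex lock;
  std::deque<int> buffers;
  std::atomic<std::size_t> count{0};

  void push(int b) {
    std::lock_guard<std::mutex> g(lock);
    buffers.push_back(b);
    count.fetch_add(1);
  }

  bool try_claim(int& out) {
    std::lock_guard<std::mutex> g(lock);
    if (buffers.empty()) return false;
    out = buffers.front();
    buffers.pop_front();
    return true;              // count NOT decremented yet
  }

  void done_processing() {
    count.fetch_sub(1);       // the stale-count window closes here
  }
};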
@@ -2569,11 +2585,11 @@
   // steal work from the other G1CMTasks. It only makes sense to
   // enable stealing when the termination protocol is enabled
   // and do_marking_step() is not being called serially.
   bool do_stealing = do_termination && !is_serial;
 
-  double diff_prediction_ms = _g1h->policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
+  double diff_prediction_ms = _g1h->policy()->predictor().get_new_prediction(&_marking_step_diff_ms);
   _time_target_ms = time_target_ms - diff_prediction_ms;
 
   // set up the variables that are used in the work-based scheme to
   // call the regular clock method
   _words_scanned = 0;
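
`get_new_prediction(&_marking_step_diff_ms)` asks G1's predictor how far past its time target a marking step has tended to run, and the step then aims short by that amount. A minimal sketch of the shape of this computation using a plain running average (HotSpot's `TruncatedSeq` and `G1Predictions` use a bounded, decaying window plus a confidence factor; this is only an illustration):

#include <cstddef>

// Minimal stand-in for TruncatedSeq + get_new_prediction(): record past
// overshoots (elapsed - target) and predict the next as their mean.
class OvershootPredictor {
  double _sum;
  std::size_t _num;
public:
  OvershootPredictor() : _sum(0.0), _num(0) {}
  void add(double diff_ms) { _sum += diff_ms; _num++; }
  double predict() const { return _num == 0 ? 0.0 : _sum / _num; }
};

// Usage mirroring do_marking_step(): aim short of the real deadline by
// the predicted overshoot so the step tends to finish on target.
inline double effective_target_ms(double time_target_ms,
                                  const OvershootPredictor& p) {
  return time_target_ms - p.predict();
}

The `_marking_step_diff_ms.add(0.5)` call in the constructor hunk below plays the same role as pre-seeding such a predictor with a single 0.5 ms sample, so early predictions are non-zero.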
@@ -2811,11 +2827,11 @@
     if (_has_timed_out) {
       double diff_ms = elapsed_time_ms - _time_target_ms;
       // Keep statistics of how well we did with respect to hitting
       // our target only if we actually timed out (if we aborted for
       // other reasons, then the results might get skewed).
-      _marking_step_diffs_ms.add(diff_ms);
+      _marking_step_diff_ms.add(diff_ms);
     }
 
     if (_cm->has_overflown()) {
       // This is the interesting one. We aborted because a global
       // overflow was raised. This means we have to restart the
@@ -2894,15 +2910,15 @@
   _draining_satb_buffers(false),
   _step_times_ms(),
   _elapsed_time_ms(0.0),
   _termination_time_ms(0.0),
   _termination_start_time_ms(0.0),
-  _marking_step_diffs_ms()
+  _marking_step_diff_ms()
 {
   guarantee(task_queue != NULL, "invariant");
 
-  _marking_step_diffs_ms.add(0.5);
+  _marking_step_diff_ms.add(0.5);
 }
 
 // These are formatting macros that are used below to ensure
 // consistent formatting. The *_H_* versions are used to format the
 // header for a particular value and they should be kept consistent