hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
changeset 13336 e582172ff6ff
parent 13289 0f61d0bac1ca
child 13466 9fdf353d5f4c
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	13335:f2e823305677
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	13336:e582172ff6ff
@@ -1147,17 +1147,20 @@
 
   ShouldNotReachHere();
 }
 
 class PostMCRemSetClearClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   ModRefBarrierSet* _mr_bs;
 public:
-  PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
+  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
+    _g1h(g1h), _mr_bs(mr_bs) { }
   bool doHeapRegion(HeapRegion* r) {
-    r->reset_gc_time_stamp();
-    if (r->continuesHumongous())
+    if (r->continuesHumongous()) {
       return false;
+    }
+    _g1h->reset_gc_time_stamps(r);
     HeapRegionRemSet* hrrs = r->rem_set();
     if (hrrs != NULL) hrrs->clear();
     // You might think here that we could clear just the cards
     // corresponding to the used region.  But no: if we leave a dirty card
     // in a region we might allocate into, then it would prevent that card
@@ -1166,23 +1169,14 @@
     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
     return false;
   }
 };
 
-
-class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
-  ModRefBarrierSet* _mr_bs;
-public:
-  PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous()) return false;
-    if (r->used_region().word_size() != 0) {
-      _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
-    }
-    return false;
-  }
-};
+void G1CollectedHeap::clear_rsets_post_compaction() {
+  PostMCRemSetClearClosure rs_clear(this, mr_bs());
+  heap_region_iterate(&rs_clear);
+}
 
 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
   G1CollectedHeap*   _g1h;
   UpdateRSOopClosure _cl;
   int                _worker_i;
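A minimal sketch, not part of the changeset, of the HeapRegionClosure idiom that both the reworked PostMCRemSetClearClosure and the new clear_rsets_post_compaction() wrapper rely on: doHeapRegion() is called once per region, and returning false keeps the iteration going over the remaining regions. The closure name and the counting logic below are hypothetical.

class CountNonHumongousRegionsClosure : public HeapRegionClosure {
  size_t _count;
public:
  CountNonHumongousRegionsClosure() : _count(0) { }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      _count += 1;   // count every region that is not a humongous continuation
    }
    return false;    // false == keep iterating
  }
  size_t count() const { return _count; }
};

// Driven the same way clear_rsets_post_compaction() drives its closure:
//   CountNonHumongousRegionsClosure cl;
//   heap_region_iterate(&cl);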
@@ -1227,11 +1221,11 @@
     // We only generate output for non-empty regions.
     if (!hr->is_empty()) {
       if (!hr->isHumongous()) {
         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
       } else if (hr->startsHumongous()) {
-        if (hr->capacity() == HeapRegion::GrainBytes) {
+        if (hr->region_num() == 1) {
           // single humongous region
           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
         } else {
           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
         }
@@ -1244,10 +1238,15 @@
   }
 
   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
     : _hr_printer(hr_printer) { }
 };
+
+void G1CollectedHeap::print_hrs_post_compaction() {
+  PostCompactionPrinterClosure cl(hr_printer());
+  heap_region_iterate(&cl);
+}
 
 bool G1CollectedHeap::do_collection(bool explicit_gc,
                                     bool clear_all_soft_refs,
                                     size_t word_size) {
   assert_at_safepoint(true /* should_be_vm_thread */);
@@ -1400,24 +1399,22 @@
 
     reset_gc_time_stamp();
     // Since everything potentially moved, we will clear all remembered
     // sets, and clear all cards.  Later we will rebuild remebered
     // sets. We will also reset the GC time stamps of the regions.
-    PostMCRemSetClearClosure rs_clear(mr_bs());
-    heap_region_iterate(&rs_clear);
+    clear_rsets_post_compaction();
+    check_gc_time_stamps();
 
     // Resize the heap if necessary.
     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
 
     if (_hr_printer.is_active()) {
       // We should do this after we potentially resize the heap so
       // that all the COMMIT / UNCOMMIT events are generated before
       // the end GC event.
 
-      PostCompactionPrinterClosure cl(hr_printer());
-      heap_region_iterate(&cl);
-
+      print_hrs_post_compaction();
       _hr_printer.end_gc(true /* full */, (size_t) total_collections());
     }
 
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
@@ -2261,10 +2258,55 @@
 
 size_t G1CollectedHeap::capacity() const {
   return _g1_committed.byte_size();
 }
 
+void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
+  assert(!hr->continuesHumongous(), "pre-condition");
+  hr->reset_gc_time_stamp();
+  if (hr->startsHumongous()) {
+    uint first_index = hr->hrs_index() + 1;
+    uint last_index = hr->last_hc_index();
+    for (uint i = first_index; i < last_index; i += 1) {
+      HeapRegion* chr = region_at(i);
+      assert(chr->continuesHumongous(), "sanity");
+      chr->reset_gc_time_stamp();
+    }
+  }
+}
+
+#ifndef PRODUCT
+class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
+private:
+  unsigned _gc_time_stamp;
+  bool _failures;
+
+public:
+  CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
+    _gc_time_stamp(gc_time_stamp), _failures(false) { }
+
+  virtual bool doHeapRegion(HeapRegion* hr) {
+    unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
+    if (_gc_time_stamp != region_gc_time_stamp) {
+      gclog_or_tty->print_cr("Region "HR_FORMAT" has GC time stamp = %d, "
+                             "expected %d", HR_FORMAT_PARAMS(hr),
+                             region_gc_time_stamp, _gc_time_stamp);
+      _failures = true;
+    }
+    return false;
+  }
+
+  bool failures() { return _failures; }
+};
+
+void G1CollectedHeap::check_gc_time_stamps() {
+  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
+  heap_region_iterate(&cl);
+  guarantee(!cl.failures(), "all GC time stamps should have been reset");
+}
+#endif // PRODUCT
+
 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                  DirtyCardQueue* into_cset_dcq,
                                                  bool concurrent,
                                                  int worker_i) {
   // Clean cards in the hot card cache
@@ -2528,11 +2570,11 @@
   OopClosure* _cl;
 public:
   IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
     : _mr(mr), _cl(cl) {}
   bool doHeapRegion(HeapRegion* r) {
-    if (! r->continuesHumongous()) {
+    if (!r->continuesHumongous()) {
      r->oop_iterate(_cl);
     }
     return false;
   }
 };
@@ -2599,29 +2641,26 @@
 
 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
   _hrs.iterate(cl);
 }
 
-void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
-                                               HeapRegionClosure* cl) const {
-  _hrs.iterate_from(r, cl);
-}
-
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
-                                                 uint worker,
+                                                 uint worker_id,
                                                  uint no_of_par_workers,
                                                  jint claim_value) {
   const uint regions = n_regions();
   const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                              no_of_par_workers :
                              1);
   assert(UseDynamicNumberOfGCThreads ||
          no_of_par_workers == workers()->total_workers(),
          "Non dynamic should use fixed number of workers");
   // try to spread out the starting points of the workers
-  const uint start_index = regions / max_workers * worker;
+  const HeapRegion* start_hr =
+                        start_region_for_worker(worker_id, no_of_par_workers);
+  const uint start_index = start_hr->hrs_index();
 
   // each worker will actually look at all regions
   for (uint count = 0; count < regions; ++count) {
     const uint index = (start_index + count) % regions;
     assert(0 <= index && index < regions, "sanity");
@@ -2859,10 +2898,21 @@
   OrderAccess::storestore();
   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
   return result;
 }
 
+HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
+                                                     uint no_of_par_workers) {
+  uint worker_num =
+           G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
+  assert(UseDynamicNumberOfGCThreads ||
+         no_of_par_workers == workers()->total_workers(),
+         "Non dynamic should use fixed number of workers");
+  const uint start_index = n_regions() * worker_i / worker_num;
+  return region_at(start_index);
+}
+
 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
   HeapRegion* r = g1_policy()->collection_set();
   while (r != NULL) {
     HeapRegion* next = r->next_in_collection_set();
     if (cl->doHeapRegion(r)) {
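A quick worked example of the new starting-point calculation, with hypothetical numbers that are not in the changeset: if n_regions() is 1000 and no_of_par_workers is 4, start_region_for_worker() computes n_regions() * worker_i / worker_num, giving start_index values of 0, 250, 500 and 750 for workers 0 through 3. Each worker in heap_region_par_iterate_chunked() still visits all 1000 regions via index = (start_index + count) % regions; spreading the starting points only changes where each worker begins claiming, presumably to reduce contention on the first regions.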
@@ -2970,10 +3020,55 @@
 void G1CollectedHeap::prepare_for_verify() {
   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
     ensure_parsability(false);
   }
   g1_rem_set()->prepare_for_verify();
+}
+
+bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
+                                              VerifyOption vo) {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking:
+    return hr->obj_allocated_since_prev_marking(obj);
+  case VerifyOption_G1UseNextMarking:
+    return hr->obj_allocated_since_next_marking(obj);
+  case VerifyOption_G1UseMarkWord:
+    return false;
+  default:
+    ShouldNotReachHere();
+  }
+  return false; // keep some compilers happy
+}
+
+HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
+  case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
+  case VerifyOption_G1UseMarkWord:    return NULL;
+  default:                            ShouldNotReachHere();
+  }
+  return NULL; // keep some compilers happy
+}
+
+bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
+  case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
+  case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
+  default:                            ShouldNotReachHere();
+  }
+  return false; // keep some compilers happy
+}
+
+const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return "PTAMS";
+  case VerifyOption_G1UseNextMarking: return "NTAMS";
+  case VerifyOption_G1UseMarkWord:    return "NONE";
+  default:                            ShouldNotReachHere();
+  }
+  return NULL; // keep some compilers happy
 }
 
 class VerifyLivenessOopClosure: public OopClosure {
   G1CollectedHeap* _g1h;
   VerifyOption _vo;
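A hedged sketch of how the new VerifyOption helpers are meant to compose; the wrapper below is hypothetical and not part of the changeset, and the liveness reading it encodes (an object not allocated since the chosen mark start and not marked under that view is treated as dead) is the conventional one rather than something this patch states.

// Hypothetical combination of the new helpers (assumes they are callable here).
static bool obj_dead_wrt(G1CollectedHeap* g1h, oop obj, HeapRegion* hr,
                         VerifyOption vo) {
  if (g1h->allocated_since_marking(obj, hr, vo)) {
    return false; // allocated after mark start: implicitly live
  }
  // Otherwise dead unless the chosen marking (bitmap or mark word) says live.
  return !g1h->is_marked(obj, vo);
}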
@@ -3059,13 +3154,13 @@
   }
 };
 
 class VerifyRegionClosure: public HeapRegionClosure {
 private:
-  bool         _par;
-  VerifyOption _vo;
-  bool         _failures;
+  bool             _par;
+  VerifyOption     _vo;
+  bool             _failures;
 public:
   // _vo == UsePrevMarking -> use "prev" marking information,
   // _vo == UseNextMarking -> use "next" marking information,
   // _vo == UseMarkWord    -> use mark word from object header.
   VerifyRegionClosure(bool par, VerifyOption vo)
@@ -3076,12 +3171,10 @@
   bool failures() {
     return _failures;
   }
 
   bool doHeapRegion(HeapRegion* r) {
-    guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
-              "Should be unclaimed at verify points.");
     if (!r->continuesHumongous()) {
       bool failures = false;
       r->verify(_vo, &failures);
       if (failures) {
         _failures = true;
@@ -5610,23 +5703,22 @@
 
   size_t hr_used = hr->used();
   size_t hr_capacity = hr->capacity();
   size_t hr_pre_used = 0;
   _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
+  // We need to read this before we make the region non-humongous,
+  // otherwise the information will be gone.
+  uint last_index = hr->last_hc_index();
   hr->set_notHumongous();
   free_region(hr, &hr_pre_used, free_list, par);
 
   uint i = hr->hrs_index() + 1;
-  uint num = 1;
-  while (i < n_regions()) {
+  while (i < last_index) {
     HeapRegion* curr_hr = region_at(i);
-    if (!curr_hr->continuesHumongous()) {
-      break;
-    }
+    assert(curr_hr->continuesHumongous(), "invariant");
     curr_hr->set_notHumongous();
     free_region(curr_hr, &hr_pre_used, free_list, par);
-    num += 1;
     i += 1;
   }
   assert(hr_pre_used == hr_used,
          err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
                 "should be the same", hr_pre_used, hr_used));
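A worked example of the new loop bounds, with hypothetical region numbers: for a humongous object that starts in region 10 and continues through regions 11-13, hr->hrs_index() is 10 and hr->last_hc_index() appears to be the exclusive bound 14 (the same convention the reset_gc_time_stamps() loop above uses), so the while loop frees exactly regions 11, 12 and 13. Caching last_index before hr->set_notHumongous() matters because, as the new comment says, that value is derived from the region's humongous state, which is gone once the region is made non-humongous.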
@@ -5730,11 +5822,10 @@
   }
 }
 
 void G1CollectedHeap::verify_dirty_young_regions() {
   verify_dirty_young_list(_young_list->first_region());
-  verify_dirty_young_list(_young_list->first_survivor_region());
 }
 #endif
 
 void G1CollectedHeap::cleanUpCardTable() {
   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());