hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
changeset 38183:cb68e4923223
parent    38172:90f405aac699
child     38185:c432f8466c73
comparing 38173:73d05e56ec86 with 38183:cb68e4923223
@@ -1353,12 +1353,11 @@
       JavaThread::dirty_card_queue_set().abandon_logs();
       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
 
       // At this point there should be no regions in the
       // entire heap tagged as young.
-      assert(check_young_list_empty(true /* check_heap */),
-             "young list should be empty at this point");
+      assert(check_young_list_empty(), "young list should be empty at this point");
 
       // Update the number of full collections that have been completed.
       increment_old_marking_cycles_completed(false /* concurrent */);
 
       _hrm.verify_optional();
@@ -1715,11 +1714,10 @@
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
   _humongous_reclaim_candidates(),
   _has_humongous_reclaim_candidates(false),
   _archive_allocator(NULL),
   _free_regions_coming(false),
-  _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _summary_bytes_used(0),
   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
   _old_evac_stats("Old", OldPLABSize, PLABWeight),
   _expand_heap_after_alloc_failure(true),
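
Note: the constructor no longer heap-allocates a YoungList; eden and survivor regions are tracked by plain value members (_eden and _survivor throughout the rest of this diff). Their declarations are not part of these hunks, but a minimal tracker consistent with the calls that do appear here (add(), clear(), length()) could look like the sketch below; the class name is hypothetical, not the JDK's:

    // Hypothetical sketch only; the real declarations live in the G1
    // sources added by this changeset, not in this file's hunks.
    class RegionTracker {
      uint _length;
    public:
      RegionTracker() : _length(0) { }
      void add(HeapRegion* hr)  { (void)hr; _length++; }  // count only, no list linking
      void clear()              { _length = 0; }
      uint length() const       { return _length; }
    };
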
@@ -2561,15 +2559,15 @@
 bool G1CollectedHeap::supports_tlab_allocation() const {
   return true;
 }
 
 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
-  return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
+  return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
 }
 
 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
-  return young_list()->eden_used_bytes();
+  return _eden.length() * HeapRegion::GrainBytes;
 }
 
 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
 // must be equal to the humongous object limit.
 size_t G1CollectedHeap::max_tlab_size() const {
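
Note: both TLAB statistics are now pure region arithmetic: capacity is the young target minus the survivor regions (survivors are not available for mutator TLABs), and usage is the eden region count. A worked example with illustrative numbers (not taken from the source):

    // Assume 1M regions, a young target of 100 regions, 10 survivor
    // regions, and 20 eden regions currently allocated.
    size_t grain    = 1024 * 1024;            // stand-in for HeapRegion::GrainBytes
    size_t capacity = (100 - 10) * grain;     // 90 MB usable for TLABs
    size_t used     = 20 * grain;             // 20 MB of eden in use
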
@@ -2650,14 +2648,14 @@
             p2i(_hrm.reserved().start()),
             p2i(_hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords),
             p2i(_hrm.reserved().end()));
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
-  uint young_regions = _young_list->length();
+  uint young_regions = young_regions_count();
   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
             (size_t) young_regions * HeapRegion::GrainBytes / K);
-  uint survivor_regions = _young_list->survivor_length();
+  uint survivor_regions = survivor_regions_count();
   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
   st->cr();
   MetaspaceAux::print_on(st);
 }
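
Note: given the format strings above, the summary line this prints would look something like the following (values are illustrative, assuming 1M regions and 10 young regions of which 2 are survivors):

      region size 1024K, 10 young (10240K), 2 survivors (2048K)
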
@@ -2763,14 +2761,13 @@
   heap_region_iterate(&cl);
 }
 #endif // PRODUCT
 
 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
-  YoungList* young_list = heap()->young_list();
 
-  size_t eden_used_bytes = young_list->eden_used_bytes();
-  size_t survivor_used_bytes = young_list->survivor_used_bytes();
+  size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
+  size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
   size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
 
   size_t eden_capacity_bytes =
     (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
 
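
Note: the heap summary now derives eden and survivor usage from region counts times the region size, and eden capacity as the young target in bytes minus whatever the survivors occupy. With the same illustrative numbers as above (1M regions, target 100, 10 survivors, 20 eden regions):

    size_t grain               = 1024 * 1024;
    size_t eden_used_bytes     = 20 * grain;                          // 20 MB
    size_t survivor_used_bytes = 10 * grain;                          // 10 MB
    size_t eden_capacity_bytes = (100 * grain) - survivor_used_bytes; // 90 MB
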
@@ -3186,12 +3183,10 @@
     }
 
     G1HeapTransition heap_transition(this);
     size_t heap_used_bytes_before_gc = used();
 
-    assert(check_young_list_well_formed(), "young list should be well formed");
-
     // Don't dynamically change the number of GC threads this early.  A value of
     // 0 is used to indicate serial work.  When parallel work is done,
     // it will be set.
 
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
@@ -3251,11 +3246,11 @@
 
         if (collector_state()->during_initial_mark_pause()) {
           concurrent_mark()->checkpointRootsInitialPre();
         }
 
-        g1_policy()->finalize_collection_set(target_pause_time_ms);
+        g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
 
         evacuation_info.set_collectionset_regions(collection_set()->region_length());
 
         // Make sure the remembered sets are up to date. This needs to be
         // done before register_humongous_regions_with_cset(), because the
@@ -3306,18 +3301,12 @@
         // Start a new incremental collection set for the next pause.
         collection_set()->start_incremental_building();
 
         clear_cset_fast_test();
 
-        // Don't check the whole heap at this point as the
-        // GC alloc regions from this pause have been tagged
-        // as survivors and moved on to the survivor list.
-        // Survivor regions will fail the !is_young() check.
-        assert(check_young_list_empty(false /* check_heap */),
-          "young list should be empty");
-
-        _young_list->reset_auxilary_lists();
+        guarantee(_eden.length() == 0, "eden should have been cleared");
+        g1_policy()->transfer_survivors_to_cset(survivor());
 
         if (evacuation_failed()) {
           set_used(recalculate_used());
           if (_archive_allocator != NULL) {
             _archive_allocator->clear_used();
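
Note: the pause epilogue no longer resets the YoungList's auxiliary lists; it checks that eden was fully drained and hands the survivor set to the policy, which seeds the next incremental collection set with it. transfer_survivors_to_cset() itself is not shown in this diff; a hedged sketch of the shape such a transfer could take (the accessor and helper names here are assumptions, not the committed code):

    // Hypothetical sketch of the survivor hand-off, not the committed code.
    void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
      GrowableArray<HeapRegion*>* regions = survivors->regions();  // assumed accessor
      for (int i = 0; i < regions->length(); i++) {
        HeapRegion* r = regions->at(i);
        set_region_survivor(r);                    // assumed helper
        _collection_set->add_survivor_regions(r);  // survivors open the next cset
      }
    }
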
@@ -4720,14 +4709,11 @@
   FreeRegionList local_free_list("Local List for CSet Freeing");
 
   double young_time_ms     = 0.0;
   double non_young_time_ms = 0.0;
 
-  // Since the collection set is a superset of the the young list,
-  // all we need to do to clear the young list is clear its
-  // head and length, and unlink any young regions in the code below
-  _young_list->clear();
+  _eden.clear();
 
   G1Policy* policy = g1_policy();
 
   double start_sec = os::elapsedTime();
   bool non_young = true;
@@ -4770,15 +4756,10 @@
       assert(index != -1, "invariant");
       assert((uint) index < collection_set()->young_region_length(), "invariant");
       size_t words_survived = surviving_young_words[index];
       cur->record_surv_words_in_group(words_survived);
 
-      // At this point the we have 'popped' cur from the collection set
-      // (linked via next_in_collection_set()) but it is still in the
-      // young list (linked via next_young_region()). Clear the
-      // _next_young_region field.
-      cur->set_next_young_region(NULL);
     } else {
       int index = cur->young_index_in_cset();
       assert(index == -1, "invariant");
     }
 
@@ -5041,12 +5022,15 @@
 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
   return _allocator->is_retained_old_region(hr);
 }
 
 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
-  _young_list->push_region(hr);
+  _eden.add(hr);
+  _g1_policy->set_region_eden(hr);
 }
 
+#ifdef ASSERT
+
 class NoYoungRegionsClosure: public HeapRegionClosure {
 private:
   bool _success;
 public:
@@ -5060,21 +5044,21 @@
     return false;
   }
   bool success() { return _success; }
 };
 
-bool G1CollectedHeap::check_young_list_empty(bool check_heap) {
-  bool ret = _young_list->check_list_empty();
+bool G1CollectedHeap::check_young_list_empty() {
+  bool ret = (young_regions_count() == 0);
 
-  if (check_heap) {
-    NoYoungRegionsClosure closure;
-    heap_region_iterate(&closure);
-    ret = ret && closure.success();
-  }
+  NoYoungRegionsClosure closure;
+  heap_region_iterate(&closure);
+  ret = ret && closure.success();
 
   return ret;
 }
 
+#endif // ASSERT
+
 class TearDownRegionSetsClosure : public HeapRegionClosure {
 private:
   HeapRegionSet *_old_set;
 
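
Note: check_young_list_empty() now pairs a cheap counter check (young_regions_count() == 0) with a full heap walk: the counter alone cannot catch a stale region still tagged young, and the closure alone would not notice a counter that was never reset. Since the walk is O(number of regions), the helper is now compiled only under #ifdef ASSERT. As used earlier in this changeset:

    assert(check_young_list_empty(), "young list should be empty at this point");
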
@@ -5082,16 +5066,17 @@
   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
 
   bool doHeapRegion(HeapRegion* r) {
     if (r->is_old()) {
       _old_set->remove(r);
+    } else if(r->is_young()) {
+      r->uninstall_surv_rate_group();
     } else {
       // We ignore free regions, we'll empty the free list afterwards.
-      // We ignore young regions, we'll empty the young list afterwards.
       // We ignore humongous regions, we're not tearing down the
       // humongous regions set.
-      assert(r->is_free() || r->is_young() || r->is_humongous(),
+      assert(r->is_free() || r->is_humongous(),
             "it cannot be another type");
     }
     return false;
   }
 
@@ -5153,20 +5138,16 @@
       // Add free regions to the free list
       r->set_free();
       r->set_allocation_context(AllocationContext::system());
       _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
-      assert(!r->is_young(), "we should not come across young regions");
 
       if (r->is_humongous()) {
         // We ignore humongous regions. We left the humongous set unchanged.
       } else {
-        // Objects that were compacted would have ended up on regions
-        // that were previously old or free.  Archive regions (which are
-        // old) will not have been touched.
-        assert(r->is_free() || r->is_old(), "invariant");
-        // We now consider them old, so register as such. Leave
+        assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
+        // We now consider all regions old, so register as such. Leave
         // archive regions set that way, however, while still adding
         // them to the old set.
         if (!r->is_archive()) {
           r->set_old();
         }
@@ -5185,11 +5166,12 @@
 
 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
   if (!free_list_only) {
-    _young_list->empty_list();
+    _eden.clear();
+    _survivor.clear();
   }
 
   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
   heap_region_iterate(&cl);
 
@@ -5254,11 +5236,11 @@
 
 bool G1CollectedHeap::has_more_regions(InCSetState dest) {
   if (dest.is_old()) {
     return true;
   } else {
-    return young_list()->survivor_length() < g1_policy()->max_survivor_regions();
+    return survivor_regions_count() < g1_policy()->max_survivor_regions();
   }
 }
 
 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
   assert(FreeList_lock->owned_by_self(), "pre-condition");
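
Note: during evacuation, an old destination can always claim another GC alloc region, while survivor destinations are capped by the policy. A worked illustration (the cap value is made up):

    // Illustrative only: suppose g1_policy()->max_survivor_regions() == 5.
    uint survivors = 4;
    uint cap       = 5;
    bool has_more  = survivors < cap;  // true: one more survivor region may be
                                       // claimed; at 5, objects promote to old
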
@@ -5277,11 +5259,11 @@
     // should never scan survivors. But it doesn't hurt to do it
     // for survivors too.
     new_alloc_region->record_timestamp();
     if (is_survivor) {
       new_alloc_region->set_survivor();
-      young_list()->add_survivor_region(new_alloc_region);
+      _survivor.add(new_alloc_region);
       _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
     } else {
       new_alloc_region->set_old();
       _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
     }