src/hotspot/share/gc/g1/g1CollectedHeap.cpp
changeset 49643 a3453bbd5418
parent 49635 e79bbf1635da
child 49644 50a01910e00a
49642:7bad9c9efdf3 49643:a3453bbd5418
   995   // incremental collection set and then start rebuilding it afresh
   995   // incremental collection set and then start rebuilding it afresh
   996   // after this full GC.
   996   // after this full GC.
   997   abandon_collection_set(collection_set());
   997   abandon_collection_set(collection_set());
   998 
   998 
   999   tear_down_region_sets(false /* free_list_only */);
   999   tear_down_region_sets(false /* free_list_only */);
  1000   collector_state()->set_gcs_are_young(true);
       
  1001 }
  1000 }
  1002 
  1001 
  1003 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
  1002 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
  1004   assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
  1003   assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
  1005   assert(used() == recalculate_used(), "Should be equal");
  1004   assert(used() == recalculate_used(), "Should be equal");
  2754   _verifier->verify_dirty_young_regions();
  2753   _verifier->verify_dirty_young_regions();
  2755 
  2754 
  2756   // We should not be doing initial mark unless the conc mark thread is running
  2755   // We should not be doing initial mark unless the conc mark thread is running
  2757   if (!_cmThread->should_terminate()) {
  2756   if (!_cmThread->should_terminate()) {
  2758     // This call will decide whether this pause is an initial-mark
  2757     // This call will decide whether this pause is an initial-mark
  2759     // pause. If it is, during_initial_mark_pause() will return true
  2758     // pause. If it is, in_initial_mark_gc() will return true
  2760     // for the duration of this pause.
  2759     // for the duration of this pause.
  2761     g1_policy()->decide_on_conc_mark_initiation();
  2760     g1_policy()->decide_on_conc_mark_initiation();
  2762   }
  2761   }
  2763 
  2762 
  2764   // We do not allow initial-mark to be piggy-backed on a mixed GC.
  2763   // We do not allow initial-mark to be piggy-backed on a mixed GC.
  2765   assert(!collector_state()->during_initial_mark_pause() ||
  2764   assert(!collector_state()->in_initial_mark_gc() ||
  2766           collector_state()->gcs_are_young(), "sanity");
  2765           collector_state()->in_young_only_phase(), "sanity");
  2767 
  2766 
  2768   // We also do not allow mixed GCs during marking.
  2767   // We also do not allow mixed GCs during marking.
  2769   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
  2768   assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
  2770 
  2769 
  2771   // Record whether this pause is an initial mark. When the current
  2770   // Record whether this pause is an initial mark. When the current
  2772   // thread has completed its logging output and it's safe to signal
  2771   // thread has completed its logging output and it's safe to signal
  2773   // the CM thread, the flag's value in the policy has been reset.
  2772   // the CM thread, the flag's value in the policy has been reset.
  2774   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
  2773   bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
  2775 
  2774 
  2776   // Inner scope for scope based logging, timers, and stats collection
  2775   // Inner scope for scope based logging, timers, and stats collection
  2777   {
  2776   {
  2778     EvacuationInfo evacuation_info;
  2777     EvacuationInfo evacuation_info;
  2779 
  2778 
  2780     if (collector_state()->during_initial_mark_pause()) {
  2779     if (collector_state()->in_initial_mark_gc()) {
  2781       // We are about to start a marking cycle, so we increment the
  2780       // We are about to start a marking cycle, so we increment the
  2782       // full collection counter.
  2781       // full collection counter.
  2783       increment_old_marking_cycles_started();
  2782       increment_old_marking_cycles_started();
  2784       _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
  2783       _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
  2785     }
  2784     }
  2788 
  2787 
  2789     GCTraceCPUTime tcpu;
  2788     GCTraceCPUTime tcpu;
  2790 
  2789 
  2791     G1HeapVerifier::G1VerifyType verify_type;
  2790     G1HeapVerifier::G1VerifyType verify_type;
  2792     FormatBuffer<> gc_string("Pause ");
  2791     FormatBuffer<> gc_string("Pause ");
  2793     if (collector_state()->during_initial_mark_pause()) {
  2792     if (collector_state()->in_initial_mark_gc()) {
  2794       gc_string.append("Initial Mark");
  2793       gc_string.append("Initial Mark");
  2795       verify_type = G1HeapVerifier::G1VerifyInitialMark;
  2794       verify_type = G1HeapVerifier::G1VerifyInitialMark;
  2796     } else if (collector_state()->gcs_are_young()) {
  2795     } else if (collector_state()->in_young_only_phase()) {
  2797       gc_string.append("Young");
  2796       gc_string.append("Young");
  2798       verify_type = G1HeapVerifier::G1VerifyYoungOnly;
  2797       verify_type = G1HeapVerifier::G1VerifyYoungOnly;
  2799     } else {
  2798     } else {
  2800       gc_string.append("Mixed");
  2799       gc_string.append("Mixed");
  2801       verify_type = G1HeapVerifier::G1VerifyMixed;
  2800       verify_type = G1HeapVerifier::G1VerifyMixed;
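
The three predicates this changeset renames (in_initial_mark_gc(), in_young_only_phase(), mark_or_rebuild_in_progress()) are used throughout the hunk above as plain boolean queries on the collector state. A minimal standalone sketch, assuming they are simple getters over boolean fields (the real G1CollectorState layout is not part of this diff, so the fields and setter below are assumptions for illustration only):

    #include <cassert>

    // Toy stand-in for the collector state; names mirror the diff, layout is assumed.
    class ToyCollectorState {
      bool _in_young_only_phase;          // young-only vs. mixed phase of the cycle
      bool _in_initial_mark_gc;           // this pause also starts concurrent marking
      bool _mark_or_rebuild_in_progress;  // marking / remset rebuild running concurrently

    public:
      ToyCollectorState()
        : _in_young_only_phase(true),
          _in_initial_mark_gc(false),
          _mark_or_rebuild_in_progress(false) {}

      bool in_young_only_phase() const         { return _in_young_only_phase; }
      bool in_initial_mark_gc() const          { return _in_initial_mark_gc; }
      bool mark_or_rebuild_in_progress() const { return _mark_or_rebuild_in_progress; }

      void set_in_initial_mark_gc(bool v)      { _in_initial_mark_gc = v; }
    };

    int main() {
      ToyCollectorState cs;
      cs.set_in_initial_mark_gc(true);
      // The two invariants asserted in the hunk above: an initial-mark pause only
      // happens in the young-only phase, and no mixed GC runs while marking or
      // remembered-set rebuilding is in progress.
      assert(!cs.in_initial_mark_gc() || cs.in_young_only_phase());
      assert(!cs.mark_or_rebuild_in_progress() || cs.in_young_only_phase());
      return 0;
    }
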
  2869         // the possible verification above.
  2868         // the possible verification above.
  2870         double sample_start_time_sec = os::elapsedTime();
  2869         double sample_start_time_sec = os::elapsedTime();
  2871 
  2870 
  2872         g1_policy()->record_collection_pause_start(sample_start_time_sec);
  2871         g1_policy()->record_collection_pause_start(sample_start_time_sec);
  2873 
  2872 
  2874         if (collector_state()->during_initial_mark_pause()) {
  2873         if (collector_state()->in_initial_mark_gc()) {
  2875           concurrent_mark()->checkpoint_roots_initial_pre();
  2874           concurrent_mark()->checkpoint_roots_initial_pre();
  2876         }
  2875         }
  2877 
  2876 
  2878         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
  2877         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
  2879 
  2878 
  2937           // The "used" of the collection set regions has already been subtracted
  2936           // The "used" of the collection set regions has already been subtracted
  2938           // when they were freed.  Add in the bytes evacuated.
  2937           // when they were freed.  Add in the bytes evacuated.
  2939           increase_used(g1_policy()->bytes_copied_during_gc());
  2938           increase_used(g1_policy()->bytes_copied_during_gc());
  2940         }
  2939         }
  2941 
  2940 
  2942         if (collector_state()->during_initial_mark_pause()) {
  2941         if (collector_state()->in_initial_mark_gc()) {
  2943           // We have to do this before we notify the CM threads that
  2942           // We have to do this before we notify the CM threads that
  2944           // they can start working to make sure that all the
  2943           // they can start working to make sure that all the
  2945           // appropriate initialization is done on the CM object.
  2944           // appropriate initialization is done on the CM object.
  2946           concurrent_mark()->checkpoint_roots_initial_post();
  2945           concurrent_mark()->checkpoint_roots_initial_post();
  2947           collector_state()->set_mark_in_progress(true);
       
  2948           // Note that we don't actually trigger the CM thread at
  2946           // Note that we don't actually trigger the CM thread at
  2949           // this point. We do that later when we're sure that
  2947           // this point. We do that later when we're sure that
  2950           // the current thread has completed its logging output.
  2948           // the current thread has completed its logging output.
  2951         }
  2949         }
  2952 
  2950 
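
The comments in this hunk describe an ordering constraint: the pause latches whether it is an initial mark, the CM object is fully initialized via checkpoint_roots_initial_post(), and the concurrent mark thread is only signalled once the pause has finished its logging output. A minimal sketch of that latch-then-notify pattern, with a hypothetical wake_mark_thread() standing in for the actual signalling:

    #include <cstdio>

    // Stand-in for signalling the concurrent mark thread; purely illustrative.
    static void wake_mark_thread() {
      std::printf("concurrent mark thread signalled\n");
    }

    static void do_pause(bool is_initial_mark) {
      // Latch the decision up front; the flag in the policy may be reset before
      // the pause has finished its logging.
      bool should_start_conc_mark = is_initial_mark;

      // ... evacuation work and logging output would happen here ...
      std::printf("pause logging complete\n");

      if (should_start_conc_mark) {
        wake_mark_thread();  // only safe once all logging for the pause is done
      }
    }

    int main() {
      do_pause(true);
      return 0;
    }
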
  4105   rp->verify_no_references_recorded();
  4103   rp->verify_no_references_recorded();
  4106   assert(!rp->discovery_enabled(), "should have been disabled");
  4104   assert(!rp->discovery_enabled(), "should have been disabled");
  4107 
  4105 
  4108   // If during an initial mark pause we install a pending list head which is not otherwise reachable
  4106   // If during an initial mark pause we install a pending list head which is not otherwise reachable
  4109   // ensure that it is marked in the bitmap for concurrent marking to discover.
  4107   // ensure that it is marked in the bitmap for concurrent marking to discover.
  4110   if (collector_state()->during_initial_mark_pause()) {
  4108   if (collector_state()->in_initial_mark_gc()) {
  4111     oop pll_head = Universe::reference_pending_list();
  4109     oop pll_head = Universe::reference_pending_list();
  4112     if (pll_head != NULL) {
  4110     if (pll_head != NULL) {
  4113       // Any valid worker id is fine here as we are in the VM thread and single-threaded.
  4111       // Any valid worker id is fine here as we are in the VM thread and single-threaded.
  4114       _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
  4112       _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
  4115     }
  4113     }
  4142   _preserved_marks_set.assert_empty();
  4140   _preserved_marks_set.assert_empty();
  4143 
  4141 
  4144   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
  4142   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
  4145 
  4143 
  4146   // InitialMark needs claim bits to keep track of the marked-through CLDs.
  4144   // InitialMark needs claim bits to keep track of the marked-through CLDs.
  4147   if (collector_state()->during_initial_mark_pause()) {
  4145   if (collector_state()->in_initial_mark_gc()) {
  4148     double start_clear_claimed_marks = os::elapsedTime();
  4146     double start_clear_claimed_marks = os::elapsedTime();
  4149 
  4147 
  4150     ClassLoaderDataGraph::clear_claimed_marks();
  4148     ClassLoaderDataGraph::clear_claimed_marks();
  4151 
  4149 
  4152     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
  4150     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
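
The hunk above guards the CLD claim-bit clearing with in_initial_mark_gc() and reports its cost in milliseconds. A small sketch of the same guard-and-time pattern, using std::chrono in place of os::elapsedTime() (an assumption for the sketch, not the HotSpot API):

    #include <chrono>
    #include <cstdio>

    int main() {
      bool in_initial_mark_gc = true;  // pretend this pause also starts marking
      if (in_initial_mark_gc) {
        auto start = std::chrono::steady_clock::now();
        // ... clear the per-CLD claim bits here ...
        auto end = std::chrono::steady_clock::now();
        double ms = std::chrono::duration<double, std::milli>(end - start).count();
        std::printf("Clear Claimed Marks: %.3fms\n", ms);
      }
      return 0;
    }
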
  5009       new_alloc_region->set_old();
  5007       new_alloc_region->set_old();
  5010       _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
  5008       _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
  5011     }
  5009     }
  5012     _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
  5010     _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
  5013     _hr_printer.alloc(new_alloc_region);
  5011     _hr_printer.alloc(new_alloc_region);
  5014     bool during_im = collector_state()->during_initial_mark_pause();
  5012     bool during_im = collector_state()->in_initial_mark_gc();
  5015     new_alloc_region->note_start_of_copying(during_im);
  5013     new_alloc_region->note_start_of_copying(during_im);
  5016     return new_alloc_region;
  5014     return new_alloc_region;
  5017   }
  5015   }
  5018   return NULL;
  5016   return NULL;
  5019 }
  5017 }
  5020 
  5018 
  5021 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
  5019 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
  5022                                              size_t allocated_bytes,
  5020                                              size_t allocated_bytes,
  5023                                              InCSetState dest) {
  5021                                              InCSetState dest) {
  5024   bool during_im = collector_state()->during_initial_mark_pause();
  5022   bool during_im = collector_state()->in_initial_mark_gc();
  5025   alloc_region->note_end_of_copying(during_im);
  5023   alloc_region->note_end_of_copying(during_im);
  5026   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
  5024   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
  5027   if (dest.is_old()) {
  5025   if (dest.is_old()) {
  5028     _old_set.add(alloc_region);
  5026     _old_set.add(alloc_region);
  5029   }
  5027   }
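
Taken together, the last two hunks show the GC allocation-region lifecycle: note_start_of_copying() when the region is handed out, evacuation into it, then note_end_of_copying() and crediting the copied bytes on retirement, with the current in_initial_mark_gc() value passed along. A toy sketch of that flow (ToyRegion and the byte accounting are illustrative, not the real HeapRegion API):

    #include <cstdio>

    // Toy region type; the real HeapRegion also records marking boundaries in
    // note_start_of_copying()/note_end_of_copying(), which is omitted here.
    struct ToyRegion {
      void note_start_of_copying(bool during_initial_mark) { (void)during_initial_mark; }
      void note_end_of_copying(bool during_initial_mark)   { (void)during_initial_mark; }
    };

    int main() {
      bool during_im = true;          // value of in_initial_mark_gc() for this pause
      size_t bytes_copied = 0;

      ToyRegion r;
      r.note_start_of_copying(during_im);  // region handed out as a GC alloc region
      bytes_copied += 4096;                // objects evacuated into the region
      r.note_end_of_copying(during_im);    // region retired, bytes credited to policy

      std::printf("bytes copied during GC: %zu\n", bytes_copied);
      return 0;
    }
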