8199326: Remove G1 gc time stamp logic
author tschatzl
Wed, 28 Mar 2018 16:39:32 +0200
changeset 49635 e79bbf1635da
parent 49634 df9dcfff6628
child 49636 6d5bd76650df
8199326: Remove G1 gc time stamp logic
Summary: G1 gc time stamp logic is completely unused after JDK-8180415, so remove it.
Reviewed-by: sangheki, sjohanss
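
For readers unfamiliar with the mechanism being deleted: the heap kept a global gc time stamp that was incremented at each young collection and reset to zero at full collections and at concurrent mark cleanup; each region copied the current stamp when it became an allocation region, and a debug-only pass verified that every region stamp had been reset afterwards. The following is a minimal standalone sketch of that scheme, reconstructed from the removed code in the hunks below; the Heap/Region types and the main() harness are invented for illustration and are not HotSpot source.

// Standalone sketch (NOT HotSpot source): a simplified model of the gc time
// stamp mechanism this change removes. Names mirror the deleted methods; the
// surrounding scaffolding is invented for illustration only.
#include <cassert>
#include <cstdio>
#include <vector>

struct Region {
  unsigned gc_time_stamp = 0;

  // Mirrors G1ContiguousSpace::record_timestamp(): take the heap's current
  // stamp if it is newer than the region's own.
  void record_timestamp(unsigned current_stamp) {
    if (gc_time_stamp < current_stamp) {
      gc_time_stamp = current_stamp;
    }
  }
  void reset_gc_time_stamp() { gc_time_stamp = 0; }
};

struct Heap {
  unsigned gc_time_stamp = 0;       // was G1CollectedHeap::_gc_time_stamp
  std::vector<Region> regions;

  void increment_gc_time_stamp() { ++gc_time_stamp; }   // at each young GC
  void reset_gc_time_stamp()     { gc_time_stamp = 0; } // at full GC / cleanup

  // Mirrors check_gc_time_stamps(): after a reset, every region's stamp is
  // expected to match the (now zero) heap-wide stamp.
  void check_gc_time_stamps() const {
    for (const Region& r : regions) {
      assert(r.gc_time_stamp == gc_time_stamp &&
             "all GC time stamps should have been reset");
    }
  }
};

int main() {
  Heap h;
  h.regions.resize(4);

  h.increment_gc_time_stamp();                      // young collection starts
  h.regions[0].record_timestamp(h.gc_time_stamp);   // region becomes an allocation region

  h.reset_gc_time_stamp();                          // full GC resets the global stamp...
  for (Region& r : h.regions) {
    r.reset_gc_time_stamp();                        // ...and every region's stamp
  }
  h.check_gc_time_stamps();                         // verification passes

  std::printf("gc time stamp model OK\n");
  return 0;
}

Since JDK-8180415 nothing consults these stamps, which is why the patch below can delete the field, its accessors, and the verification pass wholesale.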
src/hotspot/share/gc/g1/g1Allocator.cpp
src/hotspot/share/gc/g1/g1CollectedHeap.cpp
src/hotspot/share/gc/g1/g1CollectedHeap.hpp
src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
src/hotspot/share/gc/g1/heapRegion.cpp
src/hotspot/share/gc/g1/heapRegion.hpp
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp	Wed Mar 28 16:39:32 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp	Wed Mar 28 16:39:32 2018 +0200
@@ -74,7 +74,6 @@
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->is_humongous()) {
-    retained_region->record_timestamp();
     // The retained region was added to the old region set when it was
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Mar 28 16:39:32 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Mar 28 16:39:32 2018 +0200
@@ -1042,7 +1042,6 @@
 }
 
 void G1CollectedHeap::verify_after_full_collection() {
-  check_gc_time_stamps();
   _hrm.verify_optional();
   _verifier->verify_region_sets_optional();
   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
@@ -1414,7 +1413,6 @@
   _humongous_reclaim_candidates(),
   _has_humongous_reclaim_candidates(false),
   _archive_allocator(NULL),
-  _gc_time_stamp(0),
   _summary_bytes_used(0),
   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
   _old_evac_stats("Old", OldPLABSize, PLABWeight),
@@ -1831,41 +1829,6 @@
   return _hrm.total_free_bytes();
 }
 
-void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
-  hr->reset_gc_time_stamp();
-}
-
-#ifndef PRODUCT
-
-class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
-private:
-  unsigned _gc_time_stamp;
-  bool _failures;
-
-public:
-  CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
-    _gc_time_stamp(gc_time_stamp), _failures(false) { }
-
-  virtual bool do_heap_region(HeapRegion* hr) {
-    unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
-    if (_gc_time_stamp != region_gc_time_stamp) {
-      log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
-                            region_gc_time_stamp, _gc_time_stamp);
-      _failures = true;
-    }
-    return false;
-  }
-
-  bool failures() { return _failures; }
-};
-
-void G1CollectedHeap::check_gc_time_stamps() {
-  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
-  heap_region_iterate(&cl);
-  guarantee(!cl.failures(), "all GC time stamps should have been reset");
-}
-#endif // PRODUCT
-
 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
   _hot_card_cache->drain(cl, worker_i);
 }
@@ -2286,7 +2249,7 @@
 void G1CollectedHeap::print_regions_on(outputStream* st) const {
   st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
                "HS=humongous(starts), HC=humongous(continues), "
-               "CS=collection set, F=free, A=archive, TS=gc time stamp, "
+               "CS=collection set, F=free, A=archive, "
                "TAMS=top-at-mark-start (previous, next)");
   PrintRegionClosure blk(st);
   heap_region_iterate(&blk);
@@ -2432,9 +2395,6 @@
   increment_total_collections(full /* full gc */);
   if (full) {
     increment_old_marking_cycles_started();
-    reset_gc_time_stamp();
-  } else {
-    increment_gc_time_stamp();
   }
 
   // Fill TLAB's and such
@@ -5041,10 +5001,6 @@
                                             !is_survivor,
                                             true /* do_expand */);
   if (new_alloc_region != NULL) {
-    // We really only need to do this for old regions given that we
-    // should never scan survivors. But it doesn't hurt to do it
-    // for survivors too.
-    new_alloc_region->record_timestamp();
     if (is_survivor) {
       new_alloc_region->set_survivor();
       _survivor.add(new_alloc_region);
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Wed Mar 28 16:39:32 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Wed Mar 28 16:39:32 2018 +0200
@@ -262,8 +262,6 @@
   // If not, we can skip a few steps.
   bool _has_humongous_reclaim_candidates;
 
-  volatile uint _gc_time_stamp;
-
   G1HRPrinter _hr_printer;
 
   // It decides whether an explicit GC should start a concurrent cycle
@@ -972,21 +970,6 @@
   // Try to minimize the remembered set.
   void scrub_rem_set();
 
-  uint get_gc_time_stamp() {
-    return _gc_time_stamp;
-  }
-
-  inline void reset_gc_time_stamp();
-
-  void check_gc_time_stamps() PRODUCT_RETURN;
-
-  inline void increment_gc_time_stamp();
-
-  // Reset the given region's GC timestamp. If it's starts humongous,
-  // also reset the GC timestamp of its corresponding
-  // continues humongous regions too.
-  void reset_gc_time_stamps(HeapRegion* hr);
-
   // Apply the given closure on all cards in the Hot Card Cache, emptying it.
   void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Wed Mar 28 16:39:32 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Wed Mar 28 16:39:32 2018 +0200
@@ -84,16 +84,6 @@
   return _hrm.addr_to_region((HeapWord*) addr);
 }
 
-inline void G1CollectedHeap::reset_gc_time_stamp() {
-  assert_at_safepoint_on_vm_thread();
-  _gc_time_stamp = 0;
-}
-
-inline void G1CollectedHeap::increment_gc_time_stamp() {
-  assert_at_safepoint_on_vm_thread();
-  ++_gc_time_stamp;
-}
-
 inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
   _old_set.add(hr);
 }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Wed Mar 28 16:39:32 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Wed Mar 28 16:39:32 2018 +0200
@@ -1163,7 +1163,6 @@
     const uint humongous_regions_removed() { return _humongous_regions_removed; }
 
     bool do_heap_region(HeapRegion *hr) {
-      _g1->reset_gc_time_stamps(hr);
       hr->note_end_of_marking();
 
       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
@@ -1285,8 +1284,6 @@
     _g1h->heap_region_iterate(&cl);
   }
 
-  g1h->reset_gc_time_stamp();
-
   // Install newly created mark bitmap as "prev".
   swap_mark_bitmaps();
   {
@@ -1294,8 +1291,6 @@
     reclaim_empty_regions();
   }
 
-  g1h->check_gc_time_stamps();
-
   {
     GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
     g1h->g1_policy()->record_concurrent_mark_cleanup_end();
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Wed Mar 28 16:39:32 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Wed Mar 28 16:39:32 2018 +0200
@@ -109,7 +109,6 @@
 }
 
 void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
-  hr->reset_gc_time_stamp();
   hr->rem_set()->clear();
   hr->clear_cardtable();
 
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Wed Mar 28 16:39:32 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Wed Mar 28 16:39:32 2018 +0200
@@ -131,7 +131,6 @@
   zero_marked_bytes();
 
   init_top_at_mark_start();
-  _gc_time_stamp = G1CollectedHeap::heap()->get_gc_time_stamp();
   if (clear_space) clear(SpaceDecorator::Mangle);
 }
 
@@ -254,7 +253,6 @@
 
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  record_timestamp();
 }
 
 void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
@@ -448,7 +446,6 @@
   } else {
     st->print("|  ");
   }
-  st->print("|TS%3u", _gc_time_stamp);
   st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ",
                p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()), rem_set()->get_state_str());
 }
@@ -854,15 +851,6 @@
   return _bot_part.threshold();
 }
 
-void G1ContiguousSpace::record_timestamp() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  uint curr_gc_time_stamp = g1h->get_gc_time_stamp();
-
-  if (_gc_time_stamp < curr_gc_time_stamp) {
-    _gc_time_stamp = curr_gc_time_stamp;
-  }
-}
-
 void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
 }
@@ -879,8 +867,7 @@
 
 G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
   _bot_part(bot, this),
-  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
-  _gc_time_stamp(0)
+  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
 {
 }
 
--- a/src/hotspot/share/gc/g1/heapRegion.hpp	Wed Mar 28 16:39:32 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp	Wed Mar 28 16:39:32 2018 +0200
@@ -100,7 +100,6 @@
  protected:
   G1BlockOffsetTablePart _bot_part;
   Mutex _par_alloc_lock;
-  volatile uint _gc_time_stamp;
   // When we need to retire an allocation region, while other threads
   // are also concurrently trying to allocate into it, we typically
   // allocate a dummy object at the end of the region to ensure that
@@ -147,10 +146,6 @@
   void mangle_unused_area() PRODUCT_RETURN;
   void mangle_unused_area_complete() PRODUCT_RETURN;
 
-  void record_timestamp();
-  void reset_gc_time_stamp() { _gc_time_stamp = 0; }
-  uint get_gc_time_stamp() { return _gc_time_stamp; }
-
   // See the comment above in the declaration of _pre_dummy_top for an
   // explanation of what it is.
   void set_pre_dummy_top(HeapWord* pre_dummy_top) {