8222992: Shenandoah: Pre-evacuate all roots
author zgu
date Thu, 02 May 2019 09:49:52 -0400
changeset 54687 df2b3565f343
parent 54686 09f09b4e7808
child 54688 96ad739cfc39
8222992: Shenandoah: Pre-evacuate all roots
Reviewed-by: shade
src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp
src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp
src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp
src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp	Thu May 02 06:33:28 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp	Thu May 02 09:49:52 2019 -0400
@@ -79,4 +79,16 @@
   inline void do_oop_work(T* p);
 };
 
+#ifdef ASSERT
+class ShenandoahAssertNotForwardedClosure : public OopClosure {
+private:
+  template <class T>
+  inline void do_oop_work(T* p);
+
+public:
+  inline void do_oop(narrowOop* p);
+  inline void do_oop(oop* p);
+};
+#endif // ASSERT
+
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	Thu May 02 06:33:28 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	Thu May 02 09:49:52 2019 -0400
@@ -107,4 +107,18 @@
   do_oop_work(p);
 }
 
+#ifdef ASSERT
+template <class T>
+void ShenandoahAssertNotForwardedClosure::do_oop_work(T* p) {
+  T o = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(o)) {
+    oop obj = CompressedOops::decode_not_null(o);
+    shenandoah_assert_not_forwarded(p, obj);
+  }
+}
+
+void ShenandoahAssertNotForwardedClosure::do_oop(narrowOop* p) { do_oop_work(p); }
+void ShenandoahAssertNotForwardedClosure::do_oop(oop* p)       { do_oop_work(p); }
+#endif
+
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP
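
For context, a minimal sketch of how this debug-only closure can be driven over the whole root set; the driver below is hypothetical and mirrors ShenandoahVerifier::verify_roots_no_forwarded introduced later in this change:

  #ifdef ASSERT
    // Hypothetical call site: once roots have been pre-evacuated, sweep every
    // root and assert that none of them still refers to a forwarded object.
    ShenandoahRootProcessor rp(heap, 1, ShenandoahPhaseTimings::_num_phases);
    ShenandoahAssertNotForwardedClosure cl;
    rp.process_all_roots_slow(&cl);
  #endif
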
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Thu May 02 06:33:28 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Thu May 02 09:49:52 2019 -0400
@@ -661,15 +661,13 @@
 // anything to them.
 void ShenandoahConcurrentMark::weak_roots_work() {
   WorkGang* workers = _heap->workers();
-  ShenandoahIsAliveSelector is_alive;
-
-  if (_heap->has_forwarded_objects()) {
-    ShenandoahWeakUpdateClosure cl;
-    WeakProcessor::weak_oops_do(workers, is_alive.is_alive_closure(), &cl, 1);
-  } else {
-    ShenandoahWeakAssertNotForwardedClosure cl;
-    WeakProcessor::weak_oops_do(workers, is_alive.is_alive_closure(), &cl, 1);
-  }
+  OopClosure* keep_alive = &do_nothing_cl;
+#ifdef ASSERT
+  ShenandoahWeakAssertNotForwardedClosure verify_cl;
+  keep_alive = &verify_cl;
+#endif
+  ShenandoahIsAliveClosure is_alive;
+  WeakProcessor::weak_oops_do(workers, &is_alive, keep_alive, 1);
 }
 
 void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
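
In product builds the keep-alive side of this call is deliberately inert: do_nothing_cl is HotSpot's shared DoNothingClosure instance from memory/iterator.hpp. For reference, a condensed restatement of that closure (not part of this change):

  class DoNothingClosure : public OopClosure {
   public:
    // Roots are pre-evacuated before final mark completes, so weak roots need
    // no updating here; the is_alive closure alone decides what gets cleared.
    virtual void do_oop(oop* p)       {}
    virtual void do_oop(narrowOop* p) {}
  };
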
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Thu May 02 06:33:28 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Thu May 02 09:49:52 2019 -0400
@@ -1487,8 +1487,13 @@
   if (!cancelled_gc()) {
     concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
 
-    if (has_forwarded_objects()) {
-      concurrent_mark()->update_roots(ShenandoahPhaseTimings::update_roots);
+    // Degen may be caused by failed evacuation of roots
+    if (is_degenerated_gc_in_progress() && has_forwarded_objects()) {
+      concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
+    }
+
+    if (ShenandoahVerify) {
+      verifier()->verify_roots_no_forwarded();
     }
 
     stop_concurrent_marking();
@@ -1540,6 +1545,7 @@
       }
 
       if (ShenandoahVerify) {
+        verifier()->verify_roots_no_forwarded();
         verifier()->verify_during_evacuation();
       }
     } else {
@@ -2176,8 +2182,8 @@
   assert(!cancelled_gc(), "Should have been done right before");
 
   concurrent_mark()->update_roots(is_degenerated_gc_in_progress() ?
-                                 ShenandoahPhaseTimings::degen_gc_update_roots:
-                                 ShenandoahPhaseTimings::final_update_refs_roots);
+                                  ShenandoahPhaseTimings::degen_gc_update_roots:
+                                  ShenandoahPhaseTimings::final_update_refs_roots);
 
   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
 
@@ -2186,6 +2192,7 @@
   set_update_refs_in_progress(false);
 
   if (ShenandoahVerify) {
+    verifier()->verify_roots_no_forwarded();
     verifier()->verify_after_updaterefs();
   }
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Thu May 02 06:33:28 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Thu May 02 09:49:52 2019 -0400
@@ -42,26 +42,6 @@
 #include "runtime/thread.hpp"
 #include "services/management.hpp"
 
-struct PhaseMap {
-  WeakProcessorPhases::Phase            _weak_processor_phase;
-  ShenandoahPhaseTimings::GCParPhases   _shenandoah_phase;
-};
-
-static const struct PhaseMap phase_mapping[] = {
-#if INCLUDE_JVMTI
-  {WeakProcessorPhases::jvmti,                 ShenandoahPhaseTimings::JVMTIWeakRoots},
-#endif
-#if INCLUDE_JFR
-  {WeakProcessorPhases::jfr,                   ShenandoahPhaseTimings::JFRWeakRoots},
-#endif
-  {WeakProcessorPhases::jni,                   ShenandoahPhaseTimings::JNIWeakRoots},
-  {WeakProcessorPhases::stringtable,           ShenandoahPhaseTimings::StringTableRoots},
-  {WeakProcessorPhases::resolved_method_table, ShenandoahPhaseTimings::ResolvedMethodTableRoots},
-  {WeakProcessorPhases::vm,                    ShenandoahPhaseTimings::VMWeakRoots}
-};
-
-STATIC_ASSERT(sizeof(phase_mapping) / sizeof(PhaseMap) == WeakProcessorPhases::phase_count);
-
 ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers,
                                                  ShenandoahPhaseTimings::Phase phase) :
   _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
@@ -84,32 +64,16 @@
     StringDedup::gc_epilogue();
   }
 
-  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
-
   if (_processed_weak_roots) {
     assert(_weak_processor_timings.max_threads() == n_workers(), "Must match");
-    for (uint index = 0; index < WeakProcessorPhases::phase_count; index ++) {
-      weak_processor_timing_to_shenandoah_timing(phase_mapping[index]._weak_processor_phase,
-                                                 phase_mapping[index]._shenandoah_phase,
-                                                 worker_times);
-    }
+    ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+    ShenandoahTimingConverter::weak_processing_timing_to_shenandoah_timing(&_weak_processor_timings,
+                                                                           worker_times);
   }
 
   ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
 }
 
-void ShenandoahRootProcessor::weak_processor_timing_to_shenandoah_timing(const WeakProcessorPhases::Phase wpp,
-                                                                         const ShenandoahPhaseTimings::GCParPhases spp,
-                                                                         ShenandoahWorkerTimings* worker_times) const {
-  if (WeakProcessorPhases::is_serial(wpp)) {
-    worker_times->record_time_secs(spp, 0, _weak_processor_timings.phase_time_sec(wpp));
-  } else {
-    for (uint index = 0; index < _weak_processor_timings.max_threads(); index ++) {
-      worker_times->record_time_secs(spp, index, _weak_processor_timings.worker_time_sec(index, wpp));
-    }
-  }
-}
-
 void ShenandoahRootProcessor::process_all_roots_slow(OopClosure* oops) {
   CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
   CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
@@ -251,7 +215,9 @@
   _evacuation_tasks(new SubTasksDone(SHENANDOAH_EVAC_NumElements)),
   _srs(n_workers),
   _phase(phase),
-  _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator()) {
+  _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator()),
+  _weak_processor_timings(n_workers),
+  _weak_processor_task(&_weak_processor_timings, n_workers) {
   heap->phase_timings()->record_workers_start(_phase);
   if (ShenandoahStringDedup::is_enabled()) {
     StringDedup::gc_prologue(false);
@@ -263,6 +229,12 @@
   if (ShenandoahStringDedup::is_enabled()) {
     StringDedup::gc_epilogue();
   }
+
+  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
+  assert(_weak_processor_timings.max_threads() == n_workers(), "Must match");
+  ShenandoahTimingConverter::weak_processing_timing_to_shenandoah_timing(&_weak_processor_timings,
+                                                                         worker_times);
+
   ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
 }
 
@@ -270,13 +242,19 @@
                                                      CodeBlobClosure* blobs,
                                                      uint worker_id) {
 
+  AlwaysTrueClosure always_true;
   ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
   {
     bool is_par = n_workers() > 1;
     ResourceMark rm;
     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
+    Threads::possibly_parallel_oops_do(is_par, oops, NULL);
+  }
 
-    Threads::possibly_parallel_oops_do(is_par, oops, NULL);
+  {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CLDGRoots, worker_id);
+    CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
+    _cld_iterator.root_cld_do(&clds, &clds);
   }
 
   if (blobs != NULL) {
@@ -285,8 +263,7 @@
   }
 
   if (ShenandoahStringDedup::is_enabled()) {
-    ShenandoahForwardedIsAliveClosure is_alive;
-    ShenandoahStringDedup::parallel_oops_do(&is_alive, oops, worker_id);
+    ShenandoahStringDedup::parallel_oops_do(&always_true, oops, worker_id);
   }
 
   if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_Universe_oops_do)) {
@@ -306,6 +283,11 @@
     JvmtiExport::weak_oops_do(&is_alive, oops);
   }
 
+  if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_JNIHandles_oops_do)) {
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JNIRoots, worker_id);
+    JNIHandles::oops_do(oops);
+  }
+
   if (_evacuation_tasks->try_claim_task(SHENANDOAH_EVAC_SystemDictionary_oops_do)) {
     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::SystemDictionaryRoots, worker_id);
     SystemDictionary::oops_do(oops);
@@ -316,6 +298,7 @@
     ObjectSynchronizer::oops_do(oops);
   }
 
+  _weak_processor_task.work<AlwaysTrueClosure, OopClosure>(worker_id, &always_true, oops);
 }
 
 uint ShenandoahRootEvacuator::n_workers() const {
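
The last added line above is the crux of this change: during root evacuation nothing has yet been found dead, so every weak root is treated as strongly reachable and evacuated up front. A condensed sketch of the pattern, with a hypothetical wrapper name:

  // Hypothetical helper restating the call made at the end of
  // ShenandoahRootEvacuator::process_evacuate_roots() above.
  static void evacuate_weak_roots(WeakProcessor::Task& task, uint worker_id, OopClosure* evac) {
    AlwaysTrueClosure always_true;  // treat every weak root as live
    task.work<AlwaysTrueClosure, OopClosure>(worker_id, &always_true, evac);
  }
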
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Thu May 02 06:33:28 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Thu May 02 09:49:52 2019 -0400
@@ -119,6 +119,9 @@
   StrongRootsScope _srs;
   ShenandoahPhaseTimings::Phase _phase;
   ShenandoahCsetCodeRootsIterator _coderoots_cset_iterator;
+  ParallelCLDRootIterator _cld_iterator;
+  WeakProcessorPhaseTimes _weak_processor_timings;
+  WeakProcessor::Task     _weak_processor_task;
 
   enum Shenandoah_evacuate_roots_tasks {
     SHENANDOAH_EVAC_Universe_oops_do,
@@ -126,6 +129,7 @@
     SHENANDOAH_EVAC_Management_oops_do,
     SHENANDOAH_EVAC_SystemDictionary_oops_do,
     SHENANDOAH_EVAC_jvmti_oops_do,
+    SHENANDOAH_EVAC_JNIHandles_oops_do,
     // Leave this one last.
     SHENANDOAH_EVAC_NumElements
   };
--- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp	Thu May 02 06:33:28 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp	Thu May 02 09:49:52 2019 -0400
@@ -183,3 +183,47 @@
   ShenandoahThreadLocalData::set_worker_id(thr, ShenandoahThreadLocalData::INVALID_WORKER_ID);
 #endif
 }
+
+struct PhaseMap {
+   WeakProcessorPhases::Phase            _weak_processor_phase;
+   ShenandoahPhaseTimings::GCParPhases   _shenandoah_phase;
+};
+
+static const struct PhaseMap phase_mapping[] = {
+#if INCLUDE_JVMTI
+  {WeakProcessorPhases::jvmti,                 ShenandoahPhaseTimings::JVMTIWeakRoots},
+#endif
+#if INCLUDE_JFR
+  {WeakProcessorPhases::jfr,                   ShenandoahPhaseTimings::JFRWeakRoots},
+#endif
+  {WeakProcessorPhases::jni,                   ShenandoahPhaseTimings::JNIWeakRoots},
+  {WeakProcessorPhases::stringtable,           ShenandoahPhaseTimings::StringTableRoots},
+  {WeakProcessorPhases::resolved_method_table, ShenandoahPhaseTimings::ResolvedMethodTableRoots},
+  {WeakProcessorPhases::vm,                    ShenandoahPhaseTimings::VMWeakRoots}
+};
+
+STATIC_ASSERT(sizeof(phase_mapping) / sizeof(PhaseMap) == WeakProcessorPhases::phase_count);
+
+void ShenandoahTimingConverter::weak_processing_timing_to_shenandoah_timing(WeakProcessorPhaseTimes* weak_processing_timings,
+                                                                            ShenandoahWorkerTimings* sh_worker_times) {
+  // Thread counts are validated at the call sites (max_threads() == n_workers()).
+  for (uint index = 0; index < WeakProcessorPhases::phase_count; index ++) {
+    weak_processing_phase_to_shenandoah_phase(phase_mapping[index]._weak_processor_phase,
+                                              weak_processing_timings,
+                                              phase_mapping[index]._shenandoah_phase,
+                                              sh_worker_times);
+  }
+}
+
+void ShenandoahTimingConverter::weak_processing_phase_to_shenandoah_phase(WeakProcessorPhases::Phase wpp,
+                                                                          WeakProcessorPhaseTimes* weak_processing_timings,
+                                                                          ShenandoahPhaseTimings::GCParPhases spp,
+                                                                          ShenandoahWorkerTimings* sh_worker_times) {
+  if (WeakProcessorPhases::is_serial(wpp)) {
+    sh_worker_times->record_time_secs(spp, 0, weak_processing_timings->phase_time_sec(wpp));
+  } else {
+    for (uint index = 0; index < weak_processing_timings->max_threads(); index ++) {
+      sh_worker_times->record_time_secs(spp, index, weak_processing_timings->worker_time_sec(index, wpp));
+    }
+  }
+}
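
With the converter extracted, both call sites in shenandoahRootProcessor.cpp reduce to the same shape; schematically (a sketch of the existing call-site pattern, not new code):

  WeakProcessorPhaseTimes weak_times(n_workers);
  // ... WeakProcessor::Task work recorded into weak_times ...
  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  ShenandoahTimingConverter::weak_processing_timing_to_shenandoah_timing(&weak_times, worker_times);
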
--- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp	Thu May 02 06:33:28 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp	Thu May 02 09:49:52 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
@@ -24,14 +24,14 @@
 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHUTILS_HPP
 #define SHARE_GC_SHENANDOAH_SHENANDOAHUTILS_HPP
 
-#include "jfr/jfrEvents.hpp"
-
 #include "gc/shared/gcCause.hpp"
+#include "gc/shared/gcVMOperations.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
-#include "gc/shared/gcVMOperations.hpp"
+#include "gc/shared/weakProcessorPhaseTimes.hpp"
 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "jfr/jfrEvents.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/vmThread.hpp"
@@ -175,4 +175,15 @@
   }
 };
 
+class ShenandoahTimingConverter : public AllStatic {
+public:
+  static void weak_processing_timing_to_shenandoah_timing(WeakProcessorPhaseTimes* weak_processing_timings,
+                                                          ShenandoahWorkerTimings* sh_worker_times);
+private:
+  static void weak_processing_phase_to_shenandoah_phase(WeakProcessorPhases::Phase wpp,
+                                                        WeakProcessorPhaseTimes* weak_processing_timings,
+                                                        ShenandoahPhaseTimings::GCParPhases spp,
+                                                        ShenandoahWorkerTimings* sh_worker_times);
+};
+
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHUTILS_HPP
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Thu May 02 06:33:28 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Thu May 02 09:49:52 2019 -0400
@@ -919,3 +919,31 @@
           _verify_gcstate_stable       // full gc cleaned up everything
   );
 }
+
+class ShenandoahVerifyNoForwarded : public OopClosure {
+private:
+  template <class T>
+  void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      oop fwd = (oop) ShenandoahBrooksPointer::get_raw_unchecked(obj);
+      if (!oopDesc::equals_raw(obj, fwd)) {
+        ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL,
+                                         "Verify Roots", "Should not be forwarded", __FILE__, __LINE__);
+      }
+    }
+  }
+
+public:
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+};
+
+void ShenandoahVerifier::verify_roots_no_forwarded() {
+  guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens");
+  ShenandoahRootProcessor rp(_heap, 1, ShenandoahPhaseTimings::_num_phases); // no need for stats
+  ShenandoahVerifyNoForwarded cl;
+  rp.process_all_roots_slow(&cl);
+}
+
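
The verifier's per-oop check reduces to one Brooks-pointer comparison; restated as a standalone predicate (helper name hypothetical):

  // An object is unforwarded iff its Brooks forwarding pointer refers to itself.
  static bool is_not_forwarded(oop obj) {
    oop fwd = (oop) ShenandoahBrooksPointer::get_raw_unchecked(obj);
    return oopDesc::equals_raw(obj, fwd);
  }
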
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp	Thu May 02 06:33:28 2019 -0700
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp	Thu May 02 09:49:52 2019 -0400
@@ -186,6 +186,9 @@
   void verify_after_traversal();
   void verify_after_degenerated();
   void verify_generic(VerifyOption option);
+
+  // Roots should only contain to-space oops
+  void verify_roots_no_forwarded();
 };
 
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP