8076314: Remove the static instance variable SharedHeap::_sh
author brutisso
Thu, 02 Apr 2015 06:42:24 +0200
changeset 30147 af9a41999c6e
parent 29809 c59a5f161524
child 30148 1d79283147a2
8076314: Remove the static instance variable SharedHeap::_sh
Reviewed-by: pliden, jmasa
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp
hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp
hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp
hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
hotspot/src/share/vm/memory/cardTableModRefBS.cpp
hotspot/src/share/vm/memory/cardTableRS.cpp
hotspot/src/share/vm/memory/defNewGeneration.cpp
hotspot/src/share/vm/memory/genCollectedHeap.cpp
hotspot/src/share/vm/memory/sharedHeap.cpp
hotspot/src/share/vm/memory/sharedHeap.hpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/thread.hpp
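
This changeset applies one refactoring throughout: SharedHeap no longer caches a static pointer to itself (_sh), so call sites that went through SharedHeap::heap() now use the concrete heap's own accessor (GenCollectedHeap::heap() or G1CollectedHeap::heap()), and a few interfaces gain an explicit parameter instead of reaching through the global. A minimal sketch of the accessor side, with illustrative names rather than the real HotSpot declarations:

    #include <cstddef>

    class CollectedHeap {
     public:
      virtual ~CollectedHeap() {}
    };

    // Before (removed below in sharedHeap.hpp/.cpp): the abstract middle
    // class cached itself in a static and handed it out untyped.
    //   static SharedHeap* _sh;
    //   static SharedHeap* heap() { return _sh; }

    // After: only the concrete heaps expose a typed accessor, so a call
    // site names the heap it actually requires.
    class GenCollectedHeap : public CollectedHeap {
      static GenCollectedHeap* _gch;   // set once, during initialization
     public:
      GenCollectedHeap() { _gch = this; }
      static GenCollectedHeap* heap() { return _gch; }
    };

    GenCollectedHeap* GenCollectedHeap::_gch = NULL;
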
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -673,10 +673,10 @@
                                                  HeapWord* bottom,              \
                                                  HeapWord* top,                 \
                                                  ClosureType* cl) {             \
-   bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
+   bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0;                 \
    if (is_par) {                                                                \
-     assert(SharedHeap::heap()->n_par_threads() ==                              \
-            SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
+     assert(GenCollectedHeap::heap()->n_par_threads() ==                        \
+            GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
    } else {                                                                     \
      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
@@ -1907,11 +1907,11 @@
   assert(chunk->is_free() && ffc->is_free(), "Error");
   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
   if (rem_sz < SmallForDictionary) {
-    bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
+    bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
     assert(!is_par ||
-           (SharedHeap::heap()->n_par_threads() ==
-            SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+           (GenCollectedHeap::heap()->n_par_threads() ==
+            GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
     returnChunkToFreeList(ffc);
     split(size, rem_sz);
     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
@@ -1982,7 +1982,7 @@
 
 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
   assert(_promoInfo.tracking(), "No preceding save_marks?");
-  assert(SharedHeap::heap()->n_par_threads() == 0,
+  assert(GenCollectedHeap::heap()->n_par_threads() == 0,
          "Shouldn't be called if using parallel gc.");
   return _promoInfo.noPromotions();
 }
@@ -1991,7 +1991,7 @@
                                                                             \
 void CompactibleFreeListSpace::                                             \
 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
-  assert(SharedHeap::heap()->n_par_threads() == 0,                          \
+  assert(GenCollectedHeap::heap()->n_par_threads() == 0,                    \
          "Shouldn't be called (yet) during parallel part of gc.");          \
   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
   /*                                                                        \
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -2128,6 +2128,7 @@
 }
 
 #ifndef PRODUCT
+
 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
 private:
   unsigned _gc_time_stamp;
@@ -3336,8 +3337,6 @@
 #endif // PRODUCT
 
 G1CollectedHeap* G1CollectedHeap::heap() {
-  assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
-         "not a garbage-first heap");
   return _g1h;
 }
 
@@ -6163,8 +6162,6 @@
 }
 
 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
-  assert(heap_lock_held_for_gc(),
-              "the heap lock should already be held by or for this thread");
   _young_list->push_region(hr);
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -1460,7 +1460,7 @@
   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
 
   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
-        HeapRegion::GrainWords * _max_survivor_regions);
+        HeapRegion::GrainWords * _max_survivor_regions, counters());
 }
 
 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -61,9 +61,8 @@
                                       bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
-  SharedHeap* sh = SharedHeap::heap();
 #ifdef ASSERT
-  if (sh->collector_policy()->should_clear_all_soft_refs()) {
+  if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
     assert(clear_all_softrefs, "Policy should have been checked earler");
   }
 #endif
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -253,7 +253,8 @@
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
-    Threads::possibly_parallel_oops_do(strong_roots, thread_stack_clds, strong_code);
+    bool is_par = _g1h->n_par_threads() > 0;
+    Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
   }
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -225,15 +225,10 @@
 
 void VM_CGC_Operation::doit() {
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
-  GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
-  SharedHeap* sh = SharedHeap::heap();
-  // This could go away if CollectedHeap gave access to _gc_is_active...
-  if (sh != NULL) {
-    IsGCActiveMark x;
-    _cl->do_void();
-  } else {
-    _cl->do_void();
-  }
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm(), g1h->concurrent_mark()->concurrent_gc_id());
+  IsGCActiveMark x;
+  _cl->do_void();
 }
 
 bool VM_CGC_Operation::doit_prologue() {
@@ -244,14 +239,12 @@
   }
 
   Heap_lock->lock();
-  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
   return true;
 }
 
 void VM_CGC_Operation::doit_epilogue() {
   // Note the relative order of the unlocks must match that in
   // VM_GC_Operation::doit_epilogue()
-  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
   Heap_lock->unlock();
   if (_needs_pll) {
     release_and_notify_pending_list_lock();
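
The doit() hunk above collapses the old sh != NULL branch: under G1 the heap always exists by the time a concurrent-cycle operation runs, so the IsGCActiveMark can be taken unconditionally. A hedged sketch of the RAII idiom at work (simplified; the real mark toggles the heap's _gc_is_active state mentioned in the deleted comment):

    #include <cassert>

    static bool _is_gc_active = false;        // stand-in for the heap's flag

    class IsGCActiveMark {
     public:
      IsGCActiveMark()  { assert(!_is_gc_active); _is_gc_active = true; }
      ~IsGCActiveMark() { assert(_is_gc_active);  _is_gc_active = false; }
    };

    void do_void() {}                         // stand-in for _cl->do_void()

    void doit() {
      IsGCActiveMark x;                       // flag is set for this scope...
      do_void();
    }                                         // ...and cleared on any exit
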
--- a/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -79,7 +79,7 @@
   }
 }
 
-uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
+uint ageTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) {
   size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
   uint result;
 
@@ -126,9 +126,6 @@
       age++;
     }
     if (UsePerfData) {
-      SharedHeap* sh = SharedHeap::heap();
-      CollectorPolicy* policy = sh->collector_policy();
-      GCPolicyCounters* gc_counters = policy->counters();
       gc_counters->tenuring_threshold()->set_value(result);
       gc_counters->desired_survivor_size()->set_value(
         desired_survivor_size*oopSize);
--- a/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp	Thu Apr 02 06:42:24 2015 +0200
@@ -29,6 +29,8 @@
 #include "oops/oop.hpp"
 #include "runtime/perfData.hpp"
 
+class GCPolicyCounters;
+
 /* Copyright (c) 1992-2009 Oracle and/or its affiliates, and Stanford University.
    See the LICENSE file for license information. */
 
@@ -69,7 +71,7 @@
   void merge_par(ageTable* subTable);
 
   // calculate new tenuring threshold based on age information
-  uint compute_tenuring_threshold(size_t survivor_capacity);
+  uint compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters);
 
  private:
   PerfVariable* _perf_sizes[table_size];
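
Together, the two ageTable hunks replace a global lookup (SharedHeap::heap()->collector_policy()->counters()) with an argument supplied by the caller; the forward declaration of GCPolicyCounters keeps the header free of a new include. A sketch of the pattern, using simplified stand-in types rather than the real counter API:

    #include <cstddef>

    class GCPolicyCounters {                  // stand-in, not the real class
      unsigned _tenuring_threshold;
     public:
      void set_tenuring_threshold(unsigned v) { _tenuring_threshold = v; }
    };

    // Before: uint compute_tenuring_threshold(size_t survivor_capacity),
    // which dug the counters out of the SharedHeap singleton internally.
    // After: the dependency is visible in the signature.
    unsigned compute_tenuring_threshold(size_t survivor_capacity,
                                        GCPolicyCounters* gc_counters) {
      (void)survivor_capacity;                // age-table scan elided
      unsigned result = 0;
      gc_counters->set_tenuring_threshold(result);
      return result;
    }
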
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -116,8 +116,6 @@
     _prologue_succeeded = false;
   } else {
     _prologue_succeeded = true;
-    SharedHeap* sh = SharedHeap::heap();
-    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
   }
   return _prologue_succeeded;
 }
@@ -126,8 +124,6 @@
 void VM_GC_Operation::doit_epilogue() {
   assert(Thread::current()->is_Java_thread(), "just checking");
   // Release the Heap_lock first.
-  SharedHeap* sh = SharedHeap::heap();
-  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
   Heap_lock->unlock();
   release_and_notify_pending_list_lock();
 }
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -459,12 +459,12 @@
     // equal to active_workers.  When a different mechanism for shutting
     // off parallelism is used, then active_workers can be used in
     // place of n_par_threads.
-    int n_threads =  SharedHeap::heap()->n_par_threads();
+    int n_threads =  GenCollectedHeap::heap()->n_par_threads();
     bool is_par = n_threads > 0;
     if (is_par) {
 #if INCLUDE_ALL_GCS
-      assert(SharedHeap::heap()->n_par_threads() ==
-             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
+      assert(GenCollectedHeap::heap()->n_par_threads() ==
+             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
 #else  // INCLUDE_ALL_GCS
       fatal("Parallel gc not supported here.");
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -167,10 +167,10 @@
     // Cannot yet substitute active_workers for n_par_threads
     // in the case where parallelism is being turned off by
     // setting n_par_threads to 0.
-    _is_par = (SharedHeap::heap()->n_par_threads() > 0);
+    _is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
     assert(!_is_par ||
-           (SharedHeap::heap()->n_par_threads() ==
-            SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+           (GenCollectedHeap::heap()->n_par_threads() ==
+            GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
 }
 
 bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -550,8 +550,9 @@
 
 void DefNewGeneration::adjust_desired_tenuring_threshold() {
   // Set the desired survivor size to half the real survivor space
+  GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->collector_policy()->counters();
   _tenuring_threshold =
-    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
+    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
 }
 
 void DefNewGeneration::collect(bool   full,
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -606,7 +606,8 @@
   // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
   CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
 
-  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
+  bool is_par = n_par_threads() > 0;
+  Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p);
 
   if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
     Universe::oops_do(strong_roots);
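
The same caller-side move appears here and in g1RootProcessor.cpp: the heap that owns n_par_threads() decides is_par and passes it down, so the callee needs no SharedHeap lookup. Sketched with hypothetical names:

    struct OopClosure {};                     // placeholder closure type

    void possibly_parallel_oops_do(bool is_par, OopClosure* f) {
      (void)is_par; (void)f;                  // thread iteration elided
    }

    class Heap {
      unsigned _n_par_threads;
     public:
      Heap() : _n_par_threads(0) {}
      unsigned n_par_threads() const { return _n_par_threads; }

      void process_roots(OopClosure* strong_roots) {
        bool is_par = n_par_threads() > 0;    // decided where the data lives
        possibly_parallel_oops_do(is_par, strong_roots);
      }
    };
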
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -35,13 +35,10 @@
 #include "utilities/copy.hpp"
 #include "utilities/workgroup.hpp"
 
-SharedHeap* SharedHeap::_sh;
-
 SharedHeap::SharedHeap() :
   CollectedHeap(),
   _workers(NULL)
 {
-  _sh = this;  // ch is static, should be set only once.
   if (UseConcMarkSweepGC || UseG1GC) {
     _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
                             /* are_GC_task_threads */true,
@@ -54,13 +51,6 @@
   }
 }
 
-bool SharedHeap::heap_lock_held_for_gc() {
-  Thread* t = Thread::current();
-  return    Heap_lock->owned_by_self()
-         || (   (t->is_GC_task_thread() ||  t->is_VM_thread())
-             && _thread_holds_heap_lock_for_gc);
-}
-
 void SharedHeap::set_par_threads(uint t) {
   assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
   _n_par_threads = t;
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/memory/sharedHeap.hpp	Thu Apr 02 06:42:24 2015 +0200
@@ -105,11 +105,6 @@
   friend class VM_CGC_Operation;
 
 protected:
-  // There should be only a single instance of "SharedHeap" in a program.
-  // This is enforced with the protected constructor below, which will also
-  // set the static pointer "_sh" to that instance.
-  static SharedHeap* _sh;
-
   // If we're doing parallel GC, use this gang of threads.
   FlexibleWorkGang* _workers;
 
@@ -117,17 +112,7 @@
   // function.
   SharedHeap();
 
-  // Returns true if the calling thread holds the heap lock,
-  // or the calling thread is a par gc thread and the heap_lock is held
-  // by the vm thread doing a gc operation.
-  bool heap_lock_held_for_gc();
-  // True if the heap_lock is held by the a non-gc thread invoking a gc
-  // operation.
-  bool _thread_holds_heap_lock_for_gc;
-
 public:
-  static SharedHeap* heap() { return _sh; }
-
   void set_barrier_set(BarrierSet* bs);
 
   // Does operations required after initialization has been done.
--- a/hotspot/src/share/vm/runtime/thread.cpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Thu Apr 02 06:42:24 2015 +0200
@@ -754,13 +754,9 @@
       return true;
     } else {
       guarantee(res == strong_roots_parity, "Or else what?");
-      assert(SharedHeap::heap()->workers()->active_workers() > 0,
-             "Should only fail when parallel.");
       return false;
     }
   }
-  assert(SharedHeap::heap()->workers()->active_workers() > 0,
-         "Should only fail when parallel.");
   return false;
 }
 
@@ -4066,20 +4062,7 @@
 }
 #endif // PRODUCT
 
-void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
-  // Introduce a mechanism allowing parallel threads to claim threads as
-  // root groups.  Overhead should be small enough to use all the time,
-  // even in sequential code.
-  SharedHeap* sh = SharedHeap::heap();
-  // Cannot yet substitute active_workers for n_par_threads
-  // because of G1CollectedHeap::verify() use of
-  // SharedHeap::process_roots().  n_par_threads == 0 will
-  // turn off parallelism in process_roots while active_workers
-  // is being used for parallelism elsewhere.
-  bool is_par = sh->n_par_threads() > 0;
-  assert(!is_par ||
-         (SharedHeap::heap()->n_par_threads() ==
-         SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+void Threads::possibly_parallel_oops_do(bool is_par, OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   int cp = Threads::thread_claim_parity();
   ALL_JAVA_THREADS(p) {
     if (p->claim_oops_do(is_par, cp)) {
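
The loop that remains still deduplicates work through Thread::claim_oops_do: each GC round uses a fresh claim parity, and in the parallel case a worker owns a thread only if it wins the transition to that parity. A rough sketch of the mechanism, with std::atomic standing in for HotSpot's Atomic::cmpxchg:

    #include <atomic>

    struct FakeThread {
      std::atomic<int> oops_do_parity{0};

      // True for exactly one claimant per thread per round when is_par is
      // set; a sequential caller always claims successfully.
      bool claim_oops_do(bool is_par, int parity) {
        if (!is_par) {
          oops_do_parity.store(parity);
          return true;
        }
        int seen = oops_do_parity.load();
        if (seen == parity) return false;     // already claimed this round
        return oops_do_parity.compare_exchange_strong(seen, parity);
      }
    };
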
--- a/hotspot/src/share/vm/runtime/thread.hpp	Tue Mar 17 11:19:05 2015 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Thu Apr 02 06:42:24 2015 +0200
@@ -1894,7 +1894,7 @@
   // This version may only be called by sequential code.
   static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   // This version may be called by sequential or parallel code.
-  static void possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
+  static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   // This creates a list of GCTasks, one per thread.
   static void create_thread_roots_tasks(GCTaskQueue* q);
   // This creates a list of GCTasks, one per thread, for marking objects.