8077413: Avoid use of Universe::heap() inside collectors
author pliden
date Mon, 13 Apr 2015 15:47:48 +0200
changeset 30173 13cf7580b000
parent 30172 4dcc7e33e633
child 30174 01b674a7cb8f
8077413: Avoid use of Universe::heap() inside collectors Reviewed-by: stefank, kbarrett
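This changeset applies a single idiom throughout: instead of fetching the untyped Universe::heap() and casting it (typically followed by a per-call-site kind assert), code inside a collector calls that collector's own static accessor, which performs the NULL and kind checks once, centrally. The before/after shape of the idiom, with the "after" half taken verbatim from the g1CollectedHeap.cpp hunk below:

    // Before: each call site casts the global heap and re-asserts its kind.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    // After: the typed accessor centralizes the checks for all callers.
    G1CollectedHeap* G1CollectedHeap::heap() {
      assert(_g1h != NULL, "Uninitialized access to G1CollectedHeap::heap()");
      assert(_g1h->kind() == CollectedHeap::G1CollectedHeap, "Not a G1 heap");
      return _g1h;
    }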
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp
hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp
hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp
hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp
hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
hotspot/src/share/vm/memory/cardGeneration.cpp
hotspot/src/share/vm/memory/cardTableRS.cpp
hotspot/src/share/vm/memory/collectorPolicy.cpp
hotspot/src/share/vm/memory/defNewGeneration.cpp
hotspot/src/share/vm/memory/defNewGeneration.inline.hpp
hotspot/src/share/vm/memory/generation.cpp
hotspot/src/share/vm/oops/instanceRefKlass.cpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -299,8 +299,6 @@
 
 AdaptiveSizePolicy* CMSCollector::size_policy() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "Wrong type of heap");
   return gch->gen_policy()->size_policy();
 }
 
@@ -981,7 +979,7 @@
   assert_lock_strong(freelistLock());
 
 #ifndef PRODUCT
-  if (Universe::heap()->promotion_should_fail()) {
+  if (GenCollectedHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif  // #ifndef PRODUCT
@@ -1058,7 +1056,7 @@
                                            oop old, markOop m,
                                            size_t word_sz) {
 #ifndef PRODUCT
-  if (Universe::heap()->promotion_should_fail()) {
+  if (GenCollectedHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif  // #ifndef PRODUCT
@@ -2468,7 +2466,7 @@
   verification_mark_bm()->iterate(&vcl);
   if (vcl.failed()) {
     gclog_or_tty->print("Verification failed");
-    Universe::heap()->print_on(gclog_or_tty);
+    gch->print_on(gclog_or_tty);
     fatal("CMS: failed marking verification after remark");
   }
 }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
-    Universe::heap()->prepare_for_verify();
+    GenCollectedHeap::heap()->prepare_for_verify();
     Universe::verify();
   }
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1326,7 +1326,7 @@
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    Universe::heap()->prepare_for_verify();
+    g1h->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(before)");
   }
@@ -1353,7 +1353,7 @@
     // Verify the heap w.r.t. the previous marking bitmap.
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
-      Universe::heap()->prepare_for_verify();
+      g1h->prepare_for_verify();
       Universe::verify(VerifyOption_G1UsePrevMarking,
                        " VerifyDuringGC:(overflow)");
     }
@@ -1379,7 +1379,7 @@
 
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
-      Universe::heap()->prepare_for_verify();
+      g1h->prepare_for_verify();
       Universe::verify(VerifyOption_G1UseNextMarking,
                        " VerifyDuringGC:(after)");
     }
@@ -1987,13 +1987,13 @@
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    Universe::heap()->prepare_for_verify();
+    g1h->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(before)");
   }
   g1h->check_bitmaps("Cleanup Start");
 
-  G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
   g1p->record_concurrent_mark_cleanup_start();
 
   double start = os::elapsedTime();
@@ -2098,7 +2098,7 @@
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    Universe::heap()->prepare_for_verify();
+    g1h->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(after)");
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "memory/space.hpp"
@@ -303,9 +304,9 @@
   assert(blk_start <= threshold, "blk_start should be at or before threshold");
   assert(pointer_delta(threshold, blk_start) <= N_words,
          "offset should be <= BlockOffsetSharedArray::N");
-  assert(Universe::heap()->is_in_reserved(blk_start),
+  assert(G1CollectedHeap::heap()->is_in_reserved(blk_start),
          "reference must be into the heap");
-  assert(Universe::heap()->is_in_reserved(blk_end-1),
+  assert(G1CollectedHeap::heap()->is_in_reserved(blk_end-1),
          "limit must be within the heap");
   assert(threshold == _array->_reserved.start() + index*N_words,
          "index must agree with threshold");
@@ -458,7 +459,7 @@
 }
 
 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+  assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
          "just checking");
   _next_offset_index = _array->index_for_raw(_bottom);
   _next_offset_index++;
@@ -468,7 +469,7 @@
 }
 
 void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+  assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
          "just checking");
   size_t bottom_index = _array->index_for_raw(_bottom);
   assert(_array->address_for_index_raw(bottom_index) == _bottom,
@@ -477,7 +478,7 @@
 }
 
 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+  assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
          "just checking");
   _next_offset_index = _array->index_for(_bottom);
   _next_offset_index++;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -3318,6 +3318,8 @@
 #endif // PRODUCT
 
 G1CollectedHeap* G1CollectedHeap::heap() {
+  assert(_g1h != NULL, "Uninitialized access to G1CollectedHeap::heap()");
+  assert(_g1h->kind() == CollectedHeap::G1CollectedHeap, "Not a G1 heap");
   return _g1h;
 }
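Both checks are HotSpot assert()s, so they compile away in product builds; the accessor remains a plain load of _g1h on the hot paths that the hunks below redirect to it, and it is this centralization that allows the per-call-site "Wrong type of heap" asserts to be deleted elsewhere in the changeset.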
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -101,11 +101,6 @@
   BiasedLocking::restore_marks();
   GenMarkSweep::deallocate_stacks();
 
-  // "free at last gc" is calculated from these.
-  // CHF: cheating for now!!!
-  //  Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
-  //  Universe::set_heap_used_at_last_gc(Universe::heap()->used());
-
   CodeCache::gc_epilogue();
   JvmtiExport::gc_epilogue();
 
@@ -167,12 +162,12 @@
   Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
 
   // Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
-  G1CollectedHeap::heap()->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
+  g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-    Universe::heap()->prepare_for_verify();
+    g1h->prepare_for_verify();
     // Note: we can verify only the heap here. When an object is
     // marked, the previous value of the mark word (including
     // identity hash values, ages, etc) is preserved, and the mark
@@ -186,7 +181,7 @@
     if (!VerifySilently) {
       gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
     }
-    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
+    g1h->verify(VerifySilently, VerifyOption_G1UseMarkWord);
     if (!VerifySilently) {
       gclog_or_tty->print_cr("]");
     }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,7 @@
   oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
   assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
+  assert(_g1->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT
 
   assert(_from != NULL, "from region must be non-NULL");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,7 @@
   assert(has_partial_array_mask(p), "invariant");
   oop from_obj = clear_partial_array_mask(p);
 
-  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
   assert(from_obj->is_objArray(), "must be obj array");
   objArrayOop from_obj_array = objArrayOop(from_obj);
   // The from-space object contains the real length.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@
   oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
   assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
+  assert(_g1->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT
 
   assert(from == NULL || from->is_in_reserved(p), "p is not in from");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -206,7 +206,7 @@
   if (new_val == NULL) return;
   // Otherwise, log it.
   G1SATBCardTableLoggingModRefBS* g1_bs =
-    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
   g1_bs->write_ref_field_work(field, new_val);
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/javaClasses.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "gc_implementation/g1/g1StringDedupQueue.hpp"
 #include "memory/gcLocker.hpp"
@@ -163,7 +164,7 @@
     while (!iter.is_empty()) {
       oop obj = iter.next();
       if (obj != NULL) {
-        guarantee(Universe::heap()->is_in_reserved(obj), "Object must be on the heap");
+        guarantee(G1CollectedHeap::heap()->is_in_reserved(obj), "Object must be on the heap");
         guarantee(!obj->is_forwarded(), "Object must not be forwarded");
         guarantee(java_lang_String::is_instance(obj), "Object must be a String");
       }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -519,7 +519,7 @@
     while (*entry != NULL) {
       typeArrayOop value = (*entry)->obj();
       guarantee(value != NULL, "Object must not be NULL");
-      guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap");
+      guarantee(G1CollectedHeap::heap()->is_in_reserved(value), "Object must be on the heap");
       guarantee(!value->is_forwarded(), "Object must not be forwarded");
       guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
       unsigned int hash = hash_code(value);
--- a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -27,8 +27,8 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/cardTableRS.hpp"
+#include "memory/genCollectedHeap.hpp"
 #include "memory/space.inline.hpp"
-#include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -449,7 +449,7 @@
   // Do a dirty read here. If we pass the conditional then take the rare
   // event lock and do the read again in case some other thread had already
   // succeeded and done the resize.
-  int cur_collection = Universe::heap()->total_collections();
+  int cur_collection = GenCollectedHeap::heap()->total_collections();
   if (_last_LNC_resizing_collection[i] != cur_collection) {
     MutexLocker x(ParGCRareEvent_lock);
     if (_last_LNC_resizing_collection[i] != cur_collection) {
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -117,7 +117,7 @@
 void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
   assert(old->is_objArray(), "must be obj array");
   assert(old->is_forwarded(), "must be forwarded");
-  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
+  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
   assert(!old_gen()->is_in(old), "must be in young generation.");
 
   objArrayOop obj = objArrayOop(old->forwardee());
@@ -199,9 +199,9 @@
   for (size_t i = 0; i != num_take_elems; i++) {
     oop cur = of_stack->pop();
     oop obj_to_push = cur->forwardee();
-    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
+    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
     assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
-    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
+    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
     if (should_be_partially_scanned(obj_to_push, cur)) {
       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
       obj_to_push = cur;
@@ -695,7 +695,7 @@
 
   _par_cl->do_oop_nv(p);
 
-  if (Universe::heap()->is_in_reserved(p)) {
+  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
@@ -722,7 +722,7 @@
 
   _cl->do_oop_nv(p);
 
-  if (Universe::heap()->is_in_reserved(p)) {
+  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
@@ -821,8 +821,6 @@
 void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
 {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-         "not a generational heap");
   FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   _state_set.reset(workers->active_workers(), _generation.promotion_failed());
@@ -897,7 +895,7 @@
     _gc_tracer.report_promotion_failed(_promotion_failed_info);
   }
   // Reset the PromotionFailureALot counters.
-  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+  NOT_PRODUCT(gch->reset_promotion_should_fail();)
 }
 
 void ParNewGeneration::collect(bool   full,
@@ -910,8 +908,6 @@
 
   _gc_timer->register_gc_start();
 
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "not a CMS generational heap");
   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
   FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need workgang for parallel work");
@@ -1190,7 +1186,7 @@
   } else {
     // Is in to-space; do copying ourselves.
     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
-    assert(Universe::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
+    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
     forward_ptr = old->forward_to_atomic(new_obj);
     // Restore the mark word copied above.
     new_obj->set_mark(m);
--- a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Mon Apr 13 15:47:48 2015 +0200
@@ -70,7 +70,7 @@
 inline void ParScanClosure::do_oop_work(T* p,
                                         bool gc_barrier,
                                         bool root_scan) {
-  assert((!Universe::heap()->is_in_reserved(p) ||
+  assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
           generation()->is_in_reserved(p))
          && (generation()->level() == 0 || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
@@ -82,7 +82,7 @@
 #ifndef PRODUCT
       if (_g->to()->is_in_reserved(obj)) {
         tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
-        GenCollectedHeap* gch =  (GenCollectedHeap*)Universe::heap();
+        GenCollectedHeap* gch = GenCollectedHeap::heap();
         Space* sp = gch->space_containing(p);
         oop obj = oop(sp->block_start(p));
         assert((HeapWord*)obj < (HeapWord*)p, "Error");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,7 +89,7 @@
   assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
   assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   size_t result =  gen_size_limit() - virtual_space()->committed_size();
   size_t result_aligned = align_size_down(result, heap->generation_alignment());
   return result_aligned;
@@ -101,7 +101,7 @@
     return uncommitted_bytes;
   }
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t gen_alignment = heap->generation_alignment();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   const size_t working_size =
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,7 @@
   size_t current_committed_size = virtual_space()->committed_size();
   assert((gen_size_limit() >= current_committed_size),
     "generation size limit is wrong");
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   size_t result =  gen_size_limit() - current_committed_size;
   size_t result_aligned = align_size_down(result, heap->generation_alignment());
   return result_aligned;
@@ -91,7 +91,7 @@
 
   if (eden_space()->is_empty()) {
     // Respect the minimum size for eden and for the young gen as a whole.
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     const size_t eden_alignment = heap->space_alignment();
     const size_t gen_alignment = heap->generation_alignment();
 
@@ -128,7 +128,7 @@
 // If to_space is below from_space, to_space is not considered.
 // to_space can be.
 size_t ASPSYoungGen::available_to_live() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t alignment = heap->space_alignment();
 
   // Include any space that is committed but is not in eden.
@@ -292,7 +292,7 @@
 
   assert(eden_start < from_start, "Cannot push into from_space");
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t alignment = heap->space_alignment();
   const bool maintain_minimum =
     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@@ -345,8 +345,6 @@
 
     // Does the optimal to-space overlap from-space?
     if (to_start < (char*)from_space()->end()) {
-      assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
       // Calculate the minimum offset possible for from_end
       size_t from_size =
         pointer_delta(from_space()->top(), from_start, sizeof(char));
@@ -509,9 +507,7 @@
   assert(from_space()->top() == old_from_top, "from top changed!");
 
   if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                   "collection: %d "
                   "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
@@ -542,7 +538,7 @@
   }
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
 
   space_invariants();
 }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -76,9 +76,7 @@
 
  public:
   CheckForUnmarkedObjects() {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     _young_gen = heap->young_gen();
     _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
     // No point in asserting barrier set type here. Need to make CardTableExtension
@@ -325,9 +323,7 @@
 void CardTableExtension::verify_all_young_refs_imprecise() {
   CheckForUnmarkedObjects check;
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
 
   old_gen->object_iterate(&check);
@@ -335,9 +331,7 @@
 
 // This should be called immediately after a scavenge, before mutators resume.
 void CardTableExtension::verify_all_young_refs_precise() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
 
   CheckForPreciseMarks check(
@@ -351,7 +345,7 @@
 
 void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
   CardTableExtension* card_table =
-    barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());
+    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
 
   jbyte* bot = card_table->byte_for(mr.start());
   jbyte* top = card_table->byte_for(mr.end());
@@ -523,7 +517,7 @@
     cur_committed = new_committed;
   }
 #ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   assert(cur_committed.start() ==
     (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                               os::vm_page_size()),
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -89,6 +89,7 @@
   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
   double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
 
+  _psh = this;
   _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());
 
   _old_gen = _gens->old_gen();
@@ -114,7 +115,6 @@
   // initialize the policy counters - 2 collectors, 3 generations
   _gc_policy_counters =
     new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
-  _psh = this;
 
   // Set up the GCTaskManager
   _gc_task_manager = GCTaskManager::create(ParallelGCThreads);
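Hoisting _psh = this to the top of initialize() is what keeps the accessor safe to call during the rest of initialization: once ParallelScavengeHeap::heap() asserts on an unset _psh, the assignment must precede any code that might reach it. A sketch of the presumed accessor this ordering protects (hypothetical here, mirroring the G1CollectedHeap::heap() hunk above; this changeset does not show the ParallelScavengeHeap body):

    // Presumed shape, by analogy with G1CollectedHeap::heap() above.
    ParallelScavengeHeap* ParallelScavengeHeap::heap() {
      assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
      return _psh;
    }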
@@ -259,7 +259,7 @@
     // total_collections() value!
     {
       MutexLocker ml(Heap_lock);
-      gc_count = Universe::heap()->total_collections();
+      gc_count = total_collections();
 
       result = young_gen()->allocate(size);
       if (result != NULL) {
@@ -309,8 +309,7 @@
       // This prevents us from looping until time out on requests that can
       // not be satisfied.
       if (op.prologue_succeeded()) {
-        assert(Universe::heap()->is_in_or_null(op.result()),
-          "result not in heap");
+        assert(is_in_or_null(op.result()), "result not in heap");
 
         // If GC was locked out during VM operation then retry allocation
         // and/or stall as necessary.
@@ -420,7 +419,7 @@
 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!Universe::heap()->is_gc_active(), "not reentrant");
+  assert(!is_gc_active(), "not reentrant");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
   // We assume that allocation in eden will fail unless we collect.
@@ -508,8 +507,8 @@
   {
     MutexLocker ml(Heap_lock);
     // This value is guarded by the Heap_lock
-    gc_count      = Universe::heap()->total_collections();
-    full_gc_count = Universe::heap()->total_full_collections();
+    gc_count      = total_collections();
+    full_gc_count = total_full_collections();
   }
 
   VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -48,7 +48,7 @@
 //
 
 void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   ResourceMark rm;
 
@@ -79,7 +79,7 @@
 
 
 void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -150,7 +150,7 @@
 
 void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
 {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("RefProcTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -168,7 +168,7 @@
 
 void RefProcTaskExecutor::execute(ProcessTask& task)
 {
-  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
   uint active_gc_threads = heap->gc_task_manager()->active_workers();
   RegionTaskQueueSet* qset = ParCompactionManager::region_array();
@@ -189,7 +189,7 @@
 
 void RefProcTaskExecutor::execute(EnqueueTask& task)
 {
-  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
   GCTaskQueue* q = GCTaskQueue::create();
   for(uint i=0; i<parallel_gc_threads; i++) {
@@ -206,7 +206,7 @@
   _terminator(t) {}
 
 void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -238,7 +238,7 @@
   _terminator(t) {}
 
 void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -320,7 +320,7 @@
 }
 
 void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -60,8 +60,7 @@
     _region_stack(NULL),
     _region_stack_index((uint)max_uintx) {
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   _old_gen = heap->old_gen();
   _start_array = old_gen()->start_array();
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -59,7 +59,7 @@
 CollectorCounters*  PSMarkSweep::_counters = NULL;
 
 void PSMarkSweep::initialize() {
-  MemRegion mr = Universe::heap()->reserved_region();
+  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
   _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
   _counters = new CollectorCounters("PSMarkSweep", 1);
 }
@@ -81,9 +81,9 @@
 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!Universe::heap()->is_gc_active(), "not reentrant");
+  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   IsGCActiveMark mark;
@@ -110,8 +110,7 @@
     return false;
   }
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
 
   _gc_timer->register_gc_start();
@@ -487,9 +486,7 @@
 }
 
 void PSMarkSweep::allocate_stacks() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
 
   MutableSpace* to_space = young_gen->to_space();
@@ -515,8 +512,7 @@
   GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace(" 1");
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   // Need to clear claim bits before the tracing starts.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -582,9 +578,7 @@
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
 
   // Begin compacting into the old gen
@@ -606,9 +600,7 @@
   GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace("3");
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
 
@@ -651,9 +643,7 @@
 
   // All pointers are now adjusted, move objects accordingly
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -38,15 +38,12 @@
 
 
 void PSMarkSweepDecorator::set_destination_decorator_tenured() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   _destination_decorator = heap->old_gen()->object_mark_sweep();
 }
 
 void PSMarkSweepDecorator::advance_destination_decorator() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   assert(_destination_decorator != NULL, "Sanity");
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -107,20 +107,22 @@
     SpaceMangler::mangle_region(cmr);
   }
 
-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+  BarrierSet* bs = heap->barrier_set();
 
-  CardTableModRefBS* _ct =
-    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
+  bs->resize_covered_region(cmr);
+
+  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
 
   // Verify that the start and end of this generation is the start of a card.
   // If this wasn't true, a single card could span more than one generation,
   // which would cause problems when we commit/uncommit memory, and when we
   // clear and dirty cards.
-  guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
-  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
+  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
+  if (_reserved.end() != heap->reserved_region().end()) {
     // Don't check at the very end of the heap as we'll assert that we're probing off
     // the end if we try.
-    guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
+    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
   }
 
   //
@@ -161,8 +163,7 @@
 }
 
 void PSOldGen::precompact() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   // Reset start array first.
   start_array()->reset();
@@ -197,7 +198,7 @@
 
   // Allocations in the old generation need to be reported
   if (res != NULL) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     heap->size_policy()->tenured_allocation(word_size);
   }
 
@@ -376,8 +377,7 @@
   }
 
   if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                   "collection: %d "
                   "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
@@ -397,7 +397,7 @@
   size_t new_word_size = new_memregion.word_size();
 
   start_array()->set_covered_region(new_memregion);
-  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
+  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);
 
   // ALWAYS do this last!!
   object_space()->initialize(new_memregion,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -748,7 +748,7 @@
 
 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
   assert(addr != NULL, "Should detect NULL oop earlier");
-  assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
+  assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
   assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
 
   // Region covering the object.
@@ -836,9 +836,7 @@
 }
 
 void PSParallelCompact::post_initialize() {
-  ParallelScavengeHeap* heap = gc_heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MemRegion mr = heap->reserved_region();
   _ref_processor =
     new ReferenceProcessor(mr,            // span
@@ -855,8 +853,7 @@
 }
 
 bool PSParallelCompact::initialize() {
-  ParallelScavengeHeap* heap = gc_heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MemRegion mr = heap->reserved_region();
 
  // Was the old gen allocated successfully?
@@ -890,7 +887,7 @@
 {
   memset(&_space_info, 0, sizeof(_space_info));
 
-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
 
   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
@@ -973,7 +970,7 @@
   // promotion failure does not swap spaces) because an unknown number of minor
   // collections will have swapped the spaces an unknown number of times.
   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 
@@ -1028,7 +1025,7 @@
   MutableSpace* const from_space = _space_info[from_space_id].space();
   MutableSpace* const to_space   = _space_info[to_space_id].space();
 
-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   bool eden_empty = eden_space->is_empty();
   if (!eden_empty) {
     eden_empty = absorb_live_data_from_eden(heap->size_policy(),
@@ -1966,7 +1963,7 @@
   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
          "should be in vm thread");
 
-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   assert(!heap->is_gc_active(), "not reentrant");
 
@@ -1994,7 +1991,7 @@
     return false;
   }
 
-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   _gc_timer.register_gc_start();
   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
@@ -2347,7 +2344,7 @@
   // Recursively traverse all live objects and mark them
   GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
-  ParallelScavengeHeap* heap = gc_heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
   uint active_gc_threads = heap->gc_task_manager()->active_workers();
   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
@@ -2687,8 +2684,7 @@
   // trace("5");
   GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
   old_gen->start_array()->reset();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2839,7 +2835,7 @@
 // heap, last_space_id is returned.  In debug mode it expects the address to be
 // in the heap and asserts such.
 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
-  assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
+  assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
 
   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     if (_space_info[id].space()->contains(addr)) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Mon Apr 13 15:47:48 2015 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
 
 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
@@ -1168,11 +1169,6 @@
 
   PSParallelCompact();
 
-  // Convenient accessor for Universe::heap().
-  static ParallelScavengeHeap* gc_heap() {
-    return (ParallelScavengeHeap*)Universe::heap();
-  }
-
   static void invoke(bool maximum_heap_compaction);
   static bool invoke_no_policy(bool maximum_heap_compaction);
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp	Mon Apr 13 15:47:48 2015 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
 
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #include "gc_interface/collectedHeap.hpp"
@@ -36,7 +37,7 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(Universe::heap()->is_in(obj), "should be in heap");
+    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
 
     if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
       cm->push(obj);
@@ -62,14 +63,14 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(Universe::heap()->is_in(obj), "should be in heap");
+    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
 
     oop new_obj = (oop)summary_data().calc_new_pointer(obj);
     assert(new_obj != NULL,                    // is forwarding ptr?
            "should be forwarded");
     // Just always do the update unconditionally?
     if (new_obj != NULL) {
-      assert(Universe::heap()->is_in_reserved(new_obj),
+      assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
              "should be in object space");
       oopDesc::encode_store_heap_oop_not_null(p, new_obj);
     }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,7 +103,7 @@
 }
 
 bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
-  assert(Universe::heap()->is_in(obj), "Object outside heap");
+  assert(ParallelScavengeHeap::heap()->is_in(obj), "Object outside heap");
 
   if (contains(obj)) {
     HeapWord* object_end = obj + obj_size;
@@ -137,9 +137,7 @@
 #ifdef ASSERT
 
 bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MutableSpace* to_space = heap->young_gen()->to_space();
   MemRegion used = to_space->used_region();
   if (used.contains(lab)) {
@@ -150,10 +148,9 @@
 }
 
 bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   assert(_start_array->covered_region().contains(lab), "Sanity");
 
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
   MemRegion used = old_gen->object_space()->used_region();
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -44,8 +44,7 @@
 MutableSpace*                  PSPromotionManager::_young_space = NULL;
 
 void PSPromotionManager::initialize() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   _old_gen = heap->old_gen();
   _young_space = heap->young_gen()->to_space();
@@ -88,8 +87,7 @@
 }
 
 void PSPromotionManager::pre_scavenge() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   _young_space = heap->young_gen()->to_space();
 
@@ -132,7 +130,7 @@
 void
 PSPromotionManager::print_taskqueue_stats(outputStream* const out) {
   out->print_cr("== GC Tasks Stats, GC %3d",
-                Universe::heap()->total_collections());
+                ParallelScavengeHeap::heap()->total_collections());
 
   TaskQueueStats totals;
   out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
@@ -160,8 +158,7 @@
 #endif // TASKQUEUE_STATS
 
 PSPromotionManager::PSPromotionManager() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   // We set the old lab's start array.
   _old_lab.set_start_array(old_gen()->start_array());
@@ -191,8 +188,7 @@
 
   // We need to get an assert in here to make sure the labs are always flushed.
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   // Do not prefill the LAB's, save heap wastage!
   HeapWord* lab_base = young_space()->top();
@@ -213,8 +209,7 @@
   totally_drain = totally_drain || _totally_drain;
 
 #ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   MutableSpace* to_space = heap->young_gen()->to_space();
   MutableSpace* old_space = heap->old_gen()->object_space();
 #endif /* ASSERT */
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Mon Apr 13 15:47:48 2015 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
 
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
@@ -57,9 +58,7 @@
 template <class T>
 inline void PSPromotionManager::claim_or_forward_depth(T* p) {
   assert(should_scavenge(p, true), "revisiting object?");
-  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
-         "Sanity");
-  assert(Universe::heap()->is_in(p), "pointer outside heap");
+  assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");
 
   claim_or_forward_internal_depth(p);
 }
@@ -150,7 +149,7 @@
     // Otherwise try allocating obj tenured
     if (new_obj == NULL) {
 #ifndef PRODUCT
-      if (Universe::heap()->promotion_should_fail()) {
+      if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
         return oop_promotion_failed(o, test_mark);
       }
 #endif  // #ifndef PRODUCT
@@ -296,7 +295,7 @@
   // that are outside the heap. These pointers are either from roots
   // or from metadata.
   if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
-      Universe::heap()->is_in_reserved(p)) {
+      ParallelScavengeHeap::heap()->is_in_reserved(p)) {
     if (PSScavenge::is_obj_in_young(new_obj)) {
       PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
     }
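
Two things are worth noting in the psPromotionManager.inline.hpp hunks. First, the
new #include: the templates in this header now name ParallelScavengeHeap directly
instead of going through the generic CollectedHeap* from Universe::heap(), so the
class definition must be visible wherever they are instantiated. Second, the assert
consolidation in claim_or_forward_depth is sound because the typed accessor already
re-checks the heap kind in debug builds; roughly:

    // Old shape: generic lookup plus a kind check at the call site
    //   assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    //   assert(Universe::heap()->is_in(p), "pointer outside heap");
    // New shape: one assert, with the kind check folded into heap()
    assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");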
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -87,8 +87,7 @@
 
 public:
   PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     _to_space = heap->young_gen()->to_space();
 
     assert(_promotion_manager != NULL, "Sanity");
@@ -218,11 +217,9 @@
 bool PSScavenge::invoke() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!Universe::heap()->is_gc_active(), "not reentrant");
+  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
 
-  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   IsGCActiveMark mark;
 
@@ -273,9 +270,8 @@
     return false;
   }
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   // Check for potential problems.
   if (!should_attempt_scavenge()) {
@@ -713,9 +709,7 @@
 // unforwarding markOops. It then restores any preserved mark oops,
 // and clears the _preserved_mark_stack.
 void PSScavenge::clean_up_failed_promotion() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
 
   {
@@ -742,7 +736,7 @@
   }
 
   // Reset the PromotionFailureALot counters.
-  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+  NOT_PRODUCT(heap->reset_promotion_should_fail();)
 }
 
 // This method is called whenever an attempt to promote an object
@@ -761,8 +755,7 @@
 }
 
 bool PSScavenge::should_attempt_scavenge() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
 
   if (UsePerfData) {
@@ -838,9 +831,7 @@
                                                     MaxTenuringThreshold;
   }
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
 #include "utilities/globalDefinitions.hpp"
 
 inline void PSScavenge::save_to_space_top_before_gc() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   _to_space_top_before_gc = heap->young_gen()->to_space()->top();
 }
 
@@ -56,7 +56,7 @@
 template <class T>
 inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
   if (check_to_space) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     return should_scavenge(p, heap->young_gen()->to_space());
   }
   return should_scavenge(p);
@@ -97,7 +97,6 @@
     ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
     assert(!psh->is_in_reserved(p), "GC barrier needed");
     if (PSScavenge::should_scavenge(p)) {
-      assert(!Universe::heap()->is_in_reserved(p), "Not from meta-data?");
       assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
 
       oop o = *p;
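
In the last psScavenge.inline.hpp hunk the deleted assert was fully redundant: two
lines earlier the code already asserts !psh->is_in_reserved(p), and psh is the same
object Universe::heap() would have returned, so the removed line re-checked an
identical condition through the untyped path. The surviving shape, abridged here
for illustration:

    ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
    assert(!psh->is_in_reserved(p), "GC barrier needed");  // p is a root or metadata slot
    if (PSScavenge::should_scavenge(p)) {
      // the duplicate is_in_reserved assert that sat here added no information
      assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
      oop o = *p;
      // ... copy-to-survivor path continues in the real code
    }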
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -47,7 +47,7 @@
 //
 
 void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
@@ -118,7 +118,7 @@
 //
 
 void ThreadRootsTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
@@ -143,7 +143,7 @@
   _terminator(t) {}
 
 void StealTask::do_it(GCTaskManager* manager, uint which) {
-  assert(Universe::heap()->is_gc_active(), "called outside gc");
+  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   PSPromotionManager* pm =
     PSPromotionManager::gc_thread_promotion_manager(which);
@@ -181,10 +181,8 @@
 
   {
     PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
-
-    assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
     CardTableExtension* card_table =
-      barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());
+      barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
 
     card_table->scavenge_contents_parallel(_gen->start_array(),
                                            _gen->object_space(),
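
The final psTasks.cpp hunk is slightly different from the rest: the kind() assert
there guarded a barrier_set_cast, and barrier_set_cast is itself a checked downcast,
so with the heap obtained through the typed accessor the assert guaranteed nothing
extra. A rough sketch of the shape of such a helper; the real template in
barrierSet.hpp is keyed through the barrier set's fake-RTTI support and may differ:

    template <typename T>
    T* barrier_set_cast(BarrierSet* bs) {
      assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
      return static_cast<T*>(bs);
    }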
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@
 
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
 
   if (ZapUnusedHeapArea) {
     // Mangle newly committed space immediately because it
@@ -103,7 +103,7 @@
                                            _max_gen_size, _virtual_space);
 
   // Compute maximum space sizes for performance counters
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   size_t alignment = heap->space_alignment();
   size_t size = virtual_space()->reserved_size();
 
@@ -153,8 +153,7 @@
 }
 
 void PSYoungGen::compute_initial_space_boundaries() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   // Compute sizes
   size_t alignment = heap->space_alignment();
@@ -208,7 +207,7 @@
 
 #ifndef PRODUCT
 void PSYoungGen::space_invariants() {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t alignment = heap->space_alignment();
 
   // Currently, our eden size cannot shrink to zero
@@ -494,7 +493,7 @@
   char* to_start   = (char*)to_space()->bottom();
   char* to_end     = (char*)to_space()->end();
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t alignment = heap->space_alignment();
   const bool maintain_minimum =
     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@@ -546,8 +545,6 @@
 
     // Does the optimal to-space overlap from-space?
     if (to_start < (char*)from_space()->end()) {
-      assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
       // Calculate the minimum offset possible for from_end
       size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));
 
@@ -708,9 +705,7 @@
   assert(from_space()->top() == old_from_top, "from top changed!");
 
   if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                   "collection: %d "
                   "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
@@ -843,7 +838,7 @@
 // from-space.
 size_t PSYoungGen::available_to_live() {
   size_t delta_in_survivor = 0;
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   const size_t space_alignment = heap->space_alignment();
   const size_t gen_alignment = heap->generation_alignment();
 
@@ -927,7 +922,7 @@
 
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
   space_invariants();
 }
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -41,8 +41,7 @@
 void VM_ParallelGCFailedAllocation::doit() {
   SvcGCMarker sgcm(SvcGCMarker::MINOR);
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   GCCauseSetter gccs(heap, _gc_cause);
   _result = heap->failed_mem_allocate(_word_size);
@@ -63,9 +62,7 @@
 void VM_ParallelGCSystemGC::doit() {
   SvcGCMarker sgcm(SvcGCMarker::FULL);
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
-    "must be a ParallelScavengeHeap");
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   GCCauseSetter gccs(heap, _gc_cause);
   if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
--- a/hotspot/src/share/vm/memory/cardGeneration.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/memory/cardGeneration.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
   // which would cause problems when we commit/uncommit memory, and when we
   // clear and dirty cards.
   guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
-  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
+  if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
     // Don't check at the very end of the heap as we'll assert that we're probing off
     // the end if we try.
     guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
@@ -78,7 +78,7 @@
        heap_word_size(_virtual_space.committed_size());
     MemRegion mr(space()->bottom(), new_word_size);
     // Expand card table
-    Universe::heap()->barrier_set()->resize_covered_region(mr);
+    GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
     // Expand shared block offset array
     _bts->resize(new_word_size);
 
@@ -170,7 +170,7 @@
   _bts->resize(new_word_size);
   MemRegion mr(space()->bottom(), new_word_size);
   // Shrink the card table
-  Universe::heap()->barrier_set()->resize_covered_region(mr);
+  GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
 
   if (Verbose && PrintGC) {
     size_t new_mem_size = _virtual_space.committed_size();
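
From cardGeneration.cpp onward the same cleanup runs through the generational code,
with GenCollectedHeap::heap() in the role ParallelScavengeHeap::heap() plays above.
Presumably the accessor is the generational twin of the one sketched earlier; the
actual definition is in genCollectedHeap.hpp/.cpp:

    GenCollectedHeap* GenCollectedHeap::heap() {
      CollectedHeap* heap = Universe::heap();
      assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
      return (GenCollectedHeap*)heap;
    }

Note that both the expand and shrink paths resize the card table through it: the
card table must cover exactly the generation's committed range, so every
commit/uncommit of the virtual space is paired with a resize_covered_region call.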
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -38,7 +38,6 @@
   GenRemSet(),
   _cur_youngergen_card_val(youngergenP1_card)
 {
-  guarantee(Universe::heap()->kind() == CollectedHeap::GenCollectedHeap, "sanity");
   _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
   _ct_bs->initialize();
   set_bs(_ct_bs);
@@ -598,10 +597,6 @@
   // At present, we only know how to verify the card table RS for
   // generational heaps.
   VerifyCTGenClosure blk(this);
-  CollectedHeap* ch = Universe::heap();
-
-  if (ch->kind() == CollectedHeap::GenCollectedHeap) {
-    GenCollectedHeap::heap()->generation_iterate(&blk, false);
-    _ct_bs->verify();
-    }
-  }
+  GenCollectedHeap::heap()->generation_iterate(&blk, false);
+  _ct_bs->verify();
+}
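
The cardTableRS.cpp verify hunk is the one place in this section where control flow
visibly changes rather than just the lookup path. Previously, generation iteration
and card-table verification ran only behind a runtime heap-kind test and were
silently skipped otherwise; since a CardTableRS is only ever constructed for a
generational heap (which is also why the constructor's guarantee could go), the
guard was dead weight:

    // Before (abridged): verification conditionally skipped
    if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) {
      GenCollectedHeap::heap()->generation_iterate(&blk, false);
      _ct_bs->verify();
    }

    // After: unconditional, with the kind still asserted inside heap()
    GenCollectedHeap::heap()->generation_iterate(&blk, false);
    _ct_bs->verify();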
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -669,7 +669,7 @@
       }
 
       // Read the gc count while the heap lock is held.
-      gc_count_before = Universe::heap()->total_collections();
+      gc_count_before = gch->total_collections();
     }
 
     VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
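
The collectorPolicy.cpp change swaps Universe::heap()->total_collections() for
gch->total_collections() on a variable already in scope. The surrounding comment is
the important part: the count must be sampled while the Heap_lock is held so the VM
operation can later detect whether another collection ran in the meantime and retry
the allocation instead of forcing a redundant GC. An illustrative recap of the
protocol as used in this file:

    // Under the Heap_lock:
    gc_count_before = gch->total_collections();
    // At the safepoint, the VM op compares gc_count_before against the
    // current count and bails out (the allocation is retried) if they differ.
    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);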
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -193,7 +193,9 @@
 {
   MemRegion cmr((HeapWord*)_virtual_space.low(),
                 (HeapWord*)_virtual_space.high());
-  Universe::heap()->barrier_set()->resize_covered_region(cmr);
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+  gch->barrier_set()->resize_covered_region(cmr);
 
   _eden_space = new ContiguousSpace();
   _from_space = new ContiguousSpace();
@@ -205,13 +207,13 @@
   // Compute the maximum eden and survivor space sizes. These sizes
   // are computed assuming the entire reserved space is committed.
   // These values are exported as performance counters.
-  uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
+  uintx alignment = gch->collector_policy()->space_alignment();
   uintx size = _virtual_space.reserved_size();
   _max_survivor_size = compute_survivor_size(size, alignment);
   _max_eden_size = size - (2*_max_survivor_size);
 
   // allocate the performance counters
-  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
+  GenCollectorPolicy* gcp = (GenCollectorPolicy*)gch->collector_policy();
 
   // Generation counters -- generation 0, 3 subspaces
   _gen_counters = new GenerationCounters("new", 0, 3,
@@ -433,7 +435,7 @@
                              SpaceDecorator::DontMangle);
     MemRegion cmr((HeapWord*)_virtual_space.low(),
                   (HeapWord*)_virtual_space.high());
-    Universe::heap()->barrier_set()->resize_covered_region(cmr);
+    gch->barrier_set()->resize_covered_region(cmr);
     if (Verbose && PrintGC) {
       size_t new_size_after  = _virtual_space.committed_size();
       size_t eden_size_after = eden()->capacity();
@@ -691,7 +693,7 @@
     gc_tracer.report_promotion_failed(_promotion_failed_info);
 
     // Reset the PromotionFailureALot counters.
-    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+    NOT_PRODUCT(gch->reset_promotion_should_fail();)
   }
   if (PrintGC && !PrintGCDetails) {
     gch->print_heap_change(gch_prev_used);
--- a/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp	Mon Apr 13 15:47:48 2015 +0200
@@ -25,9 +25,9 @@
 #ifndef SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_HPP
 #define SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_HPP
 
-#include "gc_interface/collectedHeap.hpp"
 #include "memory/cardTableRS.hpp"
 #include "memory/defNewGeneration.hpp"
+#include "memory/genCollectedHeap.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/space.hpp"
 
@@ -60,7 +60,7 @@
   // We could check that p is also in an older generation, but
   // dirty cards in the youngest gen are never scanned, so the
   // extra check probably isn't worthwhile.
-  if (Universe::heap()->is_in_reserved(p)) {
+  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->inline_write_ref_field_gc(p, obj);
   }
@@ -84,7 +84,7 @@
   // we set a younger_gen card if we have an older->youngest
   // generation pointer.
   oop obj = oopDesc::load_decode_heap_oop_not_null(p);
-  if (((HeapWord*)obj < _boundary) && Universe::heap()->is_in_reserved(p)) {
+  if (((HeapWord*)obj < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
     _rs->inline_write_ref_field_gc(p, obj);
   }
 }
--- a/hotspot/src/share/vm/memory/generation.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/memory/generation.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -187,7 +187,7 @@
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
 
 #ifndef PRODUCT
-  if (Universe::heap()->promotion_should_fail()) {
+  if (GenCollectedHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif  // #ifndef PRODUCT
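
generation.cpp shows the other spelling of the same build-time guard: an explicit
#ifndef PRODUCT block rather than the NOT_PRODUCT(...) macro used in psScavenge.cpp;
both vanish from product builds. promotion_should_fail() is the injection point for
the PromotionFailureALot stress machinery and is declared on CollectedHeap, so the
typed call is valid by inheritance. A loose sketch of the hook, with the counter
name here being illustrative rather than taken from the patch:

    // Illustrative only: the real hook consults the PromotionFailureALot*
    // flags and per-interval counters in collectedHeap.inline.hpp.
    #ifndef PRODUCT
    bool CollectedHeap::promotion_should_fail() {
      return PromotionFailureALot &&
             (++_promotion_failure_alot_counter % PromotionFailureALotInterval == 0);
    }
    #endif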
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Fri Apr 10 13:58:36 2015 +0200
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Mon Apr 13 15:47:48 2015 +0200
@@ -78,12 +78,6 @@
   InstanceKlass::oop_verify_on(obj, st);
   // Verify referent field
   oop referent = java_lang_ref_Reference::referent(obj);
-
-  // We should make this general to all heaps
-  GenCollectedHeap* gch = NULL;
-  if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap)
-    gch = GenCollectedHeap::heap();
-
   if (referent != NULL) {
     guarantee(referent->is_oop(), "referent field heap failed");
   }
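
The instanceRefKlass.cpp hunk is a pure dead-code removal: the block computed gch
and never used it afterwards, a leftover from older verification logic that was
generation-specific, as the deleted "We should make this general to all heaps"
comment hints. What remains verifies the referent without consulting the heap kind
at all:

    // The removed lines computed a value no later statement consumed:
    GenCollectedHeap* gch = NULL;
    if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap)
      gch = GenCollectedHeap::heap();  // gch is dead from this point on

With that gone, this file no longer references Universe::heap() during Reference
verification.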