8224815: Remove non-GC uses of CollectedHeap::is_in_reserved()
author eosterlund
Thu, 05 Sep 2019 08:26:49 +0200
changeset 58015 dd84de796f2c
parent 58014 aba258cd7df8
child 58016 c8bc506106e3
8224815: Remove non-GC uses of CollectedHeap::is_in_reserved()
Reviewed-by: stefank, coleenp
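The recurring pattern in the hunks below: non-GC callers stop asking whether an address lies in the reserved heap range and instead ask whether it is actually in the heap, while oop-location sanity checks go through the new, overridable CollectedHeap::check_oop_location() hook. A condensed before/after sketch of that pattern, lifted from the aarch64 macroAssembler and markSweep hunks in this changeset (illustration only, not an additional change):

    // Before: test against the reserved range, which not every collector maintains.
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");

    // After: ask the GC whether the address is really part of the heap ...
    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
    // ... or, for forwardees and other computed oop locations, use the new hook.
    DEBUG_ONLY(Universe::heap()->check_oop_location((HeapWord*)new_obj);)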
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp
src/hotspot/cpu/sparc/macroAssembler_sparc.cpp
src/hotspot/cpu/x86/relocInfo_x86.cpp
src/hotspot/cpu/x86/x86_64.ad
src/hotspot/share/ci/ciObjectFactory.cpp
src/hotspot/share/code/debugInfo.cpp
src/hotspot/share/gc/cms/cmsHeap.hpp
src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp
src/hotspot/share/gc/epsilon/epsilonHeap.cpp
src/hotspot/share/gc/epsilon/epsilonHeap.hpp
src/hotspot/share/gc/g1/g1Allocator.inline.hpp
src/hotspot/share/gc/g1/g1CollectedHeap.cpp
src/hotspot/share/gc/g1/g1CollectedHeap.hpp
src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
src/hotspot/share/gc/serial/markSweep.cpp
src/hotspot/share/gc/serial/markSweep.inline.hpp
src/hotspot/share/gc/shared/blockOffsetTable.cpp
src/hotspot/share/gc/shared/collectedHeap.cpp
src/hotspot/share/gc/shared/collectedHeap.hpp
src/hotspot/share/gc/shared/gcVMOperations.cpp
src/hotspot/share/gc/shared/genCollectedHeap.cpp
src/hotspot/share/gc/shared/genCollectedHeap.hpp
src/hotspot/share/gc/shared/markBitMap.cpp
src/hotspot/share/gc/shared/referenceProcessor.cpp
src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
src/hotspot/share/gc/z/zCollectedHeap.cpp
src/hotspot/share/gc/z/zCollectedHeap.hpp
src/hotspot/share/interpreter/interpreterRuntime.cpp
src/hotspot/share/jvmci/jvmciRuntime.cpp
src/hotspot/share/memory/filemap.cpp
src/hotspot/share/memory/filemap.hpp
src/hotspot/share/memory/metaspace.cpp
src/hotspot/share/memory/universe.cpp
src/hotspot/share/memory/universe.hpp
src/hotspot/share/memory/virtualspace.cpp
src/hotspot/share/memory/virtualspace.hpp
src/hotspot/share/oops/compressedOops.cpp
src/hotspot/share/oops/compressedOops.hpp
src/hotspot/share/oops/compressedOops.inline.hpp
src/hotspot/share/oops/oop.cpp
src/hotspot/share/oops/oop.hpp
src/hotspot/share/oops/oop.inline.hpp
src/hotspot/share/oops/oopsHierarchy.hpp
src/hotspot/share/oops/symbol.cpp
src/hotspot/share/opto/machnode.cpp
src/hotspot/share/prims/jvmtiTagMap.cpp
src/hotspot/share/runtime/jniHandles.cpp
test/hotspot/gtest/gc/shared/test_collectedHeap.cpp
test/hotspot/jtreg/gc/g1/TestLargePageUseForHeap.java
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -3953,7 +3953,7 @@
     assert (UseCompressedOops, "should only be used for compressed oops");
     assert (Universe::heap() != NULL, "java heap should be initialized");
     assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
-    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
+    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
   }
 #endif
   int oop_index = oop_recorder()->find_index(obj);
@@ -3968,7 +3968,7 @@
   assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int index = oop_recorder()->find_index(k);
-  assert(! Universe::heap()->is_in_reserved(k), "should not be an oop");
+  assert(! Universe::heap()->is_in(k), "should not be an oop");
 
   InstructionMark im(this);
   RelocationHolder rspec = metadata_Relocation::spec(index);
@@ -4052,7 +4052,7 @@
 #ifdef ASSERT
     {
       ThreadInVMfromUnknown tiv;
-      assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
+      assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
     }
 #endif
     oop_index = oop_recorder()->find_index(obj);
@@ -4082,7 +4082,7 @@
   {
     ThreadInVMfromUnknown tiv;
     assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
-    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
   }
 #endif
   int oop_index = oop_recorder()->find_index(obj);
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -409,7 +409,7 @@
 #ifdef ASSERT
     {
       ThreadInVMfromNative tiv(JavaThread::current());
-      assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
+      assert(Universe::heap()->is_in(JNIHandles::resolve(o)), "should be real oop");
     }
 #endif
     int oop_index = __ oop_recorder()->find_index(o);
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -990,7 +990,7 @@
   {
     ThreadInVMfromUnknown tiv;
     assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
-    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
   }
 #endif
   int oop_index = oop_recorder()->find_index(obj);
--- a/src/hotspot/cpu/x86/relocInfo_x86.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/cpu/x86/relocInfo_x86.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -51,7 +51,7 @@
   } else if (which == Assembler::narrow_oop_operand) {
     address disp = Assembler::locate_operand(addr(), which);
     // both compressed oops and compressed classes look the same
-    if (Universe::heap()->is_in_reserved((oop)x)) {
+    if (CompressedOops::is_in((void*)x)) {
     if (verify_only) {
       guarantee(*(uint32_t*) disp == CompressedOops::encode((oop)x), "instructions must match");
     } else {
--- a/src/hotspot/cpu/x86/x86_64.ad	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/cpu/x86/x86_64.ad	Thu Sep 05 08:26:49 2019 +0200
@@ -546,7 +546,7 @@
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
-    assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop");
+    assert(Universe::heap()->is_in((address)(intptr_t)d32), "should be real oop");
     assert(oopDesc::is_oop(cast_to_oop((intptr_t)d32)), "cannot embed broken oops in code");
   }
 #endif
@@ -573,7 +573,7 @@
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
-    assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop");
+    assert(Universe::heap()->is_in((address)d64), "should be real oop");
     assert(oopDesc::is_oop(cast_to_oop(d64)), "cannot embed broken oops in code");
   }
 #endif
--- a/src/hotspot/share/ci/ciObjectFactory.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -239,7 +239,7 @@
 ciObject* ciObjectFactory::get(oop key) {
   ASSERT_IN_VM;
 
-  assert(Universe::heap()->is_in_reserved(key), "must be");
+  assert(Universe::heap()->is_in(key), "must be");
 
   NonPermObject* &bucket = find_non_perm(key);
   if (bucket != NULL) {
@@ -252,7 +252,7 @@
   ciObject* new_object = create_new_object(keyHandle());
   assert(oopDesc::equals(keyHandle(), new_object->get_oop()), "must be properly recorded");
   init_ident_of(new_object);
-  assert(Universe::heap()->is_in_reserved(new_object->get_oop()), "must be");
+  assert(Universe::heap()->is_in(new_object->get_oop()), "must be");
 
   // Not a perm-space object.
   insert_non_perm(bucket, keyHandle(), new_object);
@@ -644,7 +644,7 @@
 // If there is no entry in the cache corresponding to this oop, return
 // the null tail of the bucket into which the oop should be inserted.
 ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) {
-  assert(Universe::heap()->is_in_reserved(key), "must be");
+  assert(Universe::heap()->is_in(key), "must be");
   ciMetadata* klass = get_metadata(key->klass());
   NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS];
   for (NonPermObject* p; (p = (*bp)) != NULL; bp = &p->next()) {
@@ -672,7 +672,7 @@
 //
 // Insert a ciObject into the non-perm table.
 void ciObjectFactory::insert_non_perm(ciObjectFactory::NonPermObject* &where, oop key, ciObject* obj) {
-  assert(Universe::heap()->is_in_reserved_or_null(key), "must be");
+  assert(Universe::heap()->is_in_or_null(key), "must be");
   assert(&where != &emptyBucket, "must not try to fill empty bucket");
   NonPermObject* p = new (arena()) NonPermObject(where, key, obj);
   assert(where == p && is_equal(p, key) && p->object() == obj, "entry must match");
--- a/src/hotspot/share/code/debugInfo.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/code/debugInfo.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -225,7 +225,7 @@
     // thread is already in VM state.
     ThreadInVMfromUnknown tiv;
     assert(JNIHandles::resolve(value()) == NULL ||
-           Universe::heap()->is_in_reserved(JNIHandles::resolve(value())),
+           Universe::heap()->is_in(JNIHandles::resolve(value())),
            "Should be in heap");
  }
 #endif
@@ -246,7 +246,7 @@
 ConstantOopReadValue::ConstantOopReadValue(DebugInfoReadStream* stream) {
   _value = Handle(Thread::current(), stream->read_oop());
   assert(_value() == NULL ||
-         Universe::heap()->is_in_reserved(_value()), "Should be in heap");
+         Universe::heap()->is_in(_value()), "Should be in heap");
 }
 
 void ConstantOopReadValue::write_on(DebugInfoWriteStream* stream) {
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -135,6 +135,10 @@
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   void collect_mostly_concurrent(GCCause::Cause cause);
+
+  // CMS forwards some non-heap value into the mark oop to reserve oops during
+  // promotion, so we can't assert about obj alignment or that the forwardee is in heap
+  virtual void check_oop_location(void* addr) const {}
 };
 
 #endif // SHARE_GC_CMS_CMSHEAP_HPP
--- a/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -70,7 +70,7 @@
 JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose) {
   if (UseConcMarkSweepGC) {
     // ParGCCardsPerStrideChunk should be compared with card table size.
-    size_t heap_size = Universe::heap()->reserved_region().word_size();
+    size_t heap_size = CMSHeap::heap()->reserved_region().word_size();
     CardTableRS* ct = GenCollectedHeap::heap()->rem_set();
     size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size
 
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -39,13 +39,13 @@
   size_t max_byte_size  = align_up(MaxHeapSize, align);
 
   // Initialize backing storage
-  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
   _virtual_space.initialize(heap_rs, init_byte_size);
 
   MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
   MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());
 
-  initialize_reserved_region(reserved_region.start(), reserved_region.end());
+  initialize_reserved_region(heap_rs);
 
   _space = new ContiguousSpace();
   _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -136,6 +136,9 @@
     return os::elapsed_counter() / NANOSECS_PER_MILLISEC;
   }
 
+  MemRegion reserved_region() const { return _reserved; }
+  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
+
   virtual void print_on(outputStream* st) const;
   virtual void print_tracing_info() const;
   virtual bool print_location(outputStream* st, void* addr) const;
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -99,11 +99,11 @@
 
   _archive_check_enabled = true;
   size_t length = G1CollectedHeap::heap()->max_reserved_capacity();
-  _closed_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
-                                        (HeapWord*)Universe::heap()->base() + length,
+  _closed_archive_region_map.initialize(G1CollectedHeap::heap()->base(),
+                                        G1CollectedHeap::heap()->base() + length,
                                         HeapRegion::GrainBytes);
-  _open_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
-                                      (HeapWord*)Universe::heap()->base() + length,
+  _open_archive_region_map.initialize(G1CollectedHeap::heap()->base(),
+                                      G1CollectedHeap::heap()->base() + length,
                                       HeapRegion::GrainBytes);
 }
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -1666,13 +1666,13 @@
   // If this happens then we could end up using a non-optimal
   // compressed oops mode.
 
-  ReservedSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
-                                                 HeapAlignment);
-
-  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
+                                                     HeapAlignment);
+
+  initialize_reserved_region(heap_rs);
 
   // Create the barrier set for the entire reserved region.
-  G1CardTable* ct = new G1CardTable(reserved_region());
+  G1CardTable* ct = new G1CardTable(heap_rs.region());
   ct->initialize();
   G1BarrierSet* bs = new G1BarrierSet(ct);
   bs->initialize();
@@ -1742,6 +1742,7 @@
 
   _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
   _card_table->initialize(cardtable_storage);
+
   // Do later initialization work for concurrent refinement.
   _hot_card_cache->initialize(card_counts_storage);
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -1129,6 +1129,18 @@
     return _hrm->reserved();
   }
 
+  MemRegion reserved_region() const {
+    return _reserved;
+  }
+
+  HeapWord* base() const {
+    return _reserved.start();
+  }
+
+  bool is_in_reserved(const void* addr) const {
+    return _reserved.contains(addr);
+  }
+
   G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
 
   G1CardTable* card_table() const {
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -86,7 +86,7 @@
   }
 
   // Forwarded, just update.
-  assert(Universe::heap()->is_in_reserved(forwardee), "should be in object space");
+  assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space");
   RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);
 }
 
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -115,8 +115,7 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   // can't do because of races
   // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
-  assert(check_obj_alignment(obj), "not oop aligned");
-  assert(g1h->is_in_reserved(obj), "must be in heap");
+  g1h->check_oop_location(obj);
 
   HeapRegion* from = g1h->heap_region_containing(p);
 
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -63,7 +63,7 @@
 jint ParallelScavengeHeap::initialize() {
   const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();
 
-  ReservedSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);
 
   os::trace_page_sizes("Heap",
                        MinHeapSize,
@@ -72,9 +72,9 @@
                        heap_rs.base(),
                        heap_rs.size());
 
-  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region(heap_rs);
 
-  PSCardTable* card_table = new PSCardTable(reserved_region());
+  PSCardTable* card_table = new PSCardTable(heap_rs.region());
   card_table->initialize();
   CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
   barrier_set->initialize();
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -168,6 +168,9 @@
   bool is_in_young(oop p);  // reserved part
   bool is_in_old(oop p);    // reserved part
 
+  MemRegion reserved_region() const { return _reserved; }
+  HeapWord* base() const { return _reserved.start(); }
+
   // Memory allocation.   "gc_time_limit_was_exceeded" will
   // be set to true if the adaptive size policy determine that
   // an excessive amount of time is being spent doing collections
--- a/src/hotspot/share/gc/serial/markSweep.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/serial/markSweep.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -127,7 +127,7 @@
 void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
 
 template <class T> inline void MarkSweep::follow_root(T* p) {
-  assert(!Universe::heap()->is_in_reserved(p),
+  assert(!Universe::heap()->is_in(p),
          "roots shouldn't be things within the heap");
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
--- a/src/hotspot/share/gc/serial/markSweep.inline.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/serial/markSweep.inline.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -87,8 +87,7 @@
            "should be forwarded");
 
     if (new_obj != NULL) {
-      assert(Universe::heap()->is_in_reserved(new_obj),
-             "should be in object space");
+      DEBUG_ONLY(Universe::heap()->check_oop_location((HeapWord*)new_obj);)
       RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
     }
   }
--- a/src/hotspot/share/gc/shared/blockOffsetTable.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shared/blockOffsetTable.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -244,10 +244,10 @@
 BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                     HeapWord* blk_end,
                                     Action action, bool reducing) {
-  assert(Universe::heap()->is_in_reserved(blk_start),
-         "reference must be into the heap");
-  assert(Universe::heap()->is_in_reserved(blk_end-1),
-         "limit must be within the heap");
+  assert(_sp->is_in_reserved(blk_start),
+         "reference must be into the space");
+  assert(_sp->is_in_reserved(blk_end-1),
+         "limit must be within the space");
   // This is optimized to make the test fast, assuming we only rarely
   // cross boundaries.
   uintptr_t end_ui = (uintptr_t)(blk_end - 1);
@@ -718,10 +718,10 @@
          "blk_start should be at or before threshold");
   assert(pointer_delta(_next_offset_threshold, blk_start) <= BOTConstants::N_words,
          "offset should be <= BlockOffsetSharedArray::N");
-  assert(Universe::heap()->is_in_reserved(blk_start),
-         "reference must be into the heap");
-  assert(Universe::heap()->is_in_reserved(blk_end-1),
-         "limit must be within the heap");
+  assert(_sp->is_in_reserved(blk_start),
+         "reference must be into the space");
+  assert(_sp->is_in_reserved(blk_end-1),
+         "limit must be within the space");
   assert(_next_offset_threshold ==
          _array->_reserved.start() + _next_offset_index*BOTConstants::N_words,
          "index must agree with threshold");
@@ -775,8 +775,6 @@
 }
 
 HeapWord* BlockOffsetArrayContigSpace::initialize_threshold() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
-         "just checking");
   _next_offset_index = _array->index_for(_bottom);
   _next_offset_index++;
   _next_offset_threshold =
@@ -785,8 +783,6 @@
 }
 
 void BlockOffsetArrayContigSpace::zero_bottom_entry() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
-         "just checking");
   size_t bottom_index = _array->index_for(_bottom);
   _array->set_offset_array(bottom_index, 0);
 }
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -91,7 +91,7 @@
   size_t capacity_in_words = capacity() / HeapWordSize;
 
   return VirtualSpaceSummary(
-    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
+    _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());
 }
 
 GCHeapSummary CollectedHeap::create_heap_summary() {
@@ -178,11 +178,11 @@
     return false;
   }
 
-  if (!is_in_reserved(object)) {
+  if (!is_in(object)) {
     return false;
   }
 
-  if (is_in_reserved(object->klass_or_null())) {
+  if (is_in(object->klass_or_null())) {
     return false;
   }
 
@@ -343,6 +343,11 @@
 }
 #endif // PRODUCT
 
+void CollectedHeap::check_oop_location(void* addr) const {
+  assert(check_obj_alignment(addr), "address is not aligned");
+  assert(_reserved.contains(addr),  "address is not in reserved heap");
+}
+
 size_t CollectedHeap::max_tlab_size() const {
   // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
   // This restriction could be removed by enabling filling with multiple arrays.
@@ -371,8 +376,8 @@
 {
   assert(words >= min_fill_size(), "too small to fill");
   assert(is_object_aligned(words), "unaligned size");
-  assert(Universe::heap()->is_in_reserved(start), "not in heap");
-  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
+  DEBUG_ONLY(Universe::heap()->check_oop_location(start);)
+  DEBUG_ONLY(Universe::heap()->check_oop_location(start + words - MinObjAlignment);)
 }
 
 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
@@ -516,12 +521,12 @@
   full_gc_dump(timer, false);
 }
 
-void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
+void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
   // It is important to do this in a way such that concurrent readers can't
   // temporarily think something is in the heap.  (Seen this happen in asserts.)
   _reserved.set_word_size(0);
-  _reserved.set_start(start);
-  _reserved.set_end(end);
+  _reserved.set_start((HeapWord*)rs.base());
+  _reserved.set_end((HeapWord*)rs.end());
 }
 
 void CollectedHeap::post_initialize() {
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -51,6 +51,7 @@
 class GCMemoryManager;
 class MemoryPool;
 class MetaspaceSummary;
+class ReservedHeapSpace;
 class SoftRefPolicy;
 class Thread;
 class ThreadClosure;
@@ -102,9 +103,10 @@
  private:
   GCHeapLog* _gc_heap_log;
 
+ protected:
+  // Not used by all GCs
   MemRegion _reserved;
 
- protected:
   bool _is_gc_active;
 
   // Used for filler objects (static, but initialized in ctor).
@@ -203,9 +205,7 @@
   virtual void safepoint_synchronize_begin() {}
   virtual void safepoint_synchronize_end() {}
 
-  void initialize_reserved_region(HeapWord *start, HeapWord *end);
-  MemRegion reserved_region() const { return _reserved; }
-  address base() const { return (address)reserved_region().start(); }
+  void initialize_reserved_region(const ReservedHeapSpace& rs);
 
   virtual size_t capacity() const = 0;
   virtual size_t used() const = 0;
@@ -226,15 +226,6 @@
   // spaces).
   virtual size_t max_capacity() const = 0;
 
-  // Returns "TRUE" if "p" points into the reserved area of the heap.
-  bool is_in_reserved(const void* p) const {
-    return _reserved.contains(p);
-  }
-
-  bool is_in_reserved_or_null(const void* p) const {
-    return p == NULL || is_in_reserved(p);
-  }
-
   // Returns "TRUE" iff "p" points into the committed areas of the heap.
   // This method can be expensive so avoid using it in performance critical
   // code.
@@ -242,6 +233,11 @@
 
   DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })
 
+  // This function verifies that "addr" is a valid oop location, w.r.t. heap
+  // datastructures such as bitmaps and virtual memory address. It does *not*
+  // check if the location is within committed heap memory.
+  virtual void check_oop_location(void* addr) const;
+
   virtual uint32_t hash_oop(oop obj) const;
 
   void set_gc_cause(GCCause::Cause v) {
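To connect the declaration above with its call sites, this is the caller-side idiom as it appears in the collectedHeap.cpp and markSweep.inline.hpp hunks of this changeset (shown here only for orientation; the DEBUG_ONLY wrapper keeps the verification out of product builds):

    // Debug-only check that a computed address is a valid oop location.
    // The default implementation verifies alignment and containment in the
    // reserved region; CMS overrides it to a no-op and ZGC checks its own
    // address window instead.
    DEBUG_ONLY(Universe::heap()->check_oop_location(start);)
    DEBUG_ONLY(Universe::heap()->check_oop_location(start + words - MinObjAlignment);)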
--- a/src/hotspot/share/gc/shared/gcVMOperations.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -163,7 +163,7 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   _result = gch->satisfy_failed_allocation(_word_size, _tlab);
-  assert(gch->is_in_reserved_or_null(_result), "result not in heap");
+  assert(_result == NULL || gch->is_in_reserved(_result), "result not in heap");
 
   if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
     set_gc_locked();
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -105,10 +105,7 @@
 
   // Allocate space for the heap.
 
-  char* heap_address;
-  ReservedSpace heap_rs;
-
-  heap_address = allocate(HeapAlignment, &heap_rs);
+  ReservedHeapSpace heap_rs = allocate(HeapAlignment);
 
   if (!heap_rs.is_reserved()) {
     vm_shutdown_during_initialization(
@@ -116,9 +113,9 @@
     return JNI_ENOMEM;
   }
 
-  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region(heap_rs);
 
-  _rem_set = create_rem_set(reserved_region());
+  _rem_set = create_rem_set(heap_rs.region());
   _rem_set->initialize();
   CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
   bs->initialize();
@@ -126,9 +123,9 @@
 
   ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size(), false, false);
   _young_gen = _young_gen_spec->init(young_rs, rem_set());
-  heap_rs = heap_rs.last_part(_young_gen_spec->max_size());
+  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());
 
-  ReservedSpace old_rs = heap_rs.first_part(_old_gen_spec->max_size(), false, false);
+  old_rs = old_rs.first_part(_old_gen_spec->max_size(), false, false);
   _old_gen = _old_gen_spec->init(old_rs, rem_set());
   clear_incremental_collection_failed();
 
@@ -150,8 +147,7 @@
                                         GCTimeRatio);
 }
 
-char* GenCollectedHeap::allocate(size_t alignment,
-                                 ReservedSpace* heap_rs){
+ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
   // Now figure out the total size.
   const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
   assert(alignment % pageSize == 0, "Must be");
@@ -166,16 +162,16 @@
          "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
          SIZE_FORMAT, total_reserved, alignment);
 
-  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
 
   os::trace_page_sizes("Heap",
                        MinHeapSize,
                        total_reserved,
                        alignment,
-                       heap_rs->base(),
-                       heap_rs->size());
+                       heap_rs.base(),
+                       heap_rs.size());
 
-  return heap_rs->base();
+  return heap_rs;
 }
 
 class GenIsScavengable : public BoolObjectClosure {
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -96,7 +96,7 @@
                           bool restore_marks_for_biased_locking);
 
   // Reserve aligned space for the heap as needed by the contained generations.
-  char* allocate(size_t alignment, ReservedSpace* heap_rs);
+  ReservedHeapSpace allocate(size_t alignment);
 
   // Initialize ("weak") refs processing support
   void ref_processing_init();
@@ -180,6 +180,9 @@
   bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
   bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }
 
+  MemRegion reserved_region() const { return _reserved; }
+  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
+
   GenerationSpec* young_gen_spec() const;
   GenerationSpec* old_gen_spec() const;
 
--- a/src/hotspot/share/gc/shared/markBitMap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shared/markBitMap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -62,7 +62,7 @@
 
 #ifdef ASSERT
 void MarkBitMap::check_mark(HeapWord* addr) {
-  assert(Universe::heap()->is_in_reserved(addr),
+  assert(Universe::heap()->is_in(addr),
          "Trying to access bitmap " PTR_FORMAT " for address " PTR_FORMAT " not in the heap.",
          p2i(this), p2i(addr));
 }
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -269,7 +269,7 @@
 
   _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered);
   _referent = java_lang_ref_Reference::referent(_current_discovered);
-  assert(Universe::heap()->is_in_reserved_or_null(_referent),
+  assert(Universe::heap()->is_in_or_null(_referent),
          "Wrong oop found in java.lang.Reference object");
   assert(allow_null_referent ?
              oopDesc::is_oop_or_null(_referent)
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -599,7 +599,7 @@
     while (*entry != NULL) {
       typeArrayOop value = (*entry)->obj();
       guarantee(value != NULL, "Object must not be NULL");
-      guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap");
+      guarantee(Universe::heap()->is_in(value), "Object must be on the heap");
       guarantee(!value->is_forwarded(), "Object must not be forwarded");
       guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
       bool latin1 = (*entry)->latin1();
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -179,8 +179,8 @@
   // Reserve and commit memory for heap
   //
 
-  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
-  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
+  initialize_reserved_region(heap_rs);
   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
   _heap_region_special = heap_rs.special();
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -530,6 +530,9 @@
 
   bool is_in(const void* p) const;
 
+  MemRegion reserved_region() const { return _reserved; }
+  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
+
   void collect(GCCause::Cause cause);
   void do_full_collection(bool clear_all_soft_refs);
 
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -67,8 +67,8 @@
     return JNI_ENOMEM;
   }
 
-  initialize_reserved_region((HeapWord*)ZAddressReservedStart,
-                             (HeapWord*)ZAddressReservedEnd);
+  Universe::calculate_verify_data((HeapWord*)ZAddressReservedStart,
+                                  (HeapWord*)ZAddressReservedEnd);
 
   return JNI_OK;
 }
@@ -286,9 +286,10 @@
 VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
   const size_t capacity_in_words = capacity() / HeapWordSize;
   const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
-  return VirtualSpaceSummary(reserved_region().start(),
-                             reserved_region().start() + capacity_in_words,
-                             reserved_region().start() + max_capacity_in_words);
+  HeapWord* const heap_start = (HeapWord*)ZAddressReservedStart;
+  return VirtualSpaceSummary(heap_start,
+                             heap_start + capacity_in_words,
+                             heap_start + max_capacity_in_words);
 }
 
 void ZCollectedHeap::safepoint_synchronize_begin() {
@@ -366,3 +367,11 @@
 bool ZCollectedHeap::is_oop(oop object) const {
   return CollectedHeap::is_oop(object) && _heap.is_oop(object);
 }
+
+void ZCollectedHeap::check_oop_location(void* addr) const {
+  assert(check_obj_alignment(addr), "address is not aligned");
+
+  const uintptr_t addr_int = reinterpret_cast<uintptr_t>(addr);
+  assert(addr_int >= ZAddressSpaceStart, "address is outside of the heap");
+  assert(addr_int < ZAddressSpaceEnd,    "address is outside of the heap");
+}
--- a/src/hotspot/share/gc/z/zCollectedHeap.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -126,6 +126,7 @@
   virtual void prepare_for_verify();
   virtual void verify(VerifyOption option /* ignored */);
   virtual bool is_oop(oop object) const;
+  virtual void check_oop_location(void* addr) const;
 };
 
 #endif // SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -769,10 +769,10 @@
     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
   }
   Handle h_obj(thread, elem->obj());
-  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
+  assert(Universe::heap()->is_in_or_null(h_obj()),
          "must be NULL or an object");
   ObjectSynchronizer::enter(h_obj, elem->lock(), CHECK);
-  assert(Universe::heap()->is_in_reserved_or_null(elem->obj()),
+  assert(Universe::heap()->is_in_or_null(elem->obj()),
          "must be NULL or an object");
 #ifdef ASSERT
   thread->last_frame().interpreter_frame_verify_monitor(elem);
@@ -786,7 +786,7 @@
   thread->last_frame().interpreter_frame_verify_monitor(elem);
 #endif
   Handle h_obj(thread, elem->obj());
-  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
+  assert(Universe::heap()->is_in_or_null(h_obj()),
          "must be NULL or an object");
   if (elem == NULL || h_obj()->is_unlocked()) {
     THROW(vmSymbols::java_lang_IllegalMonitorStateException());
@@ -853,10 +853,10 @@
     Symbol* signature = call.signature();
     receiver = Handle(thread, last_frame.callee_receiver(signature));
 
-    assert(Universe::heap()->is_in_reserved_or_null(receiver()),
+    assert(Universe::heap()->is_in_or_null(receiver()),
            "sanity check");
     assert(receiver.is_null() ||
-           !Universe::heap()->is_in_reserved(receiver->klass()),
+           !Universe::heap()->is_in(receiver->klass()),
            "sanity check");
   }
 
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -858,17 +858,6 @@
 
 // private void CompilerToVM.registerNatives()
 JVM_ENTRY_NO_ENV(void, JVM_RegisterJVMCINatives(JNIEnv *env, jclass c2vmClass))
-
-#ifdef _LP64
-#ifndef TARGET_ARCH_sparc
-  uintptr_t heap_end = (uintptr_t) Universe::heap()->reserved_region().end();
-  uintptr_t allocation_end = heap_end + ((uintptr_t)16) * 1024 * 1024 * 1024;
-  guarantee(heap_end < allocation_end, "heap end too close to end of address space (might lead to erroneous TLAB allocations)");
-#endif // TARGET_ARCH_sparc
-#else
-  fatal("check TLAB allocation code for address space conflicts");
-#endif
-
   JNI_JVMCIENV(thread, env);
 
   if (!EnableJVMCI) {
--- a/src/hotspot/share/memory/filemap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/memory/filemap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -231,7 +231,7 @@
   _narrow_klass_shift = CompressedKlassPointers::shift();
   _shared_path_table = mapinfo->_shared_path_table;
   if (HeapShared::is_heap_object_archiving_allowed()) {
-    _heap_reserved = Universe::heap()->reserved_region();
+    _heap_end = CompressedOops::end();
   }
 
   // The following fields are for sanity checks for whether this archive
@@ -1500,8 +1500,6 @@
     // referenced objects are replaced. See HeapShared::initialize_from_archived_subgraph().
   }
 
-  MemRegion heap_reserved = Universe::heap()->reserved_region();
-
   log_info(cds)("CDS archive was created with max heap size = " SIZE_FORMAT "M, and the following configuration:",
                 max_heap_size()/M);
   log_info(cds)("    narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
@@ -1510,7 +1508,7 @@
                 narrow_oop_mode(), p2i(narrow_oop_base()), narrow_oop_shift());
 
   log_info(cds)("The current max heap size = " SIZE_FORMAT "M, HeapRegion::GrainBytes = " SIZE_FORMAT,
-                heap_reserved.byte_size()/M, HeapRegion::GrainBytes);
+                MaxHeapSize/M, HeapRegion::GrainBytes);
   log_info(cds)("    narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                 p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
   log_info(cds)("    narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
@@ -1529,10 +1527,10 @@
     _heap_pointers_need_patching = true;
   } else {
     MemRegion range = get_heap_regions_range_with_current_oop_encoding_mode();
-    if (!heap_reserved.contains(range)) {
+    if (!CompressedOops::is_in(range)) {
       log_info(cds)("CDS heap data need to be relocated because");
       log_info(cds)("the desired range " PTR_FORMAT " - "  PTR_FORMAT, p2i(range.start()), p2i(range.end()));
-      log_info(cds)("is outside of the heap " PTR_FORMAT " - "  PTR_FORMAT, p2i(heap_reserved.start()), p2i(heap_reserved.end()));
+      log_info(cds)("is outside of the heap " PTR_FORMAT " - "  PTR_FORMAT, p2i(CompressedOops::begin()), p2i(CompressedOops::end()));
       _heap_pointers_need_patching = true;
     }
   }
@@ -1548,8 +1546,8 @@
     // At run time, they may not be inside the heap, so we move them so
     // that they are now near the top of the runtime time. This can be done by
     // the simple math of adding the delta as shown above.
-    address dumptime_heap_end = (address)_header->_heap_reserved.end();
-    address runtime_heap_end = (address)heap_reserved.end();
+    address dumptime_heap_end = (address)_header->_heap_end;
+    address runtime_heap_end = (address)CompressedOops::end();
     delta = runtime_heap_end - dumptime_heap_end;
   }
 
--- a/src/hotspot/share/memory/filemap.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/memory/filemap.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -142,7 +142,7 @@
   size_t  _cds_i2i_entry_code_buffers_size;
   size_t  _core_spaces_size;        // number of bytes allocated by the core spaces
                                     // (mc, md, ro, rw and od).
-  MemRegion _heap_reserved;         // reserved region for the entire heap at dump time.
+  address _heap_end;                // heap end at dump time.
   bool _base_archive_is_default;    // indicates if the base archive is the system default one
 
   // The following fields are all sanity checks for whether this archive
--- a/src/hotspot/share/memory/metaspace.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/memory/metaspace.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -1268,7 +1268,7 @@
   {
 #ifdef _LP64
     if (using_class_space()) {
-      char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
+      char* base = (char*)align_up(CompressedOops::end(), _reserve_alignment);
       allocate_metaspace_compressed_klass_ptrs(base, 0);
     }
 #endif // _LP64
--- a/src/hotspot/share/memory/universe.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/memory/universe.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -666,8 +666,6 @@
     return status;
   }
 
-  CompressedOops::initialize();
-
   Universe::initialize_tlab();
 
   Metaspace::global_initialize();
@@ -747,7 +745,7 @@
   }
 }
 
-ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
+ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
 
   assert(alignment <= Arguments::conservative_max_heap_alignment(),
          "actual alignment " SIZE_FORMAT " must be within maximum heap alignment " SIZE_FORMAT,
@@ -770,16 +768,16 @@
            "must be exactly of required size and alignment");
     // We are good.
 
-    if (UseCompressedOops) {
-      // Universe::initialize_heap() will reset this to NULL if unscaled
-      // or zero-based narrow oops are actually used.
-      // Else heap start and base MUST differ, so that NULL can be encoded nonambigous.
-      CompressedOops::set_base((address)total_rs.compressed_oop_base());
-    }
-
     if (AllocateHeapAt != NULL) {
       log_info(gc,heap)("Successfully allocated Java heap at location %s", AllocateHeapAt);
     }
+
+    if (UseCompressedOops) {
+      CompressedOops::initialize(total_rs);
+    }
+
+    Universe::calculate_verify_data((HeapWord*)total_rs.base(), (HeapWord*)total_rs.end());
+
     return total_rs;
   }
 
@@ -1171,14 +1169,10 @@
 // Oop verification (see MacroAssembler::verify_oop)
 
 uintptr_t Universe::verify_oop_mask() {
-  MemRegion m = heap()->reserved_region();
-  calculate_verify_data(m.start(), m.end());
   return _verify_oop_mask;
 }
 
 uintptr_t Universe::verify_oop_bits() {
-  MemRegion m = heap()->reserved_region();
-  calculate_verify_data(m.start(), m.end());
   return _verify_oop_bits;
 }
 
--- a/src/hotspot/share/memory/universe.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/memory/universe.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -212,9 +212,9 @@
   static uintptr_t _verify_oop_mask;
   static uintptr_t _verify_oop_bits;
 
+ public:
   static void calculate_verify_data(HeapWord* low_boundary, HeapWord* high_boundary) PRODUCT_RETURN;
 
- public:
   // Known classes in the VM
   static Klass* boolArrayKlassObj()                 { return typeArrayKlassObj(T_BOOLEAN); }
   static Klass* byteArrayKlassObj()                 { return typeArrayKlassObj(T_BYTE); }
@@ -326,7 +326,7 @@
   static CollectedHeap* heap() { return _collectedHeap; }
 
   // Reserve Java heap and determine CompressedOops mode
-  static ReservedSpace reserve_heap(size_t heap_size, size_t alignment);
+  static ReservedHeapSpace reserve_heap(size_t heap_size, size_t alignment);
 
   // Historic gc information
   static size_t get_heap_free_at_last_gc()             { return _heap_capacity_at_last_gc - _heap_used_at_last_gc; }
--- a/src/hotspot/share/memory/virtualspace.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/memory/virtualspace.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -637,6 +637,10 @@
   }
 }
 
+MemRegion ReservedHeapSpace::region() const {
+  return MemRegion((HeapWord*)base(), (HeapWord*)end());
+}
+
 // Reserve space for code segment.  Same as Java heap only we mark this as
 // executable.
 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
--- a/src/hotspot/share/memory/virtualspace.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/memory/virtualspace.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_MEMORY_VIRTUALSPACE_HPP
 #define SHARE_MEMORY_VIRTUALSPACE_HPP
 
+#include "memory/memRegion.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class outputStream;
@@ -122,7 +123,8 @@
   ReservedHeapSpace(size_t size, size_t forced_base_alignment, bool large, const char* heap_allocation_directory = NULL);
   // Returns the base to be used for compression, i.e. so that null can be
   // encoded safely and implicit null checks can work.
-  char *compressed_oop_base() { return _base - _noaccess_prefix; }
+  char *compressed_oop_base() const { return _base - _noaccess_prefix; }
+  MemRegion region() const;
 };
 
 // Class encapsulating behavior specific memory space for Code
--- a/src/hotspot/share/oops/compressedOops.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/oops/compressedOops.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -34,8 +34,7 @@
 
 // For UseCompressedOops.
 NarrowPtrStruct CompressedOops::_narrow_oop = { NULL, 0, true };
-
-address CompressedOops::_narrow_ptrs_base;
+MemRegion       CompressedOops::_heap_address_range;
 
 // Choose the heap base address and oop encoding mode
 // when compressed oops are used:
@@ -44,41 +43,43 @@
 // ZeroBased - Use zero based compressed oops with encoding when
 //     NarrowOopHeapBaseMin + heap_size < 32Gb
 // HeapBased - Use compressed oops with heap base + encoding.
-void CompressedOops::initialize() {
+void CompressedOops::initialize(const ReservedHeapSpace& heap_space) {
 #ifdef _LP64
-  if (UseCompressedOops) {
-    // Subtract a page because something can get allocated at heap base.
-    // This also makes implicit null checking work, because the
-    // memory+1 page below heap_base needs to cause a signal.
-    // See needs_explicit_null_check.
-    // Only set the heap base for compressed oops because it indicates
-    // compressed oops for pstack code.
-    if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
-      // Didn't reserve heap below 4Gb.  Must shift.
-      set_shift(LogMinObjAlignmentInBytes);
-    }
-    if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) {
-      // Did reserve heap below 32Gb. Can use base == 0;
-      set_base(0);
-    }
-    AOTLoader::set_narrow_oop_shift();
+  // Subtract a page because something can get allocated at heap base.
+  // This also makes implicit null checking work, because the
+  // memory+1 page below heap_base needs to cause a signal.
+  // See needs_explicit_null_check.
+  // Only set the heap base for compressed oops because it indicates
+  // compressed oops for pstack code.
+  if ((uint64_t)heap_space.end() > UnscaledOopHeapMax) {
+    // Didn't reserve heap below 4Gb.  Must shift.
+    set_shift(LogMinObjAlignmentInBytes);
+  }
+  if ((uint64_t)heap_space.end() <= OopEncodingHeapMax) {
+    // Did reserve heap below 32Gb. Can use base == 0;
+    set_base(0);
+  } else {
+    set_base((address)heap_space.compressed_oop_base());
+  }
 
-    set_ptrs_base(base());
+  AOTLoader::set_narrow_oop_shift();
+
+  _heap_address_range = heap_space.region();
 
-    LogTarget(Info, gc, heap, coops) lt;
-    if (lt.is_enabled()) {
-      ResourceMark rm;
-      LogStream ls(lt);
-      print_mode(&ls);
-    }
+  LogTarget(Info, gc, heap, coops) lt;
+  if (lt.is_enabled()) {
+    ResourceMark rm;
+    LogStream ls(lt);
+    print_mode(&ls);
+  }
 
-    // Tell tests in which mode we run.
-    Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
-                                                   mode_to_string(mode()),
-                                                   false));
-  }
+  // Tell tests in which mode we run.
+  Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
+                                                 mode_to_string(mode()),
+                                                 false));
+
   // base() is one page below the heap.
-  assert((intptr_t)base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size()) ||
+  assert((intptr_t)base() <= (intptr_t)(_heap_address_range.start() - os::vm_page_size()) ||
          base() == NULL, "invalid value");
   assert(shift() == LogMinObjAlignmentInBytes ||
          shift() == 0, "invalid value");
@@ -99,8 +100,12 @@
   _narrow_oop._use_implicit_null_checks   = use;
 }
 
-void CompressedOops::set_ptrs_base(address addr) {
-  _narrow_ptrs_base = addr;
+bool CompressedOops::is_in(void* addr) {
+  return _heap_address_range.contains(addr);
+}
+
+bool CompressedOops::is_in(MemRegion mr) {
+  return _heap_address_range.contains(mr);
 }
 
 CompressedOops::Mode CompressedOops::mode() {
@@ -155,7 +160,7 @@
 
 void CompressedOops::print_mode(outputStream* st) {
   st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
-            p2i(Universe::heap()->base()), Universe::heap()->reserved_region().byte_size()/M);
+            p2i(_heap_address_range.start()), _heap_address_range.byte_size()/M);
 
   st->print(", Compressed Oops mode: %s", mode_to_string(mode()));
 
--- a/src/hotspot/share/oops/compressedOops.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/oops/compressedOops.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -26,10 +26,12 @@
 #define SHARE_OOPS_COMPRESSEDOOPS_HPP
 
 #include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class outputStream;
+class ReservedHeapSpace;
 
 struct NarrowPtrStruct {
   // Base address for oop-within-java-object materialization.
@@ -49,7 +51,8 @@
   // For UseCompressedOops.
   static NarrowPtrStruct _narrow_oop;
 
-  static address _narrow_ptrs_base;
+  // The address range of the heap
+  static MemRegion _heap_address_range;
 
 public:
   // For UseCompressedOops
@@ -73,21 +76,24 @@
     AnyNarrowOopMode = 4
   };
 
-  static void initialize();
+  static void initialize(const ReservedHeapSpace& heap_space);
 
   static void set_base(address base);
   static void set_shift(int shift);
   static void set_use_implicit_null_checks(bool use);
 
-  static void set_ptrs_base(address addr);
-
-  static address  base()                     { return  _narrow_oop._base; }
+  static address  base()                     { return _narrow_oop._base; }
+  static address  begin()                    { return (address)_heap_address_range.start(); }
+  static address  end()                      { return (address)_heap_address_range.end(); }
   static bool     is_base(void* addr)        { return (base() == (address)addr); }
-  static int      shift()                    { return  _narrow_oop._shift; }
-  static bool     use_implicit_null_checks() { return  _narrow_oop._use_implicit_null_checks; }
+  static int      shift()                    { return _narrow_oop._shift; }
+  static bool     use_implicit_null_checks() { return _narrow_oop._use_implicit_null_checks; }
 
-  static address* ptrs_base_addr()           { return &_narrow_ptrs_base; }
-  static address  ptrs_base()                { return _narrow_ptrs_base; }
+  static address* ptrs_base_addr()           { return &_narrow_oop._base; }
+  static address  ptrs_base()                { return _narrow_oop._base; }
+
+  static bool is_in(void* addr);
+  static bool is_in(MemRegion mr);
 
   static Mode mode();
   static const char* mode_to_string(Mode mode);
--- a/src/hotspot/share/oops/compressedOops.inline.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/oops/compressedOops.inline.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -57,8 +57,7 @@
 
 inline narrowOop CompressedOops::encode_not_null(oop v) {
   assert(!is_null(v), "oop value can never be zero");
-  assert(check_obj_alignment(v), "Address not aligned");
-  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
+  DEBUG_ONLY(Universe::heap()->check_oop_location(v);)
   uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base(), 1));
   assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
   uint64_t result = pd >> shift();
--- a/src/hotspot/share/oops/oop.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/oops/oop.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -35,6 +35,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/copy.hpp"
+#include "utilities/macros.hpp"
 
 bool always_do_update_barrier = false;
 
@@ -123,14 +124,6 @@
   return obj == NULL ? true : is_oop(obj, ignore_mark_word);
 }
 
-#ifndef PRODUCT
-#if INCLUDE_CDS_JAVA_HEAP
-bool oopDesc::is_archived_object(oop p) {
-  return HeapShared::is_archived_object(p);
-}
-#endif
-#endif // PRODUCT
-
 VerifyOopClosure VerifyOopClosure::verify_oop;
 
 template <class T> void VerifyOopClosure::do_oop_work(T* p) {
@@ -215,3 +208,13 @@
 
 jdouble oopDesc::double_field_acquire(int offset) const               { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
 void oopDesc::release_double_field_put(int offset, jdouble value)     { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
+
+#ifdef ASSERT
+void oopDesc::verify_forwardee(oop forwardee) {
+  Universe::heap()->check_oop_location(forwardee);
+#if INCLUDE_CDS_JAVA_HEAP
+  assert(!HeapShared::is_archived_object(forwardee) && !HeapShared::is_archived_object(this),
+         "forwarding archive object");
+#endif
+}
+#endif
--- a/src/hotspot/share/oops/oop.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/oops/oop.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -256,9 +256,6 @@
   // asserts and guarantees
   static bool is_oop(oop obj, bool ignore_mark_word = false);
   static bool is_oop_or_null(oop obj, bool ignore_mark_word = false);
-#ifndef PRODUCT
-  static bool is_archived_object(oop p) NOT_CDS_JAVA_HEAP_RETURN_(false);
-#endif
 
   // garbage collection
   inline bool is_gc_marked() const;
@@ -266,6 +263,8 @@
   // Forward pointer operations for scavenge
   inline bool is_forwarded() const;
 
+  void verify_forwardee(oop forwardee) NOT_DEBUG_RETURN;
+
   inline void forward_to(oop p);
   inline bool cas_forward_to(oop p, markWord compare, atomic_memory_order order = memory_order_conservative);
 
--- a/src/hotspot/share/oops/oop.inline.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/oops/oop.inline.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -350,13 +350,7 @@
 
 // Used by scavengers
 void oopDesc::forward_to(oop p) {
-  assert(check_obj_alignment(p),
-         "forwarding to something not aligned");
-  assert(Universe::heap()->is_in_reserved(p),
-         "forwarding to something not in heap");
-  assert(!is_archived_object(oop(this)) &&
-         !is_archived_object(p),
-         "forwarding archive object");
+  verify_forwardee(p);
   markWord m = markWord::encode_pointer_as_mark(p);
   assert(m.decode_pointer() == p, "encoding must be reversable");
   set_mark_raw(m);
@@ -364,22 +358,14 @@
 
 // Used by parallel scavengers
 bool oopDesc::cas_forward_to(oop p, markWord compare, atomic_memory_order order) {
-  assert(check_obj_alignment(p),
-         "forwarding to something not aligned");
-  assert(Universe::heap()->is_in_reserved(p),
-         "forwarding to something not in heap");
+  verify_forwardee(p);
   markWord m = markWord::encode_pointer_as_mark(p);
   assert(m.decode_pointer() == p, "encoding must be reversable");
   return cas_set_mark_raw(m, compare, order) == compare;
 }
 
 oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
-  // CMS forwards some non-heap value into the mark oop to reserve oops during
-  // promotion, so the next two asserts do not hold.
-  assert(UseConcMarkSweepGC || check_obj_alignment(p),
-         "forwarding to something not aligned");
-  assert(UseConcMarkSweepGC || Universe::heap()->is_in_reserved(p),
-         "forwarding to something not in heap");
+  verify_forwardee(p);
   markWord m = markWord::encode_pointer_as_mark(p);
   assert(m.decode_pointer() == p, "encoding must be reversable");
   markWord old_mark = cas_set_mark_raw(m, compare, order);
--- a/src/hotspot/share/oops/oopsHierarchy.hpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp	Thu Sep 05 08:26:49 2019 +0200
@@ -190,8 +190,8 @@
   return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o);
 }
 
-inline bool check_obj_alignment(oop obj) {
-  return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
+inline bool check_obj_alignment(void* ptr) {
+  return (uintptr_t(ptr) & MinObjAlignmentInBytesMask) == 0;
 }
 
 // The metadata hierarchy is separate from the oop hierarchy
--- a/src/hotspot/share/oops/symbol.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/oops/symbol.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -394,7 +394,7 @@
   if (!os::is_readable_range(s, s + 1)) return false;
 
   // Symbols are not allocated in Java heap.
-  if (Universe::heap()->is_in_reserved(s)) return false;
+  if (Universe::heap()->is_in(s)) return false;
 
   int len = s->utf8_length();
   if (len < 0) return false;
--- a/src/hotspot/share/opto/machnode.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/opto/machnode.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -348,7 +348,7 @@
       return TypePtr::BOTTOM;
     }
     // %%% make offset be intptr_t
-    assert(!Universe::heap()->is_in_reserved(cast_to_oop(offset)), "must be a raw ptr");
+    assert(!Universe::heap()->is_in(cast_to_oop(offset)), "must be a raw ptr");
     return TypeRawPtr::BOTTOM;
   }
 
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -1545,7 +1545,7 @@
         // SATB marking similar to other j.l.ref.Reference referents. This is
         // achieved by using a phantom load in the object() accessor.
         oop o = entry->object();
-        assert(o != NULL && Universe::heap()->is_in_reserved(o), "sanity check");
+        assert(o != NULL && Universe::heap()->is_in(o), "sanity check");
         jobject ref = JNIHandles::make_local(JavaThread::current(), o);
         _object_results->append(ref);
         _tag_results->append((uint64_t)entry->tag());
@@ -2572,7 +2572,7 @@
       return;
     }
 
-    assert(Universe::heap()->is_in_reserved(o), "should be impossible");
+    assert(Universe::heap()->is_in(o), "should be impossible");
 
     jvmtiHeapReferenceKind kind = root_kind();
     if (kind == JVMTI_HEAP_REFERENCE_SYSTEM_CLASS) {
@@ -2964,7 +2964,7 @@
       oop fld_o = o->obj_field(field->field_offset());
       // ignore any objects that aren't visible to profiler
       if (fld_o != NULL) {
-        assert(Universe::heap()->is_in_reserved(fld_o), "unsafe code should not "
+        assert(Universe::heap()->is_in(fld_o), "unsafe code should not "
                "have references to Klass* anymore");
         int slot = field->field_index();
         if (!CallbackInvoker::report_field_reference(o, fld_o, slot)) {
--- a/src/hotspot/share/runtime/jniHandles.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/src/hotspot/share/runtime/jniHandles.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -483,7 +483,7 @@
 
 
 jobject JNIHandleBlock::allocate_handle(oop obj) {
-  assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+  assert(Universe::heap()->is_in(obj), "sanity check");
   if (_top == 0) {
     // This is the first allocation or the initial block got zapped when
     // entering a native function. If we have any following blocks they are
--- a/test/hotspot/gtest/gc/shared/test_collectedHeap.cpp	Thu Sep 05 08:26:44 2019 +0200
+++ b/test/hotspot/gtest/gc/shared/test_collectedHeap.cpp	Thu Sep 05 08:26:49 2019 +0200
@@ -30,25 +30,12 @@
   CollectedHeap* heap = Universe::heap();
 
   uintptr_t epsilon = (uintptr_t) MinObjAlignment;
-  uintptr_t heap_start = (uintptr_t) heap->reserved_region().start();
-  uintptr_t heap_end = (uintptr_t) heap->reserved_region().end();
+  uintptr_t outside_heap = (uintptr_t) &epsilon;
 
   // Test that NULL is not in the heap.
   ASSERT_FALSE(heap->is_in(NULL)) << "NULL is unexpectedly in the heap";
 
-  // Test that a pointer to before the heap start is reported as outside the heap.
-  ASSERT_GE(heap_start, ((uintptr_t) NULL + epsilon))
-          << "Sanity check - heap should not start at 0";
-
-  void* before_heap = (void*) (heap_start - epsilon);
-  ASSERT_FALSE(heap->is_in(before_heap)) << "before_heap: " << p2i(before_heap)
-          << " is unexpectedly in the heap";
-
-  // Test that a pointer to after the heap end is reported as outside the heap.
-  ASSERT_LE(heap_end, ((uintptr_t)-1 - epsilon))
-          << "Sanity check - heap should not end at the end of address space";
-
-  void* after_heap = (void*) (heap_end + epsilon);
-  ASSERT_FALSE(heap->is_in(after_heap)) << "after_heap: " << p2i(after_heap)
+  // Test that a pointer to outside the heap start is reported as outside the heap.
+  ASSERT_FALSE(heap->is_in((void*)outside_heap)) << "outside_heap: " << outside_heap
           << " is unexpectedly in the heap";
 }
--- a/test/hotspot/jtreg/gc/g1/TestLargePageUseForHeap.java	Thu Sep 05 08:26:44 2019 +0200
+++ b/test/hotspot/jtreg/gc/g1/TestLargePageUseForHeap.java	Thu Sep 05 08:26:49 2019 +0200
@@ -67,7 +67,9 @@
         String errorStr = "Reserve regular memory without large pages";
         String heapPattern = ".*Heap: ";
         // If errorStr is printed just before heap page log, reservation for Java Heap is failed.
-        String result = output.firstMatch(errorStr + "\n" + heapPattern);
+        String result = output.firstMatch(errorStr + "\n" +
+                                          "(?:.*Heap address: .*\n)?" // Heap address: 0x00000000f8000000, size: 128 MB, Compressed Oops mode: 32-bit
+                                          + heapPattern);
         if (result != null) {
             return false;
         }
@@ -149,4 +151,3 @@
         return longValue * multiplier;
     }
 }
-