8211425: Allocation of old generation of java heap on alternate memory devices - G1 GC
author sangheki
date Fri, 21 Dec 2018 08:18:59 -0800
changeset 53116 bb03098c4dde
parent 53115 b5c41404f2d1
child 53117 37930c6ba6d7
8211425: Allocation of old generation of java heap on alternate memory devices - G1 GC
8202286: Allocation of old generation of Java heap on alternate memory devices
Summary: Enable an experimental feature in HotSpot JVM to allocate old generation of G1 GC on an alternative memory device, such as NV-DIMMs.
Reviewed-by: sangheki, sjohanss
Contributed-by: kishor.kharbas@intel.com
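
For readers who want to try the feature, a minimal launch sketch follows. It assumes the flag is experimental (so it must be unlocked first) and uses a hypothetical DAX-mounted path /mnt/pmem0 for the NV-DIMM device; the flag name AllocateOldGenAt comes from the patch below, while the path and heap size are illustrative only:

    java -XX:+UseG1GC \
         -XX:+UnlockExperimentalVMOptions \
         -XX:AllocateOldGenAt=/mnt/pmem0 \
         -Xmx8g \
         MyApplication

With such a setup, young-generation regions stay in dram while old-generation and humongous regions are backed by the device (see the nvdimm jtreg tests below); this is also why the patch reserves address space for both kinds of memory, with heap_reserved_size_bytes() returning 2 * _max_heap_byte_size in g1HeterogeneousCollectorPolicy.cpp.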
src/hotspot/os/linux/os_linux.cpp
src/hotspot/share/gc/g1/g1Allocator.inline.hpp
src/hotspot/share/gc/g1/g1Arguments.cpp
src/hotspot/share/gc/g1/g1CardCounts.cpp
src/hotspot/share/gc/g1/g1CollectedHeap.cpp
src/hotspot/share/gc/g1/g1CollectedHeap.hpp
src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
src/hotspot/share/gc/g1/g1CollectorPolicy.cpp
src/hotspot/share/gc/g1/g1CollectorPolicy.hpp
src/hotspot/share/gc/g1/g1HeapVerifier.cpp
src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.cpp
src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.hpp
src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.cpp
src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.hpp
src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.cpp
src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp
src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp
src/hotspot/share/gc/g1/g1Policy.cpp
src/hotspot/share/gc/g1/g1Policy.hpp
src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp
src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp
src/hotspot/share/gc/g1/g1VMOperations.cpp
src/hotspot/share/gc/g1/g1YoungGenSizer.cpp
src/hotspot/share/gc/g1/g1YoungGenSizer.hpp
src/hotspot/share/gc/g1/g1_globals.hpp
src/hotspot/share/gc/g1/heapRegionManager.cpp
src/hotspot/share/gc/g1/heapRegionManager.hpp
src/hotspot/share/gc/g1/heapRegionSet.cpp
src/hotspot/share/gc/g1/heapRegionSet.hpp
src/hotspot/share/gc/g1/heapRegionType.cpp
src/hotspot/share/gc/g1/heapRegionType.hpp
src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.cpp
src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.hpp
src/hotspot/share/gc/g1/vmStructs_g1.hpp
src/hotspot/share/gc/shared/gcArguments.cpp
src/hotspot/share/gc/shared/gcArguments.hpp
src/hotspot/share/prims/whitebox.cpp
src/hotspot/share/runtime/arguments.cpp
src/hotspot/share/runtime/globals.hpp
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java
test/hotspot/jtreg/TEST.groups
test/hotspot/jtreg/gc/nvdimm/TestAllocateOldGenAt.java
test/hotspot/jtreg/gc/nvdimm/TestAllocateOldGenAtError.java
test/hotspot/jtreg/gc/nvdimm/TestAllocateOldGenAtMultiple.java
test/hotspot/jtreg/gc/nvdimm/TestHumongousObjectsOnNvdimm.java
test/hotspot/jtreg/gc/nvdimm/TestOldObjectsOnNvdimm.java
test/hotspot/jtreg/gc/nvdimm/TestYoungObjectsOnDram.java
test/lib/sun/hotspot/WhiteBox.java
--- a/src/hotspot/os/linux/os_linux.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/os/linux/os_linux.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -5073,7 +5073,7 @@
   // initialize thread priority policy
   prio_init();
 
-  if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
+  if (!FLAG_IS_DEFAULT(AllocateHeapAt) || !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
     set_coredump_filter(DAX_SHARED_BIT);
   }
 
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -97,7 +97,7 @@
   }
 
   _archive_check_enabled = true;
-  size_t length = Universe::heap()->max_capacity();
+  size_t length = G1CollectedHeap::heap()->max_reserved_capacity();
   _closed_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
                                         (HeapWord*)Universe::heap()->base() + length,
                                         HeapRegion::GrainBytes);
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -28,6 +28,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
+#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/gcArguments.inline.hpp"
 #include "gc/shared/workerPolicy.hpp"
@@ -156,5 +157,9 @@
 }
 
 CollectedHeap* G1Arguments::create_heap() {
-  return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
+  if (AllocateOldGenAt != NULL) {
+    return create_heap_with_policy<G1CollectedHeap, G1HeterogeneousCollectorPolicy>();
+  } else {
+    return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
+  }
 }
--- a/src/hotspot/share/gc/g1/g1CardCounts.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1CardCounts.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -63,7 +63,7 @@
 }
 
 void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
-  assert(_g1h->max_capacity() > 0, "initialization order");
+  assert(_g1h->max_reserved_capacity() > 0, "initialization order");
   assert(_g1h->capacity() == 0, "initialization order");
 
   if (G1ConcRSHotCardLimit > 0) {
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -161,12 +161,12 @@
 
 // Private methods.
 
-HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
+HeapRegion* G1CollectedHeap::new_region(size_t word_size, HeapRegionType type, bool do_expand) {
   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
-  HeapRegion* res = _hrm.allocate_free_region(is_old);
+  HeapRegion* res = _hrm->allocate_free_region(type);
 
   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
     // Currently, only attempts to allocate GC alloc regions set
@@ -183,7 +183,7 @@
       // always expand the heap by an amount aligned to the heap
       // region size, the free list should in theory not be empty.
       // In either case allocate_free_region() will check for NULL.
-      res = _hrm.allocate_free_region(is_old);
+      res = _hrm->allocate_free_region(type);
     } else {
       _expand_heap_after_alloc_failure = false;
     }
@@ -330,16 +330,16 @@
     // Only one region to allocate, try to use a fast path by directly allocating
     // from the free lists. Do not try to expand here, we will potentially do that
     // later.
-    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
+    HeapRegion* hr = new_region(word_size, HeapRegionType::Humongous, false /* do_expand */);
     if (hr != NULL) {
       first = hr->hrm_index();
     }
   } else {
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
-    first = _hrm.find_contiguous_only_empty(obj_regions);
+    first = _hrm->find_contiguous_only_empty(obj_regions);
     if (first != G1_NO_HRM_INDEX) {
-      _hrm.allocate_free_regions_starting_at(first, obj_regions);
+      _hrm->allocate_free_regions_starting_at(first, obj_regions);
     }
   }
 
@@ -347,14 +347,14 @@
     // Policy: We could not find enough regions for the humongous object in the
     // free list. Look through the heap to find a mix of free and uncommitted regions.
     // If so, try expansion.
-    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
+    first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
     if (first != G1_NO_HRM_INDEX) {
       // We found something. Make sure these regions are committed, i.e. expand
       // the heap. Alternatively we could do a defragmentation GC.
       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                     word_size * HeapWordSize);
 
-      _hrm.expand_at(first, obj_regions, workers());
+      _hrm->expand_at(first, obj_regions, workers());
       g1_policy()->record_new_heap_size(num_regions());
 
 #ifdef ASSERT
@@ -365,7 +365,7 @@
         assert(is_on_master_free_list(hr), "sanity");
       }
 #endif
-      _hrm.allocate_free_regions_starting_at(first, obj_regions);
+      _hrm->allocate_free_regions_starting_at(first, obj_regions);
     } else {
       // Policy: Potentially trigger a defragmentation GC.
     }
@@ -554,7 +554,7 @@
 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   for (size_t i = 0; i < count; i++) {
     if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
       return false;
@@ -571,7 +571,7 @@
   assert(count != 0, "No MemRegions provided");
   MutexLockerEx x(Heap_lock);
 
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord* prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
 
@@ -605,7 +605,7 @@
     // range ended, and adjust the start address so we don't try to allocate
     // the same region again. If the current range is entirely within that
     // region, skip it, just adjusting the recorded top.
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
       start_address = start_region->end();
       if (start_address > last_address) {
@@ -615,12 +615,12 @@
       }
       start_region->set_top(start_address);
       curr_range = MemRegion(start_address, last_address + 1);
-      start_region = _hrm.addr_to_region(start_address);
+      start_region = _hrm->addr_to_region(start_address);
     }
 
     // Perform the actual region allocation, exiting if it fails.
     // Then note how much new space we have allocated.
-    if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
+    if (!_hrm->allocate_containing_regions(curr_range, &commits, workers())) {
       return false;
     }
     increase_used(word_size * HeapWordSize);
@@ -632,8 +632,8 @@
 
     // Mark each G1 region touched by the range as archive, add it to
     // the old set, and set top.
-    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* curr_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
     prev_last_region = last_region;
 
     while (curr_region != NULL) {
@@ -650,7 +650,7 @@
       HeapRegion* next_region;
       if (curr_region != last_region) {
         top = curr_region->end();
-        next_region = _hrm.next_region_in_heap(curr_region);
+        next_region = _hrm->next_region_in_heap(curr_region);
       } else {
         top = last_address + 1;
         next_region = NULL;
@@ -671,7 +671,7 @@
   assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord *prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
 
@@ -691,8 +691,8 @@
            "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
            p2i(start_address), p2i(prev_last_addr));
 
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
     HeapWord* bottom_address = start_region->bottom();
 
     // Check for a range beginning in the same region in which the
@@ -708,7 +708,7 @@
       guarantee(curr_region->is_archive(),
                 "Expected archive region at index %u", curr_region->hrm_index());
       if (curr_region != last_region) {
-        curr_region = _hrm.next_region_in_heap(curr_region);
+        curr_region = _hrm->next_region_in_heap(curr_region);
       } else {
         curr_region = NULL;
       }
@@ -757,7 +757,7 @@
   assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord* prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
   size_t size_used = 0;
@@ -779,8 +779,8 @@
     size_used += ranges[i].byte_size();
     prev_last_addr = last_address;
 
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
 
     // Check for ranges that start in the same G1 region in which the previous
     // range ended, and adjust the start address so we don't try to free
@@ -791,7 +791,7 @@
       if (start_address > last_address) {
         continue;
       }
-      start_region = _hrm.addr_to_region(start_address);
+      start_region = _hrm->addr_to_region(start_address);
     }
     prev_last_region = last_region;
 
@@ -806,11 +806,11 @@
       curr_region->set_free();
       curr_region->set_top(curr_region->bottom());
       if (curr_region != last_region) {
-        curr_region = _hrm.next_region_in_heap(curr_region);
+        curr_region = _hrm->next_region_in_heap(curr_region);
       } else {
         curr_region = NULL;
       }
-      _hrm.shrink_at(curr_index, 1);
+      _hrm->shrink_at(curr_index, 1);
       uncommitted_regions++;
     }
 
@@ -1024,6 +1024,8 @@
   abandon_collection_set(collection_set());
 
   tear_down_region_sets(false /* free_list_only */);
+
+  hrm()->prepare_for_full_collection_start();
 }
 
 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
@@ -1035,6 +1037,8 @@
 }
 
 void G1CollectedHeap::prepare_heap_for_mutators() {
+  hrm()->prepare_for_full_collection_end();
+
   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
   ClassLoaderDataGraph::purge();
   MetaspaceUtils::verify_metrics();
@@ -1071,7 +1075,7 @@
 }
 
 void G1CollectedHeap::verify_after_full_collection() {
-  _hrm.verify_optional();
+  _hrm->verify_optional();
   _verifier->verify_region_sets_optional();
   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
   // Clear the previous marking bitmap, if needed for bitmap verification.
@@ -1325,7 +1329,7 @@
 
 
   if (expand(expand_bytes, _workers)) {
-    _hrm.verify_optional();
+    _hrm->verify_optional();
     _verifier->verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
                                            false /* expect_null_mutator_alloc_region */);
@@ -1350,7 +1354,7 @@
   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
   assert(regions_to_expand > 0, "Must expand by at least one region");
 
-  uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
+  uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
   if (expand_time_ms != NULL) {
     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
   }
@@ -1365,7 +1369,7 @@
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
-        _hrm.available() >= regions_to_expand) {
+        _hrm->available() >= regions_to_expand) {
       // We had head room...
       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
     }
@@ -1380,7 +1384,7 @@
                                          HeapRegion::GrainBytes);
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
-  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
+  uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
 
@@ -1408,7 +1412,7 @@
   shrink_helper(shrink_bytes);
   rebuild_region_sets(true /* free_list_only */);
 
-  _hrm.verify_optional();
+  _hrm->verify_optional();
   _verifier->verify_region_sets_optional();
 }
 
@@ -1486,7 +1490,7 @@
   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
   _bot(NULL),
   _listener(),
-  _hrm(),
+  _hrm(NULL),
   _allocator(NULL),
   _verifier(NULL),
   _summary_bytes_used(0),
@@ -1505,7 +1509,7 @@
   _survivor(),
   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
-  _g1_policy(new G1Policy(_gc_timer_stw)),
+  _g1_policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
   _heap_sizing_policy(NULL),
   _collection_set(this, _g1_policy),
   _hot_card_cache(NULL),
@@ -1632,7 +1636,7 @@
   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 
   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
-  size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  size_t max_byte_size = g1_collector_policy()->heap_reserved_size_bytes();
   size_t heap_alignment = collector_policy()->heap_alignment();
 
   // Ensure that the sizes are properly aligned.
@@ -1692,12 +1696,17 @@
   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
   size_t page_size = actual_reserved_page_size(heap_rs);
   G1RegionToSpaceMapper* heap_storage =
-    G1RegionToSpaceMapper::create_mapper(g1_rs,
-                                         g1_rs.size(),
-                                         page_size,
-                                         HeapRegion::GrainBytes,
-                                         1,
-                                         mtJavaHeap);
+    G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
+                                              g1_rs.size(),
+                                              page_size,
+                                              HeapRegion::GrainBytes,
+                                              1,
+                                              mtJavaHeap);
+  if (heap_storage == NULL) {
+    vm_shutdown_during_initialization("Could not initialize G1 heap");
+    return JNI_ERR;
+  }
+
   os::trace_page_sizes("Heap",
                        collector_policy()->min_heap_byte_size(),
                        max_byte_size,
@@ -1728,7 +1737,9 @@
   G1RegionToSpaceMapper* next_bitmap_storage =
     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
 
-  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+  _hrm = HeapRegionManager::create_manager(this, g1_collector_policy());
+
+  _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
   _card_table->initialize(cardtable_storage);
   // Do later initialization work for concurrent refinement.
   _hot_card_cache->initialize(card_counts_storage);
@@ -1743,20 +1754,20 @@
   guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
   // Also create a G1 rem set.
   _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
-  _g1_rem_set->initialize(max_capacity(), max_regions());
+  _g1_rem_set->initialize(max_reserved_capacity(), max_regions());
 
   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
             "too many cards per region");
 
-  FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
+  FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
 
   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
 
   {
-    HeapWord* start = _hrm.reserved().start();
-    HeapWord* end = _hrm.reserved().end();
+    HeapWord* start = _hrm->reserved().start();
+    HeapWord* end = _hrm->reserved().end();
     size_t granularity = HeapRegion::GrainBytes;
 
     _in_cset_fast_test.initialize(start, end, granularity);
@@ -1807,7 +1818,7 @@
 
   // Here we allocate the dummy HeapRegion that is required by the
   // G1AllocRegion class.
-  HeapRegion* dummy_region = _hrm.get_dummy_region();
+  HeapRegion* dummy_region = _hrm->get_dummy_region();
 
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
@@ -1927,16 +1938,20 @@
   return _collector_policy;
 }
 
+G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
+  return _collector_policy;
+}
+
 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
   return &_soft_ref_policy;
 }
 
 size_t G1CollectedHeap::capacity() const {
-  return _hrm.length() * HeapRegion::GrainBytes;
+  return _hrm->length() * HeapRegion::GrainBytes;
 }
 
 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
-  return _hrm.total_free_bytes();
+  return _hrm->total_free_bytes();
 }
 
 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
@@ -2002,6 +2017,18 @@
   }
 }
 
+bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
+  if (g1_policy()->force_upgrade_to_full()) {
+    return true;
+  } else if (should_do_concurrent_full_gc(_gc_cause)) {
+    return false;
+  } else if (has_regions_left_for_allocation()) {
+    return false;
+  } else {
+    return true;
+  }
+}
+
 #ifndef PRODUCT
 void G1CollectedHeap::allocate_dummy_regions() {
   // Let's fill up most of the region
@@ -2152,7 +2179,7 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_hrm.reserved().contains(p)) {
+  if (_hrm->reserved().contains(p)) {
     // Given that we know that p is in the reserved space,
     // heap_region_containing() should successfully
     // return the containing region.
@@ -2166,7 +2193,7 @@
 #ifdef ASSERT
 bool G1CollectedHeap::is_in_exact(const void* p) const {
   bool contains = reserved_region().contains(p);
-  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
+  bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
   if (contains && available) {
     return true;
   } else {
@@ -2197,18 +2224,18 @@
 }
 
 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
-  _hrm.iterate(cl);
+  _hrm->iterate(cl);
 }
 
 void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
                                                                  HeapRegionClaimer *hrclaimer,
                                                                  uint worker_id) const {
-  _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
+  _hrm->par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
 }
 
 void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
                                                          HeapRegionClaimer *hrclaimer) const {
-  _hrm.par_iterate(cl, hrclaimer, 0);
+  _hrm->par_iterate(cl, hrclaimer, 0);
 }
 
 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
@@ -2257,7 +2284,11 @@
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _hrm.reserved().byte_size();
+  return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
+}
+
+size_t G1CollectedHeap::max_reserved_capacity() const {
+  return _hrm->max_length() * HeapRegion::GrainBytes;
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -2347,8 +2378,8 @@
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
-            p2i(_hrm.reserved().start()),
-            p2i(_hrm.reserved().end()));
+            p2i(_hrm->reserved().start()),
+            p2i(_hrm->reserved().end()));
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
   uint young_regions = young_regions_count();
@@ -3131,7 +3162,7 @@
     // output from the concurrent mark thread interfering with this
     // logging output either.
 
-    _hrm.verify_optional();
+    _hrm->verify_optional();
     _verifier->verify_region_sets_optional();
 
     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
@@ -3947,7 +3978,7 @@
                                   bool locked) {
   assert(!hr->is_free(), "the region should not be free");
   assert(!hr->is_empty(), "the region should not be empty");
-  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
+  assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
   assert(free_list != NULL, "pre-condition");
 
   if (G1VerifyBitmaps) {
@@ -3988,7 +4019,7 @@
   assert(list != NULL, "list can't be null");
   if (!list->is_empty()) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    _hrm.insert_list_into_free_list(list);
+    _hrm->insert_list_into_free_list(list);
   }
 }
 
@@ -4521,7 +4552,7 @@
     // this is that during a full GC string deduplication needs to know if
     // a collected region was young or old when the full GC was initiated.
   }
-  _hrm.remove_all_free_regions();
+  _hrm->remove_all_free_regions();
 }
 
 void G1CollectedHeap::increase_used(size_t bytes) {
@@ -4596,7 +4627,7 @@
     _survivor.clear();
   }
 
-  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
@@ -4623,7 +4654,7 @@
   bool should_allocate = g1_policy()->should_allocate_mutator_region();
   if (force || should_allocate) {
     HeapRegion* new_alloc_region = new_region(word_size,
-                                              false /* is_old */,
+                                              HeapRegionType::Eden,
                                               false /* do_expand */);
     if (new_alloc_region != NULL) {
       set_region_short_lived_locked(new_alloc_region);
@@ -4667,13 +4698,19 @@
     return NULL;
   }
 
-  const bool is_survivor = dest.is_young();
+  HeapRegionType type;
+  if (dest.is_young()) {
+    type = HeapRegionType::Survivor;
+  } else {
+    type = HeapRegionType::Old;
+  }
 
   HeapRegion* new_alloc_region = new_region(word_size,
-                                            !is_survivor,
+                                            type,
                                             true /* do_expand */);
+
   if (new_alloc_region != NULL) {
-    if (is_survivor) {
+    if (type.is_survivor()) {
       new_alloc_region->set_survivor();
       _survivor.add(new_alloc_region);
       _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
@@ -4705,14 +4742,14 @@
 
 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
   bool expanded = false;
-  uint index = _hrm.find_highest_free(&expanded);
+  uint index = _hrm->find_highest_free(&expanded);
 
   if (index != G1_NO_HRM_INDEX) {
     if (expanded) {
       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
                                 HeapRegion::GrainWords * HeapWordSize);
     }
-    _hrm.allocate_free_regions_starting_at(index, 1);
+    _hrm->allocate_free_regions_starting_at(index, 1);
     return region_at(index);
   }
   return NULL;
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -45,6 +45,7 @@
 #include "gc/g1/g1YCTypes.hpp"
 #include "gc/g1/heapRegionManager.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
@@ -194,7 +195,7 @@
   G1RegionMappingChangedListener _listener;
 
   // The sequence of all heap regions in the heap.
-  HeapRegionManager _hrm;
+  HeapRegionManager* _hrm;
 
   // Manages all allocations with regions except humongous object allocations.
   G1Allocator* _allocator;
@@ -267,6 +268,9 @@
   // (e) cause == _wb_conc_mark
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
+  // Return true if should upgrade to full gc after an incremental one.
+  bool should_upgrade_to_full_gc(GCCause::Cause cause);
+
   // indicates whether we are in young or mixed GC mode
   G1CollectorState _collector_state;
 
@@ -369,9 +373,9 @@
   // Try to allocate a single non-humongous HeapRegion sufficient for
   // an allocation of the given word_size. If do_expand is true,
   // attempt to expand the heap if necessary to satisfy the allocation
-  // request. If the region is to be used as an old region or for a
-  // humongous object, set is_old to true. If not, to false.
-  HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
+  // request. 'type' takes the type of region to be allocated. (Use constants
+  // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
+  HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);
 
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
@@ -957,10 +961,13 @@
   // The current policy object for the collector.
   G1Policy* g1_policy() const { return _g1_policy; }
 
+  HeapRegionManager* hrm() const { return _hrm; }
+
   const G1CollectionSet* collection_set() const { return &_collection_set; }
   G1CollectionSet* collection_set() { return &_collection_set; }
 
   virtual CollectorPolicy* collector_policy() const;
+  virtual G1CollectorPolicy* g1_collector_policy() const;
 
   virtual SoftRefPolicy* soft_ref_policy();
 
@@ -1009,7 +1016,7 @@
   // But G1CollectedHeap doesn't yet support this.
 
   virtual bool is_maximal_no_gc() const {
-    return _hrm.available() == 0;
+    return _hrm->available() == 0;
   }
 
   // Returns whether there are any regions left in the heap for allocation.
@@ -1018,19 +1025,22 @@
   }
 
   // The current number of regions in the heap.
-  uint num_regions() const { return _hrm.length(); }
+  uint num_regions() const { return _hrm->length(); }
 
   // The max number of regions in the heap.
-  uint max_regions() const { return _hrm.max_length(); }
+  uint max_regions() const { return _hrm->max_length(); }
+
+  // Max number of regions that can be committed.
+  uint max_expandable_regions() const { return _hrm->max_expandable_length(); }
 
   // The number of regions that are completely free.
-  uint num_free_regions() const { return _hrm.num_free_regions(); }
+  uint num_free_regions() const { return _hrm->num_free_regions(); }
 
   // The number of regions that can be allocated into.
-  uint num_free_or_available_regions() const { return num_free_regions() + _hrm.available(); }
+  uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
 
   MemoryUsage get_auxiliary_data_memory_usage() const {
-    return _hrm.get_auxiliary_data_memory_usage();
+    return _hrm->get_auxiliary_data_memory_usage();
   }
 
   // The number of regions that are not completely free.
@@ -1038,7 +1048,7 @@
 
 #ifdef ASSERT
   bool is_on_master_free_list(HeapRegion* hr) {
-    return _hrm.is_free(hr);
+    return _hrm->is_free(hr);
   }
 #endif // ASSERT
 
@@ -1095,13 +1105,13 @@
   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.
   bool is_in_g1_reserved(const void* p) const {
-    return _hrm.reserved().contains(p);
+    return _hrm->reserved().contains(p);
   }
 
   // Returns a MemRegion that corresponds to the space that has been
   // reserved for the heap
   MemRegion g1_reserved() const {
-    return _hrm.reserved();
+    return _hrm->reserved();
   }
 
   virtual bool is_in_closed_subset(const void* p) const;
@@ -1227,6 +1237,9 @@
   // Print the maximum heap capacity.
   virtual size_t max_capacity() const;
 
+  // Return the size of reserved memory. Returns a different value than max_capacity() when AllocateOldGenAt is used.
+  virtual size_t max_reserved_capacity() const;
+
   virtual jlong millis_since_last_gc();
 
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -57,13 +57,13 @@
 // Inline functions for G1CollectedHeap
 
 // Return the region with the given index. It assumes the index is valid.
-inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }
 
 // Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
-inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }
+inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }
 
 inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
-  return _hrm.next_region_in_humongous(hr);
+  return _hrm->next_region_in_humongous(hr);
 }
 
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
@@ -74,7 +74,7 @@
 }
 
 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
-  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
+  return _hrm->reserved().start() + index * HeapRegion::GrainWords;
 }
 
 template <class T>
@@ -83,7 +83,7 @@
   assert(is_in_g1_reserved((const void*) addr),
          "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
-  return _hrm.addr_to_region((HeapWord*) addr);
+  return _hrm->addr_to_region((HeapWord*) addr);
 }
 
 template <class T>
@@ -266,12 +266,12 @@
 }
 
 inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
-  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
+  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
   _humongous_reclaim_candidates.set_candidate(region, value);
 }
 
 inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
-  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
+  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
   return _humongous_reclaim_candidates.is_candidate(region);
 }
 
--- a/src/hotspot/share/gc/g1/g1CollectorPolicy.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectorPolicy.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,3 +55,11 @@
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 }
+
+size_t G1CollectorPolicy::heap_reserved_size_bytes() const {
+  return _max_heap_byte_size;
+}
+
+bool G1CollectorPolicy::is_hetero_heap() const {
+  return false;
+}
--- a/src/hotspot/share/gc/g1/g1CollectorPolicy.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectorPolicy.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
 
 public:
   G1CollectorPolicy();
+  virtual size_t heap_reserved_size_bytes() const;
+  virtual bool is_hetero_heap() const;
 };
-
 #endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -603,14 +603,14 @@
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   // First, check the explicit lists.
-  _g1h->_hrm.verify();
+  _g1h->_hrm->verify();
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
 
-  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
+  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
   _g1h->heap_region_iterate(&cl);
-  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
+  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
 }
 
 void G1HeapVerifier::prepare_for_verify() {
@@ -851,7 +851,7 @@
 
 bool G1HeapVerifier::check_cset_fast_test() {
   G1CheckCSetFastTableClosure cl;
-  _g1h->_hrm.iterate(&cl);
+  _g1h->_hrm->iterate(&cl);
   return !cl.failures();
 }
 #endif // PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/os.hpp"
+#include "utilities/formatBuffer.hpp"
+
+const double G1HeterogeneousCollectorPolicy::MaxRamFractionForYoung = 0.8;
+size_t G1HeterogeneousCollectorPolicy::MaxMemoryForYoung;
+
+static size_t calculate_reasonable_max_memory_for_young(FormatBuffer<100> &calc_str, double max_ram_fraction_for_young) {
+  julong phys_mem;
+  // If MaxRam is specified, we use that as maximum physical memory available.
+  if (FLAG_IS_DEFAULT(MaxRAM)) {
+    phys_mem = os::physical_memory();
+    calc_str.append("Physical_Memory");
+  } else {
+    phys_mem = (julong)MaxRAM;
+    calc_str.append("MaxRAM");
+  }
+
+  julong reasonable_max = phys_mem;
+
+  // If either MaxRAMFraction or MaxRAMPercentage is specified, we use them to calculate
+  // reasonable max size of young generation.
+  if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
+    reasonable_max = (julong)(phys_mem / MaxRAMFraction);
+    calc_str.append(" / MaxRAMFraction");
+  }  else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
+    reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
+    calc_str.append(" * MaxRAMPercentage / 100");
+  }  else {
+    // We use our own fraction to calculate max size of young generation.
+    reasonable_max = phys_mem * max_ram_fraction_for_young;
+    calc_str.append(" * %0.2f", max_ram_fraction_for_young);
+  }
+
+  return (size_t)reasonable_max;
+}
+
+void G1HeterogeneousCollectorPolicy::initialize_flags() {
+
+  FormatBuffer<100> calc_str("");
+
+  MaxMemoryForYoung = calculate_reasonable_max_memory_for_young(calc_str, MaxRamFractionForYoung);
+
+  if (MaxNewSize > MaxMemoryForYoung) {
+    if (FLAG_IS_CMDLINE(MaxNewSize)) {
+      log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+                            MaxMemoryForYoung, calc_str.buffer());
+    } else {
+      log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
+                         "Dram usage can be lowered by setting MaxNewSize to a lower value", MaxMemoryForYoung, calc_str.buffer());
+    }
+    MaxNewSize = MaxMemoryForYoung;
+  }
+  if (NewSize > MaxMemoryForYoung) {
+    if (FLAG_IS_CMDLINE(NewSize)) {
+      log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
+                            MaxMemoryForYoung, calc_str.buffer());
+    }
+    NewSize = MaxMemoryForYoung;
+  }
+
+  // After setting new size flags, call base class initialize_flags()
+  G1CollectorPolicy::initialize_flags();
+}
+
+size_t G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() {
+  return MaxMemoryForYoung;
+}
+
+size_t G1HeterogeneousCollectorPolicy::heap_reserved_size_bytes() const {
+    return 2 * _max_heap_byte_size;
+}
+
+bool G1HeterogeneousCollectorPolicy::is_hetero_heap() const {
+  return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
+#define SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
+
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
+
+class G1HeterogeneousCollectorPolicy : public G1CollectorPolicy {
+private:
+  // Max fraction of dram to use for young generation when MaxRAMFraction and
+  // MaxRAMPercentage are not specified on commandline.
+  static const double MaxRamFractionForYoung;
+  static size_t MaxMemoryForYoung;
+
+protected:
+  virtual void initialize_flags();
+
+public:
+  G1HeterogeneousCollectorPolicy() {}
+  virtual size_t heap_reserved_size_bytes() const;
+  virtual bool is_hetero_heap() const;
+  static size_t reasonable_max_memory_for_young();
+};
+
+#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+
+G1HeterogeneousHeapPolicy::G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
+  G1Policy(policy, gc_timer), _manager(NULL) {}
+
+// We call the super class init(), after which we provision young_list_target_length() regions in dram.
+void G1HeterogeneousHeapPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
+  G1Policy::init(g1h, collection_set);
+  _manager = HeterogeneousHeapRegionManager::manager();
+  _manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
+}
+
+// After a collection pause, young list target length is updated. So we need to make sure we have enough regions in dram for young gen.
+void G1HeterogeneousHeapPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
+  G1Policy::record_collection_pause_end(pause_time_ms, cards_scanned, heap_used_bytes_before_gc);
+  _manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
+}
+
+// After a full collection, young list target length is updated. So we need to make sure we have enough regions in dram for young gen.
+void G1HeterogeneousHeapPolicy::record_full_collection_end() {
+  G1Policy::record_full_collection_end();
+  _manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
+}
+
+bool G1HeterogeneousHeapPolicy::force_upgrade_to_full() {
+  if (_manager->has_borrowed_regions()) {
+    return true;
+  }
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
+#define SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
+
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+
+class G1HeterogeneousHeapPolicy : public G1Policy {
+  // Stash a pointer to the hrm.
+  HeterogeneousHeapRegionManager* _manager;
+
+public:
+  G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
+
+  // initialize policy
+  virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
+  // Record end of an evacuation pause.
+  virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
+  // Record the end of full collection.
+  virtual void record_full_collection_end();
+
+  virtual bool force_upgrade_to_full();
+};
+#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
+#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
+#include "gc/g1/heapRegion.hpp"
+
+G1HeterogeneousHeapYoungGenSizer::G1HeterogeneousHeapYoungGenSizer() : G1YoungGenSizer() {
+  // will be used later when min and max young size is calculated.
+  _max_young_length = (uint)(G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() / HeapRegion::GrainBytes);
+}
+
+// Since heap is sized potentially to larger value accounting for dram + nvdimm, we need to limit
+// max young gen size to the available dram.
+// Call parent class method first and then adjust sizes based on available dram
+void G1HeterogeneousHeapYoungGenSizer::adjust_max_new_size(uint number_of_heap_regions) {
+  G1YoungGenSizer::adjust_max_new_size(number_of_heap_regions);
+  adjust_lengths_based_on_dram_memory();
+}
+
+void G1HeterogeneousHeapYoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
+  G1YoungGenSizer::heap_size_changed(new_number_of_heap_regions);
+  adjust_lengths_based_on_dram_memory();
+}
+
+void G1HeterogeneousHeapYoungGenSizer::adjust_lengths_based_on_dram_memory() {
+  _min_desired_young_length = MIN2(_min_desired_young_length, _max_young_length);
+  _max_desired_young_length = MIN2(_max_desired_young_length, _max_young_length);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
+#define SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
+
+#include "gc/g1/g1YoungGenSizer.hpp"
+
+// This class prevents the size of the young generation of the G1 heap from
+// exceeding the dram memory available. If set on the command line, MaxRAM and MaxRAMFraction/MaxRAMPercentage
+// are used to determine the maximum size to which the young generation can grow.
+// Else we set the maximum size to 80% of the dram available in the system.
+
+class G1HeterogeneousHeapYoungGenSizer : public G1YoungGenSizer {
+private:
+  // maximum no of regions that young generation can grow to. Calculated in constructor.
+  uint _max_young_length;
+  void adjust_lengths_based_on_dram_memory();
+
+public:
+  G1HeterogeneousHeapYoungGenSizer();
+
+  // Calculate the maximum length of the young gen given the number of regions
+  // depending on the sizing algorithm.
+  virtual void adjust_max_new_size(uint number_of_heap_regions);
+
+  virtual void heap_size_changed(uint new_number_of_heap_regions);
+};
+
+#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -100,6 +100,12 @@
   return reserved_size() - committed_size();
 }
 
+void G1PageBasedVirtualSpace::commit_and_set_special() {
+  commit_internal(addr_to_page_index(_low_boundary), addr_to_page_index(_high_boundary));
+  _special = true;
+  _dirty.initialize(reserved_size()/_page_size);
+}
+
 size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
   return (addr - _low_boundary) / _page_size;
 }
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -136,6 +136,8 @@
   // Memory left to use/expand in this virtual space.
   size_t uncommitted_size() const;
 
+  void commit_and_set_special();
+
   bool contains(const void* p) const;
 
   MemRegion reserved() {
--- a/src/hotspot/share/gc/g1/g1Policy.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -29,6 +29,7 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -46,7 +47,7 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/pair.hpp"
 
-G1Policy::G1Policy(STWGCTimer* gc_timer) :
+G1Policy::G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
   _predictor(G1ConfidencePercent / 100.0),
   _analytics(new G1Analytics(&_predictor)),
   _remset_tracker(),
@@ -62,7 +63,7 @@
   _survivor_surv_rate_group(new SurvRateGroup()),
   _reserve_factor((double) G1ReservePercent / 100.0),
   _reserve_regions(0),
-  _young_gen_sizer(),
+  _young_gen_sizer(G1YoungGenSizer::create_gen_sizer(policy)),
   _free_regions_at_end_of_collection(0),
   _max_rs_lengths(0),
   _rs_lengths_prediction(0),
@@ -83,6 +84,15 @@
 
 G1Policy::~G1Policy() {
   delete _ihop_control;
+  delete _young_gen_sizer;
+}
+
+G1Policy* G1Policy::create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw) {
+  if (policy->is_hetero_heap()) {
+    return new G1HeterogeneousHeapPolicy(policy, gc_timer_stw);
+  } else {
+    return new G1Policy(policy, gc_timer_stw);
+  }
 }
 
 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
@@ -94,9 +104,9 @@
   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 
   if (!adaptive_young_list_length()) {
-    _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
+    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
-  _young_gen_sizer.adjust_max_new_size(_g1h->max_regions());
+  _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
 
   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 
@@ -176,7 +186,7 @@
   // smaller than 1.0) we'll get 1.
   _reserve_regions = (uint) ceil(reserve_regions_d);
 
-  _young_gen_sizer.heap_size_changed(new_number_of_regions);
+  _young_gen_sizer->heap_size_changed(new_number_of_regions);
 
   _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
 }
@@ -195,14 +205,14 @@
   }
   desired_min_length += base_min_length;
   // make sure we don't go below any user-defined minimum bound
-  return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
+  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 }
 
 uint G1Policy::calculate_young_list_desired_max_length() const {
   // Here, we might want to also take into account any additional
   // constraints (i.e., user-defined minimum bound). Currently, we
   // effectively don't set this bound.
-  return _young_gen_sizer.max_desired_young_length();
+  return _young_gen_sizer->max_desired_young_length();
 }
 
 uint G1Policy::update_young_list_max_and_target_length() {
@@ -218,6 +228,7 @@
 uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
   YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
   _young_list_target_length = young_lengths.first;
+
   return young_lengths.second;
 }
 
@@ -900,7 +911,7 @@
 }
 
 bool G1Policy::adaptive_young_list_length() const {
-  return _young_gen_sizer.adaptive_young_list_length();
+  return _young_gen_sizer->adaptive_young_list_length();
 }
 
 size_t G1Policy::desired_survivor_size(uint max_regions) const {
--- a/src/hotspot/share/gc/g1/g1Policy.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_G1_G1POLICY_HPP
 #define SHARE_VM_GC_G1_G1POLICY_HPP
 
+#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1InCSetState.hpp"
@@ -91,7 +92,7 @@
   // for the first time during initialization.
   uint   _reserve_regions;
 
-  G1YoungGenSizer _young_gen_sizer;
+  G1YoungGenSizer* _young_gen_sizer;
 
   uint _free_regions_at_end_of_collection;
 
@@ -282,10 +283,12 @@
   void abort_time_to_mixed_tracking();
 public:
 
-  G1Policy(STWGCTimer* gc_timer);
+  G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
 
   virtual ~G1Policy();
 
+  static G1Policy* create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw);
+
   G1CollectorState* collector_state() const;
 
   G1GCPhaseTimes* phase_times() const { return _phase_times; }
@@ -298,7 +301,7 @@
   // This should be called after the heap is resized.
   void record_new_heap_size(uint new_number_of_regions);
 
-  void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
+  virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
 
   void note_gc_start();
 
@@ -308,11 +311,11 @@
 
   // Record the start and end of an evacuation pause.
   void record_collection_pause_start(double start_time_sec);
-  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
+  virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
 
   // Record the start and end of a full collection.
   void record_full_collection_start();
-  void record_full_collection_end();
+  virtual void record_full_collection_end();
 
   // Must currently be called while the world is stopped.
   void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
@@ -432,6 +435,10 @@
   void update_max_gc_locker_expansion();
 
   void update_survivors_policy();
+
+  virtual bool force_upgrade_to_full() {
+    return false;
+  }
 };
 
 #endif // SHARE_VM_GC_G1_G1POLICY_HPP
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,15 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/virtualspace.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.inline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "utilities/formatBuffer.hpp"
 
 G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                              size_t used_size,
@@ -170,16 +174,156 @@
   }
 }
 
+static bool map_nvdimm_space(ReservedSpace rs) {
+  assert(AllocateOldGenAt != NULL, "");
+  int _backing_fd = os::create_file_for_heap(AllocateOldGenAt);
+  if (_backing_fd == -1) {
+    log_error(gc, init)("Could not create file for Old generation at location %s", AllocateOldGenAt);
+    return false;
+  }
+  // Commit this memory in nv-dimm.
+  char* ret = os::attempt_reserve_memory_at(rs.size(), rs.base(), _backing_fd);
+
+  if (ret != rs.base()) {
+    if (ret != NULL) {
+      os::unmap_memory(rs.base(), rs.size());
+    }
+    log_error(gc, init)("Error in mapping Old Gen to given AllocateOldGenAt = %s", AllocateOldGenAt);
+    os::close(_backing_fd);
+    return false;
+  }
+
+  os::close(_backing_fd);
+  return true;
+}
+
+G1RegionToHeteroSpaceMapper::G1RegionToHeteroSpaceMapper(ReservedSpace rs,
+                                                         size_t actual_size,
+                                                         size_t page_size,
+                                                         size_t alloc_granularity,
+                                                         size_t commit_factor,
+                                                         MemoryType type) :
+  G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
+  _rs(rs),
+  _num_committed_dram(0),
+  _num_committed_nvdimm(0),
+  _page_size(page_size),
+  _commit_factor(commit_factor),
+  _type(type) {
+  assert(actual_size == 2 * MaxHeapSize, "For 2-way heterogeneous heap, reserved space is two times MaxHeapSize");
+}
+
+bool G1RegionToHeteroSpaceMapper::initialize() {
+  // Since we need to re-map the reserved space ('Xmx' to nv-dimm and 'Xmx' to dram), we release the reserved memory first,
+  // because on some OSes (e.g. Windows) you cannot do a file mapping over memory reserved with a regular mapping.
+  os::release_memory(_rs.base(), _rs.size());
+  // First half of size Xmx is for nv-dimm.
+  ReservedSpace rs_nvdimm = _rs.first_part(MaxHeapSize);
+  assert(rs_nvdimm.base() == _rs.base(), "We should get the same base address");
+
+  // Second half of reserved memory is mapped to dram.
+  ReservedSpace rs_dram = _rs.last_part(MaxHeapSize);
+
+  assert(rs_dram.size() == rs_nvdimm.size() && rs_nvdimm.size() == MaxHeapSize, "They all should be same");
+
+  // Reserve dram memory
+  char* base = os::attempt_reserve_memory_at(rs_dram.size(), rs_dram.base());
+  if (base != rs_dram.base()) {
+    if (base != NULL) {
+      os::release_memory(base, rs_dram.size());
+    }
+    log_error(gc, init)("Error in re-mapping memory on dram during G1 heterogenous memory initialization");
+    return false;
+  }
+
+  // We reserve and commit this entire space to NV-DIMM.
+  if (!map_nvdimm_space(rs_nvdimm)) {
+    log_error(gc, init)("Error in re-mapping memory to nv-dimm during G1 heterogenous memory initialization");
+    return false;
+  }
+
+  if (_region_granularity >= (_page_size * _commit_factor)) {
+    _dram_mapper = new G1RegionsLargerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
+  } else {
+    _dram_mapper = new G1RegionsSmallerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
+  }
+
+  _start_index_of_nvdimm = 0;
+  _start_index_of_dram = (uint)(rs_nvdimm.size() / _region_granularity);
+  return true;
+}
+
+void G1RegionToHeteroSpaceMapper::commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
+  uint end_idx = (start_idx + (uint)num_regions - 1);
+
+  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
+  uint num_nvdimm = (uint)num_regions - num_dram;
+
+  if (num_nvdimm > 0) {
+    // We do not need to commit nv-dimm regions, since they are committed in the beginning.
+    _num_committed_nvdimm += num_nvdimm;
+  }
+  if (num_dram > 0) {
+    _dram_mapper->commit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram, pretouch_gang);
+    _num_committed_dram += num_dram;
+  }
+}
+
+void G1RegionToHeteroSpaceMapper::uncommit_regions(uint start_idx, size_t num_regions) {
+  uint end_idx = (start_idx + (uint)num_regions - 1);
+  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
+  uint num_nvdimm = (uint)num_regions - num_dram;
+
+  if (num_nvdimm > 0) {
+    // We do not uncommit memory for nv-dimm regions.
+    _num_committed_nvdimm -= num_nvdimm;
+  }
+
+  if (num_dram > 0) {
+    _dram_mapper->uncommit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram);
+    _num_committed_dram -= num_dram;
+  }
+}
+
+uint G1RegionToHeteroSpaceMapper::num_committed_dram() const {
+  return _num_committed_dram;
+}
+
+uint G1RegionToHeteroSpaceMapper::num_committed_nvdimm() const {
+  return _num_committed_nvdimm;
+}
+
+G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_heap_mapper(ReservedSpace rs,
+                                                                 size_t actual_size,
+                                                                 size_t page_size,
+                                                                 size_t region_granularity,
+                                                                 size_t commit_factor,
+                                                                 MemoryType type) {
+  if (AllocateOldGenAt != NULL) {
+    G1RegionToHeteroSpaceMapper* mapper = new G1RegionToHeteroSpaceMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
+    if (!mapper->initialize()) {
+      delete mapper;
+      return NULL;
+    }
+    return (G1RegionToSpaceMapper*)mapper;
+  } else {
+    return create_mapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
+  }
+}
+
 G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                             size_t actual_size,
                                                             size_t page_size,
                                                             size_t region_granularity,
                                                             size_t commit_factor,
                                                             MemoryType type) {
-
   if (region_granularity >= (page_size * commit_factor)) {
     return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   } else {
     return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   }
 }
+
+void G1RegionToSpaceMapper::commit_and_set_special() {
+  _storage.commit_and_set_special();
+}
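
For context, AllocateOldGenAt (used by map_nvdimm_space() above) takes a path, typically on a DAX file system backed by an NV-DIMM device, where the old-generation heap file is created. Since the feature is experimental, the flag must be unlocked first; a hypothetical invocation (path assumed) might look like:

java -XX:+UnlockExperimentalVMOptions -XX:AllocateOldGenAt=/mnt/pmem0 \
     -XX:+UseG1GC -Xmx8g MyApp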
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -70,6 +70,7 @@
     return _commit_map.at(idx);
   }
 
+  void commit_and_set_special();
   virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL) = 0;
   virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
 
@@ -87,6 +88,37 @@
                                               size_t region_granularity,
                                               size_t byte_translation_factor,
                                               MemoryType type);
+
+  static G1RegionToSpaceMapper* create_heap_mapper(ReservedSpace rs,
+                                                   size_t actual_size,
+                                                   size_t page_size,
+                                                   size_t region_granularity,
+                                                   size_t byte_translation_factor,
+                                                   MemoryType type);
 };
 
+// A G1RegionToSpaceMapper implementation in which part of the
+// space is mapped to dram and part to nv-dimm.
+class G1RegionToHeteroSpaceMapper : public G1RegionToSpaceMapper {
+private:
+  size_t _pages_per_region;
+  ReservedSpace _rs;
+  G1RegionToSpaceMapper* _dram_mapper;
+  uint _num_committed_dram;
+  uint _num_committed_nvdimm;
+  uint _start_index_of_nvdimm;
+  uint _start_index_of_dram;
+  size_t _page_size;
+  size_t _commit_factor;
+  MemoryType _type;
+
+public:
+  G1RegionToHeteroSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemoryType type);
+  bool initialize();
+  uint num_committed_dram() const;
+  uint num_committed_nvdimm() const;
+
+  virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL);
+  virtual void uncommit_regions(uint start_idx, size_t num_regions = 1);
+};
 #endif // SHARE_VM_GC_G1_G1REGIONTOSPACEMAPPER_HPP
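
To make the index layout above concrete, a hypothetical worked example (heap and region sizes assumed, not taken from the changeset):

#include <cstdio>

// Sketch of the 2-way reservation: the mapper reserves 2 * MaxHeapSize of
// address space; the first half is file-mapped to nv-dimm, the second half
// is an anonymous dram mapping.
int main() {
  const unsigned long long max_heap  = 8ULL << 30; // -Xmx8g (assumed)
  const unsigned long long region_sz = 4ULL << 20; // 4 MB G1 regions (assumed)
  const unsigned max_regions = (unsigned)(max_heap / region_sz);                  // 2048
  std::printf("nvdimm region indexes: [0, %u)\n", max_regions);                   // 0..2047
  std::printf("dram   region indexes: [%u, %u)\n", max_regions, 2 * max_regions); // 2048..4095
  return 0;
}

This matches _start_index_of_nvdimm = 0 and _start_index_of_dram = rs_nvdimm.size() / _region_granularity as computed in initialize() above.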
--- a/src/hotspot/share/gc/g1/g1VMOperations.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -138,8 +138,8 @@
       // kind of GC.
       _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
     } else {
-      bool should_upgrade_to_full = !g1h->should_do_concurrent_full_gc(_gc_cause) &&
-                                    !g1h->has_regions_left_for_allocation();
+      bool should_upgrade_to_full = g1h->should_upgrade_to_full_gc(_gc_cause);
+
       if (should_upgrade_to_full) {
         // There has been a request to perform a GC to free some space. We have no
         // information on how much memory has been asked for. In case there are
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,14 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
 #include "gc/g1/g1YoungGenSizer.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "logging/log.hpp"
 
 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
-  _min_desired_young_length(0), _max_desired_young_length(0), _adaptive_size(true) {
+  _adaptive_size(true), _min_desired_young_length(0), _max_desired_young_length(0) {
 
   if (FLAG_IS_CMDLINE(NewRatio)) {
     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
@@ -127,3 +129,11 @@
   recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
           &_max_desired_young_length);
 }
+
+G1YoungGenSizer* G1YoungGenSizer::create_gen_sizer(G1CollectorPolicy* policy) {
+  if (policy->is_hetero_heap()) {
+    return new G1HeterogeneousHeapYoungGenSizer();
+  } else {
+    return new G1YoungGenSizer();
+  }
+}
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
 #define SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
 
+#include "gc/g1/g1CollectorPolicy.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // There are three command line options related to the young gen size:
@@ -63,7 +64,7 @@
 //
 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
-class G1YoungGenSizer {
+class G1YoungGenSizer : public CHeapObj<mtGC> {
 private:
   enum SizerKind {
     SizerDefaults,
@@ -73,8 +74,6 @@
     SizerNewRatio
   };
   SizerKind _sizer_kind;
-  uint _min_desired_young_length;
-  uint _max_desired_young_length;
 
   // False when using a fixed young generation size due to command-line options,
   // true otherwise.
@@ -87,13 +86,17 @@
   // given the number of heap regions depending on the kind of sizing algorithm.
   void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);
 
+protected:
+  uint _min_desired_young_length;
+  uint _max_desired_young_length;
+
 public:
   G1YoungGenSizer();
   // Calculate the maximum length of the young gen given the number of regions
   // depending on the sizing algorithm.
-  void adjust_max_new_size(uint number_of_heap_regions);
+  virtual void adjust_max_new_size(uint number_of_heap_regions);
 
-  void heap_size_changed(uint new_number_of_heap_regions);
+  virtual void heap_size_changed(uint new_number_of_heap_regions);
   uint min_desired_young_length() const {
     return _min_desired_young_length;
   }
@@ -104,6 +107,8 @@
   bool adaptive_young_list_length() const {
     return _adaptive_size;
   }
+
+  static G1YoungGenSizer* create_gen_sizer(G1CollectorPolicy* policy);
 };
 
 #endif // SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
--- a/src/hotspot/share/gc/g1/g1_globals.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/g1_globals.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -317,5 +317,15 @@
           "above this value cancels a given periodic GC. A value of zero "  \
           "disables this check.")                                           \
           range(0.0, (double)max_uintx)                                     \
+                                                                            \
+  experimental(uintx, G1YoungExpansionBufferPercent, 10,                    \
+               "When heterogenous heap is enabled by AllocateOldGenAt "     \
+               "option, after every GC, young gen is re-sized which "       \
+               "involves system calls to commit/uncommit memory. To "       \
+               "reduce these calls, we keep a buffer of extra regions to "  \
+               "absorb small changes in young gen length. This flag takes " \
+               "the buffer size as an percentage of young gen length")      \
+               range(0, 100)                                                \
+
 
 #endif // SHARE_VM_GC_G1_G1_GLOBALS_HPP
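
To illustrate the buffer (numbers hypothetical): with the default G1YoungExpansionBufferPercent of 10 and a young-gen target of 100 dram regions, the manager commits 110 dram regions; if the next target drops to 105, the 5-region surplus is within the 10% buffer, so no uncommit system calls are made.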
--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -28,6 +28,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+#include "gc/shared/collectorPolicy.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/bitMap.inline.hpp"
 
@@ -54,18 +56,25 @@
 };
 
 HeapRegionManager::HeapRegionManager() :
-  _regions(), _heap_mapper(NULL),
-  _prev_bitmap_mapper(NULL),
-  _next_bitmap_mapper(NULL),
   _bot_mapper(NULL),
   _cardtable_mapper(NULL),
   _card_counts_mapper(NULL),
-  _free_list("Free list", new MasterFreeRegionListChecker()),
   _available_map(mtGC),
   _num_committed(0),
-  _allocated_heapregions_length(0)
+  _allocated_heapregions_length(0),
+  _regions(), _heap_mapper(NULL),
+  _prev_bitmap_mapper(NULL),
+  _next_bitmap_mapper(NULL),
+  _free_list("Free list", new MasterFreeRegionListChecker())
 { }
 
+HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy) {
+  if (policy->is_hetero_heap()) {
+    return new HeterogeneousHeapRegionManager((uint)(policy->max_heap_byte_size() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
+  }
+  return new HeapRegionManager();
+}
+
 void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                G1RegionToSpaceMapper* prev_bitmap,
                                G1RegionToSpaceMapper* next_bitmap,
@@ -514,7 +523,7 @@
 #endif // PRODUCT
 
 HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
-    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
+    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
   assert(n_workers > 0, "Need at least one worker.");
   uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
   memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
--- a/src/hotspot/share/gc/g1/heapRegionManager.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -26,8 +26,10 @@
 #define SHARE_VM_GC_G1_HEAPREGIONMANAGER_HPP
 
 #include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "gc/shared/collectorPolicy.hpp"
 #include "services/memoryUsage.hpp"
 
 class HeapRegion;
@@ -71,17 +73,10 @@
   friend class VMStructs;
   friend class HeapRegionClaimer;
 
-  G1HeapRegionTable _regions;
-
-  G1RegionToSpaceMapper* _heap_mapper;
-  G1RegionToSpaceMapper* _prev_bitmap_mapper;
-  G1RegionToSpaceMapper* _next_bitmap_mapper;
   G1RegionToSpaceMapper* _bot_mapper;
   G1RegionToSpaceMapper* _cardtable_mapper;
   G1RegionToSpaceMapper* _card_counts_mapper;
 
-  FreeRegionList _free_list;
-
   // Each bit in this bitmap indicates that the corresponding region is available
   // for allocation.
   CHeapBitMap _available_map;
@@ -95,11 +90,8 @@
   HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
   HeapWord* heap_end() const {return _regions.end_address_mapped(); }
 
-  void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
-
   // Pass down commit calls to the VirtualSpace.
   void commit_regions(uint index, size_t num_regions = 1, WorkGang* pretouch_gang = NULL);
-  void uncommit_regions(uint index, size_t num_regions = 1);
 
   // Notify other data structures about change in the heap layout.
   void update_committed_space(HeapWord* old_end, HeapWord* new_end);
@@ -117,6 +109,16 @@
   // the heap. Returns the length of the sequence found. If this value is zero, no
   // sequence could be found, otherwise res_idx contains the start index of this range.
   uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
+
+protected:
+  G1HeapRegionTable _regions;
+  G1RegionToSpaceMapper* _heap_mapper;
+  G1RegionToSpaceMapper* _prev_bitmap_mapper;
+  G1RegionToSpaceMapper* _next_bitmap_mapper;
+  FreeRegionList _free_list;
+
+  void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
+  void uncommit_regions(uint index, size_t num_regions = 1);
   // Allocate a new HeapRegion for the given index.
   HeapRegion* new_heap_region(uint hrm_index);
 #ifdef ASSERT
@@ -127,18 +129,25 @@
   // Empty constructor, we'll initialize it with the initialize() method.
   HeapRegionManager();
 
-  void initialize(G1RegionToSpaceMapper* heap_storage,
-                  G1RegionToSpaceMapper* prev_bitmap,
-                  G1RegionToSpaceMapper* next_bitmap,
-                  G1RegionToSpaceMapper* bot,
-                  G1RegionToSpaceMapper* cardtable,
-                  G1RegionToSpaceMapper* card_counts);
+  static HeapRegionManager* create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy);
+
+  virtual void initialize(G1RegionToSpaceMapper* heap_storage,
+                          G1RegionToSpaceMapper* prev_bitmap,
+                          G1RegionToSpaceMapper* next_bitmap,
+                          G1RegionToSpaceMapper* bot,
+                          G1RegionToSpaceMapper* cardtable,
+                          G1RegionToSpaceMapper* card_counts);
+
+  // Prepare heap regions before and after full collection.
+  // Nothing to be done in this class.
+  virtual void prepare_for_full_collection_start() {}
+  virtual void prepare_for_full_collection_end() {}
 
   // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
   // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
   // the heap from the lowest address, this region (and its associated data
   // structures) are available and we do not need to check further.
-  HeapRegion* get_dummy_region() { return new_heap_region(0); }
+  virtual HeapRegion* get_dummy_region() { return new_heap_region(0); }
 
   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
@@ -167,8 +176,8 @@
     _free_list.add_ordered(list);
   }
 
-  HeapRegion* allocate_free_region(bool is_old) {
-    HeapRegion* hr = _free_list.remove_region(is_old);
+  virtual HeapRegion* allocate_free_region(HeapRegionType type) {
+    HeapRegion* hr = _free_list.remove_region(!type.is_young());
 
     if (hr != NULL) {
       assert(hr->next() == NULL, "Single region should not have next");
@@ -202,6 +211,9 @@
   // Return the maximum number of regions in the heap.
   uint max_length() const { return (uint)_regions.length(); }
 
+  // Return maximum number of regions that heap can expand to.
+  virtual uint max_expandable_length() const { return (uint)_regions.length(); }
+
   MemoryUsage get_auxiliary_data_memory_usage() const;
 
   MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
@@ -210,26 +222,26 @@
   // HeapRegions, or re-use existing ones. Returns the number of regions the
   // sequence was expanded by. If a HeapRegion allocation fails, the resulting
   // number of regions might be smaller than what's desired.
-  uint expand_by(uint num_regions, WorkGang* pretouch_workers);
+  virtual uint expand_by(uint num_regions, WorkGang* pretouch_workers);
 
   // Makes sure that the regions from start to start+num_regions-1 are available
   // for allocation. Returns the number of regions that were committed to achieve
   // this.
-  uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
+  virtual uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
 
   // Find a contiguous set of empty regions of length num. Returns the start index of
   // that set, or G1_NO_HRM_INDEX.
-  uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
+  virtual uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
   // Find a contiguous set of empty or unavailable regions of length num. Returns the
   // start index of that set, or G1_NO_HRM_INDEX.
-  uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
+  virtual uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
 
   HeapRegion* next_region_in_heap(const HeapRegion* r) const;
 
   // Find the highest free or uncommitted region in the reserved heap,
   // and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
   // Set the 'expanded' boolean true if a new region was committed.
-  uint find_highest_free(bool* expanded);
+  virtual uint find_highest_free(bool* expanded);
 
   // Allocate the regions that contain the address range specified, committing the
   // regions if necessary. Return false if any of the regions is already committed
@@ -244,13 +256,13 @@
 
   // Uncommit up to num_regions_to_remove regions that are completely free.
   // Return the actual number of uncommitted regions.
-  uint shrink_by(uint num_regions_to_remove);
+  virtual uint shrink_by(uint num_regions_to_remove);
 
   // Uncommit a number of regions starting at the specified index, which must be available,
   // empty, and free.
   void shrink_at(uint index, size_t num_regions);
 
-  void verify();
+  virtual void verify();
 
   // Do some sanity checking.
   void verify_optional() PRODUCT_RETURN;
--- a/src/hotspot/share/gc/g1/heapRegionSet.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -234,6 +234,21 @@
   verify_optional();
 }
 
+uint FreeRegionList::num_of_regions_in_range(uint start, uint end) const {
+  HeapRegion* cur = _head;
+  uint num = 0;
+  while (cur != NULL) {
+    uint index = cur->hrm_index();
+    if (index > end) {
+      break;
+    } else if (index >= start) {
+      num++;
+    }
+    cur = cur->next();
+  }
+  return num;
+}
+
 void FreeRegionList::verify() {
   // See comment in HeapRegionSetBase::verify() about MT safety and
   // verification.
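
A standalone sketch of the counting logic above, assuming a minimal node type standing in for HeapRegion; it relies on the same property as the real code, namely that the free list is kept ordered by region index (add_ordered()):

#include <cstdint>

// Hypothetical stand-in for a HeapRegion link in the free list.
struct Node {
  uint32_t index; // stand-in for hrm_index()
  Node*    next;
};

// Count nodes whose index falls in [start, end], walking an index-ordered list.
static uint32_t num_in_range(const Node* head, uint32_t start, uint32_t end) {
  uint32_t num = 0;
  for (const Node* cur = head; cur != nullptr; cur = cur->next) {
    if (cur->index > end) {
      break; // the list is ordered, so nothing further can match
    }
    if (cur->index >= start) {
      num++;
    }
  }
  return num;
}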
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -194,6 +194,8 @@
   void remove_starting_at(HeapRegion* first, uint num_regions);
 
   virtual void verify();
+
+  uint num_of_regions_in_range(uint start, uint end) const;
 };
 
 // Iterator class that provides a convenient way to iterate over the
--- a/src/hotspot/share/gc/g1/heapRegionType.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionType.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,11 @@
 #include "gc/g1/g1HeapRegionTraceType.hpp"
 #include "gc/g1/heapRegionType.hpp"
 
+const HeapRegionType HeapRegionType::Eden      = HeapRegionType(EdenTag);
+const HeapRegionType HeapRegionType::Survivor  = HeapRegionType(SurvTag);
+const HeapRegionType HeapRegionType::Old       = HeapRegionType(OldTag);
+const HeapRegionType HeapRegionType::Humongous = HeapRegionType(StartsHumongousTag);
+
 bool HeapRegionType::is_valid(Tag tag) {
   switch (tag) {
     case FreeTag:
--- a/src/hotspot/share/gc/g1/heapRegionType.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionType.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -117,6 +117,9 @@
     _tag = tag;
   }
 
+  // Private constructor used by the static constants below.
+  HeapRegionType(Tag t) : _tag(t) { hrt_assert_is_valid(_tag); }
+
 public:
   // Queries
 
@@ -186,6 +189,11 @@
   G1HeapRegionTraceType::Type get_trace_type();
 
   HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
+
+  static const HeapRegionType Eden;
+  static const HeapRegionType Survivor;
+  static const HeapRegionType Old;
+  static const HeapRegionType Humongous;
 };
 
 #endif // SHARE_VM_GC_G1_HEAPREGIONTYPE_HPP
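
For illustration, hypothetical call sites using these constants with the allocate_free_region(HeapRegionType) signature introduced earlier in this change (hrm is an assumed HeapRegionManager*):

HeapRegion* eden_r = hrm->allocate_free_region(HeapRegionType::Eden); // dram in a heterogeneous heap
HeapRegion* old_r  = hrm->allocate_free_region(HeapRegionType::Old);  // nv-dimm in a heterogeneous heap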
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,523 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/g1/heapRegionManager.inline.hpp"
+#include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
+#include "memory/allocation.hpp"
+
+
+HeterogeneousHeapRegionManager* HeterogeneousHeapRegionManager::manager() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert(g1h != NULL, "Uninitialized access to HeterogeneousHeapRegionManager::manager()");
+
+  HeapRegionManager* hrm = g1h->hrm();
+  assert(hrm != NULL, "Uninitialized access to HeterogeneousHeapRegionManager::manager()");
+  return (HeterogeneousHeapRegionManager*)hrm;
+}
+
+void HeterogeneousHeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
+                                                G1RegionToSpaceMapper* prev_bitmap,
+                                                G1RegionToSpaceMapper* next_bitmap,
+                                                G1RegionToSpaceMapper* bot,
+                                                G1RegionToSpaceMapper* cardtable,
+                                                G1RegionToSpaceMapper* card_counts) {
+  HeapRegionManager::initialize(heap_storage, prev_bitmap, next_bitmap, bot, cardtable, card_counts);
+
+  // We commit the bitmaps for all regions during initialization and mark the bitmap space as special.
+  // This allows regions to be un-committed while concurrent-marking threads are still accessing the bitmaps.
+  _prev_bitmap_mapper->commit_and_set_special();
+  _next_bitmap_mapper->commit_and_set_special();
+}
+
+// expand_by() is called to grow the heap. We grow into nvdimm here.
+// Dram regions are committed later as needed, during mutator region allocation or
+// when the young list target length is determined after a GC cycle.
+uint HeterogeneousHeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
+  uint num_regions_possible = total_regions_committed() >= max_expandable_length() ? 0 : max_expandable_length() - total_regions_committed();
+  uint num_expanded = expand_nvdimm(MIN2(num_regions, num_regions_possible), pretouch_workers);
+  return num_expanded;
+}
+
+// Expands the heap starting from the 'start' index. The question is whether we should expand from one memory (e.g. nvdimm) to another (e.g. dram).
+// In practice, expand_at() is called for humongous allocation, where 'start' is in nv-dimm.
+// So we only allocate regions in the same kind of memory as 'start'.
+uint HeterogeneousHeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) {
+  if (num_regions == 0) {
+    return 0;
+  }
+  uint target_num_regions = MIN2(num_regions, max_expandable_length() - total_regions_committed());
+  uint end = is_in_nvdimm(start) ? end_index_of_nvdimm() : end_index_of_dram();
+
+  uint num_expanded = expand_in_range(start, end, target_num_regions, pretouch_workers);
+  assert(total_regions_committed() <= max_expandable_length(), "must be");
+  return num_expanded;
+}
+
+// This function ensures that there are 'expected_num_regions' committed regions in dram.
+// If new regions are committed, it un-commits that many regions from nv-dimm.
+// If there are already more regions committed in dram, extra regions are un-committed.
+void HeterogeneousHeapRegionManager::adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers) {
+
+  // Release back the extra regions allocated in the evacuation failure scenario.
+  if (_no_borrowed_regions > 0) {
+    _no_borrowed_regions -= shrink_dram(_no_borrowed_regions);
+    _no_borrowed_regions -= shrink_nvdimm(_no_borrowed_regions);
+  }
+
+  if (expected_num_regions > free_list_dram_length()) {
+    // If we are going to expand DRAM, we expand a little more so that we can absorb small variations in young gen sizing.
+    uint targeted_dram_regions = expected_num_regions * (1 + (double)G1YoungExpansionBufferPercent / 100);
+    uint to_be_made_available = targeted_dram_regions - free_list_dram_length();
+
+#ifdef ASSERT
+    uint total_committed_before = total_regions_committed();
+#endif
+    uint can_be_made_available = shrink_nvdimm(to_be_made_available);
+    uint ret = expand_dram(can_be_made_available, pretouch_workers);
+#ifdef ASSERT
+    assert(ret == can_be_made_available, "should be equal");
+    assert(total_committed_before == total_regions_committed(), "invariant not met");
+#endif
+  } else {
+    uint to_be_released = free_list_dram_length() - expected_num_regions;
+    // If the number of extra DRAM regions is small, do not shrink.
+    if (to_be_released < expected_num_regions * G1YoungExpansionBufferPercent / 100) {
+      return;
+    }
+
+#ifdef ASSERT
+    uint total_committed_before = total_regions_committed();
+#endif
+    uint ret = shrink_dram(to_be_released);
+    assert(ret == to_be_released, "Should be able to shrink by given amount");
+    ret = expand_nvdimm(to_be_released, pretouch_workers);
+#ifdef ASSERT
+    assert(ret == to_be_released, "Should be able to expand by given amount");
+    assert(total_committed_before == total_regions_committed(), "invariant not met");
+#endif
+  }
+}
+
+uint HeterogeneousHeapRegionManager::total_regions_committed() const {
+  return num_committed_dram() + num_committed_nvdimm();
+}
+
+uint HeterogeneousHeapRegionManager::num_committed_dram() const {
+  // This class does not keep count of committed regions in dram and nv-dimm.
+  // G1RegionToHeteroSpaceMapper keeps this information.
+  return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_dram();
+}
+
+uint HeterogeneousHeapRegionManager::num_committed_nvdimm() const {
+  // See comment for num_committed_dram()
+  return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_nvdimm();
+}
+
+// Return maximum number of regions that heap can expand to.
+uint HeterogeneousHeapRegionManager::max_expandable_length() const {
+  return _max_regions;
+}
+
+uint HeterogeneousHeapRegionManager::find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const {
+  guarantee(res_idx != NULL, "checking");
+  guarantee(start_idx <= (max_length() + 1), "checking");
+
+  uint num_regions = 0;
+
+  uint cur = start_idx;
+  while (cur <= end_idx && is_available(cur)) {
+    cur++;
+  }
+  if (cur == end_idx + 1) {
+    return num_regions;
+  }
+  *res_idx = cur;
+  while (cur <= end_idx && !is_available(cur)) {
+    cur++;
+  }
+  num_regions = cur - *res_idx;
+
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
+    assert(!is_available(i), "just checking");
+  }
+  assert(cur == end_idx + 1 || num_regions == 0 || is_available(cur),
+    "The region at the current position %u must be available or at the end", cur);
+#endif
+  return num_regions;
+}
+
+uint HeterogeneousHeapRegionManager::expand_dram(uint num_regions, WorkGang* pretouch_workers) {
+  return expand_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, pretouch_workers);
+}
+
+uint HeterogeneousHeapRegionManager::expand_nvdimm(uint num_regions, WorkGang* pretouch_workers) {
+  return expand_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, pretouch_workers);
+}
+
+// Follows the same logic as expand_at() from HeapRegionManager.
+uint HeterogeneousHeapRegionManager::expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_gang) {
+
+  uint so_far = 0;
+  uint chunk_start = 0;
+  uint num_last_found = 0;
+  while (so_far < num_regions &&
+         (num_last_found = find_unavailable_in_range(start, end, &chunk_start)) > 0) {
+    uint to_commit = MIN2(num_regions - so_far, num_last_found);
+    make_regions_available(chunk_start, to_commit, pretouch_gang);
+    so_far += to_commit;
+    start = chunk_start + to_commit + 1;
+  }
+
+  return so_far;
+}
+
+// Shrink in the range of indexes which are reserved for dram.
+uint HeterogeneousHeapRegionManager::shrink_dram(uint num_regions, bool update_free_list) {
+  return shrink_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, update_free_list);
+}
+
+// Shrink in the range of indexes which are reserved for nv-dimm.
+uint HeterogeneousHeapRegionManager::shrink_nvdimm(uint num_regions, bool update_free_list) {
+  return shrink_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, update_free_list);
+}
+
+// Find empty regions in given range, un-commit them and return the count.
+uint HeterogeneousHeapRegionManager::shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list) {
+
+  if (num_regions == 0) {
+    return 0;
+  }
+  uint so_far = 0;
+  uint idx_last_found = 0;
+  uint num_last_found;
+  while (so_far < num_regions &&
+         (num_last_found = find_empty_in_range_reverse(start, end, &idx_last_found)) > 0) {
+    uint to_uncommit = MIN2(num_regions - so_far, num_last_found);
+    if (update_free_list) {
+      _free_list.remove_starting_at(at(idx_last_found + num_last_found - to_uncommit), to_uncommit);
+    }
+    uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
+    so_far += to_uncommit;
+    end = idx_last_found;
+  }
+  return so_far;
+}
+
+uint HeterogeneousHeapRegionManager::find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx) {
+  guarantee(res_idx != NULL, "checking");
+  guarantee(start_idx < max_length(), "checking");
+  guarantee(end_idx < max_length(), "checking");
+  if (start_idx > end_idx) {
+    return 0;
+  }
+
+  uint num_regions_found = 0;
+
+  jlong cur = end_idx;
+  while (cur >= start_idx && !(is_available(cur) && at(cur)->is_empty())) {
+    cur--;
+  }
+  if (cur == start_idx - 1) {
+    return num_regions_found;
+  }
+  jlong old_cur = cur;
+  // cur indexes the first empty region
+  while (cur >= start_idx && is_available(cur) && at(cur)->is_empty()) {
+    cur--;
+  }
+  *res_idx = cur + 1;
+  num_regions_found = old_cur - cur;
+
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
+    assert(at(i)->is_empty(), "just checking");
+  }
+#endif
+  return num_regions_found;
+}
+
+HeapRegion* HeterogeneousHeapRegionManager::allocate_free_region(HeapRegionType type) {
+
+  // We want to prevent mutators from proceeding when we have borrowed regions from the last collection. This
+  // will force a full collection to remedy the situation.
+  // Free region requests from GC threads can proceed.
+  if (type.is_eden() || type.is_humongous()) {
+    if (has_borrowed_regions()) {
+      return NULL;
+    }
+  }
+
+  // Old and humongous regions are allocated from nv-dimm; eden and survivor regions are allocated from dram.
+  // Assumption: dram regions take higher indexes than nv-dimm regions.
+  bool from_nvdimm = type.is_old() || type.is_humongous();
+  bool from_head = from_nvdimm;
+  HeapRegion* hr = _free_list.remove_region(from_head);
+
+  if (hr != NULL && ( (from_nvdimm && !is_in_nvdimm(hr->hrm_index())) || (!from_nvdimm && !is_in_dram(hr->hrm_index())) ) ) {
+    _free_list.add_ordered(hr);
+    hr = NULL;
+  }
+
+#ifdef ASSERT
+  uint total_committed_before = total_regions_committed();
+#endif
+
+  if (hr == NULL) {
+    if (!from_nvdimm) {
+      uint ret = shrink_nvdimm(1);
+      if (ret == 1) {
+        ret = expand_dram(1, NULL);
+        assert(ret == 1, "We should be able to commit one region");
+        hr = _free_list.remove_region(from_head);
+      }
+    }
+    else { /*is_old*/
+      uint ret = shrink_dram(1);
+      if (ret == 1) {
+        ret = expand_nvdimm(1, NULL);
+        assert(ret == 1, "We should be able to commit one region");
+        hr = _free_list.remove_region(from_head);
+      }
+    }
+  }
+#ifdef ASSERT
+  assert(total_committed_before == total_regions_committed(), "invariant not met");
+#endif
+
+  // When an old region is requested (which happens during a collection pause) and we can't find any empty region
+  // in the set of available regions (an evacuation failure scenario), we borrow (or pre-allocate) an unavailable region
+  // from nv-dimm. This region is used to evacuate surviving objects from eden, survivor or old regions.
+  if (hr == NULL && type.is_old()) {
+    hr = borrow_old_region_for_gc();
+  }
+
+  if (hr != NULL) {
+    assert(hr->next() == NULL, "Single region should not have next");
+    assert(is_available(hr->hrm_index()), "Must be committed");
+  }
+  return hr;
+}
+
+uint HeterogeneousHeapRegionManager::find_contiguous_only_empty(size_t num) {
+  if (has_borrowed_regions()) {
+    return G1_NO_HRM_INDEX;
+  }
+  return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, true);
+}
+
+uint HeterogeneousHeapRegionManager::find_contiguous_empty_or_unavailable(size_t num) {
+  if (has_borrowed_regions()) {
+    return G1_NO_HRM_INDEX;
+  }
+  return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, false);
+}
+
+uint HeterogeneousHeapRegionManager::find_contiguous(size_t start, size_t end, size_t num, bool empty_only) {
+  uint found = 0;
+  size_t length_found = 0;
+  uint cur = (uint)start;
+  uint length_unavailable = 0;
+
+  while (length_found < num && cur <= end) {
+    HeapRegion* hr = _regions.get_by_index(cur);
+    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+      // This region is a potential candidate for allocation into.
+      if (!is_available(cur)) {
+        if (shrink_dram(1) == 1) {
+          uint ret = expand_in_range(cur, cur, 1, NULL);
+          assert(ret == 1, "We should be able to expand at this index");
+        } else {
+          length_unavailable++;
+        }
+      }
+      length_found++;
+    }
+    else {
+      // This region is not a candidate. The next region is the next possible one.
+      found = cur + 1;
+      length_found = 0;
+    }
+    cur++;
+  }
+
+  if (length_found == num) {
+    for (uint i = found; i < (found + num); i++) {
+      HeapRegion* hr = _regions.get_by_index(i);
+      // sanity check
+      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+                "Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
+                " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
+    }
+    if (!empty_only && length_unavailable > (max_expandable_length() - total_regions_committed())) {
+      // if 'length_unavailable' number of regions will be made available, we will exceed max regions.
+      return G1_NO_HRM_INDEX;
+    }
+    return found;
+  }
+  else {
+    return G1_NO_HRM_INDEX;
+  }
+}
+
+uint HeterogeneousHeapRegionManager::find_highest_free(bool* expanded) {
+  // Loop downwards from the highest dram region index, looking for an
+  // entry which is either free or not yet committed.  If not yet
+  // committed, expand_at that index.
+  uint curr = end_index_of_dram();
+  while (true) {
+    HeapRegion *hr = _regions.get_by_index(curr);
+    if (hr == NULL && !(total_regions_committed() < _max_regions)) {
+      uint res = shrink_nvdimm(1);
+      if (res == 1) {
+        res = expand_in_range(curr, curr, 1, NULL);
+        assert(res == 1, "We should be able to expand since shrink was successful");
+        *expanded = true;
+        return curr;
+      }
+    }
+    else {
+      if (hr->is_free()) {
+        *expanded = false;
+        return curr;
+      }
+    }
+    if (curr == start_index_of_dram()) {
+      return G1_NO_HRM_INDEX;
+    }
+    curr--;
+  }
+}
+
+// We need to override this since region 0, which serves as the dummy region in the base class, may not be available here.
+// This is a corner case when the number of regions is small. When adaptive sizing is used, the initial heap size
+// could be just one region. This region is committed in dram to be used for the young generation, leaving region 0
+// (which is in nvdimm) unavailable.
+HeapRegion* HeterogeneousHeapRegionManager::get_dummy_region() {
+  uint curr = 0;
+
+  while (curr < _regions.length()) {
+    if (is_available(curr)) {
+      return new_heap_region(curr);
+    }
+    curr++;
+  }
+  assert(false, "We should always find a region available for dummy region");
+  return NULL;
+}
+
+// First shrink in dram, then in nv-dimm.
+uint HeterogeneousHeapRegionManager::shrink_by(uint num_regions) {
+  // This call is made at the end of a full collection. Before making this call the region sets are torn down (tear_down_region_sets()).
+  // So the shrink() calls below do not need to remove uncommitted regions from the free list.
+  uint ret = shrink_dram(num_regions, false /* update_free_list */);
+  ret += shrink_nvdimm(num_regions - ret, false /* update_free_list */);
+  return ret;
+}
+
+void HeterogeneousHeapRegionManager::verify() {
+  HeapRegionManager::verify();
+}
+
+uint HeterogeneousHeapRegionManager::free_list_dram_length() const {
+  return _free_list.num_of_regions_in_range(start_index_of_dram(), end_index_of_dram());
+}
+
+uint HeterogeneousHeapRegionManager::free_list_nvdimm_length() const {
+  return _free_list.num_of_regions_in_range(start_index_of_nvdimm(), end_index_of_nvdimm());
+}
+
+bool HeterogeneousHeapRegionManager::is_in_nvdimm(uint index) const {
+  return index >= start_index_of_nvdimm() && index <= end_index_of_nvdimm();
+}
+
+bool HeterogeneousHeapRegionManager::is_in_dram(uint index) const {
+  return index >= start_index_of_dram() && index <= end_index_of_dram();
+}
+
+// We have to make sure the full collection copies all surviving objects to NV-DIMM.
+// We might not have enough regions in nvdimm_set, so we need to make more regions on NV-DIMM available for the full collection.
+// Note: by doing this we are breaking the invariant that the total number of committed regions equals the current heap size.
+// After the full collection ends, we re-establish this invariant by freeing DRAM regions.
+void HeterogeneousHeapRegionManager::prepare_for_full_collection_start() {
+  _total_commited_before_full_gc = total_regions_committed() - _no_borrowed_regions;
+  _no_borrowed_regions = 0;
+  expand_nvdimm(num_committed_dram(), NULL);
+  remove_all_free_regions();
+}
+
+// We need to bring the total committed region count back to what it was before the full collection started.
+// Unless we are close to OOM, all regular (not pinned) regions in DRAM should be free.
+// We shrink all free regions in DRAM and, if needed, in NV-DIMM (when there are pinned DRAM regions).
+// If we can't bring the committed region count back to _total_commited_before_full_gc, we keep the extra count in _no_borrowed_regions.
+// When this GC finishes, new regions won't be allocated since has_borrowed_regions() is true. The VM will be forced to retry the GC
+// with soft references cleared, followed by an OOM error in the worst case.
+void HeterogeneousHeapRegionManager::prepare_for_full_collection_end() {
+  uint shrink_size = total_regions_committed() - _total_commited_before_full_gc;
+  uint so_far = 0;
+  uint idx_last_found = 0;
+  uint num_last_found;
+  uint end = (uint)_regions.length() - 1;
+  while (so_far < shrink_size &&
+         (num_last_found = find_empty_in_range_reverse(0, end, &idx_last_found)) > 0) {
+    uint to_uncommit = MIN2(shrink_size - so_far, num_last_found);
+    uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
+    so_far += to_uncommit;
+    end = idx_last_found;
+  }
+  // See comment above the function.
+  _no_borrowed_regions = shrink_size - so_far;
+}
+
+uint HeterogeneousHeapRegionManager::start_index_of_dram() const { return _max_regions; }
+
+uint HeterogeneousHeapRegionManager::end_index_of_dram() const { return 2 * _max_regions - 1; }
+
+uint HeterogeneousHeapRegionManager::start_index_of_nvdimm() const { return 0; }
+
+uint HeterogeneousHeapRegionManager::end_index_of_nvdimm() const { return _max_regions - 1; }
+
+// This function is called when there are no free nv-dimm regions.
+// It borrows a region from the set of unavailable regions in nv-dimm for GC purpose.
+HeapRegion* HeterogeneousHeapRegionManager::borrow_old_region_for_gc() {
+  assert(free_list_nvdimm_length() == 0, "this function should be called only when there are no nv-dimm regions in free list");
+
+  uint ret = expand_nvdimm(1, NULL);
+  if (ret != 1) {
+    return NULL;
+  }
+  HeapRegion* hr = _free_list.remove_region(true /*from_head*/);
+  assert(is_in_nvdimm(hr->hrm_index()), "allocated region should be in nv-dimm");
+  _no_borrowed_regions++;
+  return hr;
+}
+
+bool HeterogeneousHeapRegionManager::has_borrowed_regions() const {
+  return _no_borrowed_regions > 0;
+}
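
A hypothetical walk-through of the borrow accounting above (all counts assumed): suppose 512 regions are committed before a full collection (384 in nv-dimm, 128 in dram) with no borrowed regions. prepare_for_full_collection_start() records 512 and expands nv-dimm by 128 regions so every surviving object can be copied there, temporarily raising the committed count to 640. Afterwards, prepare_for_full_collection_end() tries to uncommit 128 empty regions; if only 120 can be found (say some dram regions are pinned), _no_borrowed_regions becomes 8, and eden/humongous allocations return NULL until a later collection releases the excess.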
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
+#define SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
+
+#include "gc/g1/heapRegionManager.hpp"
+
+// This class manages heap regions on heterogeneous memory comprising dram and nv-dimm.
+// Regions in dram (dram_set) are used for young objects and archive regions (CDS).
+// Regions in nv-dimm (nvdimm_set) are used for old objects and humongous objects.
+// At any point there are some regions committed on dram and some on nv-dimm with the following guarantees:
+//   1. The total number of regions committed in dram and nv-dimm equals the current size of heap.
+//   2. Consequently, total number of regions committed is less than or equal to Xmx.
+//   3. To maintain the guarantee stated by 1., whenever one set grows (new regions committed), the other set shrinks (regions un-committed).
+//      3a. If more dram regions are needed (young generation expansion), corresponding number of regions in nv-dimm are un-committed.
+//      3b. When old generation or humongous set grows, and new regions need to be committed to nv-dimm, corresponding number of regions
+//            are un-committed in dram.
+class HeterogeneousHeapRegionManager : public HeapRegionManager {
+  const uint _max_regions;
+  uint _max_dram_regions;
+  uint _max_nvdimm_regions;
+  uint _start_index_of_nvdimm;
+  uint _total_commited_before_full_gc;
+  uint _no_borrowed_regions;
+
+  uint total_regions_committed() const;
+  uint num_committed_dram() const;
+  uint num_committed_nvdimm() const;
+
+  // Similar to the find_unavailable_from_idx() function in the base class; the difference is that this function searches in the range [start, end].
+  uint find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const;
+
+  // Expand into dram. Maintains the invariant that the total number of committed regions does not exceed the current heap size.
+  uint expand_dram(uint num_regions, WorkGang* pretouch_workers);
+
+  // Expand into nv-dimm.
+  uint expand_nvdimm(uint num_regions, WorkGang* pretouch_workers);
+
+  // Expand by finding unavailable regions in [start, end] range.
+  uint expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_workers);
+
+  // Shrink dram set of regions.
+  uint shrink_dram(uint num_regions, bool update_free_list = true);
+
+  // Shrink nv-dimm set of regions.
+  uint shrink_nvdimm(uint num_regions, bool update_free_list = true);
+
+  // Shrink regions from [start, end] range.
+  uint shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list = true);
+
+  // Similar to find_empty_from_idx_reverse() in the base class, but searches in the range [start, end].
+  uint find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx);
+
+  // Similar to find_contiguous() in the base class, but restricted to the [start, end] range.
+  uint find_contiguous(size_t start, size_t end, size_t num, bool empty_only);
+
+  // This function is called when there are no free nv-dimm regions.
+  // It borrows a region from the set of unavailable regions in nv-dimm for GC purposes.
+  HeapRegion* borrow_old_region_for_gc();
+
+  uint free_list_dram_length() const;
+  uint free_list_nvdimm_length() const;
+
+  // Is the region with the given index in nv-dimm?
+  bool is_in_nvdimm(uint index) const;
+  bool is_in_dram(uint index) const;
+
+public:
+
+  // The constructor only records region counts; the heap storage is set up later in initialize().
+  HeterogeneousHeapRegionManager(uint num_regions) : _max_regions(num_regions), _max_dram_regions(0),
+                                                     _max_nvdimm_regions(0), _start_index_of_nvdimm(0),
+                                                     _total_commited_before_full_gc(0), _no_borrowed_regions(0)
+  {}
+
+  static HeterogeneousHeapRegionManager* manager();
+
+  virtual void initialize(G1RegionToSpaceMapper* heap_storage,
+                          G1RegionToSpaceMapper* prev_bitmap,
+                          G1RegionToSpaceMapper* next_bitmap,
+                          G1RegionToSpaceMapper* bot,
+                          G1RegionToSpaceMapper* cardtable,
+                          G1RegionToSpaceMapper* card_counts);
+
+  uint start_index_of_nvdimm() const;
+  uint start_index_of_dram() const;
+  uint end_index_of_nvdimm() const;
+  uint end_index_of_dram() const;
+
+  // Override.
+  HeapRegion* get_dummy_region();
+
+  // Adjust dram_set to provision 'expected_num_regions' regions.
+  void adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers);
+
+  // Prepare heap regions before and after full collection.
+  void prepare_for_full_collection_start();
+  void prepare_for_full_collection_end();
+
+  virtual HeapRegion* allocate_free_region(HeapRegionType type);
+
+  // Return maximum number of regions that heap can expand to.
+  uint max_expandable_length() const;
+
+  // Override. Expand in nv-dimm.
+  uint expand_by(uint num_regions, WorkGang* pretouch_workers);
+
+  // Override.
+  uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
+
+  // Override. This function is called for humongous allocation, so we need to find empty regions in nv-dimm.
+  uint find_contiguous_only_empty(size_t num);
+
+  // Override. This function is called for humongous allocation, so we need to find empty or unavailable regions in nv-dimm.
+  uint find_contiguous_empty_or_unavailable(size_t num);
+
+  // Overrides base class implementation to find highest free region in dram.
+  uint find_highest_free(bool* expanded);
+
+  // Override. This function is called to shrink the heap; we shrink in dram first, then in nv-dimm.
+  uint shrink_by(uint num_regions_to_remove);
+
+  bool has_borrowed_regions() const;
+
+  void verify();
+};
+
+#endif // SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
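
The commit/uncommit balancing that the class comment above describes can be sketched in a few lines. The toy model below is illustrative only (RegionBudget and its fields are invented names, not part of this changeset), but it captures guarantees 1 and 3: the dram and nv-dimm sets always sum to the current heap size, so growing one side uncommits the same number of regions on the other.

    // Hypothetical model of HeterogeneousHeapRegionManager's region accounting.
    class RegionBudget {
        private int dramCommitted;
        private int nvdimmCommitted;
        private final int heapSizeInRegions; // current heap size, at most Xmx

        RegionBudget(int heapSizeInRegions, int dramCommitted) {
            this.heapSizeInRegions = heapSizeInRegions;
            this.dramCommitted = dramCommitted;
            // Guarantee 1: the two sets sum to the current heap size.
            this.nvdimmCommitted = heapSizeInRegions - dramCommitted;
        }

        // Rule 3a: expanding dram (young gen growth) uncommits nv-dimm regions.
        void expandDram(int n) {
            nvdimmCommitted -= n;  // analogous to shrink_nvdimm(n)
            dramCommitted += n;    // analogous to expand_dram(n)
            assert dramCommitted + nvdimmCommitted == heapSizeInRegions;
        }

        // Rule 3b: expanding nv-dimm (old/humongous growth) uncommits dram regions.
        void expandNvdimm(int n) {
            dramCommitted -= n;    // analogous to shrink_dram(n)
            nvdimmCommitted += n;  // analogous to expand_nvdimm(n)
            assert dramCommitted + nvdimmCommitted == heapSizeInRegions;
        }
    }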
--- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -53,7 +53,7 @@
   nonstatic_field(HeapRegionManager, _num_committed,    uint)                 \
                                                                               \
   nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
-  nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager)    \
+  nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager*)    \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
   nonstatic_field(G1CollectedHeap, _archive_set,        HeapRegionSetBase)    \
--- a/src/hotspot/share/gc/shared/gcArguments.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -28,6 +28,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
+#include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
 
 void GCArguments::initialize() {
@@ -53,4 +54,28 @@
     // If class unloading is disabled, also disable concurrent class unloading.
     FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
   }
+
+  if (!FLAG_IS_DEFAULT(AllocateOldGenAt)) {
+    // CompressedOops not supported when AllocateOldGenAt is set.
+    FLAG_SET_DEFAULT(UseCompressedOops, false);
+    FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
+    // When AllocateOldGenAt is set, we cannot use large pages for the entire heap memory.
+    // Only the young gen, which is allocated in dram, could use large pages, but we currently don't support that.
+    FLAG_SET_DEFAULT(UseLargePages, false);
+  }
 }
+
+bool GCArguments::check_args_consistency() {
+  bool status = true;
+  if (!FLAG_IS_DEFAULT(AllocateHeapAt) && !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
+    jio_fprintf(defaultStream::error_stream(),
+      "AllocateHeapAt and AllocateOldGenAt cannot be used together.\n");
+    status = false;
+  }
+  if (!FLAG_IS_DEFAULT(AllocateOldGenAt) && (UseSerialGC || UseConcMarkSweepGC || UseEpsilonGC || UseZGC)) {
+    jio_fprintf(defaultStream::error_stream(),
+      "AllocateOldGenAt is not supported for selected GC.\n");
+    status = false;
+  }
+  return status;
+}
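
The two checks above turn inconsistent flag combinations into startup failures. Below is a minimal jtreg-style sketch of how they could be exercised, written in the style of the new gc/nvdimm tests (this class is not part of the changeset and its name is invented; the expected strings are the messages emitted by check_args_consistency()):

    import jdk.test.lib.process.OutputAnalyzer;
    import jdk.test.lib.process.ProcessTools;

    public class TestAllocateOldGenAtConsistency {
        public static void main(String[] args) throws Exception {
            // AllocateHeapAt and AllocateOldGenAt together must fail at startup.
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                "-XX:+UnlockExperimentalVMOptions",
                "-XX:AllocateHeapAt=.",
                "-XX:AllocateOldGenAt=.",
                "-version");
            OutputAnalyzer output = new OutputAnalyzer(pb.start());
            output.shouldContain("AllocateHeapAt and AllocateOldGenAt cannot be used together.");
            output.shouldNotHaveExitValue(0);

            // A collector without support (e.g. Serial) must also be rejected.
            pb = ProcessTools.createJavaProcessBuilder(
                "-XX:+UnlockExperimentalVMOptions",
                "-XX:+UseSerialGC",
                "-XX:AllocateOldGenAt=.",
                "-version");
            output = new OutputAnalyzer(pb.start());
            output.shouldContain("AllocateOldGenAt is not supported for selected GC.");
            output.shouldNotHaveExitValue(0);
        }
    }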
--- a/src/hotspot/share/gc/shared/gcArguments.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/gc/shared/gcArguments.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -39,6 +39,7 @@
   virtual void initialize();
   virtual size_t conservative_max_heap_alignment() = 0;
   virtual CollectedHeap* create_heap() = 0;
+  static bool check_args_consistency();
 };
 
 #endif // SHARE_GC_SHARED_GCARGUMENTS_HPP
--- a/src/hotspot/share/prims/whitebox.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/prims/whitebox.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -82,6 +82,7 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkThread.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/g1/heterogeneousHeapRegionManager.hpp"
 #endif // INCLUDE_G1GC
 #if INCLUDE_PARALLELGC
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
@@ -499,6 +500,61 @@
 
 #endif // INCLUDE_G1GC
 
+#if INCLUDE_G1GC
+WB_ENTRY(jlong, WB_DramReservedStart(JNIEnv* env, jobject o))
+  if (UseG1GC) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    if (g1h->g1_collector_policy()->is_hetero_heap()) {
+      uint start_region = HeterogeneousHeapRegionManager::manager()->start_index_of_dram();
+      return (jlong)(g1h->base() + start_region * HeapRegion::GrainBytes);
+    } else {
+      return (jlong)g1h->base();
+    }
+  }
+  THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_DramReservedStart: enabled only for G1");
+WB_END
+
+WB_ENTRY(jlong, WB_DramReservedEnd(JNIEnv* env, jobject o))
+  if (UseG1GC) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    if (g1h->g1_collector_policy()->is_hetero_heap()) {
+      uint end_region = HeterogeneousHeapRegionManager::manager()->end_index_of_dram();
+      return (jlong)(g1h->base() + (end_region + 1) * HeapRegion::GrainBytes - 1);
+    } else {
+      return (jlong)g1h->base() + g1h->collector_policy()->max_heap_byte_size();
+    }
+  }
+  THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_DramReservedEnd: enabled only for G1");
+WB_END
+
+WB_ENTRY(jlong, WB_NvdimmReservedStart(JNIEnv* env, jobject o))
+  if (UseG1GC) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    if (g1h->g1_collector_policy()->is_hetero_heap()) {
+      uint start_region = HeterogeneousHeapRegionManager::manager()->start_index_of_nvdimm();
+      return (jlong)(g1h->base() + start_region * HeapRegion::GrainBytes);
+    } else {
+      THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedStart: Old gen is not allocated on NV-DIMM using AllocateOldGenAt flag");
+    }
+  }
+  THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedStart: enabled only for G1");
+WB_END
+
+WB_ENTRY(jlong, WB_NvdimmReservedEnd(JNIEnv* env, jobject o))
+  if (UseG1GC) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    if (g1h->g1_collector_policy()->is_hetero_heap()) {
+      uint end_region = HeterogeneousHeapRegionManager::manager()->end_index_of_nvdimm();
+      return (jlong)(g1h->base() + (end_region + 1) * HeapRegion::GrainBytes - 1);
+    } else {
+      THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedEnd: Old gen is not allocated on NV-DIMM using AllocateOldGenAt flag");
+    }
+  }
+  THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_NvdimmReservedEnd: enabled only for G1");
+WB_END
+
+#endif // INCLUDE_G1GC
+
 #if INCLUDE_PARALLELGC
 
 WB_ENTRY(jlong, WB_PSVirtualSpaceAlignment(JNIEnv* env, jobject o))
@@ -2053,6 +2109,10 @@
   {CC"g1AuxiliaryMemoryUsage", CC"()Ljava/lang/management/MemoryUsage;",
                                                       (void*)&WB_G1AuxiliaryMemoryUsage  },
   {CC"g1GetMixedGCInfo",   CC"(I)[J",                 (void*)&WB_G1GetMixedGCInfo },
+  {CC"dramReservedStart",   CC"()J",                  (void*)&WB_DramReservedStart },
+  {CC"dramReservedEnd",     CC"()J",                  (void*)&WB_DramReservedEnd },
+  {CC"nvdimmReservedStart", CC"()J",                  (void*)&WB_NvdimmReservedStart },
+  {CC"nvdimmReservedEnd",   CC"()J",                  (void*)&WB_NvdimmReservedEnd },
 #endif // INCLUDE_G1GC
 #if INCLUDE_PARALLELGC
   {CC"psVirtualSpaceAlignment",CC"()J",               (void*)&WB_PSVirtualSpaceAlignment},
--- a/src/hotspot/share/runtime/arguments.cpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/runtime/arguments.cpp	Fri Dec 21 08:18:59 2018 -0800
@@ -2062,6 +2062,9 @@
       log_warning(arguments) ("NUMA support for Heap depends on the file system when AllocateHeapAt option is used.\n");
     }
   }
+
+  status = status && GCArguments::check_args_consistency();
+
   return status;
 }
 
@@ -2953,6 +2956,7 @@
   }
 #endif // LINUX
   fix_appclasspath();
+
   return JNI_OK;
 }
 
--- a/src/hotspot/share/runtime/globals.hpp	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/hotspot/share/runtime/globals.hpp	Fri Dec 21 08:18:59 2018 -0800
@@ -2575,6 +2575,12 @@
           "Path to the directoy where a temporary file will be created "    \
           "to use as the backing store for Java Heap.")                     \
                                                                             \
+  experimental(ccstr, AllocateOldGenAt, NULL,                               \
+          "Path to the directoy where a temporary file will be "            \
+          "created to use as the backing store for old generation."         \
+          "File of size Xmx is pre-allocated for performance reason, so"    \
+          "we need that much space available")                              \
+                                                                            \
   develop(bool, VerifyMetaspace, false,                                     \
           "Verify metaspace on chunk movements.")                           \
                                                                             \
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Fri Dec 21 18:26:55 2018 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java	Fri Dec 21 08:18:59 2018 -0800
@@ -47,7 +47,7 @@
 
 public class G1CollectedHeap extends CollectedHeap {
     // HeapRegionManager _hrm;
-    static private long hrmFieldOffset;
+    static private AddressField hrmField;
     // MemRegion _g1_reserved;
     static private long g1ReservedFieldOffset;
     // size_t _summary_bytes_used;
@@ -72,7 +72,7 @@
     static private synchronized void initialize(TypeDataBase db) {
         Type type = db.lookupType("G1CollectedHeap");
 
-        hrmFieldOffset = type.getField("_hrm").getOffset();
+        hrmField = type.getAddressField("_hrm");
         summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
         g1mmField = type.getAddressField("_g1mm");
         oldSetFieldOffset = type.getField("_old_set").getOffset();
@@ -93,7 +93,7 @@
     }
 
     public HeapRegionManager hrm() {
-        Address hrmAddr = addr.addOffsetTo(hrmFieldOffset);
+        Address hrmAddr = hrmField.getValue(addr);
         return (HeapRegionManager) VMObjectFactory.newObject(HeapRegionManager.class,
                                                          hrmAddr);
     }
--- a/test/hotspot/jtreg/TEST.groups	Fri Dec 21 18:26:55 2018 +0000
+++ b/test/hotspot/jtreg/TEST.groups	Fri Dec 21 08:18:59 2018 -0800
@@ -40,7 +40,8 @@
   -:tier1_compiler_not_cms
 
 hotspot_gc = \
-  gc
+  gc \
+  -gc/nvdimm
 
 hotspot_runtime = \
   runtime
@@ -190,7 +191,8 @@
   -gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java \
   -gc/cms/TestMBeanCMS.java \
   -gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
-  -gc/shenandoah
+  -gc/shenandoah \
+  -gc/nvdimm
 
 gc_epsilon = \
   gc/epsilon/ \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/nvdimm/TestAllocateOldGenAt.java	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestAllocateOldGenAt.java
+ * @key gc
+ * @summary Test to check allocation of Java Heap with AllocateOldGenAt option
+ * @requires vm.gc=="null"
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ */
+
+import jdk.test.lib.JDKToolFinder;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import java.util.ArrayList;
+import java.util.Collections;
+
+public class TestAllocateOldGenAt {
+  private static ArrayList<String> commonOpts;
+
+  public static void main(String args[]) throws Exception {
+    commonOpts = new ArrayList<>();
+
+    String testVmOptsStr = System.getProperty("test.java.opts", "");
+    if (!testVmOptsStr.isEmpty()) {
+      String[] testVmOpts = testVmOptsStr.split(" ");
+      Collections.addAll(commonOpts, testVmOpts);
+    }
+    String test_dir = System.getProperty("test.dir", ".");
+    Collections.addAll(commonOpts, new String[] {"-XX:+UnlockExperimentalVMOptions",
+                                                 "-XX:AllocateOldGenAt=" + test_dir,
+                                                 "-Xmx32m",
+                                                 "-Xms32m",
+                                                 "-version"});
+
+    runTest("-XX:+UseG1GC");
+  }
+
+  private static void runTest(String... extraFlags) throws Exception {
+    ArrayList<String> testOpts = new ArrayList<>(commonOpts);
+    Collections.addAll(testOpts, extraFlags);
+
+    System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
+    for (int i = 0; i < testOpts.size(); i += 1) {
+      System.out.print(" " + testOpts.get(i));
+    }
+    System.out.println();
+
+    ProcessBuilder pb =
+      ProcessTools.createJavaProcessBuilder(testOpts.toArray(new String[testOpts.size()]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    output.shouldHaveExitValue(0);
+
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/nvdimm/TestAllocateOldGenAtError.java	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestAllocateOldGenAtError.java
+ * @key gc
+ * @summary Test to check correct handling of non-existent directory passed to AllocateOldGenAt option
+ * @requires vm.gc=="null"
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ */
+
+import java.io.File;
+import jdk.test.lib.JDKToolFinder;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.UUID;
+
+public class TestAllocateOldGenAtError {
+  private static ArrayList<String> commonOpts;
+
+  public static void main(String args[]) throws Exception {
+    commonOpts = new ArrayList<>();
+
+    String testVmOptsStr = System.getProperty("test.java.opts", "");
+    if (!testVmOptsStr.isEmpty()) {
+      String[] testVmOpts = testVmOptsStr.split(" ");
+      Collections.addAll(commonOpts, testVmOpts);
+    }
+    String test_dir = System.getProperty("test.dir", ".");
+
+    File f = null;
+    do {
+      f = new File(test_dir, UUID.randomUUID().toString());
+    } while(f.exists());
+
+    Collections.addAll(commonOpts, new String[] {"-XX:+UnlockExperimentalVMOptions",
+                                                 "-XX:AllocateOldGenAt=" + f.getName(),
+                                                 "-Xlog:gc+heap=info",
+                                                 "-Xmx32m",
+                                                 "-Xms32m",
+                                                 "-version"});
+
+    testG1();
+  }
+
+  private static void testG1() throws Exception {
+    System.out.println("Testing G1 GC");
+
+    OutputAnalyzer output = runTest("-XX:+UseG1GC");
+
+    output.shouldContain("Could not initialize G1 heap");
+    output.shouldContain("Error occurred during initialization of VM");
+    output.shouldNotHaveExitValue(0);
+  }
+
+  private static OutputAnalyzer runTest(String... extraFlags) throws Exception {
+    ArrayList<String> testOpts = new ArrayList<>(commonOpts);
+    Collections.addAll(testOpts, extraFlags);
+
+    System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
+    for (int i = 0; i < testOpts.size(); i += 1) {
+      System.out.print(" " + testOpts.get(i));
+    }
+    System.out.println();
+
+    ProcessBuilder pb =
+      ProcessTools.createJavaProcessBuilder(testOpts.toArray(new String[testOpts.size()]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    return output;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/nvdimm/TestAllocateOldGenAtMultiple.java	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestAllocateOldGenAtMultiple.java
+ * @key gc
+ * @summary Test to check allocation of Java Heap with AllocateOldGenAt option. Has multiple sub-tests to cover different code paths.
+ * @requires vm.gc=="null"
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ * @requires vm.bits == "64"
+ * @run main TestAllocateOldGenAtMultiple -XX:+UseG1GC
+ */
+
+import jdk.test.lib.JDKToolFinder;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import java.util.ArrayList;
+import java.util.Collections;
+
+public class TestAllocateOldGenAtMultiple {
+  public static void main(String args[]) throws Exception {
+    ArrayList<String> vmOpts = new ArrayList<>();
+    String[] testVmOpts = null;
+
+    String test_dir = System.getProperty("test.dir", ".");
+
+    String testVmOptsStr = System.getProperty("test.java.opts", "");
+    if (!testVmOptsStr.isEmpty()) {
+      testVmOpts = testVmOptsStr.split(" ");
+    }
+
+    // Extra options for each of the sub-tests
+    String[] extraOptsList = new String[] {
+      "-Xmx32m -Xms32m -XX:+UseCompressedOops",     // 1. With compressedoops enabled.
+      "-Xmx32m -Xms32m -XX:-UseCompressedOops",     // 2. With compressedoops disabled.
+      "-Xmx32m -Xms32m -XX:HeapBaseMinAddress=3g",  // 3. With user specified HeapBaseMinAddress.
+      "-Xmx4g -Xms4g",                              // 4. With larger heap size (UnscaledNarrowOop not possible).
+      "-Xmx4g -Xms4g -XX:+UseLargePages",           // 5. Set UseLargePages.
+      "-Xmx4g -Xms4g -XX:+UseNUMA"                  // 6. Set UseNUMA.
+    };
+
+    for (String extraOpts : extraOptsList) {
+      vmOpts.clear();
+      if (testVmOpts != null) {
+        Collections.addAll(vmOpts, testVmOpts);
+      }
+      // Add extra options specific to the sub-test.
+      Collections.addAll(vmOpts, extraOpts.split(" "));
+      // Add common options
+      Collections.addAll(vmOpts, new String[] {"-XX:+UnlockExperimentalVMOptions",
+                                               "-XX:AllocateOldGenAt=" + test_dir,
+                                               "-version"});
+
+      System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
+      for (int i = 0; i < vmOpts.size(); i += 1) {
+        System.out.print(" " + vmOpts.get(i));
+      }
+      System.out.println();
+
+      ProcessBuilder pb =
+        ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
+      OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+      System.out.println("Output:\n" + output.getOutput());
+
+      output.shouldHaveExitValue(0);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/nvdimm/TestHumongousObjectsOnNvdimm.java	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestHumongousObjectsOnNvdimm
+ * @summary Check that humongous objects reside in nv-dimm
+ * @library /test/lib /
+ * @requires vm.gc=="null"
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main TestHumongousObjectsOnNvdimm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                                  -XX:+WhiteBoxAPI
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.Asserts;
+import sun.hotspot.WhiteBox;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Collections;
+import gc.testlibrary.Helpers;
+
+/**
+ * Test spawns HumongousObjectTest in a separate VM and expects that it
+ * completes without a RuntimeException.
+ */
+public class TestHumongousObjectsOnNvdimm {
+
+    private static ArrayList<String> testOpts;
+
+    public static void main(String args[]) throws Exception {
+        testOpts = new ArrayList<>();
+
+        String[] common_options = new String[] {
+            "-Xbootclasspath/a:.",
+            "-XX:+UnlockExperimentalVMOptions",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:+WhiteBoxAPI",
+            "-XX:AllocateOldGenAt="+System.getProperty("test.dir", "."),
+            "-Xms10M", "-Xmx10M",
+            "-XX:G1HeapRegionSize=1m"
+        };
+
+        String testVmOptsStr = System.getProperty("test.java.opts", "");
+        if (!testVmOptsStr.isEmpty()) {
+            String[] testVmOpts = testVmOptsStr.split(" ");
+            Collections.addAll(testOpts, testVmOpts);
+        }
+        Collections.addAll(testOpts, common_options);
+
+        // Test with G1 GC
+        runTest("-XX:+UseG1GC");
+    }
+
+    private static void runTest(String... extraFlags) throws Exception {
+        Collections.addAll(testOpts, extraFlags);
+        testOpts.add(HumongousObjectTest.class.getName());
+        System.out.println(testOpts);
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(false,
+                testOpts.toArray(new String[testOpts.size()]));
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+    }
+}
+
+/**
+ * This class tests that a humongous object resides in NVDIMM.
+ */
+class HumongousObjectTest {
+    private static final WhiteBox WB = WhiteBox.getWhiteBox();
+
+    private static void validateObject(Object o) {
+        Asserts.assertTrue(WB.isObjectInOldGen(o),
+                "Object is supposed to be in OldGen");
+
+        long obj_addr = WB.getObjectAddress(o);
+        long nvdimm_heap_start = WB.nvdimmReservedStart();
+        long nvdimm_heap_end = WB.nvdimmReservedEnd();
+
+        Asserts.assertTrue(WB.g1BelongsToHumongousRegion(obj_addr), "Object address should be in Humongous set");
+        Asserts.assertTrue(obj_addr >= nvdimm_heap_start && obj_addr < nvdimm_heap_end,
+                "Humongous object does not reside in NVDIMM");
+    }
+
+    public static void main(String args[]) throws Exception {
+        // Allocate a humongous object: G1 treats objects larger than half a
+        // region as humongous, so size the array to just exceed half a region.
+        int byteArrayMemoryOverhead = Helpers.detectByteArrayAllocationOverhead();
+        int minByteArrayHumongousSize = (WB.g1RegionSize() / 2) - byteArrayMemoryOverhead + 1;
+        byte[] obj = new byte[minByteArrayHumongousSize];
+
+        validateObject(obj);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/nvdimm/TestOldObjectsOnNvdimm.java	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestOldObjectsOnNvdimm
+ * @summary Check that objects in old generation reside in nv-dimm.
+ * @requires vm.gc=="null"
+ * @library /test/lib
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main TestOldObjectsOnNvdimm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                                  -XX:+WhiteBoxAPI
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.Asserts;
+import sun.hotspot.WhiteBox;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Collections;
+
+/*
+ * Test spawns OldObjectTest in a separate VM and expects that it
+ * completes without a RuntimeException.
+ */
+public class TestOldObjectsOnNvdimm {
+
+    public static final int ALLOCATION_SIZE = 100;
+    private static ArrayList<String> testOpts;
+
+    public static void main(String args[]) throws Exception {
+        testOpts = new ArrayList<>();
+
+        String[] common_options = new String[] {
+            "-Xbootclasspath/a:.",
+            "-XX:+UnlockExperimentalVMOptions",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:+WhiteBoxAPI",
+            "-XX:AllocateOldGenAt="+System.getProperty("test.dir", "."),
+            "-Xms10M", "-Xmx10M",
+            "-XX:MaxTenuringThreshold=1" // Promote objects to Old Gen
+        };
+
+        String testVmOptsStr = System.getProperty("test.java.opts", "");
+        if (!testVmOptsStr.isEmpty()) {
+            String[] testVmOpts = testVmOptsStr.split(" ");
+            Collections.addAll(testOpts, testVmOpts);
+        }
+        Collections.addAll(testOpts, common_options);
+
+        // Test with G1 GC
+        runTest("-XX:+UseG1GC");
+    }
+
+    private static void runTest(String... extraFlags) throws Exception {
+        Collections.addAll(testOpts, extraFlags);
+        testOpts.add(OldObjectTest.class.getName());
+        System.out.println(testOpts);
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(false,
+                testOpts.toArray(new String[testOpts.size()]));
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        System.out.println(output.getStdout());
+        output.shouldHaveExitValue(0);
+    }
+}
+
+/*
+ * This class tests that an object is in the Old generation after tenuring and resides in NVDIMM.
+ * The necessary condition for this test is running in VM with the following flags:
+ * -XX:AllocateOldGenAt=, -XX:MaxTenuringThreshold=1
+ */
+class OldObjectTest {
+    private static final WhiteBox WB = WhiteBox.getWhiteBox();
+
+    private static void validateOldObject(Object o) {
+        Asserts.assertTrue(WB.isObjectInOldGen(o),
+                "Object is supposed to be in OldGen");
+
+        long oldObj_addr = WB.getObjectAddress(o);
+        long nvdimm_heap_start = WB.nvdimmReservedStart();
+        long nvdimm_heap_end = WB.nvdimmReservedEnd();
+
+        Asserts.assertTrue(oldObj_addr >= nvdimm_heap_start && oldObj_addr <= nvdimm_heap_end,
+                "Old object does not reside in NVDIMM");
+    }
+
+    public static void main(String args[]) throws Exception {
+        // allocate an object and perform Young GCs to promote it to Old
+        byte[] oldObj = new byte[TestOldObjectsOnNvdimm.ALLOCATION_SIZE];
+        WB.youngGC();
+        WB.youngGC();
+        validateOldObject(oldObj);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/nvdimm/TestYoungObjectsOnDram.java	Fri Dec 21 08:18:59 2018 -0800
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestYoungObjectsOnDram
+ * @summary Check that objects in young generation reside in dram.
+ * @requires vm.gc=="null"
+ * @library /test/lib
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main TestYoungObjectsOnDram -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                                  -XX:+WhiteBoxAPI
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.Asserts;
+import sun.hotspot.WhiteBox;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Collections;
+
+/**
+ * Test spawns YoungObjectTest in a separate VM and expects that it
+ * completes without a RuntimeException.
+ */
+public class TestYoungObjectsOnDram {
+
+    public static final int ALLOCATION_SIZE = 100;
+    private static ArrayList<String> testOpts;
+
+    public static void main(String args[]) throws Exception {
+        testOpts = new ArrayList<>();
+
+        String[] common_options = new String[] {
+            "-Xbootclasspath/a:.",
+            "-XX:+UnlockExperimentalVMOptions",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:+WhiteBoxAPI",
+            "-XX:AllocateOldGenAt="+System.getProperty("test.dir", "."),
+            "-XX:SurvivorRatio=1", // Survivor-to-eden ratio is 1:1
+            "-Xms10M", "-Xmx10M",
+            "-XX:InitialTenuringThreshold=15" // avoid promotion of objects to Old Gen
+        };
+
+        String testVmOptsStr = System.getProperty("test.java.opts", "");
+        if (!testVmOptsStr.isEmpty()) {
+            String[] testVmOpts = testVmOptsStr.split(" ");
+            Collections.addAll(testOpts, testVmOpts);
+        }
+        Collections.addAll(testOpts, common_options);
+
+        // Test with G1 GC
+        runTest("-XX:+UseG1GC");
+    }
+
+    private static void runTest(String... extraFlags) throws Exception {
+        Collections.addAll(testOpts, extraFlags);
+        testOpts.add(YoungObjectTest.class.getName());
+        System.out.println(testOpts);
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(false,
+                testOpts.toArray(new String[testOpts.size()]));
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        System.out.println(output.getStdout());
+        output.shouldHaveExitValue(0);
+    }
+}
+
+/**
+ * This class tests that a newly created object is in the Young generation and resides in DRAM.
+ * The necessary condition for this test is running in VM with the following flags:
+ * -XX:AllocateOldGenAt=, -XX:InitialTenuringThreshold=15, -XX:SurvivorRatio=1
+ */
+class YoungObjectTest {
+    private static final WhiteBox WB = WhiteBox.getWhiteBox();
+
+    private static void validateYoungObject(Object o) {
+        Asserts.assertTrue(!WB.isObjectInOldGen(o),
+                "Object is supposed to be in YoungGen");
+
+        long youngObj_addr = WB.getObjectAddress(o);
+        long dram_heap_start = WB.dramReservedStart();
+        long dram_heap_end = WB.dramReservedEnd();
+
+        Asserts.assertTrue(youngObj_addr >= dram_heap_start && youngObj_addr <= dram_heap_end,
+                "Young object does not reside in DRAM");
+    }
+
+    public static void main(String args[]) throws Exception {
+        // allocate an object
+        byte[] youngObj = new byte[TestYoungObjectsOnDram.ALLOCATION_SIZE];
+        validateYoungObject(youngObj);
+
+        // Start a Young GC and check that the object is still in DRAM.
+        // -XX:InitialTenuringThreshold=15 keeps it from being promoted to Old Gen.
+        WB.youngGC();
+        validateYoungObject(youngObj);
+    }
+}
--- a/test/lib/sun/hotspot/WhiteBox.java	Fri Dec 21 18:26:55 2018 +0000
+++ b/test/lib/sun/hotspot/WhiteBox.java	Fri Dec 21 08:18:59 2018 -0800
@@ -182,6 +182,10 @@
   public native long    g1NumMaxRegions();
   public native long    g1NumFreeRegions();
   public native int     g1RegionSize();
+  public native long    dramReservedStart();
+  public native long    dramReservedEnd();
+  public native long    nvdimmReservedStart();
+  public native long    nvdimmReservedEnd();
   public native MemoryUsage g1AuxiliaryMemoryUsage();
   private  native Object[]    parseCommandLine0(String commandline, char delim, DiagnosticCommand[] args);
   public          Object[]    parseCommandLine(String commandline, char delim, DiagnosticCommand[] args) {
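
The four queries added above expose the reserved address range of each memory tier to tests. Below is a minimal usage sketch (not part of the changeset; the class name is invented) of the pattern the new gc/nvdimm tests build on: resolve an object's address and check which range contains it. It assumes a G1 VM started with -XX:AllocateOldGenAt=<path> and the usual WhiteBox flags (-Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI); on a non-heterogeneous heap the nvdimm queries throw UnsupportedOperationException, as the whitebox.cpp entries show.

    import sun.hotspot.WhiteBox;

    public class WhereDoesItLive {
        public static void main(String[] args) {
            WhiteBox wb = WhiteBox.getWhiteBox();
            byte[] obj = new byte[100];

            long addr = wb.getObjectAddress(obj);
            // Dram holds young (and archive) regions; nv-dimm holds old and humongous.
            if (addr >= wb.dramReservedStart() && addr <= wb.dramReservedEnd()) {
                System.out.println("object is in the dram range");
            } else if (addr >= wb.nvdimmReservedStart() && addr <= wb.nvdimmReservedEnd()) {
                System.out.println("object is in the nv-dimm range");
            }
        }
    }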