Merge
author dsamersoff
Mon, 31 Aug 2015 21:46:33 +0300
changeset 32599 084cb21b3975
parent 32404 ab79437fbfaa (current diff)
parent 32597 d5dfba528673 (diff)
child 32600 4fdefe1ae99b
Merge
hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp
hotspot/src/cpu/x86/vm/templateTable_x86.cpp
--- a/hotspot/make/solaris/makefiles/adlc.make	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/make/solaris/makefiles/adlc.make	Mon Aug 31 21:46:33 2015 +0300
@@ -76,6 +76,11 @@
 ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
   CFLAGS_WARN = +w -errwarn
 endif
+# When using compiler version 5.13 (Solaris Studio 12.4), calls to explicitly
+# instantiated template functions trigger the "notemsource" warning when +w is active.
+ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 513), 1)
+  CFLAGS_WARN += -erroff=notemsource
+endif
 CFLAGS += $(CFLAGS_WARN)
 
 ifeq ("${Platform_compiler}", "sparcWorks")
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -3043,7 +3043,9 @@
   // register obj is destroyed afterwards.
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
+         bs->kind() == BarrierSet::CardTableExtension,
+         "Wrong barrier set kind");
 
   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -691,7 +691,7 @@
         __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
         __ pop(RegSet::range(r0, r29), sp);         // integer registers except lr & sp        }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -731,7 +731,7 @@
           __ pop(RegSet::range(r0, r29), sp);         // integer registers except lr & sp        }
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = (CardTableModRefBS*)bs;
--- a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -186,7 +186,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (val == noreg) {
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -2614,7 +2614,7 @@
 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
   CardTableModRefBS* bs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(bs->kind() == BarrierSet::CardTableModRef ||
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
 #ifdef ASSERT
   cmpdi(CCR0, Rnew_val, 0);
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -656,7 +656,7 @@
           __ bind(filtered);
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -697,7 +697,7 @@
           }
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           Label Lskip_loop, Lstore_loop;
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -105,7 +105,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         Label Lnull, Ldone;
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -3958,7 +3958,7 @@
   if (new_val == G0) return;
   CardTableModRefBS* bs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(bs->kind() == BarrierSet::CardTableModRef ||
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
   card_table_write(bs->byte_map_base, tmp, store_addr);
 }
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -981,7 +981,7 @@
           __ restore();
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -1014,7 +1014,7 @@
           __ restore();
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -91,7 +91,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (index == noreg ) {
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -4320,7 +4320,9 @@
   // register obj is destroyed afterwards.
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
+         bs->kind() == BarrierSet::CardTableExtension,
+         "Wrong barrier set kind");
 
   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -722,7 +722,7 @@
            __ popa();
          }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -754,7 +754,7 @@
         }
         break;
 
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -367,16 +367,20 @@
 #ifdef ASSERT
     // verify that threads correspond
     {
-      Label L, S;
+      Label L1, L2, L3;
       __ cmpptr(r15_thread, thread);
-      __ jcc(Assembler::notEqual, S);
+      __ jcc(Assembler::equal, L1);
+      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
+      __ bind(L1);
       __ get_thread(rbx);
+      __ cmpptr(r15_thread, thread);
+      __ jcc(Assembler::equal, L2);
+      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
+      __ bind(L2);
       __ cmpptr(r15_thread, rbx);
-      __ jcc(Assembler::equal, L);
-      __ bind(S);
-      __ jcc(Assembler::equal, L);
+      __ jcc(Assembler::equal, L3);
       __ stop("StubRoutines::call_stub: threads must correspond");
-      __ bind(L);
+      __ bind(L3);
     }
 #endif
 
@@ -450,15 +454,20 @@
 #ifdef ASSERT
     // verify that threads correspond
     {
-      Label L, S;
+      Label L1, L2, L3;
       __ cmpptr(r15_thread, thread);
-      __ jcc(Assembler::notEqual, S);
+      __ jcc(Assembler::equal, L1);
+      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
+      __ bind(L1);
       __ get_thread(rbx);
+      __ cmpptr(r15_thread, thread);
+      __ jcc(Assembler::equal, L2);
+      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
+      __ bind(L2);
       __ cmpptr(r15_thread, rbx);
-      __ jcc(Assembler::equal, L);
-      __ bind(S);
+      __ jcc(Assembler::equal, L3);
       __ stop("StubRoutines::catch_exception: threads must correspond");
-      __ bind(L);
+      __ bind(L3);
     }
 #endif
 
@@ -1244,7 +1253,7 @@
            __ popa();
         }
          break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -1284,7 +1293,7 @@
           __ popa();
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
--- a/hotspot/src/cpu/x86/vm/templateTable_x86.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -200,7 +200,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (val == noreg) {
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -1425,7 +1425,7 @@
       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       // No pre barriers
       break;
@@ -1445,7 +1445,7 @@
       G1SATBCardTableModRef_post_barrier(addr,  new_val);
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       CardTableModRef_post_barrier(addr,  new_val);
       break;
--- a/hotspot/src/share/vm/gc/g1/g1Allocator.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1Allocator.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -367,7 +367,7 @@
   _max = _bottom + HeapRegion::min_region_size_in_words();
 
   // Tell mark-sweep that objects in this region are not to be marked.
-  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
+  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);
 
   // Since we've modified the old set, call update_sizes.
   _g1h->g1mm()->update_sizes();
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -65,6 +65,7 @@
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
+#include "runtime/init.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -949,6 +950,7 @@
 }
 
 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
   MutexLockerEx x(Heap_lock);
@@ -1037,12 +1039,13 @@
     }
 
     // Notify mark-sweep of the archive range.
-    G1MarkSweep::mark_range_archive(curr_range);
+    G1MarkSweep::set_range_archive(curr_range, true);
   }
   return true;
 }
 
 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
   MemRegion reserved = _hrm.reserved();
@@ -1125,6 +1128,81 @@
   return result;
 }
 
+void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  HeapWord* prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+  size_t size_used = 0;
+  size_t uncommitted_regions = 0;
+
+  // For each MemRegion, free the G1 regions that constitute it, and
+  // notify mark-sweep that the range is no longer to be considered 'archive.'
+  MutexLockerEx x(Heap_lock);
+  for (size_t i = 0; i < count; i++) {
+    HeapWord* start_address = ranges[i].start();
+    HeapWord* last_address = ranges[i].last();
+
+    assert(reserved.contains(start_address) && reserved.contains(last_address),
+           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                   p2i(start_address), p2i(last_address)));
+    assert(start_address > prev_last_addr,
+           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+                   p2i(start_address), p2i(prev_last_addr)));
+    size_used += ranges[i].byte_size();
+    prev_last_addr = last_address;
+
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+
+    // Check for ranges that start in the same G1 region in which the previous
+    // range ended, and adjust the start address so we don't try to free
+    // the same region again. If the current range is entirely within that
+    // region, skip it.
+    if (start_region == prev_last_region) {
+      start_address = start_region->end();
+      if (start_address > last_address) {
+        continue;
+      }
+      start_region = _hrm.addr_to_region(start_address);
+    }
+    prev_last_region = last_region;
+
+    // After verifying that each region was marked as an archive region by
+    // alloc_archive_regions, set it free and empty and uncommit it.
+    HeapRegion* curr_region = start_region;
+    while (curr_region != NULL) {
+      guarantee(curr_region->is_archive(),
+                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
+      uint curr_index = curr_region->hrm_index();
+      _old_set.remove(curr_region);
+      curr_region->set_free();
+      curr_region->set_top(curr_region->bottom());
+      if (curr_region != last_region) {
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region = NULL;
+      }
+      _hrm.shrink_at(curr_index, 1);
+      uncommitted_regions++;
+    }
+
+    // Notify mark-sweep that this is no longer an archive range.
+    G1MarkSweep::set_range_archive(ranges[i], false);
+  }
+
+  if (uncommitted_regions != 0) {
+    ergo_verbose1(ErgoHeapSizing,
+                  "attempt heap shrinking",
+                  ergo_format_reason("uncommitted archive regions")
+                  ergo_format_byte("total size"),
+                  HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
+  }
+  decrease_used(size_used);
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
@@ -4051,7 +4129,9 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
+        g1_policy()->finalize_cset(target_pause_time_ms);
+
+        evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
 
         register_humongous_regions_with_cset();
 
@@ -4175,7 +4255,10 @@
         // investigate this in CR 7178365.
         double sample_end_time_sec = os::elapsedTime();
         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
-        g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
+        g1_policy()->record_collection_pause_end(pause_time_ms);
+
+        evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
+        evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
 
         MemoryService::track_memory_usage();
 
@@ -4501,8 +4584,7 @@
                  bool only_young, bool claim)
         : _oop_closure(oop_closure),
           _oop_in_klass_closure(oop_closure->g1(),
-                                oop_closure->pss(),
-                                oop_closure->rp()),
+                                oop_closure->pss()),
           _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
           _claim(claim) {
 
@@ -4531,18 +4613,18 @@
       bool only_young = _g1h->collector_state()->gcs_are_young();
 
       // Non-IM young GC.
-      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, pss, rp);
+      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, pss);
       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
                                                                                only_young, // Only process dirty klasses.
                                                                                false);     // No need to claim CLDs.
       // IM young GC.
       //    Strong roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, pss, rp);
+      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, pss);
       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
                                                                                false, // Process all klasses.
                                                                                true); // Need to claim CLDs.
       //    Weak roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss, rp);
+      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss);
       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
                                                                                     false, // Process all klasses.
                                                                                     true); // Need to claim CLDs.
@@ -5241,9 +5323,9 @@
     G1ParScanThreadState*           pss = _pss[worker_id];
     pss->set_ref_processor(NULL);
 
-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss, NULL);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
 
     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
@@ -5341,9 +5423,9 @@
     pss->set_ref_processor(NULL);
     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
 
-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss, NULL);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
 
     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
@@ -5451,9 +5533,9 @@
   // closures while we're actually processing the discovered
   // reference objects.
 
-  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss, NULL);
-
-  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss, NULL);
+  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss);
+
+  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss);
 
   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -757,6 +757,12 @@
   // alloc_archive_regions, and after class loading has occurred.
   void fill_archive_regions(MemRegion* range, size_t count);
 
+  // For each of the specified MemRegions, uncommit the containing G1 regions
+  // which had been allocated by alloc_archive_regions. This should be called
+  // rather than fill_archive_regions at JVM init time if the archive file
+  // mapping failed, with the same non-overlapping and sorted MemRegion array.
+  void dealloc_archive_regions(MemRegion* range, size_t count);
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -932,7 +932,7 @@
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
 
-void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
+void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
   double end_time_sec = os::elapsedTime();
   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
          "otherwise, the subtraction below does not make sense");
@@ -964,9 +964,6 @@
   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                           end_time_sec, _g1->gc_tracer_stw()->gc_id());
 
-  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
-  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
-
   if (update_stats) {
     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
     // this is where we update the allocation rate of the application
@@ -1883,7 +1880,7 @@
 }
 
 
-void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
+void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   double young_start_time_sec = os::elapsedTime();
 
   YoungList* young_list = _g1->young_list();
@@ -2093,7 +2090,6 @@
 
   double non_young_end_time_sec = os::elapsedTime();
   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
-  evacuation_info.set_collectionset_regions(cset_region_length());
 }
 
 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -634,13 +634,11 @@
   virtual HeapWord* satisfy_failed_allocation(size_t size,
                                               bool is_tlab);
 
-  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
-
   bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
 
   // Record the start and end of an evacuation pause.
   void record_collection_pause_start(double start_time_sec);
-  void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
+  void record_collection_pause_end(double pause_time_ms);
 
   // Record the start and end of a full collection.
   void record_full_collection_start();
@@ -682,6 +680,10 @@
     return _bytes_copied_during_gc;
   }
 
+  size_t collection_set_bytes_used_before() const {
+    return _collection_set_bytes_used_before;
+  }
+
   // Determine whether there are candidate regions so that the
   // next GC should be mixed. The two action strings are used
   // in the ergo output when the method returns true or false.
@@ -691,7 +693,7 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
+  void finalize_cset(double target_pause_time_ms);
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -310,9 +310,9 @@
                                  HeapRegion::GrainBytes);
 }
 
-void G1MarkSweep::mark_range_archive(MemRegion range) {
+void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
   assert(_archive_check_enabled, "archive range check not enabled");
-  _archive_region_map.set_by_address(range, true);
+  _archive_region_map.set_by_address(range, is_archive);
 }
 
 bool G1MarkSweep::in_archive_range(oop object) {
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -58,8 +58,8 @@
   // Create the _archive_region_map which is used to identify archive objects.
   static void enable_archive_object_check();
 
-  // Mark the regions containing the specified address range as archive regions.
-  static void mark_range_archive(MemRegion range);
+  // Set the regions containing the specified address range as archive/non-archive.
+  static void set_range_archive(MemRegion range, bool is_archive);
 
   // Check if an object is in an archive region using the _archive_region_map.
   static bool in_archive_range(oop object);
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -125,8 +125,7 @@
   template <class T> void do_oop_work(T* p);
 
 public:
-  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
-                   ReferenceProcessor* rp) :
+  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
       G1ParCopyHelper(g1, par_scan_state) {
     assert(_ref_processor == NULL, "sanity");
   }
@@ -141,7 +140,6 @@
 
   G1CollectedHeap*      g1()  { return _g1; };
   G1ParScanThreadState* pss() { return _par_scan_state; }
-  ReferenceProcessor*   rp()  { return _ref_processor; };
 };
 
 typedef G1ParCopyClosure<G1BarrierNone,  G1MarkNone>             G1ParScanExtRootClosure;
--- a/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -426,7 +426,7 @@
       (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
     uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
 
-    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
+    shrink_at(idx_last_found + num_last_found - to_remove, to_remove);
 
     cur = idx_last_found;
     removed += to_remove;
@@ -437,6 +437,17 @@
   return removed;
 }
 
+void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
+#ifdef ASSERT
+  for (uint i = index; i < (index + num_regions); i++) {
+    assert(is_available(i), err_msg("Expected available region at index %u", i));
+    assert(at(i)->is_empty(), err_msg("Expected empty region at index %u", i));
+    assert(at(i)->is_free(), err_msg("Expected free region at index %u", i));
+  }
+#endif
+  uncommit_regions(index, num_regions);
+}
+
 uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
   guarantee(start_idx < _allocated_heapregions_length, "checking");
   guarantee(res_idx != NULL, "checking");
--- a/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -241,6 +241,10 @@
   // Return the actual number of uncommitted regions.
   uint shrink_by(uint num_regions_to_remove);
 
+  // Uncommit a number of regions starting at the specified index; the regions must be
+  // available, empty, and free.
+  void shrink_at(uint index, size_t num_regions);
+
   void verify();
 
   // Do some sanity checking.
--- a/hotspot/src/share/vm/gc/parallel/cardTableExtension.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/parallel/cardTableExtension.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -56,13 +56,7 @@
   CardTableExtension(MemRegion whole_heap) :
     CardTableModRefBS(
       whole_heap,
-      // Concrete tag should be BarrierSet::CardTableExtension.
-      // That will presently break things in a bunch of places though.
-      // The concrete tag is used as a dispatch key in many places, and
-      // CardTableExtension does not correctly dispatch in some of those
-      // uses. This will be addressed as part of a reorganization of the
-      // BarrierSet hierarchy.
-      BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableExtension))
+      BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
     { }
 
   // Scavenge support
--- a/hotspot/src/share/vm/gc/shared/barrierSet.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/barrierSet.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -132,6 +132,9 @@
   // First the pre-write versions...
   template <class T> inline void write_ref_field_pre(T* field, oop new_val);
 private:
+  // Helper for write_ref_field_pre and friends, testing for specialized cases.
+  bool devirtualize_reference_writes() const;
+
   // Keep this private so as to catch violations at build time.
   virtual void write_ref_field_pre_work(     void* field, oop new_val) { guarantee(false, "Not needed"); };
 protected:
--- a/hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -32,8 +32,18 @@
 // performance-critical calls when the barrier is the most common
 // card-table kind.
 
+inline bool BarrierSet::devirtualize_reference_writes() const {
+  switch (kind()) {
+  case CardTableForRS:
+  case CardTableExtension:
+    return true;
+  default:
+    return false;
+  }
+}
+
 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
-  if (kind() == CardTableModRef) {
+  if (devirtualize_reference_writes()) {
     barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field_pre(field, new_val);
   } else {
     write_ref_field_pre_work(field, new_val);
@@ -41,7 +51,7 @@
 }
 
 void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
-  if (kind() == CardTableModRef) {
+  if (devirtualize_reference_writes()) {
     barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field(field, new_val, release);
   } else {
     write_ref_field_work(field, new_val, release);
@@ -77,7 +87,7 @@
 
 
 inline void BarrierSet::write_region(MemRegion mr) {
-  if (kind() == CardTableModRef) {
+  if (devirtualize_reference_writes()) {
     barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
   } else {
     write_region_work(mr);
--- a/hotspot/src/share/vm/gc/shared/cardTableModRefBSForCTRS.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBSForCTRS.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -31,13 +31,7 @@
 CardTableModRefBSForCTRS::CardTableModRefBSForCTRS(MemRegion whole_heap) :
   CardTableModRefBS(
     whole_heap,
-    // Concrete tag should be BarrierSet::CardTableForRS.
-    // That will presently break things in a bunch of places though.
-    // The concrete tag is used as a dispatch key in many places, and
-    // CardTableForRS does not correctly dispatch in some of those
-    // uses. This will be addressed as part of a reorganization of the
-    // BarrierSet hierarchy.
-    BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableForRS)),
+    BarrierSet::FakeRtti(BarrierSet::CardTableForRS)),
   // LNC functionality
   _lowest_non_clean(NULL),
   _lowest_non_clean_chunk_size(NULL),
--- a/hotspot/src/share/vm/gc/shared/collectorPolicy.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/collectorPolicy.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -150,8 +150,6 @@
 #endif // INCLUDE_ALL_GCS
 
 
-  virtual BarrierSet::Name barrier_set_name() = 0;
-
   virtual GenRemSet* create_rem_set(MemRegion reserved);
 
   // This method controls how a collector satisfies a request
@@ -299,8 +297,6 @@
     assert(_max_young_size == MaxNewSize, "Should be taken care of by initialize_size_info");
   }
 
-  BarrierSet::Name barrier_set_name()  { return BarrierSet::CardTableModRef; }
-
   virtual CollectorPolicy::Name kind() {
     return CollectorPolicy::GenCollectorPolicyKind;
   }
--- a/hotspot/src/share/vm/memory/filemap.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -707,12 +707,16 @@
                                     addr, string_ranges[i].byte_size(), si->_read_only,
                                     si->_allow_exec);
         if (base == NULL || base != addr) {
+          // dealloc the string regions from java heap
+          dealloc_string_regions();
           fail_continue("Unable to map shared string space at required address.");
           return false;
         }
       }
 
       if (!verify_string_regions()) {
+        // dealloc the string regions from java heap
+        dealloc_string_regions();
         fail_continue("Shared string regions are corrupt");
         return false;
       }
@@ -745,12 +749,14 @@
 }
 
 void FileMapInfo::fixup_string_regions() {
+#if INCLUDE_ALL_GCS
   // If any string regions were found, call the fill routine to make them parseable.
   // Note that string_ranges may be non-NULL even if no ranges were found.
   if (num_ranges != 0) {
     assert(string_ranges != NULL, "Null string_ranges array with non-zero count");
     G1CollectedHeap::heap()->fill_archive_regions(string_ranges, num_ranges);
   }
+#endif
 }
 
 bool FileMapInfo::verify_region_checksum(int i) {
@@ -793,20 +799,14 @@
   }
 }
 
-void FileMapInfo::unmap_string_regions() {
-  for (int i = MetaspaceShared::first_string;
-           i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
-    struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
-    size_t used = si->_used;
-    if (used > 0) {
-      size_t size = align_size_up(used, os::vm_allocation_granularity());
-      char* addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
-                                             (narrowOop)si->_addr._offset));
-      if (!os::unmap_memory(addr, size)) {
-        fail_stop("Unable to unmap shared space.");
-      }
-    }
+// dealloc the archived string region from java heap
+void FileMapInfo::dealloc_string_regions() {
+#if INCLUDE_ALL_GCS
+  if (num_ranges > 0) {
+    assert(string_ranges != NULL, "Null string_ranges array with non-zero count");
+    G1CollectedHeap::heap()->dealloc_archive_regions(string_ranges, num_ranges);
   }
+#endif
 }
 
 void FileMapInfo::assert_mark(bool check) {
@@ -967,7 +967,9 @@
         map_info->_header->_space[i]._addr._base = NULL;
       }
     }
-    map_info->unmap_string_regions();
+    // Dealloc the string regions only, without unmapping. The string regions are part
+    // of the Java heap. Unmapping of the heap regions is managed by the GC.
+    map_info->dealloc_string_regions();
   } else if (DumpSharedSpaces) {
     fail_stop("%s", msg);
   }
--- a/hotspot/src/share/vm/memory/filemap.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/memory/filemap.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -208,7 +208,7 @@
   bool  verify_string_regions();
   void  fixup_string_regions();
   void  unmap_region(int i);
-  void  unmap_string_regions();
+  void  dealloc_string_regions();
   bool  verify_region_checksum(int i);
   void  close();
   bool  is_open() { return _file_open; }
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -1522,7 +1522,7 @@
       g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
       break;
 
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
     case BarrierSet::ModRef:
       break;
@@ -1539,7 +1539,7 @@
     case BarrierSet::G1SATBCTLogging:
       return true; // Can move it if no safepoint
 
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
     case BarrierSet::ModRef:
       return true; // There is no pre-barrier
@@ -1565,7 +1565,7 @@
       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
       break;
 
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
       break;
@@ -3791,7 +3791,7 @@
   Node* cast = __ CastPX(__ ctrl(), adr);
 
   // Divide by card size
-  assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
+  assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef),
          "Only one we handle so far.");
   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
 
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -3482,7 +3482,7 @@
 
   for (SystemProperty* p = Arguments::system_properties(); p != NULL; p = p->next()) {
     if (strcmp(property, p->key()) == 0) {
-      if (p->set_value((char *)value_ptr)) {
+      if (p->set_value(value_ptr)) {
         err =  JVMTI_ERROR_NONE;
       }
     }
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -983,53 +983,61 @@
 
 bool Arguments::add_property(const char* prop) {
   const char* eq = strchr(prop, '=');
-  char* key;
-  // ns must be static--its address may be stored in a SystemProperty object.
-  const static char ns[1] = {0};
-  char* value = (char *)ns;
-
-  size_t key_len = (eq == NULL) ? strlen(prop) : (eq - prop);
-  key = AllocateHeap(key_len + 1, mtInternal);
-  strncpy(key, prop, key_len);
-  key[key_len] = '\0';
-
-  if (eq != NULL) {
-    size_t value_len = strlen(prop) - key_len - 1;
-    value = AllocateHeap(value_len + 1, mtInternal);
-    strncpy(value, &prop[key_len + 1], value_len + 1);
+  const char* key;
+  const char* value = "";
+
+  if (eq == NULL) {
+    // The property doesn't have a value, so use the passed string as the key
+    key = prop;
+  } else {
+    // The property has a value, so extract the key and copy it into a
+    // newly allocated string
+    size_t key_len = eq - prop;
+    char* tmp_key = AllocateHeap(key_len + 1, mtInternal);
+
+    strncpy(tmp_key, prop, key_len);
+    tmp_key[key_len] = '\0';
+    key = tmp_key;
+
+    value = &prop[key_len + 1];
   }
 
   if (strcmp(key, "java.compiler") == 0) {
     process_java_compiler_argument(value);
-    FreeHeap(key);
-    if (eq != NULL) {
-      FreeHeap(value);
-    }
-    return true;
-  } else if (strcmp(key, "sun.java.command") == 0) {
-    _java_command = value;
-
     // Record value in Arguments, but let it get passed to Java.
   } else if (strcmp(key, "sun.java.launcher.is_altjvm") == 0 ||
              strcmp(key, "sun.java.launcher.pid") == 0) {
     // sun.java.launcher.is_altjvm and sun.java.launcher.pid property are
     // private and are processed in process_sun_java_launcher_properties();
     // the sun.java.launcher property is passed on to the java application
-    FreeHeap(key);
-    if (eq != NULL) {
-      FreeHeap(value);
-    }
-    return true;
-  } else if (strcmp(key, "java.vendor.url.bug") == 0) {
-    // save it in _java_vendor_url_bug, so JVM fatal error handler can access
-    // its value without going through the property list or making a Java call.
-    _java_vendor_url_bug = value;
   } else if (strcmp(key, "sun.boot.library.path") == 0) {
     PropertyList_unique_add(&_system_properties, key, value, true);
-    return true;
+  } else {
+    if (strcmp(key, "sun.java.command") == 0) {
+      if (_java_command != NULL) {
+        os::free(_java_command);
+      }
+      _java_command = os::strdup_check_oom(value, mtInternal);
+    } else if (strcmp(key, "java.vendor.url.bug") == 0) {
+      if (_java_vendor_url_bug != DEFAULT_VENDOR_URL_BUG) {
+        assert(_java_vendor_url_bug != NULL, "_java_vendor_url_bug is NULL");
+        os::free((void *)_java_vendor_url_bug);
+      }
+      // save it in _java_vendor_url_bug, so JVM fatal error handler can access
+      // its value without going through the property list or making a Java call.
+      _java_vendor_url_bug = os::strdup_check_oom(value, mtInternal);
+    }
+
+    // Create new property and add at the end of the list
+    PropertyList_unique_add(&_system_properties, key, value);
   }
-  // Create new property and add at the end of the list
-  PropertyList_unique_add(&_system_properties, key, value);
+
+  if (key != prop) {
+    // SystemProperty copies the passed value, so free the previously
+    // allocated memory
+    FreeHeap((void *)key);
+  }
+
   return true;
 }
 
@@ -1046,7 +1054,7 @@
   // Ensure Agent_OnLoad has the correct initial values.
   // This may not be the final mode; mode may change later in onload phase.
   PropertyList_unique_add(&_system_properties, "java.vm.info",
-                          (char*)VM_Version::vm_info_string(), false);
+                          VM_Version::vm_info_string(), false);
 
   UseInterpreter             = true;
   UseCompiler                = true;
@@ -1858,7 +1866,7 @@
 }
 
 // Aggressive optimization flags  -XX:+AggressiveOpts
-void Arguments::set_aggressive_opts_flags() {
+jint Arguments::set_aggressive_opts_flags() {
 #ifdef COMPILER2
   if (AggressiveUnboxing) {
     if (FLAG_IS_DEFAULT(EliminateAutoBox)) {
@@ -1885,7 +1893,9 @@
     // Feed the cache size setting into the JDK
     char buffer[1024];
     sprintf(buffer, "java.lang.Integer.IntegerCache.high=" INTX_FORMAT, AutoBoxCacheMax);
-    add_property(buffer);
+    if (!add_property(buffer)) {
+      return JNI_ENOMEM;
+    }
   }
   if (AggressiveOpts && FLAG_IS_DEFAULT(BiasedLockingStartupDelay)) {
     FLAG_SET_DEFAULT(BiasedLockingStartupDelay, 500);
@@ -1898,12 +1908,14 @@
 //      FLAG_SET_DEFAULT(EliminateZeroing, true);
 //    }
   }
+
+  return JNI_OK;
 }
 
 //===========================================================================================================
 // Parsing of java.compiler property
 
-void Arguments::process_java_compiler_argument(char* arg) {
+void Arguments::process_java_compiler_argument(const char* arg) {
   // For backwards compatibility, Djava.compiler=NONE or ""
   // causes us to switch to -Xint mode UNLESS -Xdebug
   // is also specified.
@@ -3870,7 +3882,10 @@
   set_bytecode_flags();
 
   // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled
-  set_aggressive_opts_flags();
+  jint code = set_aggressive_opts_flags();
+  if (code != JNI_OK) {
+    return code;
+  }
 
   // Turn off biased locking for locking debug mode flags,
   // which are subtly different from each other but neither works with
@@ -4036,7 +4051,7 @@
   }
 }
 
-void Arguments::PropertyList_add(SystemProperty** plist, const char* k, char* v) {
+void Arguments::PropertyList_add(SystemProperty** plist, const char* k, const char* v) {
   if (plist == NULL)
     return;
 
@@ -4049,7 +4064,7 @@
 }
 
 // This add maintains unique property key in the list.
-void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, char* v, jboolean append) {
+void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, const char* v, jboolean append) {
   if (plist == NULL)
     return;
 
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Mon Aug 31 21:46:33 2015 +0300
@@ -60,7 +60,7 @@
   char* value() const                       { return _value; }
   SystemProperty* next() const              { return _next; }
   void set_next(SystemProperty* next)       { _next = next; }
-  bool set_value(char *value) {
+  bool set_value(const char *value) {
     if (writeable()) {
       if (_value != NULL) {
         FreeHeap(_value);
@@ -364,14 +364,14 @@
   static bool add_property(const char* prop);
 
   // Aggressive optimization flags.
-  static void set_aggressive_opts_flags();
+  static jint set_aggressive_opts_flags();
 
   // Argument parsing
   static void do_pd_flag_adjustments();
   static bool parse_argument(const char* arg, Flag::Flags origin);
   static bool process_argument(const char* arg, jboolean ignore_unrecognized, Flag::Flags origin);
   static void process_java_launcher_argument(const char*, void*);
-  static void process_java_compiler_argument(char* arg);
+  static void process_java_compiler_argument(const char* arg);
   static jint parse_options_environment_variable(const char* name, ScopedVMInitArgs* vm_args);
   static jint parse_java_tool_options_environment_variable(ScopedVMInitArgs* vm_args);
   static jint parse_java_options_environment_variable(ScopedVMInitArgs* vm_args);
@@ -561,22 +561,22 @@
   // Property List manipulation
   static void PropertyList_add(SystemProperty *element);
   static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
-  static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
-  static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v) {
+  static void PropertyList_add(SystemProperty** plist, const char* k, const char* v);
+  static void PropertyList_unique_add(SystemProperty** plist, const char* k, const char* v) {
     PropertyList_unique_add(plist, k, v, false);
   }
-  static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v, jboolean append);
+  static void PropertyList_unique_add(SystemProperty** plist, const char* k, const char* v, jboolean append);
   static const char* PropertyList_get_value(SystemProperty* plist, const char* key);
   static int  PropertyList_count(SystemProperty* pl);
   static const char* PropertyList_get_key_at(SystemProperty* pl,int index);
   static char* PropertyList_get_value_at(SystemProperty* pl,int index);
 
   // Miscellaneous System property value getter and setters.
-  static void set_dll_dir(char *value) { _sun_boot_library_path->set_value(value); }
-  static void set_java_home(char *value) { _java_home->set_value(value); }
-  static void set_library_path(char *value) { _java_library_path->set_value(value); }
+  static void set_dll_dir(const char *value) { _sun_boot_library_path->set_value(value); }
+  static void set_java_home(const char *value) { _java_home->set_value(value); }
+  static void set_library_path(const char *value) { _java_library_path->set_value(value); }
   static void set_ext_dirs(char *value)     { _ext_dirs = os::strdup_check_oom(value); }
-  static void set_sysclasspath(char *value) { _sun_boot_class_path->set_value(value); }
+  static void set_sysclasspath(const char *value) { _sun_boot_class_path->set_value(value); }
   static void append_sysclasspath(const char *value) { _sun_boot_class_path->append_value(value); }
 
   static char* get_java_home() { return _java_home->value(); }
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -2253,6 +2253,7 @@
                                                                           \
   declare_constant(BarrierSet::ModRef)                                    \
   declare_constant(BarrierSet::CardTableModRef)                           \
+  declare_constant(BarrierSet::CardTableForRS)                            \
   declare_constant(BarrierSet::CardTableExtension)                        \
   declare_constant(BarrierSet::G1SATBCT)                                  \
   declare_constant(BarrierSet::G1SATBCTLogging)                           \
--- a/hotspot/src/share/vm/shark/sharkBuilder.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/shark/sharkBuilder.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -440,8 +440,10 @@
 // HotSpot memory barriers
 
 void SharkBuilder::CreateUpdateBarrierSet(BarrierSet* bs, Value* field) {
-  if (bs->kind() != BarrierSet::CardTableModRef)
+  if (bs->kind() != BarrierSet::CardTableForRS &&
+      bs->kind() != BarrierSet::CardTableExtension) {
     Unimplemented();
+  }
 
   CreateStore(
     LLVMValue::jbyte_constant(CardTableModRefBS::dirty_card_val()),
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Mon Aug 31 21:46:33 2015 +0300
@@ -231,7 +231,7 @@
 
   if (signame) {
     jio_snprintf(buf, buflen,
-                 "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" INTPTR_FORMAT,
+                 "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" UINTX_FORMAT,
                  signame, _id, _pc,
                  os::current_process_id(), os::current_thread_id());
   } else if (_filename != NULL && _lineno > 0) {
@@ -239,7 +239,7 @@
     char separator = os::file_separator()[0];
     const char *p = strrchr(_filename, separator);
     int n = jio_snprintf(buf, buflen,
-                         "Internal Error at %s:%d, pid=%d, tid=" INTPTR_FORMAT,
+                         "Internal Error at %s:%d, pid=%d, tid=" UINTX_FORMAT,
                          p ? p + 1 : _filename, _lineno,
                          os::current_process_id(), os::current_thread_id());
     if (n >= 0 && n < buflen && _message) {
@@ -253,7 +253,7 @@
     }
   } else {
     jio_snprintf(buf, buflen,
-                 "Internal Error (0x%x), pid=%d, tid=" INTPTR_FORMAT,
+                 "Internal Error (0x%x), pid=%d, tid=" UINTX_FORMAT,
                  _id, os::current_process_id(), os::current_thread_id());
   }
 
@@ -486,7 +486,7 @@
 
      // process id, thread id
      st->print(", pid=%d", os::current_process_id());
-     st->print(", tid=" INTPTR_FORMAT, os::current_thread_id());
+     st->print(", tid=" UINTX_FORMAT, os::current_thread_id());
      st->cr();
 
   STEP(80, "(printing error message)")
--- a/hotspot/test/compiler/arguments/CheckCICompilerCount.java	Thu Aug 27 14:40:19 2015 -0700
+++ b/hotspot/test/compiler/arguments/CheckCICompilerCount.java	Mon Aug 31 21:46:33 2015 +0300
@@ -75,7 +75,7 @@
             "intx CICompilerCount                          := 1                                   {product}"
         },
         {
-            "CICompilerCount=0 must be at least 1",
+            "CICompilerCount (0) must be at least 1",
             "Improperly specified VM option 'CICompilerCount=0'"
         },
         {
@@ -130,7 +130,7 @@
             "intx CICompilerCount                          := 2                                   {product}"
         },
         {
-            "CICompilerCount=1 must be at least 2",
+            "CICompilerCount (1) must be at least 2",
             "Improperly specified VM option 'CICompilerCount=1'"
         },
         {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/humongousObjects/Helpers.java	Mon Aug 31 21:46:33 2015 +0300
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package gc.g1.humongousObjects;
+
+import sun.hotspot.WhiteBox;
+
+public class Helpers {
+
+    // Assume the object alignment padding is at most 128 bytes
+    private static final int MAX_PADDING_SIZE = 128;
+
+    /**
+     * Detects amount of extra bytes required to allocate a byte array.
+     * Allocating a byte[n] array takes more then just n bytes in the heap.
+     * Extra bytes are required to store object reference and the length.
+     * This amount depends on bitness and other factors.
+     *
+     * @return byte[] memory overhead
+     */
+    public static int detectByteArrayAllocationOverhead() {
+
+        WhiteBox whiteBox = WhiteBox.getWhiteBox();
+
+        int zeroLengthByteArraySize = (int) whiteBox.getObjectSize(new byte[0]);
+
+        // Since we do not know whether there is any padding in zeroLengthByteArraySize, we cannot just take the byte[0] size as the overhead
+        for (int i = 1; i < MAX_PADDING_SIZE + 1; ++i) {
+            int realAllocationSize = (int) whiteBox.getObjectSize(new byte[i]);
+            if (realAllocationSize != zeroLengthByteArraySize) {
+                // It means we did not have any padding on the previous step
+                return zeroLengthByteArraySize - (i - 1);
+            }
+        }
+        throw new Error("We cannot find byte[] memory overhead - should not reach here");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/humongousObjects/TestHumongousThreshold.java	Mon Aug 31 21:46:33 2015 +0300
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package gc.g1.humongousObjects;
+
+import jdk.test.lib.Asserts;
+import sun.hotspot.WhiteBox;
+
+/**
+ * @test TestHumongousThreshold
+ * @summary Checks that objects larger than half a region are allocated as humongous
+ * @requires vm.gc=="G1" | vm.gc=="null"
+ * @library /testlibrary /../../test/lib
+ * @modules java.management
+ * @build sun.hotspot.WhiteBox
+ *        gc.g1.humongousObjects.Helpers
+ *        gc.g1.humongousObjects.TestHumongousThreshold
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=1M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=2M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=4M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=8M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=16M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=32M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ */
+
+public class TestHumongousThreshold {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final int REGION_SIZE = WHITE_BOX.g1RegionSize();
+    private static final int MAX_CONTINUOUS_SIZE_CHECK = 129;
+    private static final int NON_HUMONGOUS_DIVIDER = 10;
+
+    /**
+     * The method allocates a byte[] of the specified size and checks that:
+     * 1. the byte[] is allocated as specified in expectedHumongous;
+     * 2. the byte[] is allocated as humongous if its size is larger than half of a region and as non-humongous otherwise.
+     * It uses the WhiteBox API to obtain the size of the created byte[]. Only objects larger than half of a region
+     * are expected to be humongous.
+     *
+     * @param arraySize size of allocation
+     * @param expectedHumongous expected humongous/non-humongous allocation
+     * @return allocated byte array
+     */
+
+    private static byte[] allocateAndCheck(int arraySize, boolean expectedHumongous) {
+        byte[] storage = new byte[arraySize];
+        long objectSize = WHITE_BOX.getObjectSize(storage);
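+        // In G1, an object is expected to be allocated as humongous when its size exceeds half of the heap region size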
+        boolean shouldBeHumongous = objectSize > (REGION_SIZE / 2);
+
+        Asserts.assertEquals(expectedHumongous, shouldBeHumongous, "We expected this object to be "
+                + (expectedHumongous ? "humongous" : "non-humongous") + " but the object size check shows otherwise "
+                + "- likely a test bug; Allocation size = " + arraySize + "; Object size = " + objectSize
+                + "; region size = " + REGION_SIZE);
+
+        Asserts.assertEquals(WHITE_BOX.g1IsHumongous(storage), shouldBeHumongous,
+                "Object should be allocated as " + (shouldBeHumongous ? "humongous"
+                        : "non-humongous") + " but it wasn't; Allocation size = " + arraySize + "; Object size = "
+                        + objectSize + "; region size = " + REGION_SIZE);
+        return storage;
+    }
+
+    public static void main(String[] args) {
+        int byteArrayMemoryOverhead = Helpers.detectByteArrayAllocationOverhead();
+
+        // Largest non-humongous byte[]
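+        // The resulting object should occupy exactly half of a region; only objects strictly larger than
+        // half of a region are humongous, so this is still a non-humongous allocation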
+        int maxByteArrayNonHumongousSize = (REGION_SIZE / 2) - byteArrayMemoryOverhead;
+
+        // Increment for non-humongous testing
+        int nonHumongousStep = maxByteArrayNonHumongousSize / NON_HUMONGOUS_DIVIDER;
+
+        // Maximum byte[] that takes one region
+        int maxByteArrayOneRegionSize = REGION_SIZE - byteArrayMemoryOverhead;
+
+        // Sizes in regions
+        // i.e. 1.0f means one region, 1.5f means one and a half regions, etc.
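+        // Even the smallest factor (0.8f of a one-region-sized byte[]) is well above half of a region,
+        // so all of these allocations are expected to be humongous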
+        float[] humongousFactors = {0.8f, 1.0f, 1.2f, 1.5f, 1.7f, 2.0f, 2.5f};
+
+        // Some diagnostic output
+        System.out.format("%s started%n", TestHumongousThreshold.class.getName());
+        System.out.format("Actual G1 region size %d%n", REGION_SIZE);
+        System.out.format("byte[] memory overhead %d%n", byteArrayMemoryOverhead);
+
+        // Non-humongous allocations
+        System.out.format("Doing non-humongous allocations%n");
+
+        // Testing allocations of byte[] with lengths from 0 up to MAX_CONTINUOUS_SIZE_CHECK
+        System.out.format("Testing allocations of byte[] with lengths from 0 up to %d%n", MAX_CONTINUOUS_SIZE_CHECK);
+        for (int i = 0; i < MAX_CONTINUOUS_SIZE_CHECK; ++i) {
+            allocateAndCheck(i, false);
+        }
+
+        // Testing allocations of byte[] with lengths from 0 up to nonHumongousStep * NON_HUMONGOUS_DIVIDER
+        System.out.format("Testing allocations of byte[] with lengths from 0 up to %d in steps of %d%n",
+                nonHumongousStep * NON_HUMONGOUS_DIVIDER, nonHumongousStep);
+        for (int i = 0; i < NON_HUMONGOUS_DIVIDER; ++i) {
+            allocateAndCheck(i * nonHumongousStep, false);
+        }
+
+        // Testing allocation of a byte[] of the maximum non-humongous length
+        System.out.format("Testing allocation of a byte[] of the maximum non-humongous length %d%n",
+                maxByteArrayNonHumongousSize);
+        allocateAndCheck(maxByteArrayNonHumongousSize, false);
+
+        // Humongous allocations
+        System.out.format("Doing humongous allocations%n");
+        // Testing with the minimum humongous object
+        System.out.format("Testing with a byte[] of the minimum humongous length %d%n", maxByteArrayNonHumongousSize + 1);
+        allocateAndCheck(maxByteArrayNonHumongousSize + 1, true);
+
+        // Testing allocations of byte[] with lengths from (maxByteArrayNonHumongousSize + 1) to
+        // (maxByteArrayNonHumongousSize + 1 + MAX_CONTINUOUS_SIZE_CHECK)
+        System.out.format("Testing allocations of byte[] with lengths from %d to %d%n",
+                maxByteArrayNonHumongousSize + 1, maxByteArrayNonHumongousSize + 1 + MAX_CONTINUOUS_SIZE_CHECK);
+        for (int i = 0; i < MAX_CONTINUOUS_SIZE_CHECK; ++i) {
+            allocateAndCheck(maxByteArrayNonHumongousSize + 1 + i, true);
+        }
+
+        // Checking that large objects (larger than half of the region size) are humongous
+        System.out.format("Checking that large objects (larger than half of the region size) are humongous%n");
+        for (float factor : humongousFactors) {
+            allocateAndCheck((int) (maxByteArrayOneRegionSize * factor), true);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/SharedStringsRunAuto.java	Mon Aug 31 21:46:33 2015 +0300
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SharedStringsAuto
+ * @summary Test -Xshare:auto with shared strings.
+ * Feature support: G1GC only, compressed oops/kptrs, 64-bit OS, not on Windows
+ * @requires (sun.arch.data.model != "32") & (os.family != "windows")
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
+ * @requires (vm.gc=="G1" | vm.gc=="null")
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management
+ * @run main SharedStringsRunAuto
+ */
+
+import jdk.test.lib.*;
+import java.io.File;
+
+public class SharedStringsRunAuto {
+    public static void main(String[] args) throws Exception {
+        // Dump
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./SharedStringsRunAuto.jsa",
+            "-XX:+UseCompressedOops", "-XX:+UseG1GC",
+            "-XX:+PrintSharedSpaces",
+            "-Xshare:dump");
+
+        new OutputAnalyzer(pb.start())
+            .shouldContain("Loading classes to share")
+            .shouldContain("Shared string table stats")
+            .shouldHaveExitValue(0);
+
+        // Run with -Xshare:auto
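+        // With -Xshare:auto the VM uses the shared archive when it can be mapped and silently
+        // falls back to a non-shared run otherwise, so this command is expected to succeed either way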
+        pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./SharedStringsRunAuto.jsa",
+           "-XX:+UseCompressedOops", "-XX:+UseG1GC",
+           "-Xshare:auto",
+           "-version");
+
+        new OutputAnalyzer(pb.start())
+            .shouldMatch("(java|openjdk) version")
+            .shouldHaveExitValue(0);
+    }
+}