Merge
author jwilhelm
date Tue, 08 Sep 2015 16:10:37 +0200
changeset 32734 7029a3eb3008
parent 32467 23b29549f8d1 (current diff)
parent 32626 5d1d9327219c (diff)
child 32735 c6063d028c3c
Merge
hotspot/src/share/vm/classfile/imageDecompressor.cpp
hotspot/src/share/vm/classfile/imageDecompressor.hpp
hotspot/src/share/vm/classfile/imageFile.cpp
hotspot/src/share/vm/classfile/imageFile.hpp
hotspot/src/share/vm/utilities/endian.cpp
hotspot/src/share/vm/utilities/endian.hpp
hotspot/test/runtime/modules/ImageFile/ImageAttributeOffsetsTest.java
hotspot/test/runtime/modules/ImageFile/ImageCloseTest.java
hotspot/test/runtime/modules/ImageFile/ImageFileHeaderTest.java
hotspot/test/runtime/modules/ImageFile/ImageFindAttributesTest.java
hotspot/test/runtime/modules/ImageFile/ImageGetAttributesTest.java
hotspot/test/runtime/modules/ImageFile/ImageGetDataAddressTest.java
hotspot/test/runtime/modules/ImageFile/ImageGetIndexAddressTest.java
hotspot/test/runtime/modules/ImageFile/ImageGetStringBytesTest.java
hotspot/test/runtime/modules/ImageFile/ImageOpenTest.java
hotspot/test/runtime/modules/ImageFile/ImageReadTest.java
hotspot/test/runtime/modules/ImageFile/LocationConstants.java
--- a/hotspot/make/aix/makefiles/mapfile-vers-debug	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/make/aix/makefiles/mapfile-vers-debug	Tue Sep 08 16:10:37 2015 +0200
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/hotspot/make/aix/makefiles/mapfile-vers-product	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/make/aix/makefiles/mapfile-vers-product	Tue Sep 08 16:10:37 2015 +0200
@@ -139,18 +139,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug	Tue Sep 08 16:10:37 2015 +0200
@@ -139,18 +139,6 @@
                 _JVM_Halt
                 _JVM_HoldsLock
                 _JVM_IHashCode
-                _JVM_ImageAttributeOffsets
-                _JVM_ImageAttributeOffsetsLength
-                _JVM_ImageClose
-                _JVM_ImageFindAttributes
-                _JVM_ImageGetAttributes
-                _JVM_ImageGetAttributesCount
-                _JVM_ImageGetDataAddress
-                _JVM_ImageGetIndexAddress
-                _JVM_ImageGetStringBytes
-                _JVM_ImageOpen
-                _JVM_ImageRead
-                _JVM_ImageReadCompressed
                 _JVM_InitAgentProperties
                 _JVM_InitProperties
                 _JVM_InternString
--- a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product	Tue Sep 08 16:10:37 2015 +0200
@@ -139,18 +139,6 @@
                 _JVM_Halt
                 _JVM_HoldsLock
                 _JVM_IHashCode
-                _JVM_ImageAttributeOffsets
-                _JVM_ImageAttributeOffsetsLength
-                _JVM_ImageClose
-                _JVM_ImageFindAttributes
-                _JVM_ImageGetAttributes
-                _JVM_ImageGetAttributesCount
-                _JVM_ImageGetDataAddress
-                _JVM_ImageGetIndexAddress
-                _JVM_ImageGetStringBytes
-                _JVM_ImageOpen
-                _JVM_ImageRead
-                _JVM_ImageReadCompressed
                 _JVM_InitAgentProperties
                 _JVM_InitProperties
                 _JVM_InternString
--- a/hotspot/make/bsd/makefiles/mapfile-vers-debug	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-debug	Tue Sep 08 16:10:37 2015 +0200
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/hotspot/make/bsd/makefiles/mapfile-vers-product	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-product	Tue Sep 08 16:10:37 2015 +0200
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/hotspot/make/linux/makefiles/mapfile-vers-debug	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/make/linux/makefiles/mapfile-vers-debug	Tue Sep 08 16:10:37 2015 +0200
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/hotspot/make/linux/makefiles/mapfile-vers-product	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/make/linux/makefiles/mapfile-vers-product	Tue Sep 08 16:10:37 2015 +0200
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/hotspot/make/solaris/makefiles/adlc.make	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/make/solaris/makefiles/adlc.make	Tue Sep 08 16:10:37 2015 +0200
@@ -76,6 +76,11 @@
 ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
   CFLAGS_WARN = +w -errwarn
 endif
+# When using compiler version 5.13 (Solaris Studio 12.4), calls to explicitly 
+# instantiated template functions trigger this warning when +w is active.
+ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 513), 1)
+  CFLAGS_WARN += -erroff=notemsource
+endif
 CFLAGS += $(CFLAGS_WARN)
 
 ifeq ("${Platform_compiler}", "sparcWorks")
--- a/hotspot/make/solaris/makefiles/mapfile-vers	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/make/solaris/makefiles/mapfile-vers	Tue Sep 08 16:10:37 2015 +0200
@@ -141,18 +141,6 @@
                 JVM_Halt;
                 JVM_HoldsLock;
                 JVM_IHashCode;
-                JVM_ImageAttributeOffsets;
-                JVM_ImageAttributeOffsetsLength;
-                JVM_ImageClose;
-                JVM_ImageFindAttributes;
-                JVM_ImageGetAttributes;
-                JVM_ImageGetAttributesCount;
-                JVM_ImageGetDataAddress;
-                JVM_ImageGetIndexAddress;
-                JVM_ImageGetStringBytes;
-                JVM_ImageOpen;
-                JVM_ImageRead;
-                JVM_ImageReadCompressed;
                 JVM_InitAgentProperties;
                 JVM_InitProperties;
                 JVM_InternString;
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -3043,7 +3043,9 @@
   // register obj is destroyed afterwards.
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
+         bs->kind() == BarrierSet::CardTableExtension,
+         "Wrong barrier set kind");
 
   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
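The assert change above, and the switch cases updated throughout the files that follow, track the split of the old BarrierSet::CardTableModRef kind into CardTableForRS and CardTableExtension. Below is a minimal sketch of the dispatch pattern these platform files share; the enum and the emit_* bodies are simplified stand-ins for illustration, not the real HotSpot declarations.

#include <cstdio>

// Simplified stand-in for BarrierSet::Name after the rename; not the HotSpot enum.
enum class BarrierKind { G1SATBCTLogging, CardTableForRS, CardTableExtension, ModRef };

// The stub and template generators in this merge all dispatch like this: the
// single CardTableModRef case from before now has to match two enumerators.
void emit_post_barrier(BarrierKind kind) {
  switch (kind) {
    case BarrierKind::G1SATBCTLogging:
      std::puts("emit G1 dirty-card-queue post barrier");
      break;
    case BarrierKind::CardTableForRS:       // replaces the old CardTableModRef case
    case BarrierKind::CardTableExtension:
      std::puts("emit plain card-table dirty store");
      break;
    case BarrierKind::ModRef:
      break;                                // no post barrier needed
  }
}
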
--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -691,7 +691,7 @@
         __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
         __ pop(RegSet::range(r0, r29), sp);         // integer registers except lr & sp        }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -731,7 +731,7 @@
           __ pop(RegSet::range(r0, r29), sp);         // integer registers except lr & sp        }
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = (CardTableModRefBS*)bs;
--- a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -186,7 +186,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (val == noreg) {
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -2614,7 +2614,7 @@
 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
   CardTableModRefBS* bs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(bs->kind() == BarrierSet::CardTableModRef ||
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
 #ifdef ASSERT
   cmpdi(CCR0, Rnew_val, 0);
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -656,7 +656,7 @@
           __ bind(filtered);
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -697,7 +697,7 @@
           }
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           Label Lskip_loop, Lstore_loop;
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -105,7 +105,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         Label Lnull, Ldone;
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -3958,7 +3958,7 @@
   if (new_val == G0) return;
   CardTableModRefBS* bs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(bs->kind() == BarrierSet::CardTableModRef ||
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
   card_table_write(bs->byte_map_base, tmp, store_addr);
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/memset_with_concurrent_readers.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+#if INCLUDE_ALL_GCS
+
+// An implementation of memset, for use when there may be concurrent
+// readers of the region being stored into.
+//
+// We can't use the standard library memset if it is implemented using
+// block initializing stores.  Doing so can result in concurrent readers
+// seeing spurious zeros.
+//
+// We can't use the obvious C/C++ for-loop, because the compiler may
+// recognize the idiomatic loop and optimize it into a call to the
+// standard library memset; we've seen exactly this happen with, for
+// example, Solaris Studio 12.3.  Hence the use of inline assembly
+// code, hiding loops from the compiler's optimizer.
+//
+// We don't attempt to use the standard library memset when it is safe
+// to do so.  We could conservatively do so by detecting the presence
+// of block initializing stores (VM_Version::has_blk_init()), but the
+// implementation provided here should be sufficient.
+
+inline void fill_subword(void* start, void* end, int value) {
+  STATIC_ASSERT(BytesPerWord == 8);
+  assert(pointer_delta(end, start, 1) < BytesPerWord, "precondition");
+  // Dispatch on (end - start).
+  void* pc;
+  __asm__ volatile(
+    // offset := (7 - (end - start)) + 3
+    //   3 instructions from rdpc to DISPATCH
+    " sub %[offset], %[end], %[offset]\n\t" // offset := start - end
+    " sllx %[offset], 2, %[offset]\n\t" // scale offset for instruction size of 4
+    " add %[offset], 40, %[offset]\n\t" // offset += 10 * instruction size
+    " rd %pc, %[pc]\n\t"                // dispatch on scaled offset
+    " jmpl %[pc]+%[offset], %g0\n\t"
+    "  nop\n\t"
+    // DISPATCH: no direct reference, but without it the store block may be elided.
+    "1:\n\t"
+    " stb %[value], [%[end]-7]\n\t" // end[-7] = value
+    " stb %[value], [%[end]-6]\n\t"
+    " stb %[value], [%[end]-5]\n\t"
+    " stb %[value], [%[end]-4]\n\t"
+    " stb %[value], [%[end]-3]\n\t"
+    " stb %[value], [%[end]-2]\n\t"
+    " stb %[value], [%[end]-1]\n\t" // end[-1] = value
+    : /* no outputs */
+      [pc] "&=r" (pc)               // temp
+    : [offset] "&+r" (start),
+      [end] "r" (end),
+      [value] "r" (value)
+    : "memory");
+}
+
+void memset_with_concurrent_readers(void* to, int value, size_t size) {
+  Prefetch::write(to, 0);
+  void* end = static_cast<char*>(to) + size;
+  if (size >= BytesPerWord) {
+    // Fill any partial word prefix.
+    uintx* aligned_to = static_cast<uintx*>(align_ptr_up(to, BytesPerWord));
+    fill_subword(to, aligned_to, value);
+
+    // Compute fill word.
+    STATIC_ASSERT(BitsPerByte == 8);
+    STATIC_ASSERT(BitsPerWord == 64);
+    uintx xvalue = value & 0xff;
+    xvalue |= (xvalue << 8);
+    xvalue |= (xvalue << 16);
+    xvalue |= (xvalue << 32);
+
+    uintx* aligned_end = static_cast<uintx*>(align_ptr_down(end, BytesPerWord));
+    assert(aligned_to <= aligned_end, "invariant");
+
+    // for ( ; aligned_to < aligned_end; ++aligned_to) {
+    //   *aligned_to = xvalue;
+    // }
+    uintptr_t temp;
+    __asm__ volatile(
+      // Unroll loop x8.
+      " sub %[aend], %[ato], %[temp]\n\t"
+      " cmp %[temp], 56\n\t"           // cc := (aligned_end - aligned_to) > 7 words
+      " ba %xcc, 2f\n\t"               // goto TEST always
+      "  sub %[aend], 56, %[temp]\n\t" // limit := aligned_end - 7 words
+      // LOOP:
+      "1:\n\t"                         // unrolled x8 store loop top
+      " cmp %[temp], %[ato]\n\t"       // cc := limit > (next) aligned_to
+      " stx %[xvalue], [%[ato]-64]\n\t" // store 8 words, aligned_to pre-incremented
+      " stx %[xvalue], [%[ato]-56]\n\t"
+      " stx %[xvalue], [%[ato]-48]\n\t"
+      " stx %[xvalue], [%[ato]-40]\n\t"
+      " stx %[xvalue], [%[ato]-32]\n\t"
+      " stx %[xvalue], [%[ato]-24]\n\t"
+      " stx %[xvalue], [%[ato]-16]\n\t"
+      " stx %[xvalue], [%[ato]-8]\n\t"
+      // TEST:
+      "2:\n\t"
+      " bgu,a %xcc, 1b\n\t"            // goto LOOP if more than 7 words remaining
+      "  add %[ato], 64, %[ato]\n\t"   // aligned_to += 8, for next iteration
+      // Fill remaining < 8 full words.
+      // Dispatch on (aligned_end - aligned_to).
+      // offset := (7 - (aligned_end - aligned_to)) + 3
+      //   3 instructions from rdpc to DISPATCH
+      " sub %[ato], %[aend], %[ato]\n\t" // offset := aligned_to - aligned_end
+      " srax %[ato], 1, %[ato]\n\t"      // scale offset for instruction size of 4
+      " add %[ato], 40, %[ato]\n\t"      // offset += 10 * instruction size
+      " rd %pc, %[temp]\n\t"             // dispatch on scaled offset
+      " jmpl %[temp]+%[ato], %g0\n\t"
+      "  nop\n\t"
+      // DISPATCH: no direct reference, but without it the store block may be elided.
+      "3:\n\t"
+      " stx %[xvalue], [%[aend]-56]\n\t" // aligned_end[-7] = xvalue
+      " stx %[xvalue], [%[aend]-48]\n\t"
+      " stx %[xvalue], [%[aend]-40]\n\t"
+      " stx %[xvalue], [%[aend]-32]\n\t"
+      " stx %[xvalue], [%[aend]-24]\n\t"
+      " stx %[xvalue], [%[aend]-16]\n\t"
+      " stx %[xvalue], [%[aend]-8]\n\t"  // aligned_end[-1] = xvalue
+      : /* no outputs */
+        [temp] "&=r" (temp)
+      : [ato] "&+r" (aligned_to),
+        [aend] "r" (aligned_end),
+        [xvalue] "r" (xvalue)
+      : "cc", "memory");
+    to = aligned_end;           // setup for suffix
+  }
+  // Fill any partial word suffix.  Also the prefix if size < BytesPerWord.
+  fill_subword(to, end, value);
+}
+
+#endif // INCLUDE_ALL_GCS
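For readers who do not speak SPARC assembly, here is a portable sketch of the semantics the file above implements, assuming an 8-byte word (BytesPerWord == 8): byte-fill the unaligned prefix, store the aligned middle one word at a time, byte-fill the suffix. It exists only to clarify the algorithm; the real implementation avoids exactly this kind of plain loop because an optimizer may rewrite it as a memset() call, whose sun4v block-initializing-store variant can expose transient zeros to concurrent readers (see the comment block above and the UseMemSetInBOT removal later in this merge).

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative only: the shipped code uses inline assembly to keep the
// compiler from recognizing and "optimizing" this loop back into memset().
void memset_with_concurrent_readers_sketch(void* to, int value, std::size_t size) {
  char* p   = static_cast<char*>(to);
  char* end = p + size;

  if (size >= sizeof(std::uint64_t)) {
    // Prefix: bytes up to the next word boundary (fill_subword above).
    while (reinterpret_cast<std::uintptr_t>(p) % sizeof(std::uint64_t) != 0) {
      *p++ = static_cast<char>(value);
    }
    // Replicate the byte into a 64-bit fill word, as the code above does.
    std::uint64_t word = static_cast<std::uint8_t>(value);
    word |= word << 8;
    word |= word << 16;
    word |= word << 32;
    // Middle: one aligned word-sized store per iteration (the unrolled stx loop).
    char* aligned_end = end - reinterpret_cast<std::uintptr_t>(end) % sizeof(std::uint64_t);
    while (p + sizeof(std::uint64_t) <= aligned_end) {
      std::memcpy(p, &word, sizeof(word));   // stands in for a single 8-byte store
      p += sizeof(std::uint64_t);
    }
  }
  // Suffix: any trailing partial word (also covers size < 8 entirely).
  while (p < end) {
    *p++ = static_cast<char>(value);
  }
}
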
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -981,7 +981,7 @@
           __ restore();
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -1014,7 +1014,7 @@
           __ restore();
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -91,7 +91,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (index == noreg ) {
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -85,27 +85,6 @@
   _supports_cx8 = has_v9();
   _supports_atomic_getset4 = true; // swap instruction
 
-  // There are Fujitsu Sparc64 CPUs which support blk_init as well so
-  // we have to take this check out of the 'is_niagara()' block below.
-  if (has_blk_init()) {
-    // When using CMS or G1, we cannot use memset() in BOT updates
-    // because the sun4v/CMT version in libc_psr uses BIS which
-    // exposes "phantom zeros" to concurrent readers. See 6948537.
-    if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
-      FLAG_SET_DEFAULT(UseMemSetInBOT, false);
-    }
-    // Issue a stern warning if the user has explicitly set
-    // UseMemSetInBOT (it is known to cause issues), but allow
-    // use for experimentation and debugging.
-    if (UseConcMarkSweepGC || UseG1GC) {
-      if (UseMemSetInBOT) {
-        assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
-        warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
-                " on sun4v; please understand that you are using at your own risk!");
-      }
-    }
-  }
-
   if (is_niagara()) {
     // Indirect branch is the same cost as direct
     if (FLAG_IS_DEFAULT(UseInlineCaches)) {
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -4320,7 +4320,9 @@
   // register obj is destroyed afterwards.
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+  assert(bs->kind() == BarrierSet::CardTableForRS ||
+         bs->kind() == BarrierSet::CardTableExtension,
+         "Wrong barrier set kind");
 
   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -722,7 +722,7 @@
            __ popa();
          }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -754,7 +754,7 @@
         }
         break;
 
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -367,16 +367,20 @@
 #ifdef ASSERT
     // verify that threads correspond
     {
-      Label L, S;
+     Label L1, L2, L3;
       __ cmpptr(r15_thread, thread);
-      __ jcc(Assembler::notEqual, S);
+      __ jcc(Assembler::equal, L1);
+      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
+      __ bind(L1);
       __ get_thread(rbx);
+      __ cmpptr(r15_thread, thread);
+      __ jcc(Assembler::equal, L2);
+      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
+      __ bind(L2);
       __ cmpptr(r15_thread, rbx);
-      __ jcc(Assembler::equal, L);
-      __ bind(S);
-      __ jcc(Assembler::equal, L);
+      __ jcc(Assembler::equal, L3);
       __ stop("StubRoutines::call_stub: threads must correspond");
-      __ bind(L);
+      __ bind(L3);
     }
 #endif
 
@@ -450,15 +454,20 @@
 #ifdef ASSERT
     // verify that threads correspond
     {
-      Label L, S;
+      Label L1, L2, L3;
       __ cmpptr(r15_thread, thread);
-      __ jcc(Assembler::notEqual, S);
+      __ jcc(Assembler::equal, L1);
+      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
+      __ bind(L1);
       __ get_thread(rbx);
+      __ cmpptr(r15_thread, thread);
+      __ jcc(Assembler::equal, L2);
+      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
+      __ bind(L2);
       __ cmpptr(r15_thread, rbx);
-      __ jcc(Assembler::equal, L);
-      __ bind(S);
+      __ jcc(Assembler::equal, L3);
       __ stop("StubRoutines::catch_exception: threads must correspond");
-      __ bind(L);
+      __ bind(L3);
     }
 #endif
 
@@ -1244,7 +1253,7 @@
            __ popa();
         }
          break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
       case BarrierSet::ModRef:
         break;
@@ -1284,7 +1293,7 @@
           __ popa();
         }
         break;
-      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
         {
           CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
--- a/hotspot/src/cpu/x86/vm/templateTable_x86.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -200,7 +200,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       {
         if (val == noreg) {
--- a/hotspot/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,10 @@
   return cpuinfo_field_contains("cpu", "Niagara");
 }
 
+static bool detect_M_family() {
+  return cpuinfo_field_contains("cpu", "SPARC-M");
+}
+
 static bool detect_blkinit() {
   return cpuinfo_field_contains("cpucaps", "blkinit");
 }
@@ -66,6 +70,11 @@
     features = niagara1_m | T_family_m;
   }
 
+  if (detect_M_family()) {
+    NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on M family");)
+    features = sun4v_m | generic_v9_m | M_family_m | T_family_m;
+  }
+
   if (detect_blkinit()) {
     features |= blk_init_instructions_m;
   }
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -1425,7 +1425,7 @@
       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       // No pre barriers
       break;
@@ -1445,7 +1445,7 @@
       G1SATBCardTableModRef_post_barrier(addr,  new_val);
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       CardTableModRef_post_barrier(addr,  new_val);
       break;
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -28,8 +28,8 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/classLoaderExt.hpp"
-#include "classfile/imageFile.hpp"
 #include "classfile/javaClasses.hpp"
+#include "classfile/jimage.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
@@ -58,6 +58,7 @@
 #include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/timer.hpp"
+#include "runtime/vm_version.hpp"
 #include "services/management.hpp"
 #include "services/threadService.hpp"
 #include "utilities/events.hpp"
@@ -68,7 +69,7 @@
 #include "classfile/sharedPathsMiscInfo.hpp"
 #endif
 
-// Entry points in zip.dll for loading zip/jar file entries and image file entries
+// Entry points in zip.dll for loading zip/jar file entries
 
 typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
 typedef void (JNICALL *ZipClose_t)(jzfile *zip);
@@ -89,6 +90,15 @@
 static ZipInflateFully_t ZipInflateFully    = NULL;
 static Crc32_t           Crc32              = NULL;
 
+// Entry points for jimage.dll for loading jimage file entries
+
+static JImageOpen_t                    JImageOpen                    = NULL;
+static JImageClose_t                   JImageClose                   = NULL;
+static JImagePackageToModule_t         JImagePackageToModule         = NULL;
+static JImageFindResource_t            JImageFindResource            = NULL;
+static JImageGetResource_t             JImageGetResource             = NULL;
+static JImageResourceIterator_t        JImageResourceIterator        = NULL;
+
 // Globals
 
 PerfCounter*    ClassLoader::_perf_accumulated_time = NULL;
@@ -141,6 +151,15 @@
   return (strncmp(str, str_to_find, str_to_find_len) == 0);
 }
 
+static const char* get_jimage_version_string() {
+  static char version_string[10] = "";
+  if (version_string[0] == '\0') {
+    jio_snprintf(version_string, sizeof(version_string), "%d.%d",
+                 Abstract_VM_Version::vm_minor_version(), Abstract_VM_Version::vm_micro_version());
+  }
+  return (const char*)version_string;
+}
+
 bool string_ends_with(const char* str, const char* str_to_find) {
   size_t str_len = strlen(str);
   size_t str_to_find_len = strlen(str_to_find);
@@ -272,98 +291,114 @@
   }
 }
 
-ClassPathImageEntry::ClassPathImageEntry(ImageFileReader* image) :
+ClassPathImageEntry::ClassPathImageEntry(JImageFile* jimage, const char* name) :
   ClassPathEntry(),
-  _image(image),
-  _module_data(NULL) {
-  guarantee(image != NULL, "image file is null");
-
-  char module_data_name[JVM_MAXPATHLEN];
-  ImageModuleData::module_data_name(module_data_name, _image->name());
-  _module_data = new ImageModuleData(_image, module_data_name);
+  _jimage(jimage) {
+  guarantee(jimage != NULL, "jimage file is null");
+  guarantee(name != NULL, "jimage file name is null");
+  size_t len = strlen(name) + 1;
+  _name = NEW_C_HEAP_ARRAY(const char, len, mtClass);
+  strncpy((char *)_name, name, len);
 }
 
 ClassPathImageEntry::~ClassPathImageEntry() {
-  if (_module_data != NULL) {
-    delete _module_data;
-    _module_data = NULL;
+  if (_name != NULL) {
+    FREE_C_HEAP_ARRAY(const char, _name);
+    _name = NULL;
   }
-
-  if (_image != NULL) {
-    ImageFileReader::close(_image);
-    _image = NULL;
+  if (_jimage != NULL) {
+    (*JImageClose)(_jimage);
+    _jimage = NULL;
   }
 }
 
-const char* ClassPathImageEntry::name() {
-  return _image ? _image->name() : "";
+void ClassPathImageEntry::name_to_package(const char* name, char* buffer, int length) {
+  const char *pslash = strrchr(name, '/');
+  if (pslash == NULL) {
+    buffer[0] = '\0';
+    return;
+  }
+  int len = pslash - name;
+#if INCLUDE_CDS
+  if (len <= 0 && DumpSharedSpaces) {
+    buffer[0] = '\0';
+    return;
+  }
+#endif
+  assert(len > 0, "Bad length for package name");
+  if (len >= length) {
+    buffer[0] = '\0';
+    return;
+  }
+  // drop name after last slash (including slash)
+  // Ex., "java/lang/String.class" => "java/lang"
+  strncpy(buffer, name, len);
+  // ensure string termination (strncpy does not guarantee)
+  buffer[len] = '\0';
 }
 
+// For a class in a named module, look it up in the jimage file using this syntax:
+//    /<module-name>/<package-name>/<base-class>
+//
+// Assumptions:
+//     1. There are no unnamed modules in the jimage file.
+//     2. A package is in at most one module in the jimage file.
+//
 ClassFileStream* ClassPathImageEntry::open_stream(const char* name, TRAPS) {
-  ImageLocation location;
-  bool found = _image->find_location(name, location);
-
-  if (!found) {
-    const char *pslash = strrchr(name, '/');
-    int len = pslash - name;
+  jlong size;
+  JImageLocationRef location = (*JImageFindResource)(_jimage, "", get_jimage_version_string(), name, &size);
 
-    // NOTE: IMAGE_MAX_PATH is used here since this path is internal to the jimage
-    // (effectively unlimited.)  There are several JCK tests that use paths over
-    // 1024 characters long, the limit on Windows systems.
-    if (pslash && 0 < len && len < IMAGE_MAX_PATH) {
-
-      char path[IMAGE_MAX_PATH];
-      strncpy(path, name, len);
-      path[len] = '\0';
-      const char* moduleName = _module_data->package_to_module(path);
-
-      if (moduleName != NULL && (len + strlen(moduleName) + 2) < IMAGE_MAX_PATH) {
-        jio_snprintf(path, IMAGE_MAX_PATH - 1, "/%s/%s", moduleName, name);
-        location.clear_data();
-        found = _image->find_location(path, location);
-      }
+  if (location == 0) {
+    char package[JIMAGE_MAX_PATH];
+    name_to_package(name, package, JIMAGE_MAX_PATH);
+    if (package[0] != '\0') {
+        const char* module = (*JImagePackageToModule)(_jimage, package);
+        if (module == NULL) {
+            module = "java.base";
+        }
+        location = (*JImageFindResource)(_jimage, module, get_jimage_version_string(), name, &size);
     }
   }
 
-  if (found) {
-    u8 size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
+  if (location != 0) {
     if (UsePerfData) {
       ClassLoader::perf_sys_classfile_bytes_read()->inc(size);
     }
-    u1* data = NEW_RESOURCE_ARRAY(u1, size);
-    _image->get_resource(location, data);
-    return new ClassFileStream(data, (int)size, _image->name());  // Resource allocated
+    char* data = NEW_RESOURCE_ARRAY(char, size);
+    (*JImageGetResource)(_jimage, location, data, size);
+    return new ClassFileStream((u1*)data, (int)size, _name);  // Resource allocated
   }
 
   return NULL;
 }
 
 #ifndef PRODUCT
+bool ctw_visitor(JImageFile* jimage,
+        const char* module_name, const char* version, const char* package,
+        const char* name, const char* extension, void* arg) {
+  if (strcmp(extension, "class") == 0) {
+    Thread* THREAD = Thread::current();
+    char path[JIMAGE_MAX_PATH];
+    jio_snprintf(path, JIMAGE_MAX_PATH - 1, "%s/%s.class", package, name);
+    ClassLoader::compile_the_world_in(path, *(Handle*)arg, THREAD);
+    return !HAS_PENDING_EXCEPTION;
+  }
+  return true;
+}
+
 void ClassPathImageEntry::compile_the_world(Handle loader, TRAPS) {
   tty->print_cr("CompileTheWorld : Compiling all classes in %s", name());
   tty->cr();
-  const ImageStrings strings = _image->get_strings();
-  // Retrieve each path component string.
-  u4 length = _image->table_length();
-  for (u4 i = 0; i < length; i++) {
-    u1* location_data = _image->get_location_data(i);
-
-    if (location_data != NULL) {
-       ImageLocation location(location_data);
-       char path[IMAGE_MAX_PATH];
-       _image->location_path(location, path, IMAGE_MAX_PATH);
-       ClassLoader::compile_the_world_in(path, loader, CHECK);
+  (*JImageResourceIterator)(_jimage, (JImageResourceVisitor_t)ctw_visitor, (void *)&loader);
+  if (HAS_PENDING_EXCEPTION) {
+    if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
+      CLEAR_PENDING_EXCEPTION;
+      tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
+      tty->print_cr("Increase class metadata storage if a limit was set");
+    } else {
+      tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
     }
   }
-  if (HAS_PENDING_EXCEPTION) {
-  if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
-    CLEAR_PENDING_EXCEPTION;
-    tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
-    tty->print_cr("Increase class metadata storage if a limit was set");
-  } else {
-    tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
-  }
-  }
 }
 
 bool ClassPathImageEntry::is_jrt() {
@@ -490,7 +525,7 @@
   JavaThread* thread = JavaThread::current();
   ClassPathEntry* new_entry = NULL;
   if ((st->st_mode & S_IFREG) == S_IFREG) {
-    // Regular file, should be a zip or image file
+    // Regular file, should be a zip or jimage file
     // Canonicalized filename
     char canonical_path[JVM_MAXPATHLEN];
     if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
@@ -501,9 +536,10 @@
         return NULL;
       }
     }
-    ImageFileReader* image = ImageFileReader::open(canonical_path);
-    if (image != NULL) {
-      new_entry = new ClassPathImageEntry(image);
+    jint error;
+    JImageFile* jimage =(*JImageOpen)(canonical_path, &error);
+    if (jimage != NULL) {
+      new_entry = new ClassPathImageEntry(jimage, canonical_path);
     } else {
       char* error_msg = NULL;
       jzfile* zip;
@@ -682,6 +718,35 @@
   // This lookup only works on 1.3. Do not check for non-null here
 }
 
+void ClassLoader::load_jimage_library() {
+  // First make sure native library is loaded
+  os::native_java_library();
+  // Load jimage library
+  char path[JVM_MAXPATHLEN];
+  char ebuf[1024];
+  void* handle = NULL;
+  if (os::dll_build_name(path, sizeof(path), Arguments::get_dll_dir(), "jimage")) {
+    handle = os::dll_load(path, ebuf, sizeof ebuf);
+  }
+  if (handle == NULL) {
+    vm_exit_during_initialization("Unable to load jimage library", path);
+  }
+
+  // Lookup jimage entry points
+  JImageOpen = CAST_TO_FN_PTR(JImageOpen_t, os::dll_lookup(handle, "JIMAGE_Open"));
+  guarantee(JImageOpen != NULL, "function JIMAGE_Open not found");
+  JImageClose = CAST_TO_FN_PTR(JImageClose_t, os::dll_lookup(handle, "JIMAGE_Close"));
+  guarantee(JImageClose != NULL, "function JIMAGE_Close not found");
+  JImagePackageToModule = CAST_TO_FN_PTR(JImagePackageToModule_t, os::dll_lookup(handle, "JIMAGE_PackageToModule"));
+  guarantee(JImagePackageToModule != NULL, "function JIMAGE_PackageToModule not found");
+  JImageFindResource = CAST_TO_FN_PTR(JImageFindResource_t, os::dll_lookup(handle, "JIMAGE_FindResource"));
+  guarantee(JImageFindResource != NULL, "function JIMAGE_FindResource not found");
+  JImageGetResource = CAST_TO_FN_PTR(JImageGetResource_t, os::dll_lookup(handle, "JIMAGE_GetResource"));
+  guarantee(JImageGetResource != NULL, "function JIMAGE_GetResource not found");
+  JImageResourceIterator = CAST_TO_FN_PTR(JImageResourceIterator_t, os::dll_lookup(handle, "JIMAGE_ResourceIterator"));
+  guarantee(JImageResourceIterator != NULL, "function JIMAGE_ResourceIterator not found");
+}
+
 jboolean ClassLoader::decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg) {
   return (*ZipInflateFully)(in, inSize, out, outSize, pmsg);
 }
@@ -1086,6 +1151,8 @@
 
   // lookup zip library entry points
   load_zip_library();
+  // lookup jimage library entry points
+  load_jimage_library();
 #if INCLUDE_CDS
   // initialize search path
   if (DumpSharedSpaces) {
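The new open_stream logic above looks a resource up twice: first by the bare name, then, if that fails, by mapping the class's package to its module (defaulting to "java.base") and retrying, per the "/<module>/<package>/<base-class>" addressing described in the comment. The sketch below condenses that fallback into one function. The entry-point shapes are inferred from the call sites in this diff rather than taken from classfile/jimage.hpp (which is not shown here), and the 1024-byte package buffer is an arbitrary bound chosen for illustration.

#include <cstddef>
#include <cstring>

typedef int       jint;    // JNI-style integer types, declared locally for the sketch
typedef long long jlong;

// Opaque jimage handle and location reference; the authoritative declarations
// live in hotspot/src/share/vm/classfile/jimage.hpp, which is not part of this diff.
struct JImageFile;
typedef jlong JImageLocationRef;

// Entry-point shapes inferred from the call sites above, not official prototypes.
typedef const char*       (*JImagePackageToModule_t)(JImageFile* jimage, const char* package);
typedef JImageLocationRef (*JImageFindResource_t)(JImageFile* jimage, const char* module,
                                                  const char* version, const char* name,
                                                  jlong* size);

// Mirrors ClassPathImageEntry::open_stream: try the name as given, then derive the
// package, map it to a module (falling back to "java.base"), and retry.
JImageLocationRef find_class_resource(JImageFile* jimage,
                                      JImageFindResource_t find_resource,
                                      JImagePackageToModule_t package_to_module,
                                      const char* version,
                                      const char* name,
                                      jlong* size) {
  JImageLocationRef location = find_resource(jimage, "", version, name, size);
  if (location != 0) return location;

  const char* slash = std::strrchr(name, '/');
  if (slash == nullptr) return 0;                    // no package component

  char package[1024];                                // arbitrary bound for the sketch
  std::size_t len = static_cast<std::size_t>(slash - name);
  if (len == 0 || len >= sizeof(package)) return 0;
  std::memcpy(package, name, len);
  package[len] = '\0';                               // drop "/ClassName.class"

  const char* module = package_to_module(jimage, package);
  if (module == nullptr) module = "java.base";       // same default as the diff
  return find_resource(jimage, module, version, name, size);
}
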
--- a/hotspot/src/share/vm/classfile/classLoader.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -37,8 +37,7 @@
 
 // Class path entry (directory or zip file)
 
-class ImageFileReader;
-class ImageModuleData;
+class JImageFile;
 
 class ClassPathEntry: public CHeapObj<mtClass> {
  private:
@@ -52,7 +51,7 @@
   }
   virtual bool is_jar_file() = 0;
   virtual const char* name() = 0;
-  virtual ImageFileReader* image() = 0;
+  virtual JImageFile* jimage() = 0;
   // Constructor
   ClassPathEntry();
   // Attempt to locate file_name through this class path entry.
@@ -70,7 +69,7 @@
  public:
   bool is_jar_file()       { return false;  }
   const char* name()       { return _dir; }
-  ImageFileReader* image() { return NULL; }
+  JImageFile* jimage()     { return NULL; }
   ClassPathDirEntry(const char* dir);
   ClassFileStream* open_stream(const char* name, TRAPS);
   // Debugging
@@ -100,7 +99,7 @@
  public:
   bool is_jar_file()       { return true;  }
   const char* name()       { return _zip_name; }
-  ImageFileReader* image() { return NULL; }
+  JImageFile* jimage()     { return NULL; }
   ClassPathZipEntry(jzfile* zip, const char* zip_name);
   ~ClassPathZipEntry();
   u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
@@ -115,16 +114,16 @@
 // For java image files
 class ClassPathImageEntry: public ClassPathEntry {
 private:
-  ImageFileReader* _image;
-  ImageModuleData* _module_data;
+  JImageFile* _jimage;
+  const char* _name;
 public:
   bool is_jar_file()  { return false;  }
-  bool is_open()  { return _image != NULL; }
-  const char* name();
-  ImageFileReader* image() { return _image; }
-  ImageModuleData* module_data() { return _module_data; }
-  ClassPathImageEntry(ImageFileReader* image);
+  bool is_open()  { return _jimage != NULL; }
+  const char* name() { return _name == NULL ? "" : _name; }
+  JImageFile* jimage() { return _jimage; }
+  ClassPathImageEntry(JImageFile* jimage, const char* name);
   ~ClassPathImageEntry();
+  static void name_to_package(const char* name, char* buffer, int length);
   ClassFileStream* open_stream(const char* name, TRAPS);
 
   // Debugging
@@ -206,6 +205,7 @@
   static void setup_search_path(const char *class_path);
 
   static void load_zip_library();
+  static void load_jimage_library();
   static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
                                                  bool throw_exception, TRAPS);
 
--- a/hotspot/src/share/vm/classfile/imageDecompressor.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "classfile/imageDecompressor.hpp"
-#include "runtime/thread.hpp"
-#include "utilities/bytes.hpp"
-
-/*
- * Allocate in C Heap not in resource area, otherwise JVM crashes.
- * This array life time is the VM life time. Array is never freed and
- * is not expected to contain more than few references.
- */
-GrowableArray<ImageDecompressor*>* ImageDecompressor::_decompressors =
-  new(ResourceObj::C_HEAP, mtInternal) GrowableArray<ImageDecompressor*>(2, true);
-
-static Symbol* createSymbol(const char* str) {
-  Thread* THREAD = Thread::current();
-  Symbol* sym = SymbolTable::lookup(str, (int) strlen(str), THREAD);
-  if (HAS_PENDING_EXCEPTION) {
-    warning("can't create symbol\n");
-    CLEAR_PENDING_EXCEPTION;
-    return NULL;
-  }
-  return sym;
-}
-
-/*
- * Initialize the array of decompressors.
- */
-bool image_decompressor_init() {
-  Symbol* zipSymbol = createSymbol("zip");
-  if (zipSymbol == NULL) {
-    return false;
-  }
-  ImageDecompressor::add_decompressor(new ZipDecompressor(zipSymbol));
-
-  return true;
-}
-
-/*
- * Decompression entry point. Called from ImageFileReader::get_resource.
- */
-void ImageDecompressor::decompress_resource(u1* compressed, u1* uncompressed,
-        u4 uncompressed_size, const ImageStrings* strings, bool is_C_heap) {
-  bool has_header = false;
-  u1* decompressed_resource = compressed;
-  u1* compressed_resource = compressed;
-
-  // Resource could have been transformed by a stack of decompressors.
-  // Iterate and decompress resources until there is no more header.
-  do {
-    ResourceHeader _header;
-    memcpy(&_header, compressed_resource, sizeof (ResourceHeader));
-    has_header = _header._magic == ResourceHeader::resource_header_magic;
-    if (has_header) {
-      // decompressed_resource array contains the result of decompression
-      // when a resource content is terminal, it means that it is an actual resource,
-      // not an intermediate not fully uncompressed content. In this case
-      // the resource is allocated as an mtClass, otherwise as an mtOther
-      decompressed_resource = is_C_heap && _header._is_terminal ?
-              NEW_C_HEAP_ARRAY(u1, _header._uncompressed_size, mtClass) :
-              NEW_C_HEAP_ARRAY(u1, _header._uncompressed_size, mtOther);
-      // Retrieve the decompressor name
-      const char* decompressor_name = strings->get(_header._decompressor_name_offset);
-      if (decompressor_name == NULL) warning("image decompressor not found\n");
-      guarantee(decompressor_name, "image decompressor not found");
-      // Retrieve the decompressor instance
-      ImageDecompressor* decompressor = get_decompressor(decompressor_name);
-      if (decompressor == NULL) {
-        warning("image decompressor %s not found\n", decompressor_name);
-      }
-      guarantee(decompressor, "image decompressor not found");
-      u1* compressed_resource_base = compressed_resource;
-      compressed_resource += ResourceHeader::resource_header_length;
-      // Ask the decompressor to decompress the compressed content
-      decompressor->decompress_resource(compressed_resource, decompressed_resource,
-        &_header, strings);
-      if (compressed_resource_base != compressed) {
-        FREE_C_HEAP_ARRAY(char, compressed_resource_base);
-      }
-      compressed_resource = decompressed_resource;
-    }
-  } while (has_header);
-  memcpy(uncompressed, decompressed_resource, uncompressed_size);
-}
-
-// Zip decompressor
-
-void ZipDecompressor::decompress_resource(u1* data, u1* uncompressed,
-        ResourceHeader* header, const ImageStrings* strings) {
-  char* msg = NULL;
-  jboolean res = ClassLoader::decompress(data, header->_size, uncompressed,
-          header->_uncompressed_size, &msg);
-  if (!res) warning("decompression failed due to %s\n", msg);
-  guarantee(res, "decompression failed");
-}
-
-// END Zip Decompressor
--- a/hotspot/src/share/vm/classfile/imageDecompressor.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
-#define SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
-
-#include "runtime/thread.inline.hpp"
-#include "classfile/classLoader.hpp"
-#include "classfile/imageFile.hpp"
-#include "classfile/symbolTable.hpp"
-#include "oops/symbol.hpp"
-#include "utilities/growableArray.hpp"
-
-/*
- * Compressed resources located in image have an header.
- * This header contains:
- * - _magic: A magic u4, required to retrieved the header in the compressed content
- * - _size: The size of the compressed resource.
- * - _uncompressed_size: The uncompressed size of the compressed resource.
- * - _decompressor_name_offset: The ImageDecompressor instance name StringsTable offset.
- * - _decompressor_config_offset: StringsTable offset of configuration that could be needed by
- *   the decompressor in order to decompress.
- * - _is_terminal: 1: the compressed content is terminal. Uncompressing it would
- *   create the actual resource. 0: the compressed content is not terminal. Uncompressing it
- *   will result in a compressed content to be decompressed (This occurs when a stack of compressors
- *   have been used to compress the resource.
- */
-struct ResourceHeader {
-  /* Length of header, needed to retrieve content offset */
-  static const u1 resource_header_length = 21;
-  /* magic bytes that identifies a compressed resource header*/
-  static const u4 resource_header_magic = 0xCAFEFAFA;
-  u4 _magic; // Resource header
-  u4 _size;  // Resource size
-  u4 _uncompressed_size;  // Expected uncompressed size
-  u4 _decompressor_name_offset;  // Strings table decompressor offset
-  u4 _decompressor_config_offset; // Strings table config offset
-  u1 _is_terminal; // Last decompressor 1, otherwise 0.
-};
-
-/*
- * Resources located in jimage file can be compressed. Compression occurs at
- * jimage file creation time. When compressed a resource is added an header that
- * contains the name of the compressor that compressed it.
- * Various compression strategies can be applied to compress a resource.
- * The same resource can even be compressed multiple time by a stack of compressors.
- * At runtime, a resource is decompressed in a loop until there is no more header
- * meaning that the resource is equivalent to the not compressed resource.
- * In each iteration, the name of the compressor located in the current header
- * is used to retrieve the associated instance of ImageDecompressor.
- * For example “zip” is the name of the compressor that compresses resources
- * using the zip algorithm. The ZipDecompressor class name is also “zip”.
- * ImageDecompressor instances are retrieved from a static array in which
- * they are registered.
- */
-class ImageDecompressor: public CHeapObj<mtClass> {
-
-private:
-  const Symbol* _name;
-
-  /*
-   * Array of concrete decompressors. This array is used to retrieve the decompressor
-   * that can handle resource decompression.
-   */
-  static GrowableArray<ImageDecompressor*>* _decompressors;
-
-  /*
-   * Identifier of a decompressor. This name is the identification key to retrieve
-   * decompressor from a resource header.
-   */
-  inline const Symbol* get_name() const { return _name; }
-
-protected:
-  ImageDecompressor(const Symbol* name) : _name(name) {
-  }
-  virtual void decompress_resource(u1* data, u1* uncompressed,
-    ResourceHeader* header, const ImageStrings* strings) = 0;
-
-public:
-  inline static void add_decompressor(ImageDecompressor* decompressor) {
-    _decompressors->append(decompressor);
-  }
-  inline static ImageDecompressor* get_decompressor(const char * decompressor_name) {
-    Thread* THREAD = Thread::current();
-    TempNewSymbol sym = SymbolTable::new_symbol(decompressor_name,
-            (int) strlen(decompressor_name), CHECK_NULL);
-    if (HAS_PENDING_EXCEPTION) {
-      warning("can't create symbol\n");
-      CLEAR_PENDING_EXCEPTION;
-      return NULL;
-    }
-    for (int i = 0; i < _decompressors->length(); i++) {
-      ImageDecompressor* decompressor = _decompressors->at(i);
-      if (decompressor->get_name()->fast_compare(sym) == 0) {
-        return decompressor;
-      }
-    }
-    guarantee(false, "No decompressor found.");
-    return NULL;
-  }
-  static void decompress_resource(u1* compressed, u1* uncompressed,
-    u4 uncompressed_size, const ImageStrings* strings, bool is_C_heap);
-};
-
-/**
- * Zip decompressor.
- */
-class ZipDecompressor : public ImageDecompressor {
-public:
-  ZipDecompressor(const Symbol* sym) : ImageDecompressor(sym) { }
-  void decompress_resource(u1* data, u1* uncompressed, ResourceHeader* header,
-    const ImageStrings* strings);
-};
-
-#endif // SHARE_VM_CLASSFILE_IMAGEDECOMPRESSOR_HPP
--- a/hotspot/src/share/vm/classfile/imageFile.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,546 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/imageDecompressor.hpp"
-#include "classfile/imageFile.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.inline.hpp"
-#include "utilities/endian.hpp"
-#include "utilities/growableArray.hpp"
-
-// Image files are an alternate file format for storing classes and resources. The
-// goal is to supply file access which is faster and smaller than the jar format.
-//
-// (More detailed nodes in the header.)
-//
-
-// Compute the Perfect Hashing hash code for the supplied UTF-8 string.
-s4 ImageStrings::hash_code(const char* string, s4 seed) {
-  // Access bytes as unsigned.
-  u1* bytes = (u1*)string;
-  // Compute hash code.
-  for (u1 byte = *bytes++; byte; byte = *bytes++) {
-    seed = (seed * HASH_MULTIPLIER) ^ byte;
-  }
-  // Ensure the result is not signed.
-  return seed & 0x7FFFFFFF;
-}
-
-// Match up a string in a perfect hash table.  Result still needs validation
-// for precise match (false positive.)
-s4 ImageStrings::find(Endian* endian, const char* name, s4* redirect, u4 length) {
-  // If the table is empty, then short cut.
-  if (redirect == NULL || length == 0) {
-    return NOT_FOUND;
-  }
-  // Compute the basic perfect hash for name.
-  s4 hash_code = ImageStrings::hash_code(name);
-  // Modulo table size.
-  s4 index = hash_code % length;
-  // Get redirect entry.
-  //   value == 0 then not found
-  //   value < 0 then -1 - value is true index
-  //   value > 0 then value is seed for recomputing hash.
-  s4 value = endian->get(redirect[index]);
-  // if recompute is required.
-  if (value > 0) {
-    // Entry collision value, need to recompute hash.
-    hash_code = ImageStrings::hash_code(name, value);
-    // Modulo table size.
-    return hash_code % length;
-  } else if (value < 0) {
-    // Compute direct index.
-    return -1 - value;
-  }
-  // No entry found.
-  return NOT_FOUND;
-}
-
-// Test to see if UTF-8 string begins with the start UTF-8 string.  If so,
-// return non-NULL address of remaining portion of string.  Otherwise, return
-// NULL.  Used to test sections of a path without copying from image string
-// table.
-const char* ImageStrings::starts_with(const char* string, const char* start) {
-  char ch1, ch2;
-  // Match up the strings the best we can.
-  while ((ch1 = *string) && (ch2 = *start)) {
-    if (ch1 != ch2) {
-      // Mismatch, return NULL.
-      return NULL;
-    }
-    // Next characters.
-    string++, start++;
-  }
-  // Return remainder of string.
-  return string;
-}
-
-// Inflates the attribute stream into individual values stored in the long
-// array _attributes. This allows an attribute value to be quickly accessed by
-// direct indexing.  Unspecified values default to zero (from constructor.)
-void ImageLocation::set_data(u1* data) {
-  // Inflate the attribute stream into an array of attributes.
-  u1 byte;
-  // Repeat until end header is found.
-  while ((byte = *data)) {
-    // Extract kind from header byte.
-    u1 kind = attribute_kind(byte);
-    guarantee(kind < ATTRIBUTE_COUNT, "invalid image location attribute");
-    // Extract length of data (in bytes).
-    u1 n = attribute_length(byte);
-    // Read value (most significant first.)
-    _attributes[kind] = attribute_value(data + 1, n);
-    // Position to next attribute by skipping attribute header and data bytes.
-    data += n + 1;
-  }
-}
-
-// Zero all attribute values.
-void ImageLocation::clear_data() {
-  // Set defaults to zero.
-  memset(_attributes, 0, sizeof(_attributes));
-}
-
-// ImageModuleData constructor maps out sub-tables for faster access.
-ImageModuleData::ImageModuleData(const ImageFileReader* image_file,
-        const char* module_data_name) :
-    _image_file(image_file),
-    _endian(image_file->endian()),
-    _strings(image_file->get_strings()) {
-  // Retrieve the resource containing the module data for the image file.
-  ImageLocation location;
-  bool found = image_file->find_location(module_data_name, location);
-  guarantee(found, "missing module data");
-  u8 data_size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
-  _data = (u1*)NEW_C_HEAP_ARRAY(char, data_size, mtClass);
-  _image_file->get_resource(location, _data);
-  // Map out the header.
-  _header = (Header*)_data;
-  // Get the package to module entry count.
-  u4 ptm_count = _header->ptm_count(_endian);
-  // Get the module to package entry count.
-  u4 mtp_count = _header->mtp_count(_endian);
-  // Compute the offset of the package to module perfect hash redirect.
-  u4 ptm_redirect_offset = sizeof(Header);
-  // Compute the offset of the package to module data.
-  u4 ptm_data_offset = ptm_redirect_offset + ptm_count * sizeof(s4);
-  // Compute the offset of the module to package perfect hash redirect.
-  u4 mtp_redirect_offset = ptm_data_offset + ptm_count * sizeof(PTMData);
-  // Compute the offset of the module to package data.
-  u4 mtp_data_offset = mtp_redirect_offset + mtp_count * sizeof(s4);
-  // Compute the offset of the module to package tables.
-  u4 mtp_packages_offset = mtp_data_offset + mtp_count * sizeof(MTPData);
-  // Compute the address of the package to module perfect hash redirect.
-  _ptm_redirect = (s4*)(_data + ptm_redirect_offset);
-  // Compute the address of the package to module data.
-  _ptm_data = (PTMData*)(_data + ptm_data_offset);
-  // Compute the address of the module to package perfect hash redirect.
-  _mtp_redirect = (s4*)(_data + mtp_redirect_offset);
-  // Compute the address of the module to package data.
-  _mtp_data = (MTPData*)(_data + mtp_data_offset);
-  // Compute the address of the module to package tables.
-  _mtp_packages = (s4*)(_data + mtp_packages_offset);
-}
-
-// Release module data resource.
-ImageModuleData::~ImageModuleData() {
-  if (_data != NULL) {
-    FREE_C_HEAP_ARRAY(u1, _data);
-  }
-}
-
-// Return the name of the module data resource.  Ex. "./lib/modules/file.jimage"
-// yields "file.jdata"
-void ImageModuleData::module_data_name(char* buffer, const char* image_file_name) {
-  // Locate the last slash in the file name path.
-  const char* slash = strrchr(image_file_name, os::file_separator()[0]);
-  // Trim the path to name and extension.
-  const char* name = slash != NULL ? slash + 1 : (char *)image_file_name;
-  // Locate the extension period.
-  const char* dot = strrchr(name, '.');
-  guarantee(dot, "missing extension on jimage name");
-  // Trim to only base name.
-  int length = dot - name;
-  strncpy(buffer, name, length);
-  buffer[length] = '\0';
-  // Append extension.
-  strcat(buffer, ".jdata");
-}
-
-// Return the module in which a package resides.  Returns NULL if not found.
-const char* ImageModuleData::package_to_module(const char* package_name) {
-  // Search the package to module table.
-  s4 index = ImageStrings::find(_endian, package_name, _ptm_redirect,
-                                  _header->ptm_count(_endian));
-  // If entry is found.
-  if (index != ImageStrings::NOT_FOUND) {
-    // Retrieve the package to module entry.
-    PTMData* data = _ptm_data + index;
-    // Verify that it is the correct data.
-    if (strcmp(package_name, get_string(data->name_offset(_endian))) != 0) {
-      return NULL;
-    }
-    // Return the module name.
-    return get_string(data->module_name_offset(_endian));
-  }
-  return NULL;
-}
-
-// Returns all the package names in a module.  Returns NULL if module not found.
-GrowableArray<const char*>* ImageModuleData::module_to_packages(const char* module_name) {
-  // Search the module to package table.
-  s4 index = ImageStrings::find(_endian, module_name, _mtp_redirect,
-                                  _header->mtp_count(_endian));
-  // If entry is found.
-  if (index != ImageStrings::NOT_FOUND) {
-    // Retrieve the module to package entry.
-    MTPData* data = _mtp_data + index;
-    // Verify that it is the correct data.
-    if (strcmp(module_name, get_string(data->name_offset(_endian))) != 0) {
-      return NULL;
-    }
-    // Construct an array of all the package entries.
-    GrowableArray<const char*>* packages = new GrowableArray<const char*>();
-    s4 package_offset = data->package_offset(_endian);
-    for (u4 i = 0; i < data->package_count(_endian); i++) {
-      u4 package_name_offset = mtp_package(package_offset + i);
-      const char* package_name = get_string(package_name_offset);
-      packages->append(package_name);
-    }
-    return packages;
-  }
-  return NULL;
-}
-
-// Table to manage multiple opens of an image file.
-GrowableArray<ImageFileReader*>* ImageFileReader::_reader_table =
-  new(ResourceObj::C_HEAP, mtInternal) GrowableArray<ImageFileReader*>(2, true);
-
-// Open an image file, reuse structure if file already open.
-ImageFileReader* ImageFileReader::open(const char* name, bool big_endian) {
-  // Lock out _reader_table.
-  MutexLocker ml(ImageFileReaderTable_lock);
-  ImageFileReader* reader;
-  // Search for an existing image file.
-  for (int i = 0; i < _reader_table->length(); i++) {
-    // Retrieve table entry.
-    reader = _reader_table->at(i);
-    // If name matches, then reuse (bump up use count.)
-    if (strcmp(reader->name(), name) == 0) {
-      reader->inc_use();
-      return reader;
-    }
-  }
-  // Need a new image reader.
-  reader = new ImageFileReader(name, big_endian);
-  bool opened = reader->open();
-  // If failed to open.
-  if (!opened) {
-    delete reader;
-    return NULL;
-  }
-  // Bump use count and add to table.
-  reader->inc_use();
-  _reader_table->append(reader);
-  return reader;
-}
-
-// Close an image file if the file is not in use elsewhere.
-void ImageFileReader::close(ImageFileReader *reader) {
-  // Lock out _reader_table.
-  MutexLocker ml(ImageFileReaderTable_lock);
-  // If last use then remove from table and then close.
-  if (reader->dec_use()) {
-    _reader_table->remove(reader);
-    delete reader;
-  }
-}
-
-// Return an id for the specified ImageFileReader.
-u8 ImageFileReader::readerToID(ImageFileReader *reader) {
-  // ID is just the cloaked reader address.
-  return (u8)reader;
-}
-
-// Validate the image id.
-bool ImageFileReader::idCheck(u8 id) {
-  // Make sure the ID is a managed (_reader_table) reader.
-  MutexLocker ml(ImageFileReaderTable_lock);
-  return _reader_table->contains((ImageFileReader*)id);
-}
-
-// Return the ImageFileReader for the specified id.
-ImageFileReader* ImageFileReader::idToReader(u8 id) {
-#ifdef PRODUCT
-  // Fast convert.
-  return (ImageFileReader*)id;
-#else
-  // Do a slow check before fast convert.
-  return idCheck(id) ? (ImageFileReader*)id : NULL;
-#endif
-}
-
-// Constructor initializes to a closed state.
-ImageFileReader::ImageFileReader(const char* name, bool big_endian) {
-  // Copy the image file name.
-  _name = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtClass);
-  strcpy(_name, name);
-  // Initialize for a closed file.
-  _fd = -1;
-  _endian = Endian::get_handler(big_endian);
-  _index_data = NULL;
-}
-
-// Close image and free up data structures.
-ImageFileReader::~ImageFileReader() {
-  // Ensure file is closed.
-  close();
-  // Free up name.
-  if (_name != NULL) {
-    FREE_C_HEAP_ARRAY(char, _name);
-    _name = NULL;
-  }
-}
-
-// Open image file for read access.
-bool ImageFileReader::open() {
-  // If file exists open for reading.
-  struct stat st;
-  if (os::stat(_name, &st) != 0 ||
-    (st.st_mode & S_IFREG) != S_IFREG ||
-    (_fd = os::open(_name, 0, O_RDONLY)) == -1) {
-    return false;
-  }
-  // Retrieve the file size.
-  _file_size = (u8)st.st_size;
-  // Read image file header and verify it has a valid header.
-  size_t header_size = sizeof(ImageHeader);
-  if (_file_size < header_size ||
-    !read_at((u1*)&_header, header_size, 0) ||
-    _header.magic(_endian) != IMAGE_MAGIC ||
-    _header.major_version(_endian) != MAJOR_VERSION ||
-    _header.minor_version(_endian) != MINOR_VERSION) {
-    close();
-    return false;
-  }
-  // Size of image index.
-  _index_size = index_size();
-  // Make sure file is large enough to contain the index.
-  if (_file_size < _index_size) {
-    return false;
-  }
-  // Determine how much of the image is memory mapped.
-  off_t map_size = (off_t)(MemoryMapImage ? _file_size : _index_size);
-  // Memory map image (minimally the index.)
-  _index_data = (u1*)os::map_memory(_fd, _name, 0, NULL, map_size, true, false);
-  guarantee(_index_data, "image file not memory mapped");
-  // Retrieve length of index perfect hash table.
-  u4 length = table_length();
-  // Compute offset of the perfect hash table redirect table.
-  u4 redirect_table_offset = (u4)header_size;
-  // Compute offset of index attribute offsets.
-  u4 offsets_table_offset = redirect_table_offset + length * sizeof(s4);
-  // Compute offset of index location attribute data.
-  u4 location_bytes_offset = offsets_table_offset + length * sizeof(u4);
-  // Compute offset of index string table.
-  u4 string_bytes_offset = location_bytes_offset + locations_size();
-  // Compute address of the perfect hash table redirect table.
-  _redirect_table = (s4*)(_index_data + redirect_table_offset);
-  // Compute address of index attribute offsets.
-  _offsets_table = (u4*)(_index_data + offsets_table_offset);
-  // Compute address of index location attribute data.
-  _location_bytes = _index_data + location_bytes_offset;
-  // Compute address of index string table.
-  _string_bytes = _index_data + string_bytes_offset;
-  // Successful open.
-  return true;
-}
-
-// Close image file.
-void ImageFileReader::close() {
-  // Deallocate the index.
-  if (_index_data != NULL) {
-    os::unmap_memory((char*)_index_data, _index_size);
-    _index_data = NULL;
-  }
-  // Close file.
-  if (_fd != -1) {
-    os::close(_fd);
-    _fd = -1;
-  }
-}
-
-// Read directly from the file.
-bool ImageFileReader::read_at(u1* data, u8 size, u8 offset) const {
-  return os::read_at(_fd, data, size, offset) == size;
-}
-
-// Find the location attributes associated with the path.  Returns true if
-// the location is found, false otherwise.
-bool ImageFileReader::find_location(const char* path, ImageLocation& location) const {
-  // Locate the entry in the index perfect hash table.
-  s4 index = ImageStrings::find(_endian, path, _redirect_table, table_length());
-  // If the entry is found.
-  if (index != ImageStrings::NOT_FOUND) {
-    // Get address of first byte of location attribute stream.
-    u1* data = get_location_data(index);
-    // Expand location attributes.
-    location.set_data(data);
-    // Make sure result is not a false positive.
-    return verify_location(location, path);
-  }
-  return false;
-}
-
-// Assemble the location path from the string fragments indicated in the location attributes.
-void ImageFileReader::location_path(ImageLocation& location, char* path, size_t max) const {
-  // Manage the image string table.
-  ImageStrings strings(_string_bytes, _header.strings_size(_endian));
-  // Position to first character of the path buffer.
-  char* next = path;
-  // Temp for string length.
-  size_t length;
-  // Get module string.
-  const char* module = location.get_attribute(ImageLocation::ATTRIBUTE_MODULE, strings);
-  // If module string is not empty string.
-  if (*module != '\0') {
-    // Get length of module name.
-    length = strlen(module);
-    // Make sure there is no buffer overflow.
-    guarantee(next - path + length + 2 < max, "buffer overflow");
-    // Append '/module/'.
-    *next++ = '/';
-    strcpy(next, module); next += length;
-    *next++ = '/';
-  }
-  // Get parent (package) string.
-  const char* parent = location.get_attribute(ImageLocation::ATTRIBUTE_PARENT, strings);
-  // If parent string is not empty string.
-  if (*parent != '\0') {
-    // Get length of parent string.
-    length = strlen(parent);
-    // Make sure there is no buffer overflow.
-    guarantee(next - path + length + 1 < max, "buffer overflow");
-    // Append 'parent/'.
-    strcpy(next, parent); next += length;
-    *next++ = '/';
-  }
-  // Get base name string.
-  const char* base = location.get_attribute(ImageLocation::ATTRIBUTE_BASE, strings);
-  // Get length of base name.
-  length = strlen(base);
-  // Make sure there is no buffer overflow.
-  guarantee(next - path + length < max, "buffer overflow");
-  // Append base name.
-  strcpy(next, base); next += length;
-  // Get extension string.
-  const char* extension = location.get_attribute(ImageLocation::ATTRIBUTE_EXTENSION, strings);
-  // If extension string is not empty string.
-  if (*extension != '\0') {
-    // Get length of extension string.
-    length = strlen(extension);
-    // Make sure there is no buffer overflow.
-    guarantee(next - path + length + 1 < max, "buffer overflow");
-    // Append '.extension' .
-    *next++ = '.';
-    strcpy(next, extension); next += length;
-  }
-  // Make sure there is no buffer overflow.
-  guarantee((size_t)(next - path) < max, "buffer overflow");
-  // Terminate string.
-  *next = '\0';
-}
-
-// Verify that a found location matches the supplied path (without copying.)
-bool ImageFileReader::verify_location(ImageLocation& location, const char* path) const {
-  // Manage the image string table.
-  ImageStrings strings(_string_bytes, _header.strings_size(_endian));
-  // Position to first character of the path string.
-  const char* next = path;
-  // Get module name string.
-  const char* module = location.get_attribute(ImageLocation::ATTRIBUTE_MODULE, strings);
-  // If module string is not empty.
-  if (*module != '\0') {
-    // Compare '/module/' .
-    if (*next++ != '/') return false;
-    if (!(next = ImageStrings::starts_with(next, module))) return false;
-    if (*next++ != '/') return false;
-  }
-  // Get parent (package) string
-  const char* parent = location.get_attribute(ImageLocation::ATTRIBUTE_PARENT, strings);
-  // If parent string is not empty string.
-  if (*parent != '\0') {
-    // Compare 'parent/' .
-    if (!(next = ImageStrings::starts_with(next, parent))) return false;
-    if (*next++ != '/') return false;
-  }
-  // Get base name string.
-  const char* base = location.get_attribute(ImageLocation::ATTRIBUTE_BASE, strings);
-  // Compare with base name.
-  if (!(next = ImageStrings::starts_with(next, base))) return false;
-  // Get extension string.
-  const char* extension = location.get_attribute(ImageLocation::ATTRIBUTE_EXTENSION, strings);
-  // If extension is not empty.
-  if (*extension != '\0') {
-    // Compare '.extension' .
-    if (*next++ != '.') return false;
-    if (!(next = ImageStrings::starts_with(next, extension))) return false;
-  }
-  // True only if complete match and no more characters.
-  return *next == '\0';
-}
-
-// Return the resource data for the supplied location.
-void ImageFileReader::get_resource(ImageLocation& location, u1* uncompressed_data) const {
-  // Retrieve the byte offset and size of the resource.
-  u8 offset = location.get_attribute(ImageLocation::ATTRIBUTE_OFFSET);
-  u8 uncompressed_size = location.get_attribute(ImageLocation::ATTRIBUTE_UNCOMPRESSED);
-  u8 compressed_size = location.get_attribute(ImageLocation::ATTRIBUTE_COMPRESSED);
-  if (compressed_size != 0) {
-    ResourceMark rm;
-    u1* compressed_data;
-    // If not memory mapped read in bytes.
-    if (!MemoryMapImage) {
-      // Allocate buffer for compression.
-      compressed_data = NEW_RESOURCE_ARRAY(u1, compressed_size);
-      // Read bytes from offset beyond the image index.
-      bool is_read = read_at(compressed_data, compressed_size, _index_size + offset);
-      guarantee(is_read, "error reading from image or short read");
-    } else {
-      compressed_data = get_data_address() + offset;
-    }
-    // Get image string table.
-    const ImageStrings strings = get_strings();
-    // Decompress resource.
-    ImageDecompressor::decompress_resource(compressed_data, uncompressed_data, uncompressed_size,
-            &strings, false);
-  } else {
-    // Read bytes from offset beyond the image index.
-    bool is_read = read_at(uncompressed_data, uncompressed_size, _index_size + offset);
-    guarantee(is_read, "error reading from image or short read");
-  }
-}
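
For reference, the removed ImageStrings::hash_code/find pair above is the heart of the jimage index lookup. Below is a minimal standalone sketch of that two-step perfect-hash lookup, assuming a native-endian int32_t redirect table; the constants mirror the removed code, while the function names and the unsigned hash arithmetic are illustrative choices.

#include <cstddef>
#include <cstdint>

static const int32_t NOT_FOUND = -1;
static const int32_t HASH_MULTIPLIER = 0x01000193;

// Hash the UTF-8 bytes of the string, masking the result to keep it non-negative.
// Arithmetic is done in uint32_t to avoid signed overflow; the result matches
// the removed ImageStrings::hash_code.
static int32_t hash_code(const char* s, int32_t seed = HASH_MULTIPLIER) {
  uint32_t h = (uint32_t)seed;
  for (const unsigned char* p = (const unsigned char*)s; *p != 0; p++) {
    h = (h * (uint32_t)HASH_MULTIPLIER) ^ *p;
  }
  return (int32_t)(h & 0x7FFFFFFF);
}

// Two-step perfect-hash lookup over the redirect table:
//   value == 0  ->  not found
//   value <  0  ->  (-1 - value) is the final index
//   value >  0  ->  value is a new seed; rehash to get the final index
// The caller still has to verify the entry at the returned index (false positives).
static int32_t find_index(const char* name, const int32_t* redirect, int32_t length) {
  if (redirect == NULL || length == 0) {
    return NOT_FOUND;
  }
  int32_t value = redirect[hash_code(name) % length];
  if (value > 0) {
    return hash_code(name, value) % length;
  } else if (value < 0) {
    return -1 - value;
  }
  return NOT_FOUND;
}
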
--- a/hotspot/src/share/vm/classfile/imageFile.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,602 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_CLASSFILE_IMAGEFILE_HPP
-#define SHARE_VM_CLASSFILE_IMAGEFILE_HPP
-
-#include "classfile/classLoader.hpp"
-#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
-#include "utilities/endian.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/growableArray.hpp"
-
-// Image files are an alternate file format for storing classes and resources. The
-// goal is to supply file access which is faster and smaller than the jar format.
-// It should be noted that unlike jars, information stored in an image is in native
-// endian format. This allows the image to be mapped into memory without endian
-// translation.  This also means that images are platform dependent.
-//
-// Image files are structured as three sections;
-//
-//         +-----------+
-//         |  Header   |
-//         +-----------+
-//         |           |
-//         |   Index   |
-//         |           |
-//         +-----------+
-//         |           |
-//         |           |
-//         | Resources |
-//         |           |
-//         |           |
-//         +-----------+
-//
-// The header contains information related to identification and description of
-// contents.
-//
-//         +-------------------------+
-//         |   Magic (0xCAFEDADA)    |
-//         +------------+------------+
-//         | Major Vers | Minor Vers |
-//         +------------+------------+
-//         |          Flags          |
-//         +-------------------------+
-//         |      Resource Count     |
-//         +-------------------------+
-//         |       Table Length      |
-//         +-------------------------+
-//         |      Attributes Size    |
-//         +-------------------------+
-//         |       Strings Size      |
-//         +-------------------------+
-//
-// Magic - means of identifying validity of the file.  This avoids requiring a
-//         special file extension.
-// Major vers, minor vers - differences in version numbers indicate structural
-//                          changes in the image.
-// Flags - various image wide flags (future).
-// Resource count - number of resources in the file.
-// Table length - the length of lookup tables used in the index.
-// Attributes size - number of bytes in the region used to store location attribute
-//                   streams.
-// Strings size - the size of the region used to store strings used by the
-//                index and meta data.
-//
-// The index contains information related to resource lookup. The algorithm
-// used for lookup is "A Practical Minimal Perfect Hashing Method"
-// (http://homepages.dcc.ufmg.br/~nivio/papers/wea05.pdf). Given a path string
-// in the form /<module>/<package>/<base>.<extension>, return the resource location
-// information;
-//
-//     redirectIndex = hash(path, DEFAULT_SEED) % table_length;
-//     redirect = redirectTable[redirectIndex];
-//     if (redirect == 0) return not found;
-//     locationIndex = redirect < 0 ? -1 - redirect : hash(path, redirect) % table_length;
-//     location = locationTable[locationIndex];
-//     if (!verify(location, path)) return not found;
-//     return location;
-//
-// Note: The hash function takes an initial seed value.  A different seed value
-// usually returns a different result for strings that would otherwise collide with
-// other seeds. The verify function guarantees the found resource location is
-// indeed the resource we are looking for.
-//
-// The following is the format of the index;
-//
-//         +-------------------+
-//         |   Redirect Table  |
-//         +-------------------+
-//         | Attribute Offsets |
-//         +-------------------+
-//         |   Attribute Data  |
-//         +-------------------+
-//         |      Strings      |
-//         +-------------------+
-//
-// Redirect Table - Array of 32-bit signed values representing actions that
-//                  should take place for hashed strings that map to that
-//                  value.  Negative values indicate no hash collision and can be
-//                  quickly converted to indices into attribute offsets.  Positive
-//                  values represent a new seed for hashing an index into attribute
-//                  offsets.  Zero indicates not found.
-// Attribute Offsets - Array of 32-bit unsigned values representing offsets into
-//                     attribute data.  Attribute offsets can be iterated to do a
-//                     full survey of resources in the image.  Offset of zero
-//                     indicates no attributes.
-// Attribute Data - Bytes representing compact attribute data for locations. (See
-//                  comments in ImageLocation.)
-// Strings - Collection of zero terminated UTF-8 strings used by the index and
-//           image meta data.  Each string is accessed by offset.  Each string is
-//           unique.  Offset zero is reserved for the empty string.
-//
-// Note that the memory mapped index assumes 32 bit alignment of each component
-// in the index.
-//
-// Endianness of an image.
-// An image booted by hotspot is always in native endian.  However, it is possible
-// to read (by the JDK) in alternate endian format.  Primarily, this is during
-// cross-platform scenarios.  For example, javac may need to read an embedded
-// image to access classes for cross compilation.
-//
-
-class ImageFileReader; // forward declaration
-
-// Manage image file string table.
-class ImageStrings VALUE_OBJ_CLASS_SPEC {
-private:
-  u1* _data; // Data bytes for strings.
-  u4 _size; // Number of bytes in the string table.
-public:
-  enum {
-    // Not found result from find routine.
-    NOT_FOUND = -1,
-    // Prime used to generate hash for Perfect Hashing.
-    HASH_MULTIPLIER = 0x01000193
-  };
-
-  ImageStrings(u1* data, u4 size) : _data(data), _size(size) {}
-
-  // Return the UTF-8 string beginning at offset.
-  inline const char* get(u4 offset) const {
-    guarantee(offset < _size, "offset exceeds string table size");
-    return (const char*)(_data + offset);
-  }
-
-  // Compute the Perfect Hashing hash code for the supplied UTF-8 string.
-  inline static u4 hash_code(const char* string) {
-    return hash_code(string, HASH_MULTIPLIER);
-  }
-
-  // Compute the Perfect Hashing hash code for the supplied string, starting at seed.
-  static s4 hash_code(const char* string, s4 seed);
-
-  // Match up a string in a perfect hash table.  Result still needs validation
-  // for precise match.
-  static s4 find(Endian* endian, const char* name, s4* redirect, u4 length);
-
-  // Test to see if UTF-8 string begins with the start UTF-8 string.  If so,
-  // return non-NULL address of remaining portion of string.  Otherwise, return
-  // NULL.  Used to test sections of a path without copying from image string
-  // table.
-  static const char* starts_with(const char* string, const char* start);
-
-  // Test to see if UTF-8 string begins with start char.  If so, return non-NULL
-  // address of remaining portion of string.  Otherwise, return NULL.  Used
-  // to test a character of a path without copying.
-  inline static const char* starts_with(const char* string, const char ch) {
-    return *string == ch ? string + 1 : NULL;
-  }
-};
-
-// Manage image file location attribute data.  Within an image, a location's
-// attributes are compressed into a stream of bytes.  An attribute stream is
-// composed of individual attribute sequences.  Each attribute sequence begins with
-// a header byte containing the attribute 'kind' (upper 5 bits of header) and the
-// 'length' less 1 (lower 3 bits of header) of bytes that follow containing the
-// attribute value.  Attribute values present as most significant byte first.
-//
-// Ex. Container offset (ATTRIBUTE_OFFSET) 0x33562 would be represented as 0x2A
-// (kind = 5, length = 3), 0x03, 0x35, 0x62.
-//
-// An attribute stream is terminated with a header kind of ATTRIBUTE_END (header
-// byte of zero.)
-//
-// ImageLocation inflates the stream into individual values stored in the long
-// array _attributes. This allows an attribute value to be quickly accessed by
-// direct indexing. Unspecified values default to zero.
-//
-// Notes:
-//  - Even though ATTRIBUTE_END is used to mark the end of the attribute stream,
-//    streams will contain zero byte values to represent lesser significant bits.
-//    Thus, detecting a zero byte is not sufficient to detect the end of an attribute
-//    stream.
-//  - ATTRIBUTE_OFFSET represents the number of bytes from the beginning of the region
-//    storing the resources.  Thus, in an image this represents the number of bytes
-//    after the index.
-//  - Currently, compressed resources are represented by having a non-zero
-//    ATTRIBUTE_COMPRESSED value.  This represents the number of bytes stored in the
-//    image, and the value of ATTRIBUTE_UNCOMPRESSED represents number of bytes of the
-//    inflated resource in memory. If the ATTRIBUTE_COMPRESSED is zero then the value
-//    of ATTRIBUTE_UNCOMPRESSED represents both the number of bytes in the image and
-//    in memory.  In the future, additional compression techniques will be used and
-//    represented differently.
-//  - Package strings include trailing slash and extensions include prefix period.
-//
-class ImageLocation VALUE_OBJ_CLASS_SPEC {
-public:
-  enum {
-    ATTRIBUTE_END,          // End of attribute stream marker
-    ATTRIBUTE_MODULE,       // String table offset of module name
-    ATTRIBUTE_PARENT,       // String table offset of resource path parent
-    ATTRIBUTE_BASE,         // String table offset of resource path base
-    ATTRIBUTE_EXTENSION,    // String table offset of resource path extension
-    ATTRIBUTE_OFFSET,       // Container byte offset of resource
-    ATTRIBUTE_COMPRESSED,   // In image byte size of the compressed resource
-    ATTRIBUTE_UNCOMPRESSED, // In memory byte size of the uncompressed resource
-    ATTRIBUTE_COUNT         // Number of attribute kinds
-  };
-
-private:
-  // Values of inflated attributes.
-  u8 _attributes[ATTRIBUTE_COUNT];
-
-  // Return the attribute value number of bytes.
-  inline static u1 attribute_length(u1 data) {
-    return (data & 0x7) + 1;
-  }
-
-  // Return the attribute kind.
-  inline static u1 attribute_kind(u1 data) {
-    u1 kind = data >> 3;
-    guarantee(kind < ATTRIBUTE_COUNT, "invalid attribute kind");
-    return kind;
-  }
-
-  // Return the attribute length.
-  inline static u8 attribute_value(u1* data, u1 n) {
-    guarantee(0 < n && n <= 8, "invalid attribute value length");
-    u8 value = 0;
-    // Most significant bytes first.
-    for (u1 i = 0; i < n; i++) {
-      value <<= 8;
-      value |= data[i];
-    }
-    return value;
-  }
-
-public:
-  ImageLocation() {
-    clear_data();
-  }
-
-  ImageLocation(u1* data) {
-    clear_data();
-    set_data(data);
-  }
-
-  // Inflates the attribute stream into individual values stored in the long
-  // array _attributes. This allows an attribute value to be quickly accessed by
-  // direct indexing. Unspecified values default to zero.
-  void set_data(u1* data);
-
-  // Zero all attribute values.
-  void clear_data();
-
-  // Retrieve an attribute value from the inflated array.
-  inline u8 get_attribute(u1 kind) const {
-    guarantee(ATTRIBUTE_END < kind && kind < ATTRIBUTE_COUNT, "invalid attribute kind");
-    return _attributes[kind];
-  }
-
-  // Retrieve an attribute string value from the inflated array.
-  inline const char* get_attribute(u4 kind, const ImageStrings& strings) const {
-    return strings.get((u4)get_attribute(kind));
-  }
-};
-
-//
-// NOTE: needs revision.
-// Each loader requires set of module meta data to identify which modules and
-// packages are managed by that loader.  Currently, there is one image file per
-// builtin loader, so only one  module meta data resource per file.
-//
-// Each element in the module meta data is a native endian 4 byte integer.  Note
-// that entries with zero offsets for string table entries should be ignored (
-// padding for hash table lookup.)
-//
-// Format:
-//    Count of package to module entries
-//    Count of module to package entries
-//    Perfect Hash redirect table[Count of package to module entries]
-//    Package to module entries[Count of package to module entries]
-//        Offset to package name in string table
-//        Offset to module name in string table
-//    Perfect Hash redirect table[Count of module to package entries]
-//    Module to package entries[Count of module to package entries]
-//        Offset to module name in string table
-//        Count of packages in module
-//        Offset to first package in packages table
-//    Packages[]
-//        Offset to package name in string table
-//
-// Manage the image module meta data.
-class ImageModuleData : public CHeapObj<mtClass> {
-  class Header VALUE_OBJ_CLASS_SPEC {
-  private:
-    u4 _ptm_count;      // Count of package to module entries
-    u4 _mtp_count;      // Count of module to package entries
-  public:
-    inline u4 ptm_count(Endian* endian) const { return endian->get(_ptm_count); }
-    inline u4 mtp_count(Endian* endian) const { return endian->get(_mtp_count); }
-  };
-
-  // Hashtable entry
-  class HashData VALUE_OBJ_CLASS_SPEC {
-  private:
-    u4 _name_offset;    // Name offset in string table
-  public:
-    inline s4 name_offset(Endian* endian) const { return endian->get(_name_offset); }
-  };
-
-  // Package to module hashtable entry
-  class PTMData : public HashData {
-  private:
-    u4 _module_name_offset; // Module name offset in string table
-  public:
-    inline s4 module_name_offset(Endian* endian) const { return endian->get(_module_name_offset); }
-  };
-
-  // Module to package hashtable entry
-  class MTPData : public HashData {
-  private:
-    u4 _package_count;     // Number of packages in module
-    u4 _package_offset;    // Offset in package list
-  public:
-    inline u4 package_count(Endian* endian)  const { return endian->get(_package_count); }
-    inline u4 package_offset(Endian* endian) const { return endian->get(_package_offset); }
-  };
-
-  const ImageFileReader* _image_file; // Source image file
-  Endian* _endian;                    // Endian handler
-  ImageStrings _strings;              // Image file strings
-  u1* _data;                          // Module data resource data
-  u8 _data_size;                      // Size of resource data
-  Header* _header;                    // Module data header
-  s4* _ptm_redirect;                  // Package to module hashtable redirect
-  PTMData* _ptm_data;                 // Package to module data
-  s4* _mtp_redirect;                  // Module to packages hashtable redirect
-  MTPData* _mtp_data;                 // Module to packages data
-  s4* _mtp_packages;                  // Package data (name offsets)
-
-  // Return a string from the string table.
-  inline const char* get_string(u4 offset) {
-    return _strings.get(offset);
-  }
-
-  inline u4 mtp_package(u4 index) {
-    return _endian->get(_mtp_packages[index]);
-  }
-
-public:
-  ImageModuleData(const ImageFileReader* image_file, const char* module_data_name);
-  ~ImageModuleData();
-
-  // Return the name of the module data resource.
-  static void module_data_name(char* buffer, const char* image_file_name);
-
-  // Return the module in which a package resides.  Returns NULL if not found.
-  const char* package_to_module(const char* package_name);
-
-  // Returns all the package names in a module.  Returns NULL if module not found.
-  GrowableArray<const char*>* module_to_packages(const char* module_name);
-};
-
-// Image file header, starting at offset 0.
-class ImageHeader VALUE_OBJ_CLASS_SPEC {
-private:
-  u4 _magic;           // Image file marker
-  u4 _version;         // Image file major version number
-  u4 _flags;           // Image file flags
-  u4 _resource_count;  // Number of resources in file
-  u4 _table_length;    // Number of slots in index tables
-  u4 _locations_size;  // Number of bytes in attribute table
-  u4 _strings_size;    // Number of bytes in string table
-
-public:
-  u4 magic() const { return _magic; }
-  u4 magic(Endian* endian) const { return endian->get(_magic); }
-  void set_magic(Endian* endian, u4 magic) { return endian->set(_magic, magic); }
-
-  u4 major_version(Endian* endian) const { return endian->get(_version) >> 16; }
-  u4 minor_version(Endian* endian) const { return endian->get(_version) & 0xFFFF; }
-  void set_version(Endian* endian, u4 major_version, u4 minor_version) {
-    return endian->set(_version, major_version << 16 | minor_version);
-  }
-
-  u4 flags(Endian* endian) const { return endian->get(_flags); }
-  void set_flags(Endian* endian, u4 value) { return endian->set(_flags, value); }
-
-  u4 resource_count(Endian* endian) const { return endian->get(_resource_count); }
-  void set_resource_count(Endian* endian, u4 count) { return endian->set(_resource_count, count); }
-
-  u4 table_length(Endian* endian) const { return endian->get(_table_length); }
-  void set_table_length(Endian* endian, u4 count) { return endian->set(_table_length, count); }
-
-  u4 locations_size(Endian* endian) const { return endian->get(_locations_size); }
-  void set_locations_size(Endian* endian, u4 size) { return endian->set(_locations_size, size); }
-
-  u4 strings_size(Endian* endian) const { return endian->get(_strings_size); }
-  void set_strings_size(Endian* endian, u4 size) { return endian->set(_strings_size, size); }
-};
-
-// Max path length limit independent of platform.  Windows max path is 1024,
-// other platforms use 4096.  The JCK fails several tests when 1024 is used.
-#define IMAGE_MAX_PATH 4096
-
-// Manage the image file.
-// ImageFileReader manages the content of an image file.
-// Initially, the header of the image file is read for validation.  If valid,
-// values in the header are used to calculate the size of the image index.  The
-// index is then memory mapped to allow load on demand and sharing.  The
-// -XX:+MemoryMapImage flag determines if the entire file is loaded (server use.)
-// An image can be used by Hotspot and multiple reference points in the JDK, thus
-// it is desirable to share a reader.  To accommodate sharing, a share table is
-// defined (see ImageFileReaderTable in imageFile.cpp).  To track the number of
-// uses, ImageFileReader keeps a use count (_use).  Use is incremented when
-// 'opened' by a reference point and decremented when 'closed'.  When the use
-// count reaches zero, the ImageFileReader is actually closed and discarded.
-class ImageFileReader : public CHeapObj<mtClass> {
-private:
-  // Manage a number of image files such that an image can be shared across
-  // multiple uses (ex. loader.)
-  static GrowableArray<ImageFileReader*>* _reader_table;
-
-  char* _name;         // Name of image
-  s4 _use;             // Use count
-  int _fd;             // File descriptor
-  Endian* _endian;     // Endian handler
-  u8 _file_size;       // File size in bytes
-  ImageHeader _header; // Image header
-  size_t _index_size;  // Total size of index
-  u1* _index_data;     // Raw index data
-  s4* _redirect_table; // Perfect hash redirect table
-  u4* _offsets_table;  // Location offset table
-  u1* _location_bytes; // Location attributes
-  u1* _string_bytes;   // String table
-
-  ImageFileReader(const char* name, bool big_endian);
-  ~ImageFileReader();
-
-  // Compute number of bytes in image file index.
-  inline u8 index_size() {
-    return sizeof(ImageHeader) +
-      table_length() * sizeof(u4) * 2 + locations_size() + strings_size();
-  }
-
-public:
-  enum {
-    // Image file marker.
-    IMAGE_MAGIC = 0xCAFEDADA,
-    // Endian inverted Image file marker.
-    IMAGE_MAGIC_INVERT = 0xDADAFECA,
-    // Image file major version number.
-    MAJOR_VERSION = 1,
-    // Image file minor version number.
-    MINOR_VERSION = 0
-  };
-
-  // Open an image file, reuse structure if file already open.
-  static ImageFileReader* open(const char* name, bool big_endian = Endian::is_big_endian());
-
-  // Close an image file if the file is not in use elsewhere.
-  static void close(ImageFileReader *reader);
-
-  // Return an id for the specified ImageFileReader.
-  static u8 readerToID(ImageFileReader *reader);
-
-  // Validate the image id.
-  static bool idCheck(u8 id);
-
-  // Return the ImageFileReader for the specified id.
-  static ImageFileReader* idToReader(u8 id);
-
-  // Open image file for read access.
-  bool open();
-
-  // Close image file.
-  void close();
-
-  // Read directly from the file.
-  bool read_at(u1* data, u8 size, u8 offset) const;
-
-  inline Endian* endian() const { return _endian; }
-
-  // Retrieve name of image file.
-  inline const char* name() const {
-    return _name;
-  }
-
-  // Retrieve size of image file.
-  inline u8 file_size() const {
-    return _file_size;
-  }
-
-  // Return first address of index data.
-  inline u1* get_index_address() const {
-    return _index_data;
-  }
-
-  // Return first address of resource data.
-  inline u1* get_data_address() const {
-    return _index_data + _index_size;
-  }
-
-  // Get the size of the index data.
-  size_t get_index_size() const {
-    return _index_size;
-  }
-
-  inline u4 table_length() const {
-    return _header.table_length(_endian);
-  }
-
-  inline u4 locations_size() const {
-    return _header.locations_size(_endian);
-  }
-
-  inline u4 strings_size() const {
-    return _header.strings_size(_endian);
-  }
-
-  inline u4* offsets_table() const {
-    return _offsets_table;
-  }
-
-  // Increment use count.
-  inline void inc_use() {
-    _use++;
-  }
-
-  // Decrement use count.
-  inline bool dec_use() {
-    return --_use == 0;
-  }
-
-  // Return a string table accessor.
-  inline const ImageStrings get_strings() const {
-    return ImageStrings(_string_bytes, _header.strings_size(_endian));
-  }
-
-  // Return location attribute stream at offset.
-  inline u1* get_location_offset_data(u4 offset) const {
-    guarantee((u4)offset < _header.locations_size(_endian),
-              "offset exceeds location attributes size");
-    return offset != 0 ? _location_bytes + offset : NULL;
-  }
-
-  // Return location attribute stream for location i.
-  inline u1* get_location_data(u4 index) const {
-    guarantee((u4)index < _header.table_length(_endian),
-              "index exceeds location count");
-    u4 offset = _endian->get(_offsets_table[index]);
-
-    return get_location_offset_data(offset);
-  }
-
-  // Find the location attributes associated with the path.  Returns true if
-  // the location is found, false otherwise.
-  bool find_location(const char* path, ImageLocation& location) const;
-
-  // Assemble the location path.
-  void location_path(ImageLocation& location, char* path, size_t max) const;
-
-  // Verify that a found location matches the supplied path.
-  bool verify_location(ImageLocation& location, const char* path) const;
-
-  // Return the resource for the supplied path.
-  void get_resource(ImageLocation& location, u1* uncompressed_data) const;
-};
-#endif // SHARE_VM_CLASSFILE_IMAGEFILE_HPP
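
For reference, the ImageLocation comments above describe how a location's attributes are packed: each entry is one header byte (kind in the upper 5 bits, length minus one in the lower 3 bits) followed by the value bytes, most significant byte first, and a zero header terminates the stream. Below is a minimal decoding sketch that uses fixed-width standard types in place of HotSpot's u1/u8; the function name and the corruption check are illustrative.

#include <cstdint>

// Attribute kinds, mirroring the removed ImageLocation enum:
// END, MODULE, PARENT, BASE, EXTENSION, OFFSET, COMPRESSED, UNCOMPRESSED.
enum { ATTRIBUTE_COUNT = 8 };

// Inflate a zero-terminated attribute stream into a fixed array of values.
static bool decode_attributes(const uint8_t* data, uint64_t attributes[ATTRIBUTE_COUNT]) {
  for (int i = 0; i < ATTRIBUTE_COUNT; i++) {
    attributes[i] = 0;  // unspecified attributes default to zero
  }
  for (uint8_t header = *data; header != 0; header = *data) {
    uint8_t kind = header >> 3;        // upper 5 bits
    uint8_t n = (header & 0x7) + 1;    // lower 3 bits hold length - 1
    if (kind >= ATTRIBUTE_COUNT) {
      return false;                    // corrupt stream
    }
    uint64_t value = 0;
    for (uint8_t i = 0; i < n; i++) {  // most significant byte first
      value = (value << 8) | data[1 + i];
    }
    attributes[kind] = value;
    data += n + 1;                     // skip the header byte and the value bytes
  }
  return true;
}
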
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/jimage.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "jni.h"
+
+// Opaque reference to a JImage file.
+class JImageFile;
+// Opaque reference to an image file resource location.
+typedef jlong JImageLocationRef;
+
+// Max path length limit independent of platform.  Windows max path is 1024,
+// other platforms use 4096.  The JCK fails several tests when 1024 is used.
+#define JIMAGE_MAX_PATH 4096
+
+// JImage Error Codes
+
+// The image file is not prefixed with 0xCAFEDADA
+#define JIMAGE_BAD_MAGIC (-1)
+// The image file does not have a compatible (translatable) version
+#define JIMAGE_BAD_VERSION (-2)
+// The image file content is malformed
+#define JIMAGE_CORRUPTED (-3)
+
+/*
+ * JImageOpen - Given the supplied full path file name, open an image file. This
+ * function will also initialize tables and retrieve meta-data necessary to
+ * satisfy other functions in the API. If the image file has been previously
+ * open, a new open request will share memory and resources used by the previous
+ * open. A call to JImageOpen should be balanced by a call to JImageClose, to
+ * release memory and resources used. If the image file is not found or cannot
+ * be opened, then NULL is returned and error will contain a reason for the
+ * failure; a positive value for a system error number, negative for a jimage
+ * specific error (see JImage Error Codes.)
+ *
+ *  Ex.
+ *   jint error;
+ *   JImageFile* jimage = (*JImageOpen)(JAVA_HOME "lib/modules/bootmodules.jimage", &error);
+ *   if (jimage == NULL) {
+ *     tty->print_cr("JImage failed to open: %d", error);
+ *     ...
+ *   }
+ *   ...
+ */
+
+extern "C" JImageFile* JIMAGE_Open(const char *name, jint* error);
+
+typedef JImageFile* (*JImageOpen_t)(const char *name, jint* error);
+
+/*
+ * JImageClose - Given the supplied open image file (see JImageOpen), release
+ * memory and resources used by the open file and close the file. If the image
+ * file is shared by other uses, release and close is deferred until the last use
+ * is also closed.
+ *
+ * Ex.
+ *  (*JImageClose)(image);
+ */
+
+extern "C" void JIMAGE_Close(JImageFile* jimage);
+
+typedef void (*JImageClose_t)(JImageFile* jimage);
+
+
+/*
+ * JImagePackageToModule - Given an open image file (see JImageOpen) and the name
+ * of a package, return the name of the module where the package resides. If the
+ * package does not exist in the image file, the function returns NULL.
+ * The resulting string should not be released. All strings are
+ * utf-8, zero byte terminated.
+ *
+ * Ex.
+ *  const char* package = (*JImagePackageToModule)(image, "java/lang");
+ *  tty->print_cr("%s", package);
+ *  -> java.base
+ */
+
+extern "C" const char * JIMAGE_PackageToModule(JImageFile* jimage, const char* package_name);
+
+typedef const char* (*JImagePackageToModule_t)(JImageFile* jimage, const char* package_name);
+
+
+/*
+ * JImageFindResource - Given an open image file (see JImageOpen), a module
+ * name, a version string and the name of a class/resource, return location
+ * information describing the resource and its size. If no resource is found, the
+ * function returns JIMAGE_NOT_FOUND and the value of size is undefined.
+ * The version number should be "9.0" and is not used in locating the resource.
+ * The resulting location should not be released.
+ * All strings are utf-8, zero byte terminated.
+ *
+ *  Ex.
+ *   jlong size;
+ *   JImageLocationRef location = (*JImageFindResource)(image, "java.base", "9.0", "java/lang/String.class", &size);
+ */
+extern "C" JImageLocationRef JIMAGE_FindResource(JImageFile* jimage,
+        const char* module_name, const char* version, const char* name,
+        jlong* size);
+
+typedef JImageLocationRef(*JImageFindResource_t)(JImageFile* jimage,
+        const char* module_name, const char* version, const char* name,
+        jlong* size);
+
+
+/*
+ * JImageGetResource - Given an open image file (see JImageOpen), a resource's
+ * location information (see JImageFindResource), a buffer of appropriate
+ * size and the buffer size, retrieve the bytes associated with the
+ * resource. If the size is less than the resource size then the read is truncated.
+ * If the size is greater than the resource size then the remainder of the buffer
+ * is zero filled.  The function will return the actual size of the resource.
+ *
+ * Ex.
+ *  jlong size;
+ *  JImageLocationRef location = (*JImageFindResource)(image, "java.base", "9.0", "java/lang/String.class", &size);
+ *  char* buffer = new char[size];
+ *  (*JImageGetResource)(image, location, buffer, size);
+ */
+extern "C" jlong JIMAGE_GetResource(JImageFile* jimage, JImageLocationRef location,
+        char* buffer, jlong size);
+
+typedef jlong(*JImageGetResource_t)(JImageFile* jimage, JImageLocationRef location,
+        char* buffer, jlong size);
+
+
+/*
+ * JImageResourceIterator - Given an open image file (see JImageOpen), a visitor
+ * function and a visitor argument, iterate through each of the image's resources.
+ * The visitor function is called with the image file, the module name, the
+ * package name, the base name, the extension and the visitor argument. The return
+ * value of the visitor function should be true, unless an early iteration exit is
+ * required. All strings are utf-8, zero byte terminated.
+ *
+ * Ex.
+ *   bool ctw_visitor(JImageFile* jimage, const char* module_name, const char* version, const char* package, const char* name, const char* extension, void* arg) {
+ *     if (strcmp(extension, "class") == 0) {
+ *       char path[JIMAGE_MAX_PATH];
+ *       Thread* THREAD = Thread::current();
+ *       jio_snprintf(path, JIMAGE_MAX_PATH - 1, "/%s/%s", package, name);
+ *       ClassLoader::compile_the_world_in(path, (Handle)arg, THREAD);
+ *       return !HAS_PENDING_EXCEPTION;
+ *     }
+ *     return true;
+ *   }
+ *   (*JImageResourceIterator)(image, ctw_visitor, loader);
+ */
+
+typedef bool (*JImageResourceVisitor_t)(JImageFile* jimage,
+        const char* module_name, const char* version, const char* package,
+        const char* name, const char* extension, void* arg);
+
+extern "C" void JIMAGE_ResourceIterator(JImageFile* jimage,
+        JImageResourceVisitor_t visitor, void *arg);
+
+typedef void (*JImageResourceIterator_t)(JImageFile* jimage,
+        JImageResourceVisitor_t visitor, void* arg);
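
The new jimage.hpp pairs each JIMAGE_* entry point with a function-pointer typedef, which lets a client bind the API at runtime rather than link against it. Below is a minimal usage sketch under that assumption; the library name, the resource path, and the zero-means-not-found check on the location ref are illustrative and not specified by this header.

#include <dlfcn.h>
#include <cstdio>
#include "jimage.hpp"  // the header added by this change

int main() {
  // Library name and resource path are illustrative only.
  void* lib = dlopen("libjimage.so", RTLD_NOW);
  if (lib == NULL) {
    fprintf(stderr, "cannot load jimage library\n");
    return 1;
  }
  JImageOpen_t         open_fn  = (JImageOpen_t)        dlsym(lib, "JIMAGE_Open");
  JImageFindResource_t find_fn  = (JImageFindResource_t)dlsym(lib, "JIMAGE_FindResource");
  JImageGetResource_t  get_fn   = (JImageGetResource_t) dlsym(lib, "JIMAGE_GetResource");
  JImageClose_t        close_fn = (JImageClose_t)       dlsym(lib, "JIMAGE_Close");
  if (open_fn == NULL || find_fn == NULL || get_fn == NULL || close_fn == NULL) {
    fprintf(stderr, "missing JIMAGE_* entry point\n");
    return 1;
  }

  jint error = 0;
  JImageFile* image = (*open_fn)("lib/modules/bootmodules.jimage", &error);
  if (image == NULL) {
    fprintf(stderr, "JImage failed to open: %d\n", (int)error);
    return 1;
  }

  jlong size = 0;
  JImageLocationRef location =
      (*find_fn)(image, "java.base", "9.0", "java/lang/String.class", &size);
  if (location != 0) {  // assumes a zero ref means "not found"
    char* buffer = new char[(size_t)size];
    jlong actual = (*get_fn)(image, location, buffer, size);
    fprintf(stderr, "read %lld bytes\n", (long long)actual);
    delete[] buffer;
  }

  (*close_fn)(image);
  dlclose(lib);
  return 0;
}
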
--- a/hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -66,7 +66,8 @@
   virtual void do_klass(Klass* k);
   void do_klass_nv(Klass* k);
 
-  virtual void do_class_loader_data(ClassLoaderData* cld);
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
+  void do_cld_nv(ClassLoaderData* cld);
 };
 
 class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
--- a/hotspot/src/share/vm/gc/cms/cmsOopClosures.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/cmsOopClosures.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -50,11 +50,11 @@
 
 inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
   ClassLoaderData* cld = k->class_loader_data();
-  do_class_loader_data(cld);
+  do_cld_nv(cld);
 }
 inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
 
-inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
+inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
   assert(_klass_closure._oop_closure == this, "Must be");
 
   bool claim = true;  // Must claim the class loader data before processing.
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -702,7 +702,7 @@
         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                     oop(bottom)) &&                                             \
         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
       bottom += _cfls->adjustObjectSize(word_sz);                               \
     } else {                                                                    \
       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
@@ -729,7 +729,7 @@
         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                     oop(bottom)) &&                                             \
         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
       bottom += _cfls->adjustObjectSize(word_sz);                               \
     } else {                                                                    \
       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
@@ -2989,7 +2989,7 @@
   assert(task_size > CardTableModRefBS::card_size_in_words &&
          (task_size %  CardTableModRefBS::card_size_in_words == 0),
          "Otherwise arithmetic below would be incorrect");
-  MemRegion span = _gen->reserved();
+  MemRegion span = _old_gen->reserved();
   if (low != NULL) {
     if (span.contains(low)) {
       // Align low down to  a card boundary so that
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -99,7 +99,7 @@
   BlockOffsetArrayNonContigSpace _bt;
 
   CMSCollector* _collector;
-  ConcurrentMarkSweepGeneration* _gen;
+  ConcurrentMarkSweepGeneration* _old_gen;
 
   // Data structures for free blocks (used during allocation/sweeping)
 
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -212,7 +212,7 @@
                                            use_adaptive_freelists,
                                            dictionaryChoice);
   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
-  _cmsSpace->_gen = this;
+  _cmsSpace->_old_gen = this;
 
   _gc_stats = new CMSGCStats();
 
@@ -359,13 +359,13 @@
                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
-    // for the next minor collection.  Use the padded average as
+    // for the next young collection.  Use the padded average as
     // a safety factor.
     cms_free -= expected_promotion;
 
     // Adjust by the safety factor.
     double cms_free_dbl = (double)cms_free;
-    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
+    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
     // Apply a further correction factor which tries to adjust
     // for recent occurance of concurrent mode failures.
     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
@@ -531,7 +531,7 @@
   if (CMSConcurrentMTEnabled) {
     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
       // just for now
-      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
+      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
     }
     if (ConcGCThreads > 1) {
       _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
@@ -592,7 +592,7 @@
   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 
   // Clip CMSBootstrapOccupancy between 0 and 100.
-  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
+  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
 
   // Now tell CMS generations the identity of their collector
   ConcurrentMarkSweepGeneration::set_collector(this);
@@ -613,7 +613,7 @@
     _end_addr = gch->end_addr();
     assert(_young_gen != NULL, "no _young_gen");
     _eden_chunk_index = 0;
-    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
+    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
   }
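
The reformatted _eden_chunk_capacity expression is an integer rounding trick: for any max_capacity m and grain g, (m + g) / g equals m / g + 1, that is, one slot per full sampling grain plus a spare. A tiny check of that identity (the constants here are made up, not the real CMSSamplingGrain):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t grain = 256;                      // stand-in for CMSSamplingGrain
      for (size_t max_capacity = 0; max_capacity < 100000; max_capacity++) {
        // (max + grain) / grain rounds up and always leaves one spare slot.
        assert((max_capacity + grain) / grain == max_capacity / grain + 1);
      }
      return 0;
    }
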
 
@@ -795,29 +795,22 @@
       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
-      gclog_or_tty->print_cr("  Desired free fraction %f",
-              desired_free_percentage);
-      gclog_or_tty->print_cr("  Maximum free fraction %f",
-              maximum_free_percentage);
-      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity()/1000);
-      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT,
-              desired_capacity/1000);
+      gclog_or_tty->print_cr("  Desired free fraction %f", desired_free_percentage);
+      gclog_or_tty->print_cr("  Maximum free fraction %f", maximum_free_percentage);
+      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity() / 1000);
+      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
       GenCollectedHeap* gch = GenCollectedHeap::heap();
       assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
       size_t young_size = gch->young_gen()->capacity();
       gclog_or_tty->print_cr("  Young gen size " SIZE_FORMAT, young_size / 1000);
-      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT,
-              unsafe_max_alloc_nogc()/1000);
-      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT,
-              contiguous_available()/1000);
-      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)",
-              expand_bytes);
+      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
+      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
+      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
     }
     // safe if expansion fails
     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
     if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("  Expanded free fraction %f",
-        ((double) free()) / capacity());
+      gclog_or_tty->print_cr("  Expanded free fraction %f", ((double) free()) / capacity());
     }
   } else {
     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
@@ -834,16 +827,14 @@
   return cmsSpace()->freelistLock();
 }
 
-HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
-                                                  bool   tlab) {
+HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
   CMSSynchronousYieldRequest yr;
-  MutexLockerEx x(freelistLock(),
-                  Mutex::_no_safepoint_check_flag);
+  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
   return have_lock_and_allocate(size, tlab);
 }
 
 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
-                                                  bool   tlab /* ignored */) {
+                                                                bool   tlab /* ignored */) {
   assert_lock_strong(freelistLock());
   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
   HeapWord* res = cmsSpace()->allocate(adjustedSize);
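
The reformatted allocate() above keeps the usual split between a public entry point that takes the free-list lock and a helper that assumes the lock is already held. A generic sketch of that shape, with std::mutex standing in for the HotSpot Mutex/MutexLockerEx machinery and illustrative names:

    #include <cstddef>
    #include <mutex>

    class FreeListSpace {
      std::mutex _freelist_lock;

      // Precondition: _freelist_lock is held by the caller
      // (the analogue of have_lock_and_allocate / assert_lock_strong).
      void* allocate_locked(size_t size) {
        return ::operator new(size);   // placeholder for the free-list search
      }

     public:
      void* allocate(size_t size) {
        std::lock_guard<std::mutex> x(_freelist_lock);
        return allocate_locked(size);
      }
    };
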
@@ -2426,7 +2417,7 @@
 
     gch->gen_process_roots(&srs,
                            GenCollectedHeap::OldGen,
-                           true,   // younger gens are roots
+                           true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
                            &notOlder,
@@ -2498,7 +2489,7 @@
 
     gch->gen_process_roots(&srs,
                            GenCollectedHeap::OldGen,
-                           true,   // younger gens are roots
+                           true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
                            &notOlder,
@@ -2952,12 +2943,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   assert(_collectorState == InitialMarking, "just checking");
 
-  // If there has not been a GC[n-1] since last GC[n] cycle completed,
-  // precede our marking with a collection of all
-  // younger generations to keep floating garbage to a minimum.
-  // XXX: we won't do this for now -- it's an optimization to be done later.
-
-  // already have locks
+  // Already have locks.
   assert_lock_strong(bitMapLock());
   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
 
@@ -3027,7 +3013,7 @@
 
       gch->gen_process_roots(&srs,
                              GenCollectedHeap::OldGen,
-                             true,   // younger gens are roots
+                             true,   // young gen as roots
                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
                              should_unload_classes(),
                              &notOlder,
@@ -3037,7 +3023,7 @@
   }
 
   // Clear mod-union table; it will be dirtied in the prologue of
-  // CMS generation per each younger generation collection.
+  // CMS generation per each young generation collection.
 
   assert(_modUnionTable.isAllClear(),
        "Was cleared in most recent final checkpoint phase"
@@ -3057,7 +3043,7 @@
   // assert(!SafepointSynchronize::is_at_safepoint(),
   //        "inconsistent argument?");
   // However that wouldn't be right, because it's possible that
-  // a safepoint is indeed in progress as a younger generation
+  // a safepoint is indeed in progress as a young generation
   // stop-the-world GC happens even as we mark in this generation.
   assert(_collectorState == Marking, "inconsistent state?");
   check_correct_thread_executing();
@@ -3065,7 +3051,7 @@
 
   // Weak ref discovery note: We may be discovering weak
   // refs in this generation concurrent (but interleaved) with
-  // weak ref discovery by a younger generation collector.
+  // weak ref discovery by the young generation collector.
 
   CMSTokenSyncWithLocks ts(true, bitMapLock());
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
@@ -3095,7 +3081,7 @@
 
   // Note that when we do a marking step we need to hold the
   // bit map lock -- recall that direct allocation (by mutators)
-  // and promotion (by younger generation collectors) is also
+  // and promotion (by the young generation collector) is also
   // marking the bit map. [the so-called allocate live policy.]
   // Because the implementation of bit map marking is not
   // robust wrt simultaneous marking of bits in the same word,
@@ -4049,7 +4035,7 @@
 // one of these methods, please check the other method too.
 
 size_t CMSCollector::preclean_mod_union_table(
-  ConcurrentMarkSweepGeneration* gen,
+  ConcurrentMarkSweepGeneration* old_gen,
   ScanMarkedObjectsAgainCarefullyClosure* cl) {
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -4064,10 +4050,10 @@
   // generation, but we might potentially miss cards when the
   // generation is rapidly expanding while we are in the midst
   // of precleaning.
-  HeapWord* startAddr = gen->reserved().start();
-  HeapWord* endAddr   = gen->reserved().end();
-
-  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
+  HeapWord* startAddr = old_gen->reserved().start();
+  HeapWord* endAddr   = old_gen->reserved().end();
+
+  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
 
   size_t numDirtyCards, cumNumDirtyCards;
   HeapWord *nextAddr, *lastAddr;
@@ -4109,7 +4095,7 @@
       HeapWord* stop_point = NULL;
       stopTimer();
       // Potential yield point
-      CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
+      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
                                bitMapLock());
       startTimer();
       {
@@ -4117,7 +4103,7 @@
         verify_overflow_empty();
         sample_eden();
         stop_point =
-          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+          old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       }
       if (stop_point != NULL) {
         // The careful iteration stopped early either because it found an
@@ -4152,15 +4138,15 @@
 // below are largely identical; if you need to modify
 // one of these methods, please check the other method too.
 
-size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
+size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
   ScanMarkedObjectsAgainCarefullyClosure* cl) {
  // strategy: it's similar to preclean_mod_union_table above, in that
   // we accumulate contiguous ranges of dirty cards, mark these cards
   // precleaned, then scan the region covered by these cards.
-  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
-  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
-
-  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
+  HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
+  HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
+
+  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
 
   size_t numDirtyCards, cumNumDirtyCards;
   HeapWord *lastAddr, *nextAddr;
@@ -4197,13 +4183,13 @@
 
     if (!dirtyRegion.is_empty()) {
       stopTimer();
-      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
+      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
       startTimer();
       sample_eden();
       verify_work_stacks_empty();
       verify_overflow_empty();
       HeapWord* stop_point =
-        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+        old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       if (stop_point != NULL) {
         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
                "Should only be AbortablePreclean.");
@@ -4623,7 +4609,7 @@
     ResourceMark rm;
     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
     for (int i = 0; i < array->length(); i++) {
-      par_mrias_cl.do_class_loader_data(array->at(i));
+      par_mrias_cl.do_cld_nv(array->at(i));
     }
 
     // We don't need to keep track of new CLDs anymore.
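
The remark code above now calls do_cld_nv() on the concrete closure instead of do_class_loader_data(). In HotSpot's closure hierarchy the _nv suffix conventionally marks a non-virtual entry point that call sites with a known closure type can bind statically; an illustration of that pairing (simplified stand-in classes, not the actual CLD closures):

    class ClassLoaderData;   // opaque for this sketch

    struct CLDClosureBase {
      virtual void do_cld(ClassLoaderData* cld) = 0;
      virtual ~CLDClosureBase() {}
    };

    struct MarkRefsIntoAndScanClosure : public CLDClosureBase {
      // Non-virtual variant: no vtable dispatch when the static type is known.
      void do_cld_nv(ClassLoaderData* cld) {
        (void)cld;                       // concrete marking work would go here
      }
      // Virtual variant simply forwards to the non-virtual one.
      virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
    };
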
@@ -5086,7 +5072,7 @@
   // preclean phase did of eden, plus the [two] tasks of
   // scanning the [two] survivor spaces. Further fine-grain
   // parallelization of the scanning of the survivor spaces
-  // themselves, and of precleaning of the younger gen itself
+  // themselves, and of precleaning of the young gen itself
   // is deferred to the future.
   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
 
@@ -5177,7 +5163,7 @@
 
     gch->gen_process_roots(&srs,
                            GenCollectedHeap::OldGen,
-                           true,  // younger gens as roots
+                           true,  // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
                            &mrias_cl,
@@ -5199,7 +5185,7 @@
     ResourceMark rm;
     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
     for (int i = 0; i < array->length(); i++) {
-      mrias_cl.do_class_loader_data(array->at(i));
+      mrias_cl.do_cld_nv(array->at(i));
     }
 
     // We don't need to keep track of new CLDs anymore.
@@ -5661,7 +5647,7 @@
   }
 }
 
-void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
+void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
   // We iterate over the space(s) underlying this generation,
   // checking the mark bit map to see if the bits corresponding
   // to specific blocks are marked or not. Blocks that are
@@ -5690,26 +5676,26 @@
   // check that we hold the requisite locks
   assert(have_cms_token(), "Should hold cms token");
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
-  assert_lock_strong(gen->freelistLock());
+  assert_lock_strong(old_gen->freelistLock());
   assert_lock_strong(bitMapLock());
 
   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
-  gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
-                                      _inter_sweep_estimate.padded_average(),
-                                      _intra_sweep_estimate.padded_average());
-  gen->setNearLargestChunk();
+  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
+                                          _inter_sweep_estimate.padded_average(),
+                                          _intra_sweep_estimate.padded_average());
+  old_gen->setNearLargestChunk();
 
   {
-    SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
-    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
+    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
+    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
     // We need to free-up/coalesce garbage/blocks from a
     // co-terminal free run. This is done in the SweepClosure
     // destructor; so, do not remove this scope, else the
     // end-of-sweep-census below will be off by a little bit.
   }
-  gen->cmsSpace()->sweep_completed();
-  gen->cmsSpace()->endSweepFLCensus(sweep_count());
+  old_gen->cmsSpace()->sweep_completed();
+  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
   if (should_unload_classes()) {                // unloaded classes this cycle,
     _concurrent_cycles_since_last_unload = 0;   // ... reset count
   } else {                                      // did not unload classes,
@@ -6324,12 +6310,12 @@
           // objArrays are precisely marked; restrict scanning
           // to dirty cards only.
           size = CompactibleFreeListSpace::adjustObjectSize(
-                   p->oop_iterate(_scanningClosure, mr));
+                   p->oop_iterate_size(_scanningClosure, mr));
         } else {
           // A non-array may have been imprecisely marked; we need
           // to scan object in its entirety.
           size = CompactibleFreeListSpace::adjustObjectSize(
-                   p->oop_iterate(_scanningClosure));
+                   p->oop_iterate_size(_scanningClosure));
         }
         #ifdef ASSERT
           size_t direct_size =
@@ -6417,7 +6403,7 @@
   // Note that we do not yield while we iterate over
   // the interior oops of p, pushing the relevant ones
   // on our marking stack.
-  size_t size = p->oop_iterate(_scanning_closure);
+  size_t size = p->oop_iterate_size(_scanning_closure);
   do_yield_check();
   // Observe that below, we do not abandon the preclean
   // phase as soon as we should; rather we empty the
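
Both preclean_mod_union_table and preclean_card_table above follow the strategy spelled out in their comments: accumulate contiguous runs of dirty cards, mark them precleaned, then scan the region the run covers. A schematic of just the accumulation step over a toy card table (none of the real CMS locking, yielding, or card-table types):

    #include <cstddef>
    #include <vector>

    struct CardRange {
      size_t first_card;
      size_t num_cards;
    };

    // Collect maximal contiguous runs of dirty cards.
    std::vector<CardRange> collect_dirty_ranges(const std::vector<bool>& dirty) {
      std::vector<CardRange> ranges;
      size_t i = 0;
      while (i < dirty.size()) {
        if (!dirty[i]) { i++; continue; }
        size_t start = i;
        while (i < dirty.size() && dirty[i]) {
          i++;                                   // extend the run
        }
        ranges.push_back(CardRange{start, i - start});
      }
      return ranges;
    }
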
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -723,7 +723,7 @@
 
  private:
   // Support for parallelizing young gen rescan in CMS remark phase
-  ParNewGeneration* _young_gen;  // the younger gen
+  ParNewGeneration* _young_gen;
 
   HeapWord** _top_addr;    // ... Top of Eden
   HeapWord** _end_addr;    // ... End of Eden
@@ -772,9 +772,9 @@
  private:
 
   // Concurrent precleaning work
-  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
+  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen,
                                   ScanMarkedObjectsAgainCarefullyClosure* cl);
-  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
+  size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
                              ScanMarkedObjectsAgainCarefullyClosure* cl);
   // Does precleaning work, returning a quantity indicative of
   // the amount of "useful work" done.
@@ -797,7 +797,7 @@
   void refProcessingWork();
 
   // Concurrent sweeping work
-  void sweepWork(ConcurrentMarkSweepGeneration* gen);
+  void sweepWork(ConcurrentMarkSweepGeneration* old_gen);
 
   // (Concurrent) resetting of support data structures
   void reset(bool concurrent);
@@ -1120,10 +1120,8 @@
   MemRegion used_region_at_save_marks() const;
 
   // Does a "full" (forced) collection invoked on this generation collect
-  // all younger generations as well? Note that the second conjunct is a
-  // hack to allow the collection of the younger gen first if the flag is
-  // set.
-  virtual bool full_collects_younger_generations() const {
+  // the young generation as well?
+  virtual bool full_collects_young_generation() const {
     return !ScavengeBeforeFullGC;
   }
 
@@ -1153,9 +1151,8 @@
 
   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
 
-  // Inform this (non-young) generation that a promotion failure was
-  // encountered during a collection of a younger generation that
-  // promotes into this generation.
+  // Inform this (old) generation that a promotion failure was
+  // encountered during a collection of the young generation.
   virtual void promotion_failure_occurred();
 
   bool should_collect(bool full, size_t size, bool tlab);
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -295,7 +295,7 @@
     promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
   }
 
-  // If the younger gen collections were skipped, then the
+  // If the young gen collection was skipped, then the
   // number of promoted bytes will be 0 and adding it to the
   // average will incorrectly lessen the average.  It is, however,
   // also possible that no promotion was needed.
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -39,23 +39,17 @@
 
 // ======= Concurrent Mark Sweep Thread ========
 
-// The CMS thread is created when Concurrent Mark Sweep is used in the
-// older of two generations in a generational memory system.
+ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
+CMSCollector* ConcurrentMarkSweepThread::_collector         = NULL;
+bool ConcurrentMarkSweepThread::_should_terminate           = false;
+int  ConcurrentMarkSweepThread::_CMS_flag                   = CMS_nil;
 
-ConcurrentMarkSweepThread*
-     ConcurrentMarkSweepThread::_cmst     = NULL;
-CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
-bool ConcurrentMarkSweepThread::_should_terminate = false;
-int  ConcurrentMarkSweepThread::_CMS_flag         = CMS_nil;
+volatile jint ConcurrentMarkSweepThread::_pending_yields    = 0;
 
-volatile jint ConcurrentMarkSweepThread::_pending_yields      = 0;
-
-SurrogateLockerThread*
-     ConcurrentMarkSweepThread::_slt = NULL;
+SurrogateLockerThread* ConcurrentMarkSweepThread::_slt      = NULL;
 SurrogateLockerThread::SLT_msg_type
      ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
-Monitor*
-     ConcurrentMarkSweepThread::_sltMonitor = NULL;
+Monitor* ConcurrentMarkSweepThread::_sltMonitor             = NULL;
 
 ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
   : ConcurrentGCThread() {
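
The hunk above only reflows the out-of-class definitions that C++ requires for static data members into one aligned block. The idiom itself, stand-alone and with illustrative names:

    class WorkerThread {
      static WorkerThread* _instance;
      static bool          _should_terminate;
      static int           _flag;
    };

    // Exactly one definition per static data member, kept together in the .cpp.
    WorkerThread* WorkerThread::_instance         = nullptr;
    bool          WorkerThread::_should_terminate = false;
    int           WorkerThread::_flag             = 0;
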
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -69,20 +69,28 @@
                                        Stack<oop, mtGC>* overflow_stacks_,
                                        size_t desired_plab_sz_,
                                        ParallelTaskTerminator& term_) :
-  _to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
-  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
+  _to_space(to_space_),
+  _old_gen(old_gen_),
+  _young_gen(young_gen_),
+  _thread_num(thread_num_),
+  _work_queue(work_queue_set_->queue(thread_num_)),
+  _to_space_full(false),
   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
   _ageTable(false), // false ==> not the global age table, no perf data.
   _to_space_alloc_buffer(desired_plab_sz_),
-  _to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
-  _to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
+  _to_space_closure(young_gen_, this),
+  _old_gen_closure(young_gen_, this),
+  _to_space_root_closure(young_gen_, this),
+  _old_gen_root_closure(young_gen_, this),
   _older_gen_closure(young_gen_, this),
   _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                       &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                       work_queue_set_, &term_),
-  _is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
+  _is_alive_closure(young_gen_),
+  _scan_weak_ref_closure(young_gen_, this),
   _keep_alive_closure(&_scan_weak_ref_closure),
-  _strong_roots_time(0.0), _term_time(0.0)
+  _strong_roots_time(0.0),
+  _term_time(0.0)
 {
   #if TASKQUEUE_STATS
   _term_attempts = 0;
@@ -90,8 +98,7 @@
   _overflow_refill_objs = 0;
   #endif // TASKQUEUE_STATS
 
-  _survivor_chunk_array =
-    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
+  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
   _hash_seed = 17;  // Might want to take time-based random value.
   _start = os::elapsedTime();
   _old_gen_closure.set_generation(old_gen_);
@@ -154,7 +161,6 @@
   }
 }
 
-
 void ParScanThreadState::trim_queues(int max_size) {
   ObjToScanQueue* queue = work_queue();
   do {
@@ -222,15 +228,12 @@
 }
 
 HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
-
-  // Otherwise, if the object is small enough, try to reallocate the
-  // buffer.
+  // If the object is small enough, try to reallocate the buffer.
   HeapWord* obj = NULL;
   if (!_to_space_full) {
     PLAB* const plab = to_space_alloc_buffer();
-    Space*            const sp   = to_space();
-    if (word_sz * 100 <
-        ParallelGCBufferWastePct * plab->word_sz()) {
+    Space* const sp  = to_space();
+    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
       // Is small enough; abandon this buffer and start a new one.
       plab->retire();
       size_t buf_size = plab->word_sz();
@@ -241,8 +244,7 @@
         size_t free_bytes = sp->free();
         while(buf_space == NULL && free_bytes >= min_bytes) {
           buf_size = free_bytes >> LogHeapWordSize;
-          assert(buf_size == (size_t)align_object_size(buf_size),
-                 "Invariant");
+          assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
           buf_space  = sp->par_allocate(buf_size);
           free_bytes = sp->free();
         }
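
The slow path reformatted above only starts a new PLAB when the request is smaller than ParallelGCBufferWastePct percent of the buffer (word_sz * 100 < pct * plab_word_sz); anything bigger is allocated individually so the retired buffer's tail isn't wasted on one large object. With made-up numbers, a 1024-word buffer and a 10 percent limit lets requests of up to 102 words trigger a refill:

    #include <cstddef>
    #include <iostream>

    int main() {
      const size_t plab_word_sz = 1024;  // hypothetical PLAB size in words
      const size_t waste_pct    = 10;    // stand-in for ParallelGCBufferWastePct
      const size_t requests[]   = {50, 102, 103, 500};
      for (size_t word_sz : requests) {
        bool refill = word_sz * 100 < waste_pct * plab_word_sz;
        std::cout << word_sz << " words: "
                  << (refill ? "retire PLAB and refill" : "allocate individually")
                  << "\n";
      }
      return 0;
    }
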
@@ -262,7 +264,6 @@
         // We're used up.
         _to_space_full = true;
       }
-
     } else {
       // Too large; allocate the object individually.
       obj = sp->par_allocate(word_sz);
@@ -271,7 +272,6 @@
   return obj;
 }
 
-
 void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
   to_space_alloc_buffer()->undo_allocation(obj, word_sz);
 }
@@ -288,7 +288,7 @@
   // Initializes states for the specified number of threads;
   ParScanThreadStateSet(int                     num_threads,
                         Space&                  to_space,
-                        ParNewGeneration&       gen,
+                        ParNewGeneration&       young_gen,
                         Generation&             old_gen,
                         ObjToScanQueueSet&      queue_set,
                         Stack<oop, mtGC>*       overflow_stacks_,
@@ -315,21 +315,25 @@
 
 private:
   ParallelTaskTerminator& _term;
-  ParNewGeneration&       _gen;
+  ParNewGeneration&       _young_gen;
   Generation&             _old_gen;
  public:
   bool is_valid(int id) const { return id < length(); }
   ParallelTaskTerminator* terminator() { return &_term; }
 };
 
-
-ParScanThreadStateSet::ParScanThreadStateSet(
-  int num_threads, Space& to_space, ParNewGeneration& gen,
-  Generation& old_gen, ObjToScanQueueSet& queue_set,
-  Stack<oop, mtGC>* overflow_stacks,
-  size_t desired_plab_sz, ParallelTaskTerminator& term)
+ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
+                                             Space& to_space,
+                                             ParNewGeneration& young_gen,
+                                             Generation& old_gen,
+                                             ObjToScanQueueSet& queue_set,
+                                             Stack<oop, mtGC>* overflow_stacks,
+                                             size_t desired_plab_sz,
+                                             ParallelTaskTerminator& term)
   : ResourceArray(sizeof(ParScanThreadState), num_threads),
-    _gen(gen), _old_gen(old_gen), _term(term)
+    _young_gen(young_gen),
+    _old_gen(old_gen),
+    _term(term)
 {
   assert(num_threads > 0, "sanity check!");
   assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
@@ -337,13 +341,12 @@
   // Initialize states.
   for (int i = 0; i < num_threads; ++i) {
     new ((ParScanThreadState*)_data + i)
-        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
+        ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
                            overflow_stacks, desired_plab_sz, term);
   }
 }
 
-inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
-{
+inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
   assert(i >= 0 && i < length(), "sanity check!");
   return ((ParScanThreadState*)_data)[i];
 }
@@ -357,8 +360,7 @@
   }
 }
 
-void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
-{
+void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
   _term.reset_for_reuse(active_threads);
   if (promotion_failed) {
     for (int i = 0; i < length(); ++i) {
@@ -368,36 +370,27 @@
 }
 
 #if TASKQUEUE_STATS
-void
-ParScanThreadState::reset_stats()
-{
+void ParScanThreadState::reset_stats() {
   taskqueue_stats().reset();
   _term_attempts = 0;
   _overflow_refills = 0;
   _overflow_refill_objs = 0;
 }
 
-void ParScanThreadStateSet::reset_stats()
-{
+void ParScanThreadStateSet::reset_stats() {
   for (int i = 0; i < length(); ++i) {
     thread_state(i).reset_stats();
   }
 }
 
-void
-ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
-{
+void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
   st->print_raw_cr("GC Termination Stats");
-  st->print_raw_cr("     elapsed  --strong roots-- "
-                   "-------termination-------");
-  st->print_raw_cr("thr     ms        ms       %   "
-                   "    ms       %   attempts");
-  st->print_raw_cr("--- --------- --------- ------ "
-                   "--------- ------ --------");
+  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
+  st->print_raw_cr("thr     ms        ms       %       ms       %   attempts");
+  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
 }
 
-void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
-{
+void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
   print_termination_stats_hdr(st);
 
   for (int i = 0; i < length(); ++i) {
@@ -405,23 +398,20 @@
     const double elapsed_ms = pss.elapsed_time() * 1000.0;
     const double s_roots_ms = pss.strong_roots_time() * 1000.0;
     const double term_ms = pss.term_time() * 1000.0;
-    st->print_cr("%3d %9.2f %9.2f %6.2f "
-                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
+    st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
                  i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                  term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
   }
 }
 
 // Print stats related to work queue activity.
-void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
-{
+void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
   st->print_raw_cr("GC Task Stats");
   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
 }
 
-void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
-{
+void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
   print_taskqueue_stats_hdr(st);
 
   TaskQueueStats totals;
@@ -443,8 +433,7 @@
 }
 #endif // TASKQUEUE_STATS
 
-void ParScanThreadStateSet::flush()
-{
+void ParScanThreadStateSet::flush() {
   // Work in this loop should be kept as lightweight as
   // possible since this might otherwise become a bottleneck
   // to scaling. Should we add heavy-weight work into this
@@ -454,12 +443,12 @@
 
     // Flush stats related to To-space PLAB activity and
     // retire the last buffer.
-    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_gen.plab_stats());
+    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());
 
     // Every thread has its own age table.  We need to merge
     // them all into one.
     ageTable *local_table = par_scan_state.age_table();
-    _gen.age_table()->merge(local_table);
+    _young_gen.age_table()->merge(local_table);
 
     // Inform old gen that we're done.
     _old_gen.par_promote_alloc_done(i);
@@ -478,8 +467,7 @@
 
 ParScanClosure::ParScanClosure(ParNewGeneration* g,
                                ParScanThreadState* par_scan_state) :
-  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
-{
+  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
   _boundary = _g->reserved().end();
 }
 
@@ -531,24 +519,23 @@
   ObjToScanQueue* work_q = par_scan_state()->work_queue();
 
   while (true) {
-
     // Scan to-space and old-gen objs until we run out of both.
     oop obj_to_scan;
     par_scan_state()->trim_queues(0);
 
     // We have no local work, attempt to steal from other threads.
 
-    // attempt to steal work from promoted.
+    // Attempt to steal work from promoted.
     if (task_queues()->steal(par_scan_state()->thread_num(),
                              par_scan_state()->hash_seed(),
                              obj_to_scan)) {
       bool res = work_q->push(obj_to_scan);
       assert(res, "Empty queue should have room for a push.");
 
-      //   if successful, goto Start.
+      // If successful, goto Start.
       continue;
 
-      // try global overflow list.
+      // Try global overflow list.
     } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
       continue;
     }
@@ -564,15 +551,17 @@
   par_scan_state()->end_term_time();
 }
 
-ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
-                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
+ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
+                             Generation* old_gen,
+                             HeapWord* young_old_boundary,
+                             ParScanThreadStateSet* state_set,
                              StrongRootsScope* strong_roots_scope) :
     AbstractGangTask("ParNewGeneration collection"),
     _young_gen(young_gen), _old_gen(old_gen),
     _young_old_boundary(young_old_boundary),
     _state_set(state_set),
     _strong_roots_scope(strong_roots_scope)
-  {}
+{}
 
 void ParNewGenTask::work(uint worker_id) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -595,8 +584,7 @@
   par_scan_state.start_strong_roots();
   gch->gen_process_roots(_strong_roots_scope,
                          GenCollectedHeap::YoungGen,
-                         true,  // Process younger gens, if any,
-                                // as strong roots.
+                         true,  // Process younger gens, if any, as strong roots.
                          GenCollectedHeap::SO_ScavengeCodeCache,
                          GenCollectedHeap::StrongAndWeakRoots,
                          &par_scan_state.to_space_root_closure(),
@@ -613,8 +601,7 @@
 #pragma warning( push )
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif
-ParNewGeneration::
-ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
+ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
   : DefNewGeneration(rs, initial_byte_size, "PCopy"),
   _overflow_list(NULL),
   _is_alive_closure(this),
@@ -625,20 +612,19 @@
   _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 
-  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
+  for (uint i = 0; i < ParallelGCThreads; i++) {
     ObjToScanQueue *q = new ObjToScanQueue();
     guarantee(q != NULL, "work_queue Allocation failure.");
-    _task_queues->register_queue(i1, q);
+    _task_queues->register_queue(i, q);
   }
 
-  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
-    _task_queues->queue(i2)->initialize();
+  for (uint i = 0; i < ParallelGCThreads; i++) {
+    _task_queues->queue(i)->initialize();
+  }
 
   _overflow_stacks = NULL;
   if (ParGCUseLocalOverflow) {
-
-    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
-    // with ','
+    // typedef to work around the NEW_C_HEAP_ARRAY macro, which cannot deal with ','
     typedef Stack<oop, mtGC> GCOopStack;
 
     _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
@@ -742,7 +728,7 @@
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 public:
   ParNewRefProcTaskProxy(ProcessTask& task,
-                         ParNewGeneration& gen,
+                         ParNewGeneration& young_gen,
                          Generation& old_gen,
                          HeapWord* young_old_boundary,
                          ParScanThreadStateSet& state_set);
@@ -768,11 +754,9 @@
     _old_gen(old_gen),
     _young_old_boundary(young_old_boundary),
     _state_set(state_set)
-{
-}
+{ }
 
-void ParNewRefProcTaskProxy::work(uint worker_id)
-{
+void ParNewRefProcTaskProxy::work(uint worker_id) {
   ResourceMark rm;
   HandleMark hm;
   ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
@@ -792,15 +776,12 @@
       _task(task)
   { }
 
-  virtual void work(uint worker_id)
-  {
+  virtual void work(uint worker_id) {
     _task.work(worker_id);
   }
 };
 
-
-void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
-{
+void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
@@ -812,8 +793,7 @@
                    _young_gen.promotion_failed());
 }
 
-void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
-{
+void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
@@ -821,8 +801,7 @@
   workers->run_task(&enq_task);
 }
 
-void ParNewRefProcTaskExecutor::set_single_threaded_mode()
-{
+void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
   _state_set.flush();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->save_marks();
@@ -830,7 +809,8 @@
 
 ScanClosureWithParBarrier::
 ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
-  ScanClosure(g, gc_barrier) {}
+  ScanClosure(g, gc_barrier)
+{ }
 
 EvacuateFollowersClosureGeneral::
 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
@@ -838,7 +818,7 @@
                                 OopsInGenClosure* older) :
   _gch(gch),
   _scan_cur_or_nonheap(cur), _scan_older(older)
-{}
+{ }
 
 void EvacuateFollowersClosureGeneral::do_void() {
   do {
@@ -850,7 +830,6 @@
   } while (!_gch->no_allocs_since_save_marks());
 }
 
-
 // A Generation that does parallel young-gen collection.
 
 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
@@ -996,9 +975,9 @@
     if (ZapUnusedHeapArea) {
       // This is now done here because of the piece-meal mangling which
       // can check for valid mangling at intermediate points in the
-      // collection(s).  When a minor collection fails to collect
+      // collection(s).  When a young collection fails to collect
       // sufficient space resizing of the young generation can occur
-      // an redistribute the spaces in the young generation.  Mangle
+      // and redistribute the spaces in the young generation.  Mangle
       // here so that unzapped regions don't get distributed to
       // other spaces.
       to()->mangle_unused_area();
@@ -1113,8 +1092,10 @@
 // thus avoiding the need to undo the copy as in
 // copy_to_survivor_space_avoiding_with_undo.
 
-oop ParNewGeneration::copy_to_survivor_space(
-        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
+oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
+                                             oop old,
+                                             size_t sz,
+                                             markOop m) {
   // In the sequential version, this assert also says that the object is
   // not forwarded.  That might not be the case here.  It is the case that
   // the caller observed it to be not forwarded at some time in the past.
@@ -1141,8 +1122,7 @@
   }
 
   if (new_obj == NULL) {
-    // Either to-space is full or we decided to promote
-    // try allocating obj tenured
+    // Either to-space is full or we decided to promote; try allocating obj tenured
 
     // Attempt to install a null forwarding pointer (atomically),
     // to claim the right to install the real forwarding pointer.
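
copy_to_survivor_space, reformatted above, races with other GC workers: as the trailing comment says, a thread first installs a null marker atomically to claim the right to install the real forwarding pointer. A generic, single-step sketch of the claim-by-CAS idea using a plain atomic pointer rather than the mark word:

    #include <atomic>

    struct OldObject {
      std::atomic<void*> forwardee{nullptr};
    };

    // Returns the copy that "wins": ours if the CAS succeeds, otherwise the
    // copy some other worker already installed (and ours should be undone).
    void* claim_forwardee(OldObject* obj, void* my_copy) {
      void* expected = nullptr;
      if (obj->forwardee.compare_exchange_strong(expected, my_copy)) {
        return my_copy;
      }
      return expected;   // loaded by the failed CAS: the winner's copy
    }
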
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -71,11 +71,7 @@
   ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
   ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
   ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
-  // One of these two will be passed to process_roots, which will
-  // set its generation.  The first is for two-gen configs where the
-  // old gen collects the perm gen; the second is for arbitrary configs.
-  // The second isn't used right now (it used to be used for the train, an
-  // incremental collector) but the declaration has been left as a reminder.
+  // Will be passed to process_roots to set its generation.
   ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
   // This closure will always be bound to the old gen; it will be used
   // in evacuate_followers.
@@ -85,7 +81,6 @@
   ParScanWeakRefClosure                _scan_weak_ref_closure;
   ParKeepAliveClosure                  _keep_alive_closure;
 
-
   Space* _to_space;
   Space* to_space() { return _to_space; }
 
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -1143,7 +1143,7 @@
   while (curr < end) {
     Prefetch::read(curr, interval);
     oop obj = oop(curr);
-    int size = obj->oop_iterate(&cl);
+    int size = obj->oop_iterate_size(&cl);
     assert(size == obj->size(), "sanity");
     curr += size;
   }
--- a/hotspot/src/share/vm/gc/g1/g1Allocator.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1Allocator.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -367,7 +367,7 @@
   _max = _bottom + HeapRegion::min_region_size_in_words();
 
   // Tell mark-sweep that objects in this region are not to be marked.
-  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
+  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);
 
   // Since we've modified the old set, call update_sizes.
   _g1h->g1mm()->update_sizes();
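
The call above changes from G1MarkSweep::mark_range_archive(range) to G1MarkSweep::set_range_archive(range, true); the extra flag lets the same entry point also clear the archive attribute, which the dealloc_archive_regions code later in this changeset uses with false. Schematically (illustrative types, not the G1MarkSweep bookkeeping):

    #include <cstddef>
    #include <set>

    struct ArchiveRanges {
      std::set<size_t> _archive_ids;             // ids of ranges flagged "archive"

      // One setter replaces the old mark-only entry point.
      void set_range_archive(size_t range_id, bool is_archive) {
        if (is_archive) {
          _archive_ids.insert(range_id);         // old mark_range_archive(id)
        } else {
          _archive_ids.erase(range_id);          // new: used when unmapping fails
        }
      }
    };
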
--- a/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1BlockOffsetTable.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/g1/g1BlockOffsetTable.hpp"
 #include "gc/g1/heapRegion.hpp"
+#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "gc/shared/space.hpp"
 
 inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
@@ -68,15 +69,7 @@
   check_index(right, "right index out of range");
   assert(left <= right, "indexes out of order");
   size_t num_cards = right - left + 1;
-  if (UseMemSetInBOT) {
-    memset(&_offset_array[left], offset, num_cards);
-  } else {
-    size_t i = left;
-    const size_t end = i + num_cards;
-    for (; i < end; i++) {
-      _offset_array[i] = offset;
-    }
-  }
+  memset_with_concurrent_readers(&_offset_array[left], offset, num_cards);
 }
 
 // Variant of index_for that does not check the index for validity.
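
The hunk above replaces the open-coded UseMemSetInBOT branch with the shared memset_with_concurrent_readers() helper from gc/shared. A sketch of the two strategies such a helper has to choose between, assuming (as the deleted code suggests) that plain memset is only usable where concurrent readers tolerate it and that a byte-at-a-time loop is the conservative fallback:

    #include <cstddef>
    #include <cstring>

    // Strategy 1: plain memset, fine where concurrent readers tolerate it.
    inline void fill_plain(void* to, int value, size_t size) {
      ::memset(to, value, size);
    }

    // Strategy 2: conservative fallback, single-byte stores only.
    inline void fill_bytewise(void* to, int value, size_t size) {
      volatile char* p = static_cast<volatile char*>(to);
      for (size_t i = 0; i < size; i++) {
        p[i] = static_cast<char>(value);
      }
    }
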
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/nmethod.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "oops/oop.inline.hpp"
+
+template <typename T>
+void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
+  _work->do_oop(p);
+  T oop_or_narrowoop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(oop_or_narrowoop)) {
+    oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+    HeapRegion* hr = _g1h->heap_region_containing_raw(o);
+    assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
+    hr->add_strong_code_root(_nm);
+  }
+}
+
+void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(oop* o) {
+  do_oop_work(o);
+}
+
+void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop(narrowOop* o) {
+  do_oop_work(o);
+}
+
+void G1CodeBlobClosure::do_code_blob(CodeBlob* cb) {
+  nmethod* nm = cb->as_nmethod_or_null();
+  if (nm != NULL) {
+    if (!nm->test_set_oops_do_mark()) {
+      _oc.set_nm(nm);
+      nm->oops_do(&_oc);
+      nm->fix_oop_relocations();
+    }
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "memory/iterator.hpp"
+
+class nmethod;
+
+class G1CodeBlobClosure : public CodeBlobClosure {
+  class HeapRegionGatheringOopClosure : public OopClosure {
+    G1CollectedHeap* _g1h;
+    OopClosure* _work;
+    nmethod* _nm;
+
+    template <typename T>
+    void do_oop_work(T* p);
+
+  public:
+    HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
+
+    void do_oop(oop* o);
+    void do_oop(narrowOop* o);
+
+    void set_nm(nmethod* nm) {
+      _nm = nm;
+    }
+  };
+
+  HeapRegionGatheringOopClosure _oc;
+public:
+  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
+
+  void do_code_blob(CodeBlob* cb);
+};
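
The two new files add a CodeBlobClosure that applies a wrapped OopClosure to each oop of an nmethod and then records that nmethod as a strong code root of the region the oop points into (HeapRegionGatheringOopClosure::do_oop_work above). The shape is a simple decorator; an illustration with placeholder bookkeeping instead of add_strong_code_root:

    #include <cstddef>

    struct OopClosure {
      virtual void do_oop(void** p) = 0;
      virtual ~OopClosure() {}
    };

    class GatheringOopClosure : public OopClosure {
      OopClosure* _work;      // the closure being decorated
      size_t      _recorded;  // stand-in for per-region strong-code-root bookkeeping
     public:
      explicit GatheringOopClosure(OopClosure* work) : _work(work), _recorded(0) {}

      virtual void do_oop(void** p) {
        _work->do_oop(p);     // forward to the wrapped closure first
        if (*p != nullptr) {
          _recorded++;        // real code: region containing *p records the nmethod
        }
      }

      size_t recorded() const { return _recorded; }
    };
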
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -65,6 +65,7 @@
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
+#include "runtime/init.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -949,6 +950,7 @@
 }
 
 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
   MutexLockerEx x(Heap_lock);
@@ -1037,12 +1039,13 @@
     }
 
     // Notify mark-sweep of the archive range.
-    G1MarkSweep::mark_range_archive(curr_range);
+    G1MarkSweep::set_range_archive(curr_range, true);
   }
   return true;
 }
 
 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
   MemRegion reserved = _hrm.reserved();
@@ -1125,6 +1128,81 @@
   return result;
 }
 
+void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  HeapWord* prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+  size_t size_used = 0;
+  size_t uncommitted_regions = 0;
+
+  // For each MemRegion, free the G1 regions that constitute it, and
+  // notify mark-sweep that the range is no longer to be considered 'archive.'
+  MutexLockerEx x(Heap_lock);
+  for (size_t i = 0; i < count; i++) {
+    HeapWord* start_address = ranges[i].start();
+    HeapWord* last_address = ranges[i].last();
+
+    assert(reserved.contains(start_address) && reserved.contains(last_address),
+           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                   p2i(start_address), p2i(last_address)));
+    assert(start_address > prev_last_addr,
+           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+                   p2i(start_address), p2i(prev_last_addr)));
+    size_used += ranges[i].byte_size();
+    prev_last_addr = last_address;
+
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+
+    // Check for ranges that start in the same G1 region in which the previous
+    // range ended, and adjust the start address so we don't try to free
+    // the same region again. If the current range is entirely within that
+    // region, skip it.
+    if (start_region == prev_last_region) {
+      start_address = start_region->end();
+      if (start_address > last_address) {
+        continue;
+      }
+      start_region = _hrm.addr_to_region(start_address);
+    }
+    prev_last_region = last_region;
+
+    // After verifying that each region was marked as an archive region by
+    // alloc_archive_regions, set it free and empty and uncommit it.
+    HeapRegion* curr_region = start_region;
+    while (curr_region != NULL) {
+      guarantee(curr_region->is_archive(),
+                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
+      uint curr_index = curr_region->hrm_index();
+      _old_set.remove(curr_region);
+      curr_region->set_free();
+      curr_region->set_top(curr_region->bottom());
+      if (curr_region != last_region) {
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region = NULL;
+      }
+      _hrm.shrink_at(curr_index, 1);
+      uncommitted_regions++;
+    }
+
+    // Notify mark-sweep that this is no longer an archive range.
+    G1MarkSweep::set_range_archive(ranges[i], false);
+  }
+
+  if (uncommitted_regions != 0) {
+    ergo_verbose1(ErgoHeapSizing,
+                  "attempt heap shrinking",
+                  ergo_format_reason("uncommitted archive regions")
+                  ergo_format_byte("total size"),
+                  HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
+  }
+  decrease_used(size_used);
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
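
The new dealloc_archive_regions above expects the same sorted, non-overlapping MemRegion array that alloc_archive_regions received; the only subtle part is skipping a G1 region that the previous range already freed because both ranges meet inside it. A toy version of just that start-address adjustment, using fixed-size regions and plain size_t addresses:

    #include <cstddef>

    const size_t region_words = 1024;                  // toy region size

    size_t region_index(size_t addr) { return addr / region_words; }
    size_t region_end(size_t index)  { return (index + 1) * region_words; }

    // Returns the first address of [start, last] still needing work, or
    // last + 1 if the previous range's final region swallowed it entirely.
    size_t adjusted_start(size_t start, size_t last, size_t prev_last_region_index) {
      if (region_index(start) == prev_last_region_index) {
        size_t bumped = region_end(prev_last_region_index);
        if (bumped > last) {
          return last + 1;                             // nothing left in this range
        }
        return bumped;
      }
      return start;
    }
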
@@ -2845,9 +2923,9 @@
 }
 
 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
-// must be smaller than the humongous object limit.
+// must be equal to the humongous object limit.
 size_t G1CollectedHeap::max_tlab_size() const {
-  return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
+  return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
 }
 
 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
@@ -4051,7 +4129,9 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
+        g1_policy()->finalize_cset(target_pause_time_ms);
+
+        evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
 
         register_humongous_regions_with_cset();
 
@@ -4175,7 +4255,10 @@
         // investigate this in CR 7178365.
         double sample_end_time_sec = os::elapsedTime();
         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
-        g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
+        g1_policy()->record_collection_pause_end(pause_time_ms);
+
+        evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
+        evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
 
         MemoryService::track_memory_usage();
 
@@ -4501,8 +4584,7 @@
                  bool only_young, bool claim)
         : _oop_closure(oop_closure),
           _oop_in_klass_closure(oop_closure->g1(),
-                                oop_closure->pss(),
-                                oop_closure->rp()),
+                                oop_closure->pss()),
           _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
           _claim(claim) {
 
@@ -4531,18 +4613,18 @@
       bool only_young = _g1h->collector_state()->gcs_are_young();
 
       // Non-IM young GC.
-      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, pss, rp);
+      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, pss);
       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
                                                                                only_young, // Only process dirty klasses.
                                                                                false);     // No need to claim CLDs.
       // IM young GC.
       //    Strong roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, pss, rp);
+      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, pss);
       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
                                                                                false, // Process all klasses.
                                                                                true); // Need to claim CLDs.
       //    Weak roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss, rp);
+      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss);
       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
                                                                                     false, // Process all klasses.
                                                                                     true); // Need to claim CLDs.
@@ -4582,9 +4664,9 @@
                                       worker_id);
 
       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
-      _root_processor->scan_remembered_sets(&push_heap_rs_cl,
-                                            weak_root_cl,
-                                            worker_id);
+      _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
+                                                      weak_root_cl,
+                                                      worker_id);
       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
 
       double term_sec = 0.0;
@@ -5241,9 +5323,9 @@
     G1ParScanThreadState*           pss = _pss[worker_id];
     pss->set_ref_processor(NULL);
 
-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss, NULL);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
 
     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
@@ -5341,9 +5423,9 @@
     pss->set_ref_processor(NULL);
     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
 
-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss, NULL);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
+    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
+
+    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
 
     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
@@ -5451,9 +5533,9 @@
   // closures while we're actually processing the discovered
   // reference objects.
 
-  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss, NULL);
-
-  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss, NULL);
+  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss);
+
+  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss);
 
   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -757,6 +757,12 @@
   // alloc_archive_regions, and after class loading has occurred.
   void fill_archive_regions(MemRegion* range, size_t count);
 
+  // For each of the specified MemRegions, uncommit the containing G1 regions
+  // which had been allocated by alloc_archive_regions. This should be called
+  // rather than fill_archive_regions at JVM init time if the archive file
+  // mapping failed, with the same non-overlapping and sorted MemRegion array.
+  void dealloc_archive_regions(MemRegion* range, size_t count);
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
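The new dealloc_archive_regions() is the failure-path counterpart of fill_archive_regions(): both take the same sorted, non-overlapping MemRegion array that was handed to alloc_archive_regions(). A hedged sketch of the intended call order at JVM init time; alloc_archive_regions() is referenced by the surrounding comments, while map_archive_file() is a hypothetical placeholder for the archive-mapping step that is outside this diff:

  MemRegion archive_ranges[2];                              // filled in by the archive-mapping code (not shown)
  size_t range_count = 2;                                   // example value, not from this patch
  g1h->alloc_archive_regions(archive_ranges, range_count);  // reserve G1 regions for the archive
  if (map_archive_file(archive_ranges, range_count)) {      // hypothetical mapping helper
    g1h->fill_archive_regions(archive_ranges, range_count);
  } else {
    // Mapping failed: give the regions back instead of filling them.
    g1h->dealloc_archive_regions(archive_ranges, range_count);
  }
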
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -181,15 +181,6 @@
     G1ErgoVerbose::set_enabled(false);
   }
 
-  // Verify PLAB sizes
-  const size_t region_size = HeapRegion::GrainWords;
-  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
-    char buffer[128];
-    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most " SIZE_FORMAT,
-                 OldPLABSize > region_size ? "Old" : "Young", region_size);
-    vm_exit_during_initialization(buffer);
-  }
-
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 
@@ -932,7 +923,7 @@
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
 
-void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
+void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
   double end_time_sec = os::elapsedTime();
   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
          "otherwise, the subtraction below does not make sense");
@@ -964,9 +955,6 @@
   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                           end_time_sec, _g1->gc_tracer_stw()->gc_id());
 
-  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
-  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
-
   if (update_stats) {
     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
     // this is where we update the allocation rate of the application
@@ -1883,7 +1871,7 @@
 }
 
 
-void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
+void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   double young_start_time_sec = os::elapsedTime();
 
   YoungList* young_list = _g1->young_list();
@@ -2093,7 +2081,6 @@
 
   double non_young_end_time_sec = os::elapsedTime();
   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
-  evacuation_info.set_collectionset_regions(cset_region_length());
 }
 
 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -604,10 +604,6 @@
 
   virtual G1CollectorPolicy* as_g1_policy() { return this; }
 
-  virtual CollectorPolicy::Name kind() {
-    return CollectorPolicy::G1CollectorPolicyKind;
-  }
-
   G1CollectorState* collector_state();
 
   G1GCPhaseTimes* phase_times() const { return _phase_times; }
@@ -634,13 +630,11 @@
   virtual HeapWord* satisfy_failed_allocation(size_t size,
                                               bool is_tlab);
 
-  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
-
   bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
 
   // Record the start and end of an evacuation pause.
   void record_collection_pause_start(double start_time_sec);
-  void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
+  void record_collection_pause_end(double pause_time_ms);
 
   // Record the start and end of a full collection.
   void record_full_collection_start();
@@ -682,6 +676,10 @@
     return _bytes_copied_during_gc;
   }
 
+  size_t collection_set_bytes_used_before() const {
+    return _collection_set_bytes_used_before;
+  }
+
   // Determine whether there are candidate regions so that the
   // next GC should be mixed. The two action strings are used
   // in the ergo output when the method returns true or false.
@@ -691,7 +689,7 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
+  void finalize_cset(double target_pause_time_ms);
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
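With record_collection_pause_end() and finalize_cset() no longer taking an EvacuationInfo&, the policy now exposes the raw numbers through accessors instead (the new collection_set_bytes_used_before() above, next to the existing bytes-copied getter). A hedged sketch of how a caller can populate EvacuationInfo itself; the setters and getters are taken from these hunks, but the exact call site in g1CollectedHeap.cpp is not part of the hunks shown here:

  // g1_policy() is assumed to be the usual policy accessor on G1CollectedHeap.
  evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
  evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
  evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
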
--- a/hotspot/src/share/vm/gc/g1/g1EvacStats.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1EvacStats.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -54,17 +54,46 @@
                      _allocated, _wasted, _region_end_waste, _unused, used()));
       _allocated = 1;
     }
-    // We account region end waste fully to PLAB allocation. This is not completely fair,
-    // but is a conservative assumption because PLABs may be sized flexibly while we
-    // cannot adjust direct allocations.
-    // In some cases, wasted_frac may become > 1 but that just reflects the problem
-    // with region_end_waste.
-    double wasted_frac    = (double)(_unused + _wasted + _region_end_waste) / (double)_allocated;
-    size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
-    if (target_refills == 0) {
-      target_refills = 1;
-    }
-    size_t cur_plab_sz = used() / target_refills;
+    // The size of the PLAB caps the amount of space that can be wasted at the
+    // end of the collection. In the worst case the last PLAB could be completely
+    // empty.
+    // This allows us to calculate the new PLAB size to achieve the
+    // TargetPLABWastePct given the latest memory usage and the assumption that
+    // the last buffer will be G1LastPLABAverageOccupancy percent full.
+    //
+    // E.g. assume that in the current GC 100 words were allocated and a
+    // TargetPLABWastePct of 10 had been set.
+    //
+    // So we could waste up to 10 words to meet that percentage. Given that we
+    // also assume that the last buffer is typically half-full, the new desired
+    // PLAB size is set to 20 words.
+    //
+    // The amount of allocation performed should be independent of the number of
+    // threads, and so should the maximum waste we can spend in total. So if we
+    // used n threads to allocate, each of them can spend at most waste/n words in
+    // a first rough approximation. The number of threads only comes into play later
+    // when retrieving the actual desired PLAB size.
+    //
+    // After calculating this optimal PLAB size the algorithm applies the usual
+    // exponential decaying average over this value to guess the next PLAB size.
+    //
+    // We account region end waste fully to PLAB allocation (in the calculation of
+    // what we consider as "used_for_waste_calculation" below). This is not
+    // completely fair, but is a conservative assumption because PLABs may be sized
+    // flexibly while we cannot adjust inline allocations.
+    // Allocation during GC will try to minimize region end waste so this impact
+    // should be minimal.
+    //
+    // We need to guard against underflow when calculating the amount of space actually
+    // used by objects in PLABs after subtracting the region end waste.
+    // Region end waste may be higher than the actual allocation. This may occur if many
+    // threads allocate nothing but a few rather large objects. In this degenerate
+    // case the PLAB size would quickly tend towards the minimum PLAB size, which is
+    // an acceptable reaction.
+    size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;
+
+    size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
+    size_t const cur_plab_sz = (double)total_waste_allowed / G1LastPLABAverageOccupancy;
     // Take historical weighted average
     _filter.sample(cur_plab_sz);
     // Clip from above and below, and align to object boundary
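The arithmetic above reproduces the 100-word example from the comment: with TargetPLABWastePct = 10 and G1LastPLABAverageOccupancy = 50.0 (the default from the g1_globals.hpp hunk below, both values in percent), 100 words of PLAB-relevant usage give total_waste_allowed = 1000 and a desired PLAB size of 20 words, so a half-empty last PLAB wastes exactly the allowed 10 words. A self-contained sketch of that calculation (plain C++; the flag values are hard-coded here purely for illustration):

  #include <cstddef>
  #include <cstdio>

  // Illustration only: mirrors the formula in the hunk above with the default flag values.
  static size_t desired_plab_size(size_t used_words, size_t region_end_waste_words,
                                  double target_plab_waste_pct,        // TargetPLABWastePct
                                  double last_plab_average_occupancy)  // G1LastPLABAverageOccupancy
  {
    size_t used_for_waste_calculation =
        used_words > region_end_waste_words ? used_words - region_end_waste_words : 0;
    double total_waste_allowed = (double)used_for_waste_calculation * target_plab_waste_pct;
    return (size_t)(total_waste_allowed / last_plab_average_occupancy);
  }

  int main() {
    // 100 words used, no region end waste, 10% target waste, last PLAB 50% full on average.
    std::printf("%zu\n", desired_plab_size(100, 0, 10.0, 50.0));  // prints 20
    return 0;
  }
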
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -74,7 +74,7 @@
   assert(rp != NULL, "should be non-NULL");
   assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
 
-  GenMarkSweep::_ref_processor = rp;
+  GenMarkSweep::set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);
 
   // When collecting the permanent generation Method*s may be moving,
@@ -108,7 +108,7 @@
   JvmtiExport::gc_epilogue();
 
   // refs processing: clean slate
-  GenMarkSweep::_ref_processor = NULL;
+  GenMarkSweep::set_ref_processor(NULL);
 }
 
 
@@ -310,9 +310,9 @@
                                  HeapRegion::GrainBytes);
 }
 
-void G1MarkSweep::mark_range_archive(MemRegion range) {
+void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
   assert(_archive_check_enabled, "archive range check not enabled");
-  _archive_region_map.set_by_address(range, true);
+  _archive_region_map.set_by_address(range, is_archive);
 }
 
 bool G1MarkSweep::in_archive_range(oop object) {
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -58,8 +58,8 @@
   // Create the _archive_region_map which is used to identify archive objects.
   static void enable_archive_object_check();
 
-  // Mark the regions containing the specified address range as archive regions.
-  static void mark_range_archive(MemRegion range);
+  // Set the regions containing the specified address range as archive/non-archive.
+  static void set_range_archive(MemRegion range, bool is_archive);
 
   // Check if an object is in an archive region using the _archive_region_map.
   static bool in_archive_range(oop object);
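Renaming mark_range_archive() to set_range_archive() and adding the bool makes the archive map reversible, which is presumably what the failure path around dealloc_archive_regions() (g1CollectedHeap.hpp hunk above) needs. A hedged sketch of both directions, assuming only the G1MarkSweep API shown here:

  // Archive mapping succeeded: record the range so in_archive_range() recognizes its objects.
  G1MarkSweep::set_range_archive(range, true);
  // Archive mapping failed and the regions were handed back: clear the same range again.
  G1MarkSweep::set_range_archive(range, false);
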
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -125,8 +125,7 @@
   template <class T> void do_oop_work(T* p);
 
 public:
-  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
-                   ReferenceProcessor* rp) :
+  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
       G1ParCopyHelper(g1, par_scan_state) {
     assert(_ref_processor == NULL, "sanity");
   }
@@ -141,7 +140,6 @@
 
   G1CollectedHeap*      g1()  { return _g1; };
   G1ParScanThreadState* pss() { return _par_scan_state; }
-  ReferenceProcessor*   rp()  { return _ref_processor; };
 };
 
 typedef G1ParCopyClosure<G1BarrierNone,  G1MarkNone>             G1ParScanExtRootClosure;
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -186,6 +186,21 @@
   return dest(state);
 }
 
+void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
+                                                  oop const old, size_t word_sz, uint age,
+                                                  HeapWord * const obj_ptr,
+                                                  const AllocationContext_t context) const {
+  G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
+  if (alloc_buf->contains(obj_ptr)) {
+    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
+                                                             dest_state.value() == InCSetState::Old,
+                                                             alloc_buf->word_sz());
+  } else {
+    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
+                                                              dest_state.value() == InCSetState::Old);
+  }
+}
+
 oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                  oop const old,
                                                  markOop const old_mark) {
@@ -219,6 +234,10 @@
         return handle_evacuation_failure_par(old, old_mark);
       }
     }
+    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
+      // The events are checked individually as part of the actual commit
+      report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
+    }
   }
 
   assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
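report_promotion_event() only runs when the STW GC tracer reports promotion events, and it distinguishes copies that landed in the newly refilled PLAB from direct allocations by asking the destination buffer whether it contains obj_ptr. A condensed sketch of that decision, using names from the hunk above:

  if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
    G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
    if (alloc_buf->contains(obj_ptr)) {
      // report_promotion_in_new_plab_event(...), including the PLAB's word size
    } else {
      // report_promotion_outside_plab_event(...)
    }
  }
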
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -173,6 +173,10 @@
                                   bool previous_plab_refill_failed);
 
   inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
+
+  void report_promotion_event(InCSetState const dest_state,
+                              oop const old, size_t word_sz, uint age,
+                              HeapWord * const obj_ptr, const AllocationContext_t context) const;
  public:
 
   oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -26,6 +26,7 @@
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -228,12 +229,15 @@
 };
 
 void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
-                      CodeBlobClosure* code_root_cl,
+                      OopClosure* non_heap_roots,
                       uint worker_i) {
   double rs_time_start = os::elapsedTime();
+
+  G1CodeBlobClosure code_root_cl(non_heap_roots);
+
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
 
-  ScanRSClosure scanRScl(oc, code_root_cl, worker_i);
+  ScanRSClosure scanRScl(oc, &code_root_cl, worker_i);
 
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
@@ -295,7 +299,7 @@
 }
 
 void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
-                                           CodeBlobClosure* code_root_cl,
+                                           OopClosure* non_heap_roots,
                                            uint worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
@@ -318,7 +322,7 @@
   DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
 
   updateRS(&into_cset_dcq, worker_i);
-  scanRS(oc, code_root_cl, worker_i);
+  scanRS(oc, non_heap_roots, worker_i);
 
   // We now clear the cached values of _cset_rs_update_cl for this worker
   _cset_rs_update_cl[worker_i] = NULL;
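The net effect of this hunk and the g1RootProcessor.cpp hunk further down is that callers no longer build a CodeBlobClosure themselves: they hand an OopClosure for the non-heap roots to G1RemSet, which wraps it in a G1CodeBlobClosure internally (now taken from g1CodeBlobClosure.hpp). A sketch of the new call shape, as seen from the evacuation code earlier in this patch:

  // Per-worker remembered set scan; weak_root_cl is an OopClosure*, not a CodeBlobClosure* anymore.
  G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
  _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl, weak_root_cl, worker_id);
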
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -85,7 +85,7 @@
   // invoked "blk->set_region" to set the "from" region correctly
   // beforehand.)
   //
-  // Invoke code_root_cl->do_code_blob on the unmarked nmethods
+  // Apply non_heap_roots to the oops of the unmarked nmethods
   // on the strong code roots list for each region in the
   // collection set.
   //
@@ -95,7 +95,7 @@
   // the "i" passed to the calling thread's work(i) function.
   // In the sequential case this param will be ignored.
   void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
-                                   CodeBlobClosure* code_root_cl,
+                                   OopClosure* non_heap_roots,
                                    uint worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
@@ -107,7 +107,7 @@
   void cleanup_after_oops_into_collection_set_do();
 
   void scanRS(G1ParPushHeapRSClosure* oc,
-              CodeBlobClosure* code_root_cl,
+              OopClosure* non_heap_roots,
               uint worker_i);
 
   void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
--- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -28,6 +28,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/g1/bufferingOopClosure.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
@@ -40,57 +41,6 @@
 #include "runtime/mutex.hpp"
 #include "services/management.hpp"
 
-class G1CodeBlobClosure : public CodeBlobClosure {
-  class HeapRegionGatheringOopClosure : public OopClosure {
-    G1CollectedHeap* _g1h;
-    OopClosure* _work;
-    nmethod* _nm;
-
-    template <typename T>
-    void do_oop_work(T* p) {
-      _work->do_oop(p);
-      T oop_or_narrowoop = oopDesc::load_heap_oop(p);
-      if (!oopDesc::is_null(oop_or_narrowoop)) {
-        oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
-        HeapRegion* hr = _g1h->heap_region_containing_raw(o);
-        assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
-        hr->add_strong_code_root(_nm);
-      }
-    }
-
-  public:
-    HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
-
-    void do_oop(oop* o) {
-      do_oop_work(o);
-    }
-
-    void do_oop(narrowOop* o) {
-      do_oop_work(o);
-    }
-
-    void set_nm(nmethod* nm) {
-      _nm = nm;
-    }
-  };
-
-  HeapRegionGatheringOopClosure _oc;
-public:
-  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
-
-  void do_code_blob(CodeBlob* cb) {
-    nmethod* nm = cb->as_nmethod_or_null();
-    if (nm != NULL) {
-      if (!nm->test_set_oops_do_mark()) {
-        _oc.set_nm(nm);
-        nm->oops_do(&_oc);
-        nm->fix_oop_relocations();
-      }
-    }
-  }
-};
-
-
 void G1RootProcessor::worker_has_discovered_all_strong_classes() {
   assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
 
@@ -321,14 +271,6 @@
   }
 }
 
-void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
-                                           OopClosure* scan_non_heap_weak_roots,
-                                           uint worker_i) {
-  G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
-
-  _g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
-}
-
 uint G1RootProcessor::n_workers() const {
   return _srs.n_threads();
 }
--- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -107,13 +107,6 @@
                          CLDClosure* clds,
                          CodeBlobClosure* blobs);
 
-  // Apply scan_rs to all locations in the union of the remembered sets for all
-  // regions in the collection set
-  // (having done "set_region" to indicate the region in which the root resides),
-  void scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
-                            OopClosure* scan_non_heap_weak_roots,
-                            uint worker_i);
-
   // Number of worker threads used by the root processor.
   uint n_workers() const;
 };
--- a/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -27,6 +27,7 @@
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/satbQueue.hpp"
+#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -108,15 +109,7 @@
   jbyte *const first = byte_for(mr.start());
   jbyte *const last = byte_after(mr.last());
 
-  // Below we may use an explicit loop instead of memset() because on
-  // certain platforms memset() can give concurrent readers phantom zeros.
-  if (UseMemSetInBOT) {
-    memset(first, g1_young_gen, last - first);
-  } else {
-    for (jbyte* i = first; i < last; i++) {
-      *i = g1_young_gen;
-    }
-  }
+  memset_with_concurrent_readers(first, g1_young_gen, last - first);
 }
 
 #ifndef PRODUCT
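memset_with_concurrent_readers() centralizes the platform workaround the removed comment describes: on some platforms a plain memset() can expose transient zero bytes to concurrent readers, so a byte-at-a-time store loop is used instead. A conceptual sketch of what such a helper does; this is an assumption for illustration, not the shared implementation behind gc/shared/memset_with_concurrent_readers.hpp, which is not part of the hunks shown here:

  // Conceptual sketch only: write each byte individually so a concurrent reader
  // never observes anything other than the old byte value or the new one.
  inline void memset_with_concurrent_readers_sketch(void* to, int value, size_t size) {
    char* dest = static_cast<char*>(to);
    for (size_t i = 0; i < size; i++) {
      dest[i] = static_cast<char>(value);
    }
  }
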
@@ -207,7 +200,7 @@
   // Otherwise, log it.
   G1SATBCardTableLoggingModRefBS* g1_bs =
     barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
-  g1_bs->write_ref_field_work(field, new_val);
+  g1_bs->write_ref_field_work(field, new_val, false);
 }
 
 void
--- a/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1SATBCardTableModRefBS.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -147,6 +147,10 @@
  private:
   G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;
+
+ protected:
+  virtual void write_ref_field_work(void* field, oop new_val, bool release);
+
  public:
   static size_t compute_size(size_t mem_region_size_in_words) {
     size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
@@ -165,8 +169,6 @@
 
   virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
 
-  void write_ref_field_work(void* field, oop new_val, bool release = false);
-
   // Can be called from static contexts.
   static void write_ref_field_static(void* field, oop new_val);
 
--- a/hotspot/src/share/vm/gc/g1/g1_globals.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/g1_globals.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -82,6 +82,11 @@
           "If true, enable reference discovery during concurrent "          \
           "marking and reference processing at the end of remark.")         \
                                                                             \
+  experimental(double, G1LastPLABAverageOccupancy, 50.0,                    \
+               "The expected average occupancy of the last PLAB in "        \
+               "percent.")                                                  \
+               range(0.001, 100.0)                                          \
+                                                                            \
   product(size_t, G1SATBBufferSize, 1*K,                                    \
           "Number of entries in an SATB log buffer.")                       \
                                                                             \
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -68,7 +68,7 @@
   // or it was allocated after marking finished, then we add it. Otherwise
   // we can safely ignore the object.
   if (!g1h->is_obj_dead(oop(cur), _hr)) {
-    oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
+    oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
   } else {
     oop_size = _hr->block_size(cur);
   }
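The oop_iterate() to oop_iterate_size() renames here and in the parallel-GC hunks below keep behavior identical but make explicit that the return value is the object's size in HeapWords, which the callers use to advance. A sketch of the iteration idiom, with names assumed from the immutableSpace/mutableSpace hunks below:

  // Walk a contiguous range of objects, applying the closure and stepping by each object's size.
  HeapWord* addr = bottom();
  while (addr < end()) {
    addr += oop(addr)->oop_iterate_size(cl);
  }
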
--- a/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -426,7 +426,7 @@
       (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
     uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
 
-    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
+    shrink_at(idx_last_found + num_last_found - to_remove, to_remove);
 
     cur = idx_last_found;
     removed += to_remove;
@@ -437,6 +437,17 @@
   return removed;
 }
 
+void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
+#ifdef ASSERT
+  for (uint i = index; i < (index + num_regions); i++) {
+    assert(is_available(i), err_msg("Expected available region at index %u", i));
+    assert(at(i)->is_empty(), err_msg("Expected empty region at index %u", i));
+    assert(at(i)->is_free(), err_msg("Expected free region at index %u", i));
+  }
+#endif
+  uncommit_regions(index, num_regions);
+}
+
 uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
   guarantee(start_idx < _allocated_heapregions_length, "checking");
   guarantee(res_idx != NULL, "checking");
--- a/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -241,6 +241,10 @@
   // Return the actual number of uncommitted regions.
   uint shrink_by(uint num_regions_to_remove);
 
+  // Uncommit a number of regions starting at the specified index. The regions must be
+  // available, empty, and free.
+  void shrink_at(uint index, size_t num_regions);
+
   void verify();
 
   // Do some sanity checking.
--- a/hotspot/src/share/vm/gc/g1/heapRegionType.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/heapRegionType.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -35,7 +35,7 @@
   // We encode the value of the heap region type so the generation can be
   // determined quickly. The tag is split into two parts:
   //
-  //   major type (young, humongous)                         : top N-1 bits
+  //   major type (young, old, humongous, archive)           : top N-1 bits
   //   minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
   //
   // If there's need to increase the number of minor types in the
--- a/hotspot/src/share/vm/gc/parallel/cardTableExtension.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/cardTableExtension.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -89,7 +89,7 @@
     CheckForUnmarkedOops object_check(_young_gen, _card_table);
     obj->oop_iterate_no_header(&object_check);
     if (object_check.has_unmarked_oop()) {
-      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
+      guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
     }
   }
 };
--- a/hotspot/src/share/vm/gc/parallel/cardTableExtension.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/cardTableExtension.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -56,13 +56,7 @@
   CardTableExtension(MemRegion whole_heap) :
     CardTableModRefBS(
       whole_heap,
-      // Concrete tag should be BarrierSet::CardTableExtension.
-      // That will presently break things in a bunch of places though.
-      // The concrete tag is used as a dispatch key in many places, and
-      // CardTableExtension does not correctly dispatch in some of those
-      // uses. This will be addressed as part of a reorganization of the
-      // BarrierSet hierarchy.
-      BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableExtension))
+      BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
     { }
 
   // Scavenge support
--- a/hotspot/src/share/vm/gc/parallel/immutableSpace.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/immutableSpace.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -44,7 +44,7 @@
   HeapWord* t = end();
   // Could call objects iterate, but this is easier.
   while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(cl);
+    obj_addr += oop(obj_addr)->oop_iterate_size(cl);
   }
 }
 
--- a/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -213,15 +213,6 @@
   return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
 }
 
-void MutableSpace::oop_iterate(ExtendedOopClosure* cl) {
-  HeapWord* obj_addr = bottom();
-  HeapWord* t = top();
-  // Could call objects iterate, but this is easier.
-  while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(cl);
-  }
-}
-
 void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
   HeapWord* obj_addr = bottom();
   HeapWord* t = top();
--- a/hotspot/src/share/vm/gc/parallel/mutableSpace.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/mutableSpace.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -134,7 +134,6 @@
   bool cas_deallocate(HeapWord *obj, size_t size);
 
   // Iteration.
-  void oop_iterate(ExtendedOopClosure* cl);
   void oop_iterate_no_header(OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
 
--- a/hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -30,26 +30,22 @@
 #include "gc/parallel/psParallelCompact.hpp"
 #include "gc/parallel/psScavenge.hpp"
 
-inline size_t ParallelScavengeHeap::total_invocations()
-{
+inline size_t ParallelScavengeHeap::total_invocations() {
   return UseParallelOldGC ? PSParallelCompact::total_invocations() :
     PSMarkSweep::total_invocations();
 }
 
-inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
-{
+inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
   const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
   return size < eden_size / 2;
 }
 
-inline void ParallelScavengeHeap::invoke_scavenge()
-{
+inline void ParallelScavengeHeap::invoke_scavenge() {
   PSScavenge::invoke();
 }
 
 inline bool ParallelScavengeHeap::is_in_young(oop p) {
   // Assumes the old gen address range is lower than that of the young gen.
-  const void* loc = (void*) p;
   bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
   assert(result == young_gen()->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));
--- a/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -299,7 +299,7 @@
   // subtracted out.
   size_t eden_limit = max_eden_size;
 
-  const double gc_cost_limit = GCTimeLimit/100.0;
+  const double gc_cost_limit = GCTimeLimit / 100.0;
 
   // Which way should we go?
   // if pause requirement is not met
--- a/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -60,7 +60,7 @@
 
 void PSMarkSweep::initialize() {
   MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
-  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
+  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
   _counters = new CollectorCounters("PSMarkSweep", 1);
 }
 
--- a/hotspot/src/share/vm/gc/parallel/psOldGen.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psOldGen.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -486,12 +486,12 @@
   object_space()->verify();
 }
 class VerifyObjectStartArrayClosure : public ObjectClosure {
-  PSOldGen* _gen;
+  PSOldGen* _old_gen;
   ObjectStartArray* _start_array;
 
  public:
-  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
-    _gen(gen), _start_array(start_array) { }
+  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
+    _old_gen(old_gen), _start_array(start_array) { }
 
   virtual void do_object(oop obj) {
     HeapWord* test_addr = (HeapWord*)obj + 1;
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -958,7 +958,7 @@
 {
   // Update the from & to space pointers in space_info, since they are swapped
   // at each young gen gc.  Do the update unconditionally (even though a
-  // promotion failure does not swap spaces) because an unknown number of minor
+  // promotion failure does not swap spaces) because an unknown number of young
   // collections will have swapped the spaces an unknown number of times.
   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -303,7 +303,7 @@
     // completed(), which is desirable since a region must be claimed before it
     // can be completed.
     bool available() const { return _dc_and_los < dc_one; }
-    bool claimed() const   { return _dc_and_los >= dc_claimed; }
+    bool claimed()   const { return _dc_and_los >= dc_claimed; }
     bool completed() const { return _dc_and_los >= dc_completed; }
 
     // These are not atomic.
@@ -979,7 +979,6 @@
   static bool   _dwl_initialized;
 #endif  // #ifdef ASSERT
 
-
  public:
   static ParallelOldTracer* gc_tracer() { return &_gc_tracer; }
 
--- a/hotspot/src/share/vm/gc/parallel/psScavenge.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psScavenge.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -597,9 +597,9 @@
         // to allow resizes that may have been inhibited by the
         // relative location of the "to" and "from" spaces.
 
-        // Resizing the old gen at minor collects can cause increases
+        // Resizing the old gen at young collections can cause increases
         // that don't feed back to the generation sizing policy until
-        // a major collection.  Don't resize the old gen here.
+        // a full collection.  Don't resize the old gen here.
 
         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                         size_policy->calculated_survivor_size_in_bytes());
--- a/hotspot/src/share/vm/gc/parallel/psTasks.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psTasks.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -172,10 +172,10 @@
 
 void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
   // There are no old-to-young pointers if the old gen is empty.
-  assert(!_gen->object_space()->is_empty(),
+  assert(!_old_gen->object_space()->is_empty(),
     "Should not be called is there is no work");
-  assert(_gen != NULL, "Sanity");
-  assert(_gen->object_space()->contains(_gen_top) || _gen_top == _gen->object_space()->top(), "Sanity");
+  assert(_old_gen != NULL, "Sanity");
+  assert(_old_gen->object_space()->contains(_gen_top) || _gen_top == _old_gen->object_space()->top(), "Sanity");
   assert(_stripe_number < ParallelGCThreads, "Sanity");
 
   {
@@ -183,8 +183,8 @@
     CardTableExtension* card_table =
       barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
 
-    card_table->scavenge_contents_parallel(_gen->start_array(),
-                                           _gen->object_space(),
+    card_table->scavenge_contents_parallel(_old_gen->start_array(),
+                                           _old_gen->object_space(),
                                            _gen_top,
                                            pm,
                                            _stripe_number,
--- a/hotspot/src/share/vm/gc/parallel/psTasks.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psTasks.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -160,17 +160,17 @@
 
 class OldToYoungRootsTask : public GCTask {
  private:
-  PSOldGen* _gen;
+  PSOldGen* _old_gen;
   HeapWord* _gen_top;
   uint _stripe_number;
   uint _stripe_total;
 
  public:
-  OldToYoungRootsTask(PSOldGen *gen,
+  OldToYoungRootsTask(PSOldGen *old_gen,
                       HeapWord* gen_top,
                       uint stripe_number,
                       uint stripe_total) :
-    _gen(gen),
+    _old_gen(old_gen),
     _gen_top(gen_top),
     _stripe_number(stripe_number),
     _stripe_total(stripe_total) { }
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -106,14 +106,14 @@
   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
 {
   assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
-  _gen = (DefNewGeneration*)_gch->young_gen();
+  _young_gen = (DefNewGeneration*)_gch->young_gen();
 }
 
 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
   do {
     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
   } while (!_gch->no_allocs_since_save_marks());
-  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
+  guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 }
 
 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
@@ -200,8 +200,9 @@
   _from_space = new ContiguousSpace();
   _to_space   = new ContiguousSpace();
 
-  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
+  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
     vm_exit_during_initialization("Could not allocate a new gen space");
+  }
 
   // Compute the maximum eden and survivor space sizes. These sizes
   // are computed assuming the entire reserved space is committed.
@@ -655,7 +656,7 @@
     if (ZapUnusedHeapArea) {
       // This is now done here because of the piece-meal mangling which
       // can check for valid mangling at intermediate points in the
-      // collection(s).  When a minor collection fails to collect
+      // collection(s).  When a young collection fails to collect
       // sufficient space, resizing of the young generation can occur
       // and redistribute the spaces in the young generation.  Mangle
       // here so that unzapped regions don't get distributed to
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -193,7 +193,7 @@
 
   class FastEvacuateFollowersClosure: public VoidClosure {
     GenCollectedHeap* _gch;
-    DefNewGeneration* _gen;
+    DefNewGeneration* _young_gen;
     FastScanClosure* _scan_cur_or_nonheap;
     FastScanClosure* _scan_older;
   public:
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -57,8 +57,8 @@
   // each generation, allowing them in turn to examine the modified
   // field.
   //
-  // We could check that p is also in an older generation, but
-  // dirty cards in the youngest gen are never scanned, so the
+  // We could check that p is also in the old generation, but
+  // dirty cards in the young gen are never scanned, so the
   // extra check probably isn't worthwhile.
   if (GenCollectedHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
--- a/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -67,7 +67,7 @@
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
-  _ref_processor = rp;
+  set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);
 
   GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());
@@ -136,7 +136,7 @@
   }
 
   // refs processing: clean slate
-  _ref_processor = NULL;
+  set_ref_processor(NULL);
 
   // Update heap occupancy information which is used as
   // input to soft ref clearing policy at the next gc.
--- a/hotspot/src/share/vm/gc/serial/markSweep.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/markSweep.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -28,11 +28,20 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
+#include "gc/shared/specialized_oop_closures.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/instanceClassLoaderKlass.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
+#include "oops/instanceRefKlass.inline.hpp"
 #include "oops/methodData.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/stack.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1StringDedup.hpp"
+#endif // INCLUDE_ALL_GCS
 
 uint                    MarkSweep::_total_invocations = 0;
 
@@ -50,176 +59,101 @@
 
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
 
-void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
-void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
-
-MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
+MarkAndPushClosure            MarkSweep::mark_and_push_closure;
 CLDToOopClosure               MarkSweep::follow_cld_closure(&mark_and_push_closure);
 CLDToOopClosure               MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
 
-template <typename T>
-void MarkSweep::MarkAndPushClosure::do_oop_nv(T* p)       { mark_and_push(p); }
-void MarkSweep::MarkAndPushClosure::do_oop(oop* p)        { do_oop_nv(p); }
-void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p)  { do_oop_nv(p); }
+inline void MarkSweep::mark_object(oop obj) {
+#if INCLUDE_ALL_GCS
+  if (G1StringDedup::is_enabled()) {
+    // We must enqueue the object before it is marked
+    // as we otherwise can't read the object's age.
+    G1StringDedup::enqueue_from_mark(obj);
+  }
+#endif
+  // some marks may contain information we need to preserve so we store them away
+  // and overwrite the mark.  We'll restore it at the end of markSweep.
+  markOop mark = obj->mark();
+  obj->set_mark(markOopDesc::prototype()->set_marked());
 
-void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
-  MarkSweep::follow_cld_closure.do_cld(cld);
-}
-
-void InstanceKlass::oop_ms_follow_contents(oop obj) {
-  assert(obj != NULL, "can't follow the content of NULL object");
-  MarkSweep::follow_klass(this);
-
-  oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::mark_and_push_closure);
+  if (mark->must_be_preserved(obj)) {
+    preserve_mark(obj, mark);
+  }
 }
 
-void InstanceMirrorKlass::oop_ms_follow_contents(oop obj) {
-  InstanceKlass::oop_ms_follow_contents(obj);
-
-  // Follow the klass field in the mirror
-  Klass* klass = java_lang_Class::as_Klass(obj);
-  if (klass != NULL) {
-    // An anonymous class doesn't have its own class loader, so the call
-    // to follow_klass will mark and push its java mirror instead of the
-    // class loader. When handling the java mirror for an anonymous class
-    // we need to make sure its class loader data is claimed, this is done
-    // by calling follow_class_loader explicitly. For non-anonymous classes
-    // the call to follow_class_loader is made when the class loader itself
-    // is handled.
-    if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
-      MarkSweep::follow_class_loader(klass->class_loader_data());
-    } else {
-      MarkSweep::follow_klass(klass);
+template <class T> inline void MarkSweep::mark_and_push(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
+      mark_object(obj);
+      _marking_stack.push(obj);
     }
-  } else {
-    // If klass is NULL then this a mirror for a primitive type.
-    // We don't have to follow them, since they are handled as strong
-    // roots in Universe::oops_do.
-    assert(java_lang_Class::is_primitive(obj), "Sanity check");
-  }
-
-  oop_oop_iterate_statics<true>(obj, &MarkSweep::mark_and_push_closure);
-}
-
-void InstanceClassLoaderKlass::oop_ms_follow_contents(oop obj) {
-  InstanceKlass::oop_ms_follow_contents(obj);
-
-  ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
-
-  // We must NULL check here, since the class loader
-  // can be found before the loader data has been set up.
-  if(loader_data != NULL) {
-    MarkSweep::follow_class_loader(loader_data);
   }
 }
 
-template <class T>
-static void oop_ms_follow_contents_specialized(InstanceRefKlass* klass, oop obj) {
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-  T heap_oop = oopDesc::load_heap_oop(referent_addr);
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("InstanceRefKlass::oop_ms_follow_contents_specialized " PTR_FORMAT, p2i(obj));
-    }
-  )
-  if (!oopDesc::is_null(heap_oop)) {
-    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!referent->is_gc_marked() &&
-        MarkSweep::ref_processor()->discover_reference(obj, klass->reference_type())) {
-      // reference was discovered, referent will be traversed later
-      klass->InstanceKlass::oop_ms_follow_contents(obj);
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
-        }
-      )
-      return;
-    } else {
-      // treat referent as normal oop
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL normal " PTR_FORMAT, p2i(obj));
-        }
-      )
-      MarkSweep::mark_and_push(referent_addr);
-    }
-  }
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  // Treat discovered as normal oop, if ref is not "active",
-  // i.e. if next is non-NULL.
-  T  next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
-    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-    debug_only(
-      if(TraceReferenceGC && PrintGCDetails) {
-        gclog_or_tty->print_cr("   Process discovered as normal "
-                               PTR_FORMAT, p2i(discovered_addr));
-      }
-    )
-    MarkSweep::mark_and_push(discovered_addr);
-  }
-  // treat next as normal oop.  next is a link in the reference queue.
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("   Process next as normal " PTR_FORMAT, p2i(next_addr));
-    }
-  )
-  MarkSweep::mark_and_push(next_addr);
-  klass->InstanceKlass::oop_ms_follow_contents(obj);
+inline void MarkSweep::follow_klass(Klass* klass) {
+  oop op = klass->klass_holder();
+  MarkSweep::mark_and_push(&op);
+}
+
+inline void MarkSweep::follow_cld(ClassLoaderData* cld) {
+  MarkSweep::follow_cld_closure.do_cld(cld);
 }
 
-void InstanceRefKlass::oop_ms_follow_contents(oop obj) {
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(this, obj);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(this, obj);
+template <typename T>
+inline void MarkAndPushClosure::do_oop_nv(T* p)                 { MarkSweep::mark_and_push(p); }
+void MarkAndPushClosure::do_oop(oop* p)                         { do_oop_nv(p); }
+void MarkAndPushClosure::do_oop(narrowOop* p)                   { do_oop_nv(p); }
+inline bool MarkAndPushClosure::do_metadata_nv()                { return true; }
+bool MarkAndPushClosure::do_metadata()                          { return do_metadata_nv(); }
+inline void MarkAndPushClosure::do_klass_nv(Klass* k)           { MarkSweep::follow_klass(k); }
+void MarkAndPushClosure::do_klass(Klass* k)                     { do_klass_nv(k); }
+inline void MarkAndPushClosure::do_cld_nv(ClassLoaderData* cld) { MarkSweep::follow_cld(cld); }
+void MarkAndPushClosure::do_cld(ClassLoaderData* cld)           { do_cld_nv(cld); }
+
+template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
+  mark_and_push(p);
+}
+
+void MarkSweep::push_objarray(oop obj, size_t index) {
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  _objarray_stack.push(task);
+}
+
+inline void MarkSweep::follow_array(objArrayOop array) {
+  MarkSweep::follow_klass(array->klass());
+  // Don't push empty arrays to avoid unnecessary work.
+  if (array->length() > 0) {
+    MarkSweep::push_objarray(array, 0);
   }
 }
 
-template <class T>
-static void oop_ms_follow_contents_specialized(oop obj, int index) {
-  objArrayOop a = objArrayOop(obj);
-  const size_t len = size_t(a->length());
-  const size_t beg_index = size_t(index);
-  assert(beg_index < len || len == 0, "index too large");
-
-  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
-  const size_t end_index = beg_index + stride;
-  T* const base = (T*)a->base();
-  T* const beg = base + beg_index;
-  T* const end = base + end_index;
-
-  // Push the non-NULL elements of the next stride on the marking stack.
-  for (T* e = beg; e < end; e++) {
-    MarkSweep::mark_and_push<T>(e);
-  }
-
-  if (end_index < len) {
-    MarkSweep::push_objarray(a, end_index); // Push the continuation.
+inline void MarkSweep::follow_object(oop obj) {
+  assert(obj->is_gc_marked(), "should be marked");
+  if (obj->is_objArray()) {
+    // Handle object arrays explicitly to allow them to
+    // be split into chunks if needed.
+    MarkSweep::follow_array((objArrayOop)obj);
+  } else {
+    obj->oop_iterate(&mark_and_push_closure);
   }
 }
 
-void ObjArrayKlass::oop_ms_follow_contents(oop obj) {
-  assert (obj->is_array(), "obj must be array");
-  MarkSweep::follow_klass(this);
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(obj, 0);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(obj, 0);
-  }
-}
+void MarkSweep::follow_array_chunk(objArrayOop array, int index) {
+  const int len = array->length();
+  const int beg_index = index;
+  assert(beg_index < len || len == 0, "index too large");
 
-void TypeArrayKlass::oop_ms_follow_contents(oop obj) {
-  assert(obj->is_typeArray(),"must be a type array");
-  // Performance tweak: We skip iterating over the klass pointer since we
-  // know that Universe::TypeArrayKlass never moves.
-}
+  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
+  const int end_index = beg_index + stride;
 
-void MarkSweep::follow_array(objArrayOop array, int index) {
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(array, index);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(array, index);
+  array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);
+
+  if (end_index < len) {
+    MarkSweep::push_objarray(array, end_index); // Push the continuation.
   }
 }
 
@@ -233,7 +167,7 @@
     // Process ObjArrays one at a time to avoid marking stack bloat.
     if (!_objarray_stack.is_empty()) {
       ObjArrayTask task = _objarray_stack.pop();
-      follow_array(objArrayOop(task.obj()), task.index());
+      follow_array_chunk(objArrayOop(task.obj()), task.index());
     }
   } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
 }
@@ -242,6 +176,24 @@
 
 void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
 
+template <class T> inline void MarkSweep::follow_root(T* p) {
+  assert(!Universe::heap()->is_in_reserved(p),
+         "roots shouldn't be things within the heap");
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
+      mark_object(obj);
+      follow_object(obj);
+    }
+  }
+  follow_stack();
+}
+
+void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
+void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
+
 void PreservedMark::adjust_pointer() {
   MarkSweep::adjust_pointer(&_obj);
 }
@@ -266,6 +218,11 @@
   }
 }
 
+void MarkSweep::set_ref_processor(ReferenceProcessor* rp) {
+  _ref_processor = rp;
+  mark_and_push_closure.set_ref_processor(_ref_processor);
+}
+
 MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;
 
 template <typename T>
@@ -405,3 +362,6 @@
   // know that Universe::TypeArrayKlass never moves.
   return t->object_size();
 }
+
+// Generate MS specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
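MarkAndPushClosure now follows HotSpot's specialized-closure convention: each operation has a non-virtual do_*_nv() that the macro-generated oop_oop_iterate paths can bind statically, plus a virtual override that simply forwards, so generic callers keep working. A self-contained illustration of that dispatch pattern in plain C++ (ordinary pointer types stand in for oop/narrowOop; this is not HotSpot code):

  #include <cstdio>

  struct ExampleClosure {
    // Non-virtual entry point: specialized iterators call this directly (static dispatch).
    template <typename T> void do_oop_nv(T* p) { std::printf("visited %p\n", (void*)p); }
    // Virtual entry points for generic callers just forward to the template.
    virtual void do_oop(int* p)    { do_oop_nv(p); }
    virtual void do_oop(double* p) { do_oop_nv(p); }
    virtual ~ExampleClosure() {}
  };

  int main() {
    ExampleClosure cl;
    int a = 0; double b = 0.0;
    cl.do_oop_nv(&a);  // statically bound, the path the specialization macro enables
    cl.do_oop(&b);     // virtually bound fallback
    return 0;
  }
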
--- a/hotspot/src/share/vm/gc/serial/markSweep.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/markSweep.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -49,6 +49,7 @@
 
 // declared at end
 class PreservedMark;
+class MarkAndPushClosure;
 
 class MarkSweep : AllStatic {
   //
@@ -60,13 +61,6 @@
     virtual void do_oop(narrowOop* p);
   };
 
-  class MarkAndPushClosure: public ExtendedOopClosure {
-   public:
-    template <typename T> void do_oop_nv(T* p);
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
-  };
-
   class FollowStackClosure: public VoidClosure {
    public:
     virtual void do_void();
@@ -146,6 +140,7 @@
 
   // Reference Processing
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
+  static void set_ref_processor(ReferenceProcessor* rp);
 
   // Archive Object handling
   static inline bool is_archive_object(oop object);
@@ -153,34 +148,55 @@
   static STWGCTimer* gc_timer() { return _gc_timer; }
   static SerialOldTracer* gc_tracer() { return _gc_tracer; }
 
+  static void preserve_mark(oop p, markOop mark);
+                                // Save the mark word so it can be restored later
+  static void adjust_marks();   // Adjust the pointers in the preserved marks table
+  static void restore_marks();  // Restore the marks that we saved in preserve_mark
+
+  static int adjust_pointers(oop obj);
+
+  static void follow_stack();   // Empty marking stack.
+
+  static void follow_klass(Klass* klass);
+
+  static void follow_cld(ClassLoaderData* cld);
+
+  template <class T> static inline void adjust_pointer(T* p);
+
+  // Check mark and maybe push on marking stack
+  template <class T> static void mark_and_push(T* p);
+
+ private:
   // Call backs for marking
   static void mark_object(oop obj);
   // Mark pointer and follow contents.  Empty marking stack afterwards.
   template <class T> static inline void follow_root(T* p);
 
-  // Check mark and maybe push on marking stack
-  template <class T> static void mark_and_push(T* p);
-
   static inline void push_objarray(oop obj, size_t index);
 
-  static void follow_stack();   // Empty marking stack.
-
   static void follow_object(oop obj);
 
-  static void follow_array(objArrayOop array, int index);
+  static void follow_array(objArrayOop array);
+
+  static void follow_array_chunk(objArrayOop array, int index);
+};
 
-  static void follow_klass(Klass* klass);
-
-  static void follow_class_loader(ClassLoaderData* cld);
+class MarkAndPushClosure: public ExtendedOopClosure {
+public:
+  template <typename T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
 
-  static int adjust_pointers(oop obj);
+  virtual bool do_metadata();
+  bool do_metadata_nv();
 
-  static void preserve_mark(oop p, markOop mark);
-                                // Save the mark word so it can be restored later
-  static void adjust_marks();   // Adjust the pointers in the preserved marks table
-  static void restore_marks();  // Restore the marks that we saved in preserve_mark
+  virtual void do_klass(Klass* k);
+  void do_klass_nv(Klass* k);
 
-  template <class T> static inline void adjust_pointer(T* p);
+  virtual void do_cld(ClassLoaderData* cld);
+  void do_cld_nv(ClassLoaderData* cld);
+
+  void set_ref_processor(ReferenceProcessor* rp) { _ref_processor = rp; }
 };
 
 class PreservedMark VALUE_OBJ_CLASS_SPEC {
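
MarkAndPushClosure now pairs each virtual entry point (do_oop, do_klass, do_cld, do_metadata) with a non-virtual *_nv twin, which is what lets the specialized oop_oop_iterate functions bind the call statically. A minimal sketch of that pairing, with hypothetical types standing in for ExtendedOopClosure and the oop fields:

    #include <cstdio>

    // Minimal stand-in for the virtual/_nv pairing: templated iterators call the
    // non-virtual twin directly, while generic code still has the virtual entry.
    class OopClosureBase {
     public:
      virtual void do_oop(int* p) = 0;
      virtual ~OopClosureBase() {}
    };

    class CountingClosure : public OopClosureBase {
     public:
      CountingClosure() : _count(0) {}
      void do_oop_nv(int* p) { (void)p; _count++; }   // statically bound version
      virtual void do_oop(int* p) { do_oop_nv(p); }   // virtual forwards to _nv
      int count() const { return _count; }
     private:
      int _count;
    };

    // A templated iterator binds do_oop_nv at compile time: no per-field vcall.
    template <class ClosureType>
    void iterate_fields(int* fields, int n, ClosureType* cl) {
      for (int i = 0; i < n; i++) {
        cl->do_oop_nv(&fields[i]);
      }
    }

    int main() {
      int fields[3] = {0, 0, 0};
      CountingClosure cl;
      iterate_fields(fields, 3, &cl);
      std::printf("visited %d fields\n", cl.count());   // visited 3 fields
      return 0;
    }
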
--- a/hotspot/src/share/vm/gc/serial/markSweep.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/markSweep.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -26,38 +26,13 @@
 #define SHARE_VM_GC_SERIAL_MARKSWEEP_INLINE_HPP
 
 #include "gc/serial/markSweep.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "oops/instanceClassLoaderKlass.inline.hpp"
-#include "oops/instanceKlass.inline.hpp"
-#include "oops/instanceMirrorKlass.inline.hpp"
-#include "oops/instanceRefKlass.inline.hpp"
+#include "memory/universe.hpp"
 #include "oops/markOop.inline.hpp"
-#include "oops/objArrayKlass.inline.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/stack.inline.hpp"
+#include "oops/oop.inline.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
 #endif // INCLUDE_ALL_GCS
 
-inline void MarkSweep::mark_object(oop obj) {
-#if INCLUDE_ALL_GCS
-  if (G1StringDedup::is_enabled()) {
-    // We must enqueue the object before it is marked
-    // as we otherwise can't read the object's age.
-    G1StringDedup::enqueue_from_mark(obj);
-  }
-#endif
-  // some marks may contain information we need to preserve so we store them away
-  // and overwrite the mark.  We'll restore it at the end of markSweep.
-  markOop mark = obj->mark();
-  obj->set_mark(markOopDesc::prototype()->set_marked());
-
-  if (mark->must_be_preserved(obj)) {
-    preserve_mark(obj, mark);
-  }
-}
-
 inline bool MarkSweep::is_archive_object(oop object) {
 #if INCLUDE_ALL_GCS
   return (G1MarkSweep::archive_check_enabled() &&
@@ -67,51 +42,6 @@
 #endif
 }
 
-inline void MarkSweep::follow_klass(Klass* klass) {
-  oop op = klass->klass_holder();
-  MarkSweep::mark_and_push(&op);
-}
-
-inline void MarkSweep::follow_object(oop obj) {
-  assert(obj->is_gc_marked(), "should be marked");
-
-  obj->ms_follow_contents();
-}
-
-template <class T> inline void MarkSweep::follow_root(T* p) {
-  assert(!Universe::heap()->is_in_reserved(p),
-         "roots shouldn't be things within the heap");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked() &&
-        !is_archive_object(obj)) {
-      mark_object(obj);
-      follow_object(obj);
-    }
-  }
-  follow_stack();
-}
-
-template <class T> inline void MarkSweep::mark_and_push(T* p) {
-//  assert(Universe::heap()->is_in_reserved(p), "should be in object space");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked() &&
-        !is_archive_object(obj)) {
-      mark_object(obj);
-      _marking_stack.push(obj);
-    }
-  }
-}
-
-void MarkSweep::push_objarray(oop obj, size_t index) {
-  ObjArrayTask task(obj, index);
-  assert(task.is_valid(), "bad ObjArrayTask");
-  _objarray_stack.push(task);
-}
-
 inline int MarkSweep::adjust_pointers(oop obj) {
   return obj->ms_adjust_pointers();
 }
@@ -139,8 +69,4 @@
   }
 }
 
-template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
-  mark_and_push(p);
-}
-
 #endif // SHARE_VM_GC_SERIAL_MARKSWEEP_INLINE_HPP
--- a/hotspot/src/share/vm/gc/serial/tenuredGeneration.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/tenuredGeneration.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -108,7 +108,7 @@
                     free());
     }
   }
-  // If we had to expand to accommodate promotions from younger generations
+  // If we had to expand to accommodate promotions from the young generation
   if (!result && _capacity_at_prologue < capacity()) {
     result = true;
     if (PrintGC && Verbose) {
@@ -140,11 +140,11 @@
   // that are of interest at this point.
   bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
   if (!full && current_is_young) {
-    // Calculate size of data promoted from the younger generations
+    // Calculate size of data promoted from the young generation
     // before doing the collection.
     size_t used_before_gc = used();
 
-    // If the younger gen collections were skipped, then the
+    // If the young gen collection was skipped, then the
     // number of promoted bytes will be 0 and adding it to the
     // average will incorrectly lessen the average.  It is, however,
     // also possible that no promotion was needed.
--- a/hotspot/src/share/vm/gc/serial/tenuredGeneration.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/tenuredGeneration.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -42,10 +42,10 @@
   friend class VM_PopulateDumpSharedSpace;
 
  protected:
-  ContiguousSpace*  _the_space;       // Actual space holding objects
+  ContiguousSpace*    _the_space;       // Actual space holding objects
 
-  GenerationCounters*   _gen_counters;
-  CSpaceCounters*       _space_counters;
+  GenerationCounters* _gen_counters;
+  CSpaceCounters*     _space_counters;
 
   // Allocation failure
   virtual bool expand(size_t bytes, size_t expand_bytes);
@@ -54,6 +54,7 @@
   ContiguousSpace* space() const { return _the_space; }
 
   void assert_correct_size_change_locking();
+
  public:
   TenuredGeneration(ReservedSpace rs,
                     size_t initial_byte_size,
@@ -66,10 +67,9 @@
   const char* short_name() const { return "Tenured"; }
 
   // Does a "full" (forced) collection invoked on this generation collect
-  // all younger generations as well? Note that this is a
-  // hack to allow the collection of the younger gen first if the flag is
-  // set.
-  virtual bool full_collects_younger_generations() const {
+  // the young generation as well? Note that this is a hack to allow the
+  // collection of the young gen first if the flag is set.
+  virtual bool full_collects_young_generation() const {
     return !ScavengeBeforeFullGC;
   }
 
@@ -99,15 +99,16 @@
                        bool clear_all_soft_refs,
                        size_t size,
                        bool is_tlab);
+
   HeapWord* expand_and_allocate(size_t size,
                                 bool is_tlab,
                                 bool parallel = false);
 
   virtual void prepare_for_verify();
 
-
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
+
   bool should_collect(bool   full,
                       size_t word_size,
                       bool   is_tlab);
--- a/hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -266,22 +266,22 @@
     }
 
     // The policy does not have enough data until at least some
-    // minor collections have been done.
+    // young collections have been done.
     _young_gen_policy_is_ready =
       (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);
 
     // Calculate variables used to estimate pause time vs. gen sizes
-    double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
+    double eden_size_in_mbytes = ((double)_eden_size) / ((double)M);
     update_minor_pause_young_estimator(minor_pause_in_ms);
     update_minor_pause_old_estimator(minor_pause_in_ms);
 
     if (PrintAdaptiveSizePolicy && Verbose) {
       gclog_or_tty->print("AdaptiveSizePolicy::minor_collection_end: "
-        "minor gc cost: %f  average: %f", collection_cost,
-        _avg_minor_gc_cost->average());
+                          "minor gc cost: %f  average: %f", collection_cost,
+                          _avg_minor_gc_cost->average());
       gclog_or_tty->print_cr("  minor pause: %f minor period %f",
-        minor_pause_in_ms,
-        _latest_minor_mutator_interval_seconds * MILLIUNITS);
+                             minor_pause_in_ms,
+                             _latest_minor_mutator_interval_seconds * MILLIUNITS);
     }
 
     // Calculate variable used to estimate collection cost vs. gen sizes
@@ -295,8 +295,7 @@
   _minor_timer.start();
 }
 
-size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden,
-                                            uint percent_change) {
+size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden, uint percent_change) {
   size_t eden_heap_delta;
   eden_heap_delta = cur_eden / 100 * percent_change;
   return eden_heap_delta;
@@ -312,8 +311,7 @@
   return eden_heap_delta;
 }
 
-size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo,
-                                             uint percent_change) {
+size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo, uint percent_change) {
   size_t promo_heap_delta;
   promo_heap_delta = cur_promo / 100 * percent_change;
   return promo_heap_delta;
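
The percent-delta helpers above divide by 100 before multiplying. One plausible reason is to keep the intermediate value small (relevant for 32-bit size_t); the cost is that the current size is truncated to a multiple of 100 before scaling. A tiny worked example with hypothetical values:

    #include <cstdio>
    #include <cstddef>

    int main() {
      // Toy values chosen to make the truncation visible; not real heap sizes.
      size_t cur_eden = 1057;
      unsigned percent_change = 20;

      size_t divide_first   = cur_eden / 100 * percent_change;  // 10 * 20   = 200
      size_t multiply_first = cur_eden * percent_change / 100;  // 21140/100 = 211

      std::printf("divide first: %zu, multiply first: %zu\n",
                  divide_first, multiply_first);
      return 0;
    }
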
--- a/hotspot/src/share/vm/gc/shared/barrierSet.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/barrierSet.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -132,6 +132,9 @@
   // First the pre-write versions...
   template <class T> inline void write_ref_field_pre(T* field, oop new_val);
 private:
+  // Helper for write_ref_field_pre and friends, testing for specialized cases.
+  bool devirtualize_reference_writes() const;
+
   // Keep this private so as to catch violations at build time.
   virtual void write_ref_field_pre_work(     void* field, oop new_val) { guarantee(false, "Not needed"); };
 protected:
@@ -142,7 +145,7 @@
   // ...then the post-write version.
   inline void write_ref_field(void* field, oop new_val, bool release = false);
 protected:
-  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
+  virtual void write_ref_field_work(void* field, oop new_val, bool release) = 0;
 public:
 
   // Invoke the barrier, if any, necessary when writing the "bytes"-byte
--- a/hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -32,8 +32,18 @@
 // performance-critical calls when the barrier is the most common
 // card-table kind.
 
+inline bool BarrierSet::devirtualize_reference_writes() const {
+  switch (kind()) {
+  case CardTableForRS:
+  case CardTableExtension:
+    return true;
+  default:
+    return false;
+  }
+}
+
 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
-  if (kind() == CardTableModRef) {
+  if (devirtualize_reference_writes()) {
     barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field_pre(field, new_val);
   } else {
     write_ref_field_pre_work(field, new_val);
@@ -41,7 +51,7 @@
 }
 
 void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
-  if (kind() == CardTableModRef) {
+  if (devirtualize_reference_writes()) {
     barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field(field, new_val, release);
   } else {
     write_ref_field_work(field, new_val, release);
@@ -77,7 +87,7 @@
 
 
 inline void BarrierSet::write_region(MemRegion mr) {
-  if (kind() == CardTableModRef) {
+  if (devirtualize_reference_writes()) {
     barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
   } else {
     write_region_work(mr);
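
The pattern here is to test devirtualize_reference_writes() and, for the card-table kinds, downcast to the concrete barrier so the write can be inlined, while every other kind falls back to the virtual *_work slow path. A simplified, self-contained sketch of that shape (hypothetical classes, not the real BarrierSet hierarchy):

    #include <cstdio>

    class BarrierSet {
     public:
      enum Name { CardTableForRS, CardTableExtension, Other };
      explicit BarrierSet(Name n) : _kind(n) {}
      virtual ~BarrierSet() {}
      Name kind() const { return _kind; }

      bool devirtualize_reference_writes() const {
        return _kind == CardTableForRS || _kind == CardTableExtension;
      }

      inline void write_ref_field(void* field);        // fast-path wrapper

     protected:
      virtual void write_ref_field_work(void* field) { // generic slow path
        std::printf("virtual slow path for %p\n", field);
      }

     private:
      Name _kind;
    };

    class CardTableBS : public BarrierSet {
     public:
      explicit CardTableBS(Name n) : BarrierSet(n) {}
      void inline_write_ref_field(void* field) {       // non-virtual, inlinable
        std::printf("inlined card mark for %p\n", field);
      }
    };

    inline void BarrierSet::write_ref_field(void* field) {
      if (devirtualize_reference_writes()) {
        // Every kind that answers true above is known to be a card-table
        // barrier, so the downcast is safe and the call binds statically.
        static_cast<CardTableBS*>(this)->inline_write_ref_field(field);
      } else {
        write_ref_field_work(field);
      }
    }

    int main() {
      CardTableBS bs(BarrierSet::CardTableForRS);
      int slot = 0;
      bs.write_ref_field(&slot);   // takes the inlined card-table branch
      return 0;
    }
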
--- a/hotspot/src/share/vm/gc/shared/blockOffsetTable.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/blockOffsetTable.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -25,9 +25,12 @@
 #ifndef SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
 #define SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
 
+#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/virtualspace.hpp"
+#include "runtime/globals.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 
 // The CollectedHeap type requires subtypes to implement a method
 // "block_start".  For some subtypes, notably generational
@@ -126,6 +129,19 @@
   VirtualSpace _vs;
   u_char* _offset_array;          // byte array keeping backwards offsets
 
+  void fill_range(size_t start, size_t num_cards, u_char offset) {
+    void* start_ptr = &_offset_array[start];
+#if INCLUDE_ALL_GCS
+    // If collector is concurrent, special handling may be needed.
+    assert(!UseG1GC, "Shouldn't be here when using G1");
+    if (UseConcMarkSweepGC) {
+      memset_with_concurrent_readers(start_ptr, offset, num_cards);
+      return;
+    }
+#endif // INCLUDE_ALL_GCS
+    memset(start_ptr, offset, num_cards);
+  }
+
  protected:
   // Bounds checking accessors:
   // For performance these have to devolve to array accesses in product builds.
@@ -160,20 +176,7 @@
     assert(left  < right, "Heap addresses out of order");
     size_t num_cards = pointer_delta(right, left) >> LogN_words;
 
-    // Below, we may use an explicit loop instead of memset()
-    // because on certain platforms memset() can give concurrent
-    // readers "out-of-thin-air," phantom zeros; see 6948537.
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[index_for(left)], offset, num_cards);
-    } else {
-      size_t i = index_for(left);
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        // Elided until CR 6977974 is fixed properly.
-        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
-        _offset_array[i] = offset;
-      }
-    }
+    fill_range(index_for(left), num_cards, offset);
   }
 
   void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
@@ -182,20 +185,7 @@
     assert(left  <= right, "indexes out of order");
     size_t num_cards = right - left + 1;
 
-    // Below, we may use an explicit loop instead of memset
-    // because on certain platforms memset() can give concurrent
-    // readers "out-of-thin-air," phantom zeros; see 6948537.
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[left], offset, num_cards);
-    } else {
-      size_t i = left;
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        // Elided until CR 6977974 is fixed properly.
-        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
-        _offset_array[i] = offset;
-      }
-    }
+    fill_range(left, num_cards, offset);
   }
 
   void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
--- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -183,7 +183,7 @@
   // these functions here for performance.
 
   void write_ref_field_work(oop obj, size_t offset, oop newVal);
-  virtual void write_ref_field_work(void* field, oop newVal, bool release = false);
+  virtual void write_ref_field_work(void* field, oop newVal, bool release);
 public:
 
   bool has_write_ref_array_opt() { return true; }
--- a/hotspot/src/share/vm/gc/shared/cardTableModRefBSForCTRS.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBSForCTRS.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -31,13 +31,7 @@
 CardTableModRefBSForCTRS::CardTableModRefBSForCTRS(MemRegion whole_heap) :
   CardTableModRefBS(
     whole_heap,
-    // Concrete tag should be BarrierSet::CardTableForRS.
-    // That will presently break things in a bunch of places though.
-    // The concrete tag is used as a dispatch key in many places, and
-    // CardTableForRS does not correctly dispatch in some of those
-    // uses. This will be addressed as part of a reorganization of the
-    // BarrierSet hierarchy.
-    BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableForRS)),
+    BarrierSet::FakeRtti(BarrierSet::CardTableForRS)),
   // LNC functionality
   _lowest_non_clean(NULL),
   _lowest_non_clean_chunk_size(NULL),
--- a/hotspot/src/share/vm/gc/shared/cardTableRS.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/cardTableRS.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -80,7 +80,9 @@
         break;
       }
     }
-    if (!seen) return v;
+    if (!seen) {
+      return v;
+    }
   }
   ShouldNotReachHere();
   return 0;
@@ -502,7 +504,7 @@
       //
       // The main point below is that the parallel card scanning code
       // deals correctly with these stale card values. There are two main
-      // cases to consider where we have a stale "younger gen" value and a
+      // cases to consider where we have a stale "young gen" value and a
       // "derivative" case to consider, where we have a stale
       // "cur_younger_gen_and_prev_non_clean" value, as will become
       // apparent in the case analysis below.
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -160,16 +160,20 @@
 // Memory state functions.
 
 
-CollectedHeap::CollectedHeap() {
+CollectedHeap::CollectedHeap() :
+  _barrier_set(NULL),
+  _is_gc_active(false),
+  _total_collections(0),
+  _total_full_collections(0),
+  _gc_cause(GCCause::_no_gc),
+  _gc_lastcause(GCCause::_no_gc),
+  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
+{
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                              max_len / elements_per_word);
 
-  _barrier_set = NULL;
-  _is_gc_active = false;
-  _total_collections = _total_full_collections = 0;
-  _gc_cause = _gc_lastcause = GCCause::_no_gc;
   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
 
@@ -184,7 +188,7 @@
                 PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                              80, GCCause::to_string(_gc_lastcause), CHECK);
   }
-  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
+
   // Create the ring log
   if (LogEvents) {
     _gc_heap_log = new GCHeapLog();
@@ -570,8 +574,8 @@
 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
   if (HeapDumpBeforeFullGC) {
     GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());
-    // We are doing a "major" collection and a heap dump before
-    // major collection has been requested.
+    // We are doing a full collection and a heap dump before
+    // full collection has been requested.
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramBeforeFullGC) {
--- a/hotspot/src/share/vm/gc/shared/collectorPolicy.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/collectorPolicy.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -111,13 +111,6 @@
   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
   size_t min_heap_byte_size()     { return _min_heap_byte_size; }
 
-  enum Name {
-    CollectorPolicyKind,
-    GenCollectorPolicyKind,
-    ConcurrentMarkSweepPolicyKind,
-    G1CollectorPolicyKind
-  };
-
   AdaptiveSizePolicy* size_policy() { return _size_policy; }
   bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
   void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
@@ -150,8 +143,6 @@
 #endif // INCLUDE_ALL_GCS
 
 
-  virtual BarrierSet::Name barrier_set_name() = 0;
-
   virtual GenRemSet* create_rem_set(MemRegion reserved);
 
   // This method controls how a collector satisfies a request
@@ -182,10 +173,6 @@
     ShouldNotReachHere();
   }
 
-  virtual CollectorPolicy::Name kind() {
-    return CollectorPolicy::CollectorPolicyKind;
-  }
-
   // Do any updates required to global flags that are due to heap initialization
   // changes
   virtual void post_heap_initialize() = 0;
@@ -298,12 +285,6 @@
   virtual void post_heap_initialize() {
     assert(_max_young_size == MaxNewSize, "Should be taken care of by initialize_size_info");
   }
-
-  BarrierSet::Name barrier_set_name()  { return BarrierSet::CardTableModRef; }
-
-  virtual CollectorPolicy::Name kind() {
-    return CollectorPolicy::GenCollectorPolicyKind;
-  }
 };
 
 class MarkSweepPolicy : public GenCollectorPolicy {
--- a/hotspot/src/share/vm/gc/shared/gcTrace.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/gcTrace.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -88,6 +88,8 @@
   send_reference_stats_event(REF_WEAK, rps.weak_count());
   send_reference_stats_event(REF_FINAL, rps.final_count());
   send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
+  send_reference_stats_event(REF_CLEANER, rps.cleaner_count());
+  send_reference_stats_event(REF_JNI, rps.jni_weak_ref_count());
 }
 
 #if INCLUDE_SERVICES
@@ -173,6 +175,11 @@
   _tenuring_threshold = tenuring_threshold;
 }
 
+bool YoungGCTracer::should_report_promotion_events() const {
+  return should_report_promotion_in_new_plab_event() ||
+          should_report_promotion_outside_plab_event();
+}
+
 bool YoungGCTracer::should_report_promotion_in_new_plab_event() const {
   return should_send_promotion_in_new_plab_event();
 }
--- a/hotspot/src/share/vm/gc/shared/gcTrace.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/gcTrace.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -172,6 +172,7 @@
    *
    * plab_size is the size of the newly allocated PLAB in bytes.
    */
+  bool should_report_promotion_events() const;
   bool should_report_promotion_in_new_plab_event() const;
   bool should_report_promotion_outside_plab_event() const;
   void report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -464,7 +464,7 @@
     bool prepared_for_verification = false;
     bool collected_old = false;
     bool old_collects_young = complete &&
-                              _old_gen->full_collects_younger_generations();
+                              _old_gen->full_collects_young_generation();
     if (!old_collects_young &&
         _young_gen->should_collect(full, size, is_tlab)) {
       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
@@ -521,7 +521,7 @@
     // a whole heap collection.
     complete = complete || collected_old;
 
-    if (complete) { // We did a "major" collection
+    if (complete) { // We did a full collection
       // FIXME: See comment at pre_full_gc_dump call
       post_full_gc_dump(NULL);   // do any post full gc dumps
     }
@@ -668,13 +668,13 @@
 
 void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
                                          GenerationType type,
-                                         bool younger_gens_as_roots,
+                                         bool young_gen_as_roots,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopsInGenClosure* not_older_gens,
                                          OopsInGenClosure* older_gens,
                                          CLDClosure* cld_closure) {
-  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
+  const bool is_adjust_phase = !only_strong_roots && !young_gen_as_roots;
 
   bool is_moving_collection = false;
   if (type == YoungGen || is_adjust_phase) {
@@ -691,7 +691,7 @@
                 cld_closure, weak_cld_closure,
                 &mark_code_closure);
 
-  if (younger_gens_as_roots) {
+  if (young_gen_as_roots) {
     if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
       if (type == OldGen) {
         not_older_gens->set_generation(_young_gen);
@@ -763,25 +763,25 @@
 void GenCollectedHeap::collect(GCCause::Cause cause) {
   if (should_do_concurrent_full_gc(cause)) {
 #if INCLUDE_ALL_GCS
-    // mostly concurrent full collection
+    // Mostly concurrent full collection.
     collect_mostly_concurrent(cause);
 #else  // INCLUDE_ALL_GCS
     ShouldNotReachHere();
 #endif // INCLUDE_ALL_GCS
   } else if (cause == GCCause::_wb_young_gc) {
-    // minor collection for WhiteBox API
+    // Young collection for the WhiteBox API.
     collect(cause, YoungGen);
   } else {
 #ifdef ASSERT
   if (cause == GCCause::_scavenge_alot) {
-    // minor collection only
+    // Young collection only.
     collect(cause, YoungGen);
   } else {
-    // Stop-the-world full collection
+    // Stop-the-world full collection.
     collect(cause, OldGen);
   }
 #else
-    // Stop-the-world full collection
+    // Stop-the-world full collection.
     collect(cause, OldGen);
 #endif
   }
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -173,8 +173,7 @@
 
   size_t max_capacity() const;
 
-  HeapWord* mem_allocate(size_t size,
-                         bool*  gc_overhead_limit_was_exceeded);
+  HeapWord* mem_allocate(size_t size, bool*  gc_overhead_limit_was_exceeded);
 
   // We may support a shared contiguous allocation area, if the youngest
   // generation does.
@@ -403,7 +402,7 @@
 
   void gen_process_roots(StrongRootsScope* scope,
                          GenerationType type,
-                         bool younger_gens_as_roots,
+                         bool young_gen_as_roots,
                          ScanningOption so,
                          bool only_strong_roots,
                          OopsInGenClosure* not_older_gens,
--- a/hotspot/src/share/vm/gc/shared/genRemSet.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/genRemSet.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -110,13 +110,11 @@
   virtual void print() {}
 
   // Informs the RS that the given memregion contains no references to
-  // younger generations.
+  // the young generation.
   virtual void clear(MemRegion mr) = 0;
 
-  // Informs the RS that there are no references to generations
-  // younger than gen from generations gen and older.
-  // The parameter clear_perm indicates if the perm_gen's
-  // remembered set should also be processed/cleared.
+  // Informs the RS that there are no references to the young generation
+  // from old_gen.
   virtual void clear_into_younger(Generation* old_gen) = 0;
 
   // Informs the RS that refs in the given "mr" may have changed
--- a/hotspot/src/share/vm/gc/shared/generation.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/generation.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -80,7 +80,6 @@
                               // first two fields are word-sized.)
 };
 
-
 class Generation: public CHeapObj<mtGC> {
   friend class VMStructs;
  private:
@@ -299,8 +298,7 @@
   // word of "obj" may have been overwritten with a forwarding pointer, and
   // also taking care to copy the klass pointer *last*.  Returns the new
   // object if successful, or else NULL.
-  virtual oop par_promote(int thread_num,
-                          oop obj, markOop m, size_t word_sz);
+  virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);
 
   // Informs the current generation that all par_promote_alloc's in the
   // collection have been completed; any supporting data structures can be
@@ -315,7 +313,7 @@
 
   // This generation will collect all younger generations
   // during a full collection.
-  virtual bool full_collects_younger_generations() const { return false; }
+  virtual bool full_collects_young_generation() const { return false; }
 
   // This generation does in-place marking, meaning that mark words
   // are mutated during the marking phase and presumably reinitialized
@@ -370,18 +368,18 @@
 
   // Some generations may require some cleanup or preparation actions before
   // allowing a collection.  The default is to do nothing.
-  virtual void gc_prologue(bool full) {};
+  virtual void gc_prologue(bool full) {}
 
   // Some generations may require some cleanup actions after a collection.
   // The default is to do nothing.
-  virtual void gc_epilogue(bool full) {};
+  virtual void gc_epilogue(bool full) {}
 
   // Save the high water marks for the used space in a generation.
-  virtual void record_spaces_top() {};
+  virtual void record_spaces_top() {}
 
   // Some generations may need to be "fixed-up" after some allocation
   // activity to make them parsable again. The default is to do nothing.
-  virtual void ensure_parsability() {};
+  virtual void ensure_parsability() {}
 
   // Time (in ms) when we were last collected or now if a collection is
   // in progress.
@@ -417,7 +415,7 @@
   virtual void adjust_pointers();
   // Mark sweep support phase4
   virtual void compact();
-  virtual void post_compact() {ShouldNotReachHere();}
+  virtual void post_compact() { ShouldNotReachHere(); }
 
   // Support for CMS's rescan. In this general form we return a pointer
   // to an abstract object that can be used, based on specific previously
@@ -432,7 +430,7 @@
 
   // Some generations may require some cleanup actions before allowing
   // a verification.
-  virtual void prepare_for_verify() {};
+  virtual void prepare_for_verify() {}
 
   // Accessing "marks".
 
@@ -483,7 +481,7 @@
 
   // Give each generation an opportunity to do clean up for any
   // contributed scratch.
-  virtual void reset_scratch() {};
+  virtual void reset_scratch() {}
 
   // When an older generation has been collected, and perhaps resized,
   // this method will be invoked on all younger generations (from older to
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/shared/memset_with_concurrent_readers.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include <string.h>
+#include "gc/shared/memset_with_concurrent_readers.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/ostream.hpp"
+
+#if INCLUDE_ALL_GCS
+
+// Unit test
+#ifdef ASSERT
+
+static unsigned line_byte(const char* line, size_t i) {
+  return unsigned(line[i]) & 0xFF;
+}
+
+// Verify memset_with_concurrent_readers mimics memset.
+// We don't attempt to verify the concurrent reader case.
+void test_memset_with_concurrent_readers() {
+  const size_t chunk_size = 8 * BytesPerWord;
+  const unsigned chunk_count = 4;
+  const size_t block_size = (chunk_count + 4) * chunk_size;
+  char block[block_size];
+  char clear_block[block_size];
+  char set_block[block_size];
+
+  // block format:
+  // 0: unused leading chunk
+  // 1: chunk written from start index to end of chunk
+  // ... nchunks fully written chunks
+  // N: chunk written from start of chunk to end index
+  // N+1: unused trailing chunk
+
+  const int clear_value = 0;
+  const int set_value = 0xAC;
+
+  memset(clear_block, clear_value, block_size);
+  memset(set_block, set_value, block_size);
+
+  for (unsigned nchunks = 0; nchunks <= chunk_count; ++nchunks) {
+    for (size_t start = 1; start <= chunk_size; ++start) {
+      for (size_t end = 0; end <= chunk_size; ++end) {
+        size_t set_start = chunk_size + start;
+        size_t set_end = (2 + nchunks) * chunk_size + end;
+        size_t set_size = set_end - set_start;
+
+        memset(block, clear_value, block_size);
+        memset_with_concurrent_readers(&block[set_start], set_value, set_size);
+        bool head_clear = !memcmp(clear_block, block, set_start);
+        bool middle_set = !memcmp(set_block, block + set_start, set_size);
+        bool tail_clear = !memcmp(clear_block, block + set_end, block_size - set_end);
+        if (!(head_clear && middle_set && tail_clear)) {
+          tty->print_cr("*** memset_with_concurrent_readers failed: "
+                        "set start " SIZE_FORMAT ", set end " SIZE_FORMAT,
+                        set_start, set_end);
+          for (unsigned chunk = 0; chunk < (block_size / chunk_size); ++chunk) {
+            for (unsigned line = 0; line < (chunk_size / BytesPerWord); ++line) {
+              const char* lp = &block[chunk * chunk_size + line * BytesPerWord];
+              tty->print_cr("%d,%d: %2x %2x  %2x %2x  %2x %2x  %2x %2x",
+                            chunk, line,
+                            line_byte(lp, 0), line_byte(lp, 1),
+                            line_byte(lp, 2), line_byte(lp, 3),
+                            line_byte(lp, 4), line_byte(lp, 5),
+                            line_byte(lp, 6), line_byte(lp, 7));
+            }
+          }
+          assert(head_clear, "leading byte not clear");
+          assert(middle_set, "memset byte not set");
+          assert(tail_clear, "trailing bye not clear");
+        }
+      }
+    }
+  }
+}
+
+#endif // end unit test
+
+#endif // INCLUDE_ALL_GCS
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/shared/memset_with_concurrent_readers.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SRC_SHARE_VM_GC_SHARED_MEMSETWITHCONCURRENTREADERS_HPP
+#define SRC_SHARE_VM_GC_SHARED_MEMSETWITHCONCURRENTREADERS_HPP
+
+#include <stddef.h>
+#include <string.h>
+#include "utilities/macros.hpp"
+
+// Only used by concurrent collectors.
+#if INCLUDE_ALL_GCS
+
+// Fill a block of memory with value, like memset, but with the
+// understanding that there may be concurrent readers of that memory.
+void memset_with_concurrent_readers(void* to, int value, size_t size);
+
+#ifdef TARGET_ARCH_sparc
+
+// SPARC requires special handling.  See SPARC-specific definition.
+
+#else
+// All others just use memset.
+
+inline void memset_with_concurrent_readers(void* to, int value, size_t size) {
+  ::memset(to, value, size);
+}
+
+#endif // End of target dispatch.
+
+#endif // INCLUDE_ALL_GCS
+
+#endif // include guard
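
A caller of memset_with_concurrent_readers simply fills a byte range; on non-SPARC targets this is a plain memset, as the fallback above shows. A self-contained usage sketch that inlines that fallback rather than including the real header:

    #include <cstdio>
    #include <cstddef>
    #include <cstring>

    // Self-contained stand-in for the non-SPARC fallback above. In the real tree
    // you would include "gc/shared/memset_with_concurrent_readers.hpp" instead.
    inline void memset_with_concurrent_readers(void* to, int value, size_t size) {
      ::memset(to, value, size);
    }

    int main() {
      unsigned char offsets[64];
      // Fill a card-offset-style byte range while (hypothetically) other threads
      // may be reading it; the contract is "no out-of-thin-air values".
      memset_with_concurrent_readers(offsets, 0xAC, sizeof(offsets));
      std::printf("%02x %02x\n", (unsigned)offsets[0], (unsigned)offsets[63]);
      return 0;
    }
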
--- a/hotspot/src/share/vm/gc/shared/modRefBarrierSet.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/modRefBarrierSet.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -60,7 +60,6 @@
     : BarrierSet(fake_rtti.add_tag(BarrierSet::ModRef)) { }
   ~ModRefBarrierSet() { }
 
-  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
 public:
   void write_prim_field(HeapWord* field, size_t bytes,
                         juint val1, juint val2) {}
--- a/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -243,10 +243,13 @@
       process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                                  is_alive, keep_alive, complete_gc, task_executor);
 
-    // Process cleaners, but include them in phantom statistics.  We expect
-    // Cleaner references to be temporary, and don't want to deal with
-    // possible incompatibilities arising from making it more visible.
-    phantom_count +=
+  }
+
+  // Cleaners
+  size_t cleaner_count = 0;
+  {
+    GCTraceTime tt("Cleaners", trace_time, false, gc_timer, gc_id);
+    cleaner_count =
       process_discovered_reflist(_discoveredCleanerRefs, NULL, true,
                                  is_alive, keep_alive, complete_gc, task_executor);
   }
@@ -256,15 +259,17 @@
   // that is not how the JDK1.2 specification is. See #4126360. Native code can
   // thus use JNI weak references to circumvent the phantom references and
   // resurrect a "post-mortem" object.
+  size_t jni_weak_ref_count = 0;
   {
     GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id);
     if (task_executor != NULL) {
       task_executor->set_single_threaded_mode();
     }
-    process_phaseJNI(is_alive, keep_alive, complete_gc);
+    jni_weak_ref_count =
+      process_phaseJNI(is_alive, keep_alive, complete_gc);
   }
 
-  return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count);
+  return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count, cleaner_count, jni_weak_ref_count);
 }
 
 #ifndef PRODUCT
@@ -291,17 +296,17 @@
 }
 #endif
 
-void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
-                                          OopClosure*        keep_alive,
-                                          VoidClosure*       complete_gc) {
-#ifndef PRODUCT
+size_t ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
+                                            OopClosure*        keep_alive,
+                                            VoidClosure*       complete_gc) {
+  DEBUG_ONLY(size_t check_count = count_jni_refs();)
+  size_t count = JNIHandles::weak_oops_do(is_alive, keep_alive);
+  assert(count == check_count, "Counts didn't match");
+  complete_gc->do_void();
   if (PrintGCDetails && PrintReferenceGC) {
-    unsigned int count = count_jni_refs();
-    gclog_or_tty->print(", %u refs", count);
+    gclog_or_tty->print(", " SIZE_FORMAT " refs", count);
   }
-#endif
-  JNIHandles::weak_oops_do(is_alive, keep_alive);
-  complete_gc->do_void();
+  return count;
 }
 
 
@@ -941,9 +946,10 @@
       list = &_discoveredCleanerRefs[id];
       break;
     case REF_NONE:
+    case REF_JNI:
       // we should not reach here if we are an InstanceRefKlass
     default:
-      ShouldNotReachHere();
+      guarantee(false, err_msg("rt should not be %d", rt));
   }
   if (TraceReferenceGC && PrintGCDetails) {
     gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
@@ -1059,7 +1065,7 @@
     // can mark through them now, rather than delaying that
     // to the reference-processing phase. Since all current
     // time-stamp policies advance the soft-ref clock only
-    // at a major collection cycle, this is always currently
+    // at a full collection cycle, this is always currently
     // accurate.
     if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
       return false;
--- a/hotspot/src/share/vm/gc/shared/referenceProcessor.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/referenceProcessor.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -247,7 +247,7 @@
   DiscoveredList* _discoveredCleanerRefs;
 
  public:
-  static int number_of_subclasses_of_ref() { return (REF_CLEANER - REF_OTHER); }
+  static int number_of_subclasses_of_ref() { return REF_LISTS_COUNT; }
 
   uint num_q()                             { return _num_q; }
   uint max_num_q()                         { return _max_num_q; }
@@ -271,9 +271,9 @@
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);
 
-  void process_phaseJNI(BoolObjectClosure* is_alive,
-                        OopClosure*        keep_alive,
-                        VoidClosure*       complete_gc);
+  size_t process_phaseJNI(BoolObjectClosure* is_alive,
+                          OopClosure*        keep_alive,
+                          VoidClosure*       complete_gc);
 
   // Work methods used by the method process_discovered_reflist
   // Phase1: keep alive all those referents that are otherwise
--- a/hotspot/src/share/vm/gc/shared/referenceProcessorStats.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/referenceProcessorStats.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -36,22 +36,30 @@
   size_t _weak_count;
   size_t _final_count;
   size_t _phantom_count;
+  size_t _cleaner_count;
+  size_t _jni_weak_ref_count;
 
  public:
   ReferenceProcessorStats() :
     _soft_count(0),
     _weak_count(0),
     _final_count(0),
-    _phantom_count(0) {}
+    _phantom_count(0),
+    _cleaner_count(0),
+    _jni_weak_ref_count(0) {}
 
   ReferenceProcessorStats(size_t soft_count,
                           size_t weak_count,
                           size_t final_count,
-                          size_t phantom_count) :
+                          size_t phantom_count,
+                          size_t cleaner_count,
+                          size_t jni_weak_ref_count) :
     _soft_count(soft_count),
     _weak_count(weak_count),
     _final_count(final_count),
-    _phantom_count(phantom_count)
+    _phantom_count(phantom_count),
+    _cleaner_count(cleaner_count),
+    _jni_weak_ref_count(jni_weak_ref_count)
   {}
 
   size_t soft_count() const {
@@ -69,5 +77,13 @@
   size_t phantom_count() const {
     return _phantom_count;
   }
+
+  size_t cleaner_count() const {
+    return _cleaner_count;
+  }
+
+  size_t jni_weak_ref_count() const {
+    return _jni_weak_ref_count;
+  }
 };
 #endif
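
Callers that build a ReferenceProcessorStats now supply six counts, with the cleaner and JNI weak counts appended after the original four. A minimal mirror of the widened constructor, just to show the expected argument order (not the real header):

    #include <cstdio>
    #include <cstddef>

    // Minimal mirror of the widened ReferenceProcessorStats, for illustration.
    struct ReferenceProcessorStats {
      size_t soft, weak, final_, phantom, cleaner, jni_weak;
      ReferenceProcessorStats(size_t s, size_t w, size_t f, size_t p,
                              size_t c, size_t j)
        : soft(s), weak(w), final_(f), phantom(p), cleaner(c), jni_weak(j) {}
    };

    int main() {
      // Counts in the same order as the constructor above:
      // soft, weak, final, phantom, cleaner, JNI weak.
      ReferenceProcessorStats stats(3, 5, 1, 0, 2, 4);
      std::printf("cleaner=%zu jni=%zu\n", stats.cleaner, stats.jni_weak);
      return 0;
    }
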
--- a/hotspot/src/share/vm/gc/shared/space.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/space.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -237,7 +237,7 @@
                                                    HeapWord* bottom,    \
                                                    HeapWord* top,       \
                                                    ClosureType* cl) {   \
-  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
+  bottom += oop(bottom)->oop_iterate_size(cl, mr);                      \
   if (bottom < top) {                                                   \
     HeapWord* next_obj = bottom + oop(bottom)->size();                  \
     while (next_obj < top) {                                            \
@@ -508,7 +508,7 @@
     HeapWord* t = mr.end();                                                 \
     while (obj_addr < t) {                                                  \
       assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
-      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
+      obj_addr += oop(obj_addr)->oop_iterate_size(blk);                     \
     }                                                                       \
   }
 
@@ -523,7 +523,7 @@
   HeapWord* t = top();
   // Could call objects iterate, but this is easier.
   while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(blk);
+    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
   }
 }
 
@@ -578,7 +578,7 @@
       Prefetch::write(p, interval);                                       \
       debug_only(HeapWord* prev = p);                                     \
       oop m = oop(p);                                                     \
-      p += m->oop_iterate(blk);                                           \
+      p += m->oop_iterate_size(blk);                                      \
     }                                                                     \
   } while (t < top());                                                    \
                                                                           \
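
The calls above were renamed from oop_iterate to oop_iterate_size because these loops depend on the return value being the object's size so the cursor can advance to the next object. A generic sketch of that walk-and-advance pattern over a contiguous space, with a hypothetical record layout instead of oopDesc:

    #include <cstdio>
    #include <cstddef>

    // Hypothetical closure: in the real code this would be an OopClosure applied
    // to the object's fields; the important part is the size-based advance.
    struct PrintClosure {
      void do_record(size_t* r, size_t words) {
        std::printf("record at %p, %zu words\n", (void*)r, words);
      }
    };

    // Walk [bottom, top): visit each record, then advance by its size in words,
    // mirroring "obj_addr += oop(obj_addr)->oop_iterate_size(blk)" above.
    void iterate_space(size_t* bottom, size_t* top, PrintClosure* cl) {
      size_t* p = bottom;
      while (p < top) {
        size_t words = *p;          // first word of each record encodes its size
        cl->do_record(p, words);
        p += words;
      }
    }

    int main() {
      // A tiny "space": two records of 3 and 5 words laid out back to back.
      size_t space[8] = {3, 0, 0, 5, 0, 0, 0, 0};
      PrintClosure cl;
      iterate_space(space, space + 8, &cl);
      return 0;
    }
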
--- a/hotspot/src/share/vm/gc/shared/specialized_oop_closures.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/specialized_oop_closures.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -42,6 +42,8 @@
 class ScanClosure;
 class FastScanClosure;
 class FilteringClosure;
+// MarkSweep
+class MarkAndPushClosure;
 // ParNew
 class ParScanWithBarrierClosure;
 class ParScanWithoutBarrierClosure;
@@ -87,6 +89,9 @@
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f)             \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f)
 
+#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)      \
+  f(MarkAndPushClosure,_nv)
+
 #if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)     \
   f(MarkRefsIntoAndScanClosure,_nv)                     \
@@ -101,10 +106,12 @@
 
 #if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)       \
+  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)            \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)           \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f)
 #else  // INCLUDE_ALL_GCS
-#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)
+#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)       \
+  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)
 #endif // INCLUDE_ALL_GCS
 
 
--- a/hotspot/src/share/vm/gc/shared/taskqueue.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/taskqueue.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -258,8 +258,8 @@
 
 #ifdef ASSERT
 bool ObjArrayTask::is_valid() const {
-  return _obj != NULL && _obj->is_objArray() && _index > 0 &&
-    _index < objArrayOop(_obj)->length();
+  return _obj != NULL && _obj->is_objArray() && _index >= 0 &&
+      _index < objArrayOop(_obj)->length();
 }
 #endif // ASSERT
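
Allowing _index == 0 matters because chunked object-array scanning naturally queues its first chunk starting at element 0 (compare the follow_array/follow_array_chunk split in markSweep.hpp above). A small sketch of such chunking with an assumed chunk size:

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Hypothetical sketch of chunked object-array scanning: the first chunk
    // legitimately starts at index 0, which is why an index >= 0 check (rather
    // than > 0) is the right validity test for a queued array task.
    static const int kChunkSize = 4;   // assumed chunk size, for illustration only

    int main() {
      const int array_length = 10;
      std::vector<std::pair<int, int> > tasks;   // (start, end) element ranges

      for (int start = 0; start < array_length; start += kChunkSize) {
        int end = start + kChunkSize < array_length ? start + kChunkSize
                                                    : array_length;
        tasks.push_back(std::make_pair(start, end));   // first task is (0, 4)
      }

      for (size_t i = 0; i < tasks.size(); i++) {
        std::printf("scan elements [%d, %d)\n", tasks[i].first, tasks[i].second);
      }
      return 0;
    }
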
 
--- a/hotspot/src/share/vm/gc/shared/vmGCOperations.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/vmGCOperations.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -213,15 +213,18 @@
   size_t                   _size;     // size of object to be allocated
   Metaspace::MetadataType  _mdtype;
   ClassLoaderData*         _loader_data;
+
  public:
   VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
-                                  size_t size, Metaspace::MetadataType mdtype,
+                                  size_t size,
+                                  Metaspace::MetadataType mdtype,
                                   uint gc_count_before,
                                   uint full_gc_count_before,
                                   GCCause::Cause gc_cause)
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
       _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
   }
+
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
   MetaWord* result() const       { return _result; }
--- a/hotspot/src/share/vm/memory/filemap.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -707,12 +707,16 @@
                                     addr, string_ranges[i].byte_size(), si->_read_only,
                                     si->_allow_exec);
         if (base == NULL || base != addr) {
+          // dealloc the string regions from java heap
+          dealloc_string_regions();
           fail_continue("Unable to map shared string space at required address.");
           return false;
         }
       }
 
       if (!verify_string_regions()) {
+        // dealloc the string regions from java heap
+        dealloc_string_regions();
         fail_continue("Shared string regions are corrupt");
         return false;
       }
@@ -745,12 +749,14 @@
 }
 
 void FileMapInfo::fixup_string_regions() {
+#if INCLUDE_ALL_GCS
   // If any string regions were found, call the fill routine to make them parseable.
   // Note that string_ranges may be non-NULL even if no ranges were found.
   if (num_ranges != 0) {
     assert(string_ranges != NULL, "Null string_ranges array with non-zero count");
     G1CollectedHeap::heap()->fill_archive_regions(string_ranges, num_ranges);
   }
+#endif
 }
 
 bool FileMapInfo::verify_region_checksum(int i) {
@@ -793,20 +799,14 @@
   }
 }
 
-void FileMapInfo::unmap_string_regions() {
-  for (int i = MetaspaceShared::first_string;
-           i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
-    struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
-    size_t used = si->_used;
-    if (used > 0) {
-      size_t size = align_size_up(used, os::vm_allocation_granularity());
-      char* addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
-                                             (narrowOop)si->_addr._offset));
-      if (!os::unmap_memory(addr, size)) {
-        fail_stop("Unable to unmap shared space.");
-      }
-    }
+// dealloc the archived string region from java heap
+void FileMapInfo::dealloc_string_regions() {
+#if INCLUDE_ALL_GCS
+  if (num_ranges > 0) {
+    assert(string_ranges != NULL, "Null string_ranges array with non-zero count");
+    G1CollectedHeap::heap()->dealloc_archive_regions(string_ranges, num_ranges);
   }
+#endif
 }
 
 void FileMapInfo::assert_mark(bool check) {
@@ -967,7 +967,9 @@
         map_info->_header->_space[i]._addr._base = NULL;
       }
     }
-    map_info->unmap_string_regions();
+    // Dealloc the string regions only without unmapping. The string regions are part
+    // of the java heap. Unmapping of the heap regions are managed by GC.
+    map_info->dealloc_string_regions();
   } else if (DumpSharedSpaces) {
     fail_stop("%s", msg);
   }
--- a/hotspot/src/share/vm/memory/filemap.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/memory/filemap.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -208,7 +208,7 @@
   bool  verify_string_regions();
   void  fixup_string_regions();
   void  unmap_region(int i);
-  void  unmap_string_regions();
+  void  dealloc_string_regions();
   bool  verify_region_checksum(int i);
   void  close();
   bool  is_open() { return _file_open; }
--- a/hotspot/src/share/vm/memory/iterator.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/memory/iterator.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -61,7 +61,7 @@
   //
   // 1) do_klass on the header klass pointer.
   // 2) do_klass on the klass pointer in the mirrors.
-  // 3) do_class_loader_data on the class loader data in class loaders.
+  // 3) do_cld   on the class loader data in class loaders.
   //
   // The virtual (without suffix) and the non-virtual (with _nv suffix) need
   // to be updated together, or else the devirtualization will break.
@@ -71,13 +71,14 @@
   // ExtendedOopClosures that don't need to walk the metadata.
   // Currently, only CMS and G1 need these.
 
+  bool do_metadata_nv()      { return false; }
   virtual bool do_metadata() { return do_metadata_nv(); }
-  bool do_metadata_nv()      { return false; }
 
-  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
-  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }
+  void do_klass_nv(Klass* k)      { ShouldNotReachHere(); }
+  virtual void do_klass(Klass* k) { do_klass_nv(k); }
 
-  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
+  void do_cld_nv(ClassLoaderData* cld)      { ShouldNotReachHere(); }
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
 
   // True iff this closure may be safely applied more than once to an oop
   // location without an intervening "major reset" (like the end of a GC).
@@ -180,13 +181,14 @@
     _klass_closure.initialize(this);
   }
 
-  virtual bool do_metadata()    { return do_metadata_nv(); }
-  inline  bool do_metadata_nv() { return true; }
+  bool do_metadata_nv()      { return true; }
+  virtual bool do_metadata() { return do_metadata_nv(); }
 
-  virtual void do_klass(Klass* k);
   void do_klass_nv(Klass* k);
+  virtual void do_klass(Klass* k) { do_klass_nv(k); }
 
-  virtual void do_class_loader_data(ClassLoaderData* cld);
+  void do_cld_nv(ClassLoaderData* cld);
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
 };
 
 // ObjectClosure is used for iterating through an object space
@@ -370,6 +372,7 @@
  public:
   template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
   template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType>             static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
   template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
 };
 
@@ -378,6 +381,7 @@
  public:
   template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
   template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType>             static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
   template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
 };
 
--- a/hotspot/src/share/vm/memory/iterator.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/memory/iterator.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -36,7 +36,7 @@
 #include "oops/typeArrayKlass.inline.hpp"
 #include "utilities/debug.hpp"
 
-inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) {
+inline void MetadataAwareOopClosure::do_cld_nv(ClassLoaderData* cld) {
   assert(_klass_closure._oop_closure == this, "Must be");
 
   bool claim = true;  // Must claim the class loader data before processing.
@@ -45,11 +45,9 @@
 
 inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {
   ClassLoaderData* cld = k->class_loader_data();
-  do_class_loader_data(cld);
+  do_cld_nv(cld);
 }
 
-inline void MetadataAwareOopClosure::do_klass(Klass* k)       { do_klass_nv(k); }
-
 #ifdef ASSERT
 // This verification is applied to all visited oops.
 // The closures can turn is off by overriding should_verify_oops().
@@ -78,6 +76,10 @@
   closure->do_klass_nv(k);
 }
 template <class OopClosureType>
+void Devirtualizer<true>::do_cld(OopClosureType* closure, ClassLoaderData* cld) {
+  closure->do_cld_nv(cld);
+}
+template <class OopClosureType>
 inline bool Devirtualizer<true>::do_metadata(OopClosureType* closure) {
   // Make sure the non-virtual and the virtual versions match.
   assert(closure->do_metadata_nv() == closure->do_metadata(), "Inconsistency in do_metadata");
@@ -96,6 +98,10 @@
   closure->do_klass(k);
 }
 template <class OopClosureType>
+void Devirtualizer<false>::do_cld(OopClosureType* closure, ClassLoaderData* cld) {
+  closure->do_cld(cld);
+}
+template <class OopClosureType>
 bool Devirtualizer<false>::do_metadata(OopClosureType* closure) {
   return closure->do_metadata();
 }
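
Devirtualizer<true> and Devirtualizer<false> give templated iterators a single call site that resolves either to the statically bound *_nv method or to the virtual one. A simplified standalone sketch of that bool-parameterized dispatch for the new do_cld hook, with a hypothetical closure type:

    #include <cstdio>

    struct ClassLoaderData { const char* name; };

    // Hypothetical closure mirroring the do_cld / do_cld_nv pairing above.
    struct MyClosure {
      void do_cld_nv(ClassLoaderData* cld) { std::printf("nv: %s\n", cld->name); }
      virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
      virtual ~MyClosure() {}
    };

    template <bool nv> struct Devirtualizer;

    template <> struct Devirtualizer<true> {
      template <class OopClosureType>
      static void do_cld(OopClosureType* closure, ClassLoaderData* cld) {
        closure->do_cld_nv(cld);   // bound at compile time, inlinable
      }
    };

    template <> struct Devirtualizer<false> {
      template <class OopClosureType>
      static void do_cld(OopClosureType* closure, ClassLoaderData* cld) {
        closure->do_cld(cld);      // ordinary virtual dispatch
      }
    };

    int main() {
      MyClosure cl;
      ClassLoaderData cld = { "boot" };
      Devirtualizer<true>::do_cld(&cl, &cld);    // static call
      Devirtualizer<false>::do_cld(&cl, &cld);   // virtual call
      return 0;
    }
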
--- a/hotspot/src/share/vm/memory/referenceType.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/memory/referenceType.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -32,11 +32,15 @@
 enum ReferenceType {
   REF_NONE,      // Regular class
   REF_OTHER,     // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
+  ///////////////// Only the types below have their own discovered lists
   REF_SOFT,      // Subclass of java/lang/ref/SoftReference
   REF_WEAK,      // Subclass of java/lang/ref/WeakReference
   REF_FINAL,     // Subclass of java/lang/ref/FinalReference
   REF_PHANTOM,   // Subclass of java/lang/ref/PhantomReference
-  REF_CLEANER    // Subclass of sun/misc/Cleaner
+  REF_CLEANER,   // Subclass of sun/misc/Cleaner
+  ///////////////// Only the types in the above range have their own discovered lists
+  REF_JNI,        // JNI weak refs
+  REF_LISTS_COUNT = REF_CLEANER - REF_OTHER  // Number of discovered lists
 };
 
 #endif // SHARE_VM_MEMORY_REFERENCETYPE_HPP
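
With this layout, REF_CLEANER - REF_OTHER counts exactly the five types that own discovered lists (soft, weak, final, phantom, cleaner), and REF_JNI deliberately sits outside that range. A standalone mirror of the enum that makes the arithmetic checkable (assumes C++11 for static_assert):

    // Standalone mirror of the enum above, only to make the arithmetic behind
    // REF_LISTS_COUNT visible: REF_CLEANER - REF_OTHER == 5, one per list-owning
    // type (soft, weak, final, phantom, cleaner).
    enum ReferenceType {
      REF_NONE,
      REF_OTHER,
      REF_SOFT,
      REF_WEAK,
      REF_FINAL,
      REF_PHANTOM,
      REF_CLEANER,
      REF_JNI,
      REF_LISTS_COUNT = REF_CLEANER - REF_OTHER
    };

    int main() {
      static_assert(REF_LISTS_COUNT == 5, "five discovered lists expected");
      return 0;
    }
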
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -147,30 +147,30 @@
 // Array oop iteration macros for declarations.
 // Used to generate the declarations in the *ArrayKlass header files.
 
-#define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix)                                  \
-  int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
+#define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix)                                   \
+  void oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
 
 #if INCLUDE_ALL_GCS
 // Named NO_BACKWARDS because the definition used by *ArrayKlass isn't reversed, see below.
-#define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix)           \
-  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
+#define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix)            \
+  void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
 
 // Array oop iteration macros for definitions.
 // Used to generate the definitions in the *ArrayKlass.inline.hpp files.
 
-#define OOP_OOP_ITERATE_DEFN_RANGE(KlassType, OopClosureType, nv_suffix)                                 \
-                                                                                                         \
-int KlassType::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) {  \
-  return oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);                        \
+#define OOP_OOP_ITERATE_DEFN_RANGE(KlassType, OopClosureType, nv_suffix)                                  \
+                                                                                                          \
+void KlassType::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) {  \
+  oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);                                \
 }
 
 #if INCLUDE_ALL_GCS
-#define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)          \
-int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
-  /* No reverse implementation ATM. */                                                   \
-  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                          \
+#define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)           \
+void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
+  /* No reverse implementation ATM. */                                                    \
+  oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                                  \
 }
 #else
 #define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)
--- a/hotspot/src/share/vm/oops/instanceClassLoaderKlass.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/instanceClassLoaderKlass.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -51,7 +51,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -71,19 +70,19 @@
   // Forward iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
 #if INCLUDE_ALL_GCS
   // Reverse iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif
 
   // Bounded range iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
  public:
 
--- a/hotspot/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
 
 #include "classfile/javaClasses.hpp"
+#include "memory/iterator.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -34,48 +35,42 @@
 #include "utilities/macros.hpp"
 
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
-  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+inline void InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
     // cld can be null if we have a non-registered class loader.
     if (cld != NULL) {
-      closure->do_class_loader_data(cld);
+      Devirtualizer<nv>::do_cld(closure, cld);
     }
   }
-
-  return size;
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
-  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+inline void InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   assert(!Devirtualizer<nv>::do_metadata(closure),
       "Code to handle metadata is not implemented");
-
-  return size;
 }
 #endif // INCLUDE_ALL_GCS
 
 
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
-  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+inline void InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     if (mr.contains(obj)) {
       ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
       // cld can be null if we have a non-registered class loader.
       if (cld != NULL) {
-        closure->do_class_loader_data(cld);
+        Devirtualizer<nv>::do_cld(closure, cld);
       }
     }
   }
-
-  return size;
 }
 
 #define ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -1014,7 +1014,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
--- a/hotspot/src/share/vm/oops/instanceKlass.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/instanceKlass.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -38,6 +38,8 @@
 // as the previous macro based implementation.
 #ifdef TARGET_COMPILER_visCPP
 #define INLINE __forceinline
+#elif defined(TARGET_COMPILER_sparcWorks)
+#define INLINE __attribute__((always_inline))
 #else
 #define INLINE inline
 #endif
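
For reference, a compilable sketch of the force-inline selection added above; the TARGET_COMPILER_* macros are assumed to be supplied by the build system, and the helper function is only a toy user of the macro:

    #include <cstdio>

    #if defined(TARGET_COMPILER_visCPP)
      #define INLINE __forceinline                    // Visual C++
    #elif defined(TARGET_COMPILER_sparcWorks)
      #define INLINE __attribute__((always_inline))   // Oracle/Solaris Studio
    #else
      #define INLINE inline                           // gcc, clang, ...
    #endif

    INLINE int oop_size_stub(int words) { return words; }  // toy forced-inline helper

    int main() {
      std::printf("size = %d words\n", oop_size_stub(3));
      return 0;
    }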
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -91,7 +91,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -121,21 +120,21 @@
   // Forward iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
 
   // Reverse iteration
 #if INCLUDE_ALL_GCS
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif
 
 
   // Bounded range iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Iterate over the static fields.
   template <bool nv, class OopClosureType>
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -53,30 +53,40 @@
 }
 
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Klass* klass = java_lang_Class::as_Klass(obj);
     // We'll get NULL for primitive mirrors.
     if (klass != NULL) {
-      Devirtualizer<nv>::do_klass(closure, klass);
+      if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+        // An anonymous class doesn't have its own class loader, so when handling
+        // the java mirror for an anonymous class we need to make sure its class
+        // loader data is claimed, this is done by calling do_cld explicitly.
+        // For non-anonymous classes the call to do_cld is made when the class
+        // loader itself is handled.
+        Devirtualizer<nv>::do_cld(closure, klass->class_loader_data());
+      } else {
+        Devirtualizer<nv>::do_klass(closure, klass);
+      }
+    } else {
+      // If klass is NULL then this a mirror for a primitive type.
+      // We don't have to follow them, since they are handled as strong
+      // roots in Universe::oops_do.
+      assert(java_lang_Class::is_primitive(obj), "Sanity check");
     }
   }
 
   oop_oop_iterate_statics<nv>(obj, closure);
-
-  return oop_size(obj);
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+void InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   InstanceMirrorKlass::oop_oop_iterate_statics<nv>(obj, closure);
-
-  return oop_size(obj);
 }
 #endif
 
@@ -115,7 +125,7 @@
 }
 
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+void InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
   InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
@@ -129,8 +139,6 @@
   }
 
   oop_oop_iterate_statics_bounded<nv>(obj, closure, mr);
-
-  return oop_size(obj);
 }
 
 #define ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
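
The notable case in this hunk is the java mirror of a VM anonymous class: its ClassLoaderData is not reachable through any ClassLoader oop, so the closure claims it directly via do_cld instead of going through do_klass. A condensed, runnable model of that decision, with stub types standing in for Klass and ClassLoaderData:

    #include <cstdio>

    // Stub types standing in for the HotSpot classes touched by this hunk.
    struct ClassLoaderData { const char* name; };
    struct Klass {
      bool is_instance;
      bool is_anonymous;
      ClassLoaderData* cld;
    };

    struct Closure {
      void do_klass(Klass* k)         { std::printf("visit klass\n"); }
      void do_cld(ClassLoaderData* c) { std::printf("claim CLD %s\n", c->name); }
    };

    // Condensed decision logic from InstanceMirrorKlass::oop_oop_iterate above.
    void follow_mirror_metadata(Klass* klass, Closure* closure) {
      if (klass == nullptr) {
        // Primitive-type mirror: handled as a strong root elsewhere.
        return;
      }
      if (klass->is_instance && klass->is_anonymous) {
        // An anonymous class has no loader oop of its own, so its CLD is
        // claimed directly through the mirror.
        closure->do_cld(klass->cld);
      } else {
        // Otherwise the CLD is claimed when the class loader is visited.
        closure->do_klass(klass);
      }
    }

    int main() {
      ClassLoaderData cld = { "anon-host-loader" };
      Klass anon    = { true, true,  &cld };
      Klass regular = { true, false, &cld };
      Closure cl;
      follow_mirror_metadata(&anon, &cl);      // claims the CLD
      follow_mirror_metadata(&regular, &cl);   // defers to the klass visit
      follow_mirror_metadata(nullptr, &cl);    // primitive mirror, no-op
      return 0;
    }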
--- a/hotspot/src/share/vm/oops/instanceRefKlass.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -67,7 +67,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -88,19 +87,19 @@
 private:
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Reverse iteration
 #if INCLUDE_ALL_GCS
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
   // Bounded range iteration
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Reference processing part of the iterators.
 
--- a/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -106,37 +106,27 @@
 }
 
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+void InstanceRefKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   oop_oop_iterate_ref_processing<nv>(obj, closure);
-
-  return size;
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::
-oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+void InstanceRefKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   oop_oop_iterate_ref_processing<nv>(obj, closure);
-
-  return size;
 }
 #endif // INCLUDE_ALL_GCS
 
 
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+void InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   oop_oop_iterate_ref_processing_bounded<nv>(obj, closure, mr);
-
-  return size;
 }
 
 // Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
--- a/hotspot/src/share/vm/oops/klass.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/klass.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -572,7 +572,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  virtual void oop_ms_follow_contents(oop obj) = 0;
   virtual int  oop_ms_adjust_pointers(oop obj) = 0;
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -584,17 +583,17 @@
 
   // Iterators specialized to particular subtypes
   // of ExtendedOopClosure, to avoid closure virtual calls.
-#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                          \
-  virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0;                        \
-  /* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */                    \
-  virtual int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) = 0;
+#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                           \
+  virtual void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0;                        \
+  /* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */                     \
+  virtual void oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) = 0;
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
 
 #if INCLUDE_ALL_GCS
-#define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)                    \
-  virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
+#define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)                     \
+  virtual void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
@@ -661,35 +660,35 @@
 // Used to generate declarations in the *Klass header files.
 
 #define OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                    \
-  int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);                        \
-  int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);
+  void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);                        \
+  void oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);
 
 #if INCLUDE_ALL_GCS
-#define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)              \
-  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
+#define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)               \
+  void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
 
 // Oop iteration macros for definitions.
 // Used to generate definitions in the *Klass.inline.hpp files.
 
-#define OOP_OOP_ITERATE_DEFN(KlassType, OopClosureType, nv_suffix)             \
-int KlassType::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {  \
-  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                \
+#define OOP_OOP_ITERATE_DEFN(KlassType, OopClosureType, nv_suffix)              \
+void KlassType::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {  \
+  oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                        \
 }
 
 #if INCLUDE_ALL_GCS
-#define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)             \
-int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
-  return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                  \
+#define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)              \
+void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
+  oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                          \
 }
 #else
 #define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)
 #endif
 
-#define OOP_OOP_ITERATE_DEFN_BOUNDED(KlassType, OopClosureType, nv_suffix)                           \
-int KlassType::oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) {  \
-  return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);                          \
+#define OOP_OOP_ITERATE_DEFN_BOUNDED(KlassType, OopClosureType, nv_suffix)                            \
+void KlassType::oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) {  \
+  oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);                                  \
 }
 
 #endif // SHARE_VM_OOPS_KLASS_HPP
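
These macros now stamp out void-returning wrappers; object sizes are no longer produced by the iterators themselves. A toy model of how the nv_suffix token pasting selects the bool-templated worker; nvs_to_bool mirrors the helper these macros rely on, while MyKlass and MyClosure are invented:

    #include <cstdio>

    #define nvs_nv_to_bool true
    #define nvs_v_to_bool  false
    #define nvs_to_bool(nv_suffix) nvs##nv_suffix##_to_bool

    struct MyClosure {
      void do_oop(int* p) { std::printf("oop field = %d\n", *p); }
    };

    // Stamps out one void-returning wrapper per (closure type, suffix) pair,
    // forwarding to the templated worker -- same shape as the new
    // OOP_OOP_ITERATE_DEFN above.
    #define TOY_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)             \
      void oop_oop_iterate##nv_suffix(int* obj, OopClosureType* closure) {  \
        oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);              \
      }

    struct MyKlass {
      template <bool nv>
      void oop_oop_iterate(int* obj, MyClosure* closure) { closure->do_oop(obj); }

      TOY_OOP_OOP_ITERATE_DEFN(MyClosure, _nv)   // expands to oop_oop_iterate_nv
      TOY_OOP_OOP_ITERATE_DEFN(MyClosure, _v)    // expands to oop_oop_iterate_v
    };

    int main() {
      int field = 7;
      MyClosure cl;
      MyKlass k;
      k.oop_oop_iterate_nv(&field, &cl);   // wrappers return void; the size
      k.oop_oop_iterate_v(&field, &cl);    // now comes from oop_iterate_size()
      return 0;
    }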
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -105,7 +105,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -125,15 +124,15 @@
 
   // Iterate over oop elements and metadata.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Iterate over oop elements within mr, and metadata.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Iterate over oop elements with indices within [start, end), and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end);
+  inline void oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end);
 
   // Iterate over oop elements within [start, end), and metadata.
   // Specialized for [T = oop] or [T = narrowOop].
--- a/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -85,46 +85,31 @@
 }
 
 template <bool nv, typename OopClosureType>
-int ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+void ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
   assert (obj->is_array(), "obj must be array");
   objArrayOop a = objArrayOop(obj);
 
-  // Get size before changing pointers.
-  // Don't call size() or oop_size() since that is a virtual call.
-  int size = a->object_size();
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Devirtualizer<nv>::do_klass(closure, obj->klass());
   }
 
   oop_oop_iterate_elements<nv>(a, closure);
-
-  return size;
 }
 
 template <bool nv, typename OopClosureType>
-int ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+void ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
   assert(obj->is_array(), "obj must be array");
   objArrayOop a  = objArrayOop(obj);
 
-  // Get size before changing pointers.
-  // Don't call size() or oop_size() since that is a virtual call
-  int size = a->object_size();
-
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Devirtualizer<nv>::do_klass(closure, a->klass());
   }
 
   oop_oop_iterate_elements_bounded<nv>(a, closure, mr);
-
-  return size;
 }
 
 template <bool nv, typename T, class OopClosureType>
 void ObjArrayKlass::oop_oop_iterate_range_specialized(objArrayOop a, OopClosureType* closure, int start, int end) {
-  if (Devirtualizer<nv>::do_metadata(closure)) {
-    Devirtualizer<nv>::do_klass(closure, a->klass());
-  }
-
   T* low = start == 0 ? cast_from_oop<T*>(a) : a->obj_at_addr<T>(start);
   T* high = (T*)a->base() + end;
 
@@ -134,21 +119,15 @@
 // Like oop_oop_iterate but only iterates over a specified range and only used
 // for objArrayOops.
 template <bool nv, class OopClosureType>
-int ObjArrayKlass::oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end) {
+void ObjArrayKlass::oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end) {
   assert(obj->is_array(), "obj must be array");
   objArrayOop a  = objArrayOop(obj);
 
-  // Get size before changing pointers.
-  // Don't call size() or oop_size() since that is a virtual call
-  int size = a->object_size();
-
   if (UseCompressedOops) {
     oop_oop_iterate_range_specialized<nv, narrowOop>(a, closure, start, end);
   } else {
     oop_oop_iterate_range_specialized<nv, oop>(a, closure, start, end);
   }
-
-  return size;
 }
 
 #define ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)    \
--- a/hotspot/src/share/vm/oops/objArrayOop.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/objArrayOop.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -46,8 +46,8 @@
 
 #define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
                                                                                    \
-int objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) {  \
-  return ((ObjArrayKlass*)klass())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \
+void objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) {  \
+  ((ObjArrayKlass*)klass())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \
 }
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DEFN)
--- a/hotspot/src/share/vm/oops/objArrayOop.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/objArrayOop.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -106,7 +106,7 @@
 
   // special iterators for index ranges, returns size of object
 #define ObjArrayOop_OOP_ITERATE_DECL(OopClosureType, nv_suffix)     \
-  int oop_iterate_range(OopClosureType* blk, int start, int end);
+  void oop_iterate_range(OopClosureType* blk, int start, int end);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayOop_OOP_ITERATE_DECL)
--- a/hotspot/src/share/vm/oops/oop.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/oop.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -330,7 +330,6 @@
   // Garbage Collection support
 
   // Mark Sweep
-  void ms_follow_contents();
   // Adjust all pointers in this object to point at it's forwarded location and
   // return the size of this oop.  This is used by the MarkSweep collector.
   int  ms_adjust_pointers();
@@ -344,17 +343,25 @@
 
 
   // iterators, returns size of object
-#define OOP_ITERATE_DECL(OopClosureType, nv_suffix)                      \
-  int oop_iterate(OopClosureType* blk);                                  \
-  int oop_iterate(OopClosureType* blk, MemRegion mr);  // Only in mr.
+#define OOP_ITERATE_DECL(OopClosureType, nv_suffix)                     \
+  void oop_iterate(OopClosureType* blk);                                \
+  void oop_iterate(OopClosureType* blk, MemRegion mr);  // Only in mr.
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DECL)
 
+#define OOP_ITERATE_SIZE_DECL(OopClosureType, nv_suffix)                    \
+  int oop_iterate_size(OopClosureType* blk);                                \
+  int oop_iterate_size(OopClosureType* blk, MemRegion mr);  // Only in mr.
+
+  ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_SIZE_DECL)
+  ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_SIZE_DECL)
+
+
 #if INCLUDE_ALL_GCS
 
-#define OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)            \
-  int oop_iterate_backwards(OopClosureType* blk);
+#define OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)  \
+  void oop_iterate_backwards(OopClosureType* blk);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DECL)
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -695,10 +695,6 @@
   }
 }
 
-inline void oopDesc::ms_follow_contents() {
-  klass()->oop_ms_follow_contents(this);
-}
-
 inline int oopDesc::ms_adjust_pointers() {
   debug_only(int check_size = size());
   int s = klass()->oop_ms_adjust_pointers(this);
@@ -730,34 +726,50 @@
 }
 #endif
 
-#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                   \
-                                                                      \
-inline int oopDesc::oop_iterate(OopClosureType* blk) {                \
-  return klass()->oop_oop_iterate##nv_suffix(this, blk);              \
-}                                                                     \
-                                                                      \
-inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {  \
-  return klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);  \
+#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
+                                                                       \
+inline void oopDesc::oop_iterate(OopClosureType* blk) {                \
+  klass()->oop_oop_iterate##nv_suffix(this, blk);                      \
+}                                                                      \
+                                                                       \
+inline void oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {  \
+  klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);          \
 }
 
+#define OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)               \
+                                                                       \
+inline int oopDesc::oop_iterate_size(OopClosureType* blk) {            \
+  Klass* k = klass();                                                  \
+  int size = size_given_klass(k);                                      \
+  k->oop_oop_iterate##nv_suffix(this, blk);                            \
+  return size;                                                         \
+}                                                                      \
+                                                                       \
+inline int oopDesc::oop_iterate_size(OopClosureType* blk,              \
+                                     MemRegion mr) {                   \
+  Klass* k = klass();                                                  \
+  int size = size_given_klass(k);                                      \
+  k->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);                \
+  return size;                                                         \
+}
 
 inline int oopDesc::oop_iterate_no_header(OopClosure* blk) {
   // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
   // the do_oop calls, but turns off all other features in ExtendedOopClosure.
   NoHeaderExtendedOopClosure cl(blk);
-  return oop_iterate(&cl);
+  return oop_iterate_size(&cl);
 }
 
 inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
   NoHeaderExtendedOopClosure cl(blk);
-  return oop_iterate(&cl, mr);
+  return oop_iterate_size(&cl, mr);
 }
 
 #if INCLUDE_ALL_GCS
 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)       \
                                                                     \
-inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {    \
-  return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);  \
+inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {   \
+  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);         \
 }
 #else
 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
@@ -765,6 +777,7 @@
 
 #define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)  \
   OOP_ITERATE_DEFN(OopClosureType, nv_suffix)               \
+  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)          \
   OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
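
oop_iterate keeps its old signature minus the return value, and the new oop_iterate_size wrapper captures the size via size_given_klass before the closure runs, since the closure may modify the object it is visiting. A standalone model of that ordering; all types and sizes here are made up:

    #include <cstdio>

    // Toy stand-ins for oopDesc/Klass, just to show the call ordering.
    struct Klass { int words_per_object; };

    struct OopDesc {
      Klass* klass;
      int payload;

      // Mirrors oopDesc::size_given_klass(k): size derived from the klass.
      int size_given_klass(Klass* k) const { return k->words_per_object; }

      template <class Closure>
      void oop_iterate(Closure* blk) { blk->do_oop(&payload); }

      // Mirrors the new oop_iterate_size wrapper: capture the size first,
      // then iterate, then return the pre-iteration size.
      template <class Closure>
      int oop_iterate_size(Closure* blk) {
        Klass* k = klass;
        int size = size_given_klass(k);  // read before the closure touches anything
        oop_iterate(blk);
        return size;
      }
    };

    struct PrintClosure { void do_oop(int* p) { std::printf("field = %d\n", *p); } };

    int main() {
      Klass k = { 3 };
      OopDesc obj = { &k, 42 };
      PrintClosure cl;
      int words = obj.oop_iterate_size(&cl);
      std::printf("object size = %d words\n", words);
      return 0;
    }

Callers that only need the traversal keep using oop_iterate; callers such as oop_iterate_no_header above, which still report a size, switch to oop_iterate_size.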
--- a/hotspot/src/share/vm/oops/typeArrayKlass.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -75,7 +75,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -90,15 +89,15 @@
 
  private:
   // The implementation used by all oop_oop_iterate functions in TypeArrayKlasses.
-  inline int oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure);
+  inline void oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure);
 
   // Wraps oop_oop_iterate_impl to conform to macros.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Wraps oop_oop_iterate_impl to conform to macros.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
  public:
 
--- a/hotspot/src/share/vm/oops/typeArrayKlass.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -33,22 +33,20 @@
 
 class ExtendedOopClosure;
 
-inline int TypeArrayKlass::oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure) {
+inline void TypeArrayKlass::oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure) {
   assert(obj->is_typeArray(),"must be a type array");
-  typeArrayOop t = typeArrayOop(obj);
   // Performance tweak: We skip iterating over the klass pointer since we
   // know that Universe::TypeArrayKlass never moves.
-  return t->object_size();
 }
 
 template <bool nv, typename OopClosureType>
-int TypeArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
-  return oop_oop_iterate_impl(obj, closure);
+void TypeArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  oop_oop_iterate_impl(obj, closure);
 }
 
 template <bool nv, typename OopClosureType>
-int TypeArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
-  return oop_oop_iterate_impl(obj, closure);
+void TypeArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  oop_oop_iterate_impl(obj, closure);
 }
 
 #define ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)    \
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -1522,7 +1522,7 @@
       g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
       break;
 
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
     case BarrierSet::ModRef:
       break;
@@ -1539,7 +1539,7 @@
     case BarrierSet::G1SATBCTLogging:
       return true; // Can move it if no safepoint
 
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
     case BarrierSet::ModRef:
       return true; // There is no pre-barrier
@@ -1565,7 +1565,7 @@
       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
       break;
 
-    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableForRS:
     case BarrierSet::CardTableExtension:
       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
       break;
@@ -3791,7 +3791,7 @@
   Node* cast = __ CastPX(__ ctrl(), adr);
 
   // Divide by card size
-  assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
+  assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef),
          "Only one we handle so far.");
   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
 
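The graphKit changes replace exact kind() comparisons with membership tests: CardTableForRS takes over the old CardTableModRef case label, and the assert now uses is_a() so that any card-table barrier set qualifies. A toy model of the difference; the kind set and the is_a encoding here are invented:

    #include <cassert>

    // Toy model of BarrierSet::is_a(): each concrete barrier set knows the
    // chain of kinds it belongs to. The representation is invented.
    enum Name { ModRef, CardTableModRef, CardTableForRS, CardTableExtension, G1SATBCT };

    struct BarrierSetModel {
      Name kind;   // most-derived kind
      bool is_a(Name n) const {
        switch (kind) {
          case CardTableForRS:
          case CardTableExtension:
            // Both are card-table barrier sets, which in turn are ModRef.
            return n == kind || n == CardTableModRef || n == ModRef;
          case CardTableModRef:
            return n == CardTableModRef || n == ModRef;
          default:
            return n == kind;
        }
      }
    };

    int main() {
      BarrierSetModel bs = { CardTableForRS };
      assert(bs.kind != CardTableModRef);   // exact-kind test now fails...
      assert(bs.is_a(CardTableModRef));     // ...but the hierarchy test passes
      return 0;
    }
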
--- a/hotspot/src/share/vm/precompiled/precompiled.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/precompiled/precompiled.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -57,8 +57,6 @@
 # include "classfile/classFileParser.hpp"
 # include "classfile/classFileStream.hpp"
 # include "classfile/classLoader.hpp"
-# include "classfile/imageDecompressor.hpp"
-# include "classfile/imageFile.hpp"
 # include "classfile/javaClasses.hpp"
 # include "classfile/symbolTable.hpp"
 # include "classfile/systemDictionary.hpp"
@@ -232,7 +230,6 @@
 # include "utilities/constantTag.hpp"
 # include "utilities/copy.hpp"
 # include "utilities/debug.hpp"
-# include "utilities/endian.hpp"
 # include "utilities/exceptions.hpp"
 # include "utilities/globalDefinitions.hpp"
 # include "utilities/growableArray.hpp"
--- a/hotspot/src/share/vm/prims/jni.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/prims/jni.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -3869,6 +3869,7 @@
 void TestBufferingOopClosure_test();
 void TestCodeCacheRemSet_test();
 void FreeRegionList_test();
+void test_memset_with_concurrent_readers();
 #endif
 
 void execute_internal_vm_tests() {
@@ -3910,6 +3911,7 @@
     if (UseG1GC) {
       run_unit_test(FreeRegionList_test());
     }
+    run_unit_test(test_memset_with_concurrent_readers());
 #endif
     tty->print_cr("All internal VM tests passed");
   }
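
A rough model of the run_unit_test pattern used in execute_internal_vm_tests: announce the test by stringizing the call, then run it. This only approximates the shape of the macro and is not the HotSpot definition:

    #include <cstdio>

    #define run_unit_test(call)                     \
      do {                                          \
        std::printf("Running test: %s\n", #call);   \
        call;                                       \
      } while (0)

    void test_memset_with_concurrent_readers() { std::printf("  ...passed\n"); }

    int main() {
      run_unit_test(test_memset_with_concurrent_readers());
      return 0;
    }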
--- a/hotspot/src/share/vm/prims/jvm.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -24,8 +24,6 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoader.hpp"
-#include "classfile/imageDecompressor.hpp"
-#include "classfile/imageFile.hpp"
 #include "classfile/javaAssertions.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "classfile/stringTable.hpp"
@@ -71,7 +69,6 @@
 #include "utilities/copy.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/dtrace.hpp"
-#include "utilities/endian.hpp"
 #include "utilities/events.hpp"
 #include "utilities/histogram.hpp"
 #include "utilities/macros.hpp"
@@ -3668,244 +3665,3 @@
   info->is_attachable = AttachListener::is_attach_supported();
 }
 JVM_END
-
-// jdk.internal.jimage /////////////////////////////////////////////////////////
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-
-// Java entry to open an image file for sharing.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(jlong,
-JVM_ImageOpen(JNIEnv *env, const char *nativePath, jboolean big_endian)) {
-  JVMWrapper("JVM_ImageOpen");
-  // Open image file for reading.
-  ImageFileReader* reader = ImageFileReader::open(nativePath, big_endian != JNI_FALSE);
-  // Return image ID as a jlong.
-  return ImageFileReader::readerToID(reader);
-}
-JVM_END
-
-// Java entry for closing a shared image file.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(void,
-JVM_ImageClose(JNIEnv *env, jlong id)) {
-  JVMWrapper("JVM_ImageClose");
-  // Convert image ID to image reader structure.
-  ImageFileReader* reader = ImageFileReader::idToReader(id);
-  // If valid reader the close.
-  if (reader != NULL) {
-    ImageFileReader::close(reader);
-  }
-}
-JVM_END
-
-// Java entry for accessing the base address of the image index.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(jlong,
-JVM_ImageGetIndexAddress(JNIEnv *env, jlong id)) {
-  JVMWrapper("JVM_ImageGetIndexAddress");
-  // Convert image ID to image reader structure.
-  ImageFileReader* reader = ImageFileReader::idToReader(id);
-  // If valid reader return index base address (as jlong) else zero.
-  return  reader != NULL ? (jlong)reader->get_index_address() : 0L;
-}
-JVM_END
-
-// Java entry for accessing the base address of the image data.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(jlong,
-JVM_ImageGetDataAddress(JNIEnv *env, jlong id)) {
-  JVMWrapper("JVM_ImageGetDataAddress");
-  // Convert image ID to image reader structure.
-  ImageFileReader* reader = ImageFileReader::idToReader(id);
-  // If valid reader return data base address (as jlong) else zero.
-  return MemoryMapImage && reader != NULL ? (jlong)reader->get_data_address() : 0L;
-}
-JVM_END
-
-// Java entry for reading an uncompressed resource from the image.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(jboolean,
-JVM_ImageRead(JNIEnv *env, jlong id, jlong offset,
-              unsigned char* uncompressedAddress, jlong uncompressed_size)) {
-  JVMWrapper("JVM_ImageRead");
-  // Convert image ID to image reader structure.
-  ImageFileReader* reader = ImageFileReader::idToReader(id);\
-  // If not a valid reader the fail the read.
-  if (reader == NULL) return false;
-  // Get the file offset of resource data.
-  u8 file_offset = reader->get_index_size() + offset;
-  // Check validity of arguments.
-  if (offset < 0 ||
-      uncompressed_size < 0 ||
-      file_offset > reader->file_size() - uncompressed_size) {
-      return false;
-  }
-  // Read file content into buffer.
-  return (jboolean)reader->read_at((u1*)uncompressedAddress, uncompressed_size,
-                                   file_offset);
-}
-JVM_END
-
-// Java entry for reading a compressed resource from the image.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(jboolean,
-JVM_ImageReadCompressed(JNIEnv *env,
-                    jlong id, jlong offset,
-                    unsigned char* compressedAddress, jlong compressed_size,
-                    unsigned char* uncompressedAddress, jlong uncompressed_size)) {
-  JVMWrapper("JVM_ImageReadCompressed");
-  // Convert image ID to image reader structure.
-  ImageFileReader* reader = ImageFileReader::idToReader(id);
-  // If not a valid reader the fail the read.
-  if (reader == NULL) return false;
-  // Get the file offset of resource data.
-  u8 file_offset = reader->get_index_size() + offset;
-  // Check validity of arguments.
-  if (offset < 0 ||
-      compressed_size < 0 ||
-      uncompressed_size < 0 ||
-      file_offset > reader->file_size() - compressed_size) {
-      return false;
-  }
-
-  // Read file content into buffer.
-  bool is_read = reader->read_at(compressedAddress, compressed_size,
-                                 file_offset);
-  // If successfully read then decompress.
-  if (is_read) {
-    const ImageStrings strings = reader->get_strings();
-    ImageDecompressor::decompress_resource(compressedAddress, uncompressedAddress,
-    uncompressed_size, &strings, true);
-  }
-  return (jboolean)is_read;
-}
-JVM_END
-
-// Java entry for retrieving UTF-8 bytes from image string table.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(const char*, JVM_ImageGetStringBytes(JNIEnv *env, jlong id, jint offset)) {
-  JVMWrapper("JVM_ImageGetStringBytes");
-  // Convert image ID to image reader structure.
-  ImageFileReader* reader = ImageFileReader::idToReader(id);
-  // Fail if not valid reader.
-  if (reader == NULL) return NULL;
-  // Manage image string table.
-  ImageStrings strings = reader->get_strings();
-  // Retrieve string adrress from table.
-  const char* data = strings.get(offset);
-  return data;
-}
-JVM_END
-
-// Utility function to copy location information into a jlong array.
-// WARNING: This function is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-static void image_expand_location(JNIEnv *env, jlong* rawAttributes, ImageLocation& location) {
-  // Copy attributes from location.
-  for (int kind = ImageLocation::ATTRIBUTE_END + 1;
-           kind < ImageLocation::ATTRIBUTE_COUNT;
-           kind++) {
-    rawAttributes[kind] = location.get_attribute(kind);
-  }
-}
-
-// Java entry for retrieving location attributes for attribute offset.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(jlong*, JVM_ImageGetAttributes(JNIEnv *env, jlong* rawAttributes, jlong id, jint offset)) {
-  JVMWrapper("JVM_ImageGetAttributes");
-  // Convert image ID to image reader structure.
-  ImageFileReader* reader = ImageFileReader::idToReader(id);
-  // Fail if not valid reader.
-  if (reader == NULL) return NULL;
-  // Retrieve first byte address of resource's location attribute stream.
-  u1* data = reader->get_location_offset_data(offset);
-  // Fail if not valid offset.
-  if (data == NULL) return NULL;
-  // Expand stream into array.
-  ImageLocation location(data);
-  image_expand_location(env, rawAttributes, location);
-  return rawAttributes;
-}
-JVM_END
-
-// Java entry for retrieving location attributes count for attribute offset.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(jsize, JVM_ImageGetAttributesCount(JNIEnv *env)) {
-  JVMWrapper("JVM_ImageGetAttributesCount");
-  return ImageLocation::ATTRIBUTE_COUNT;
-}
-JVM_END
-
-// Java entry for retrieving location attributes for named resource.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(jlong*,
-JVM_ImageFindAttributes(JNIEnv *env, jlong* rawAttributes, jbyte* rawBytes, jsize size, jlong id)) {
-  JVMWrapper("JVM_ImageFindAttributes");
-  // Mark for temporary buffers.
-  ResourceMark rm;
-  // Convert image ID to image reader structure.
-  ImageFileReader* reader = ImageFileReader::idToReader(id);
-  // Fail if not valid reader.
-  if (reader == NULL) return NULL;
-  // Convert byte array to a cstring.
-  char* path = NEW_RESOURCE_ARRAY(char, size + 1);
-  memcpy(path, rawBytes, size);
-  path[size] = '\0';
-  // Locate resource location data.
-  ImageLocation location;
-  bool found = reader->find_location(path, location);
-  // Resource not found.
-  if (!found) return NULL;
-  // Expand stream into array.
-  image_expand_location(env, rawAttributes, location);
-  return rawAttributes;
-}
-JVM_END
-
-// Java entry for retrieving all the attribute stream offsets from an image.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(jint*, JVM_ImageAttributeOffsets(JNIEnv *env, jint* rawOffsets, unsigned int length, jlong id)) {
-  JVMWrapper("JVM_ImageAttributeOffsets");
-  // Convert image ID to image reader structure.
-  ImageFileReader* reader = ImageFileReader::idToReader(id);
-  // Fail if not valid reader.
-  if (reader == NULL) return NULL;
-  // Determine endian for reader.
-  Endian* endian = reader->endian();
-  // Get base address of attribute stream offsets table.
-  u4* offsets_table = reader->offsets_table();
-  // Allocate int array result.
-  // Copy values to result (converting endian.)
-  for (u4 i = 0; i < length; i++) {
-    rawOffsets[i] = endian->get(offsets_table[i]);
-  }
-  return rawOffsets;
-}
-JVM_END
-
-// Java entry for retrieving all the attribute stream offsets length from an image.
-// WARNING: This API is experimental and temporary during JDK 9 development
-// cycle. It will not be supported in the eventual JDK 9 release.
-JVM_ENTRY(unsigned int, JVM_ImageAttributeOffsetsLength(JNIEnv *env, jlong id)) {
-  JVMWrapper("JVM_ImageAttributeOffsetsLength");
-  // Convert image ID to image reader structure.
-  ImageFileReader* reader = ImageFileReader::idToReader(id);
-  // Fail if not valid reader.
-  if (reader == NULL) return 0;
-  // Get perfect hash table length.
-  u4 length = reader->table_length();
-  return (jint) length;
-}
-JVM_END
--- a/hotspot/src/share/vm/prims/jvm.h	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/prims/jvm.h	Tue Sep 08 16:10:37 2015 +0200
@@ -571,52 +571,6 @@
 JNIEXPORT jboolean JNICALL
 JVM_SupportsCX8(void);
 
-/*
- * jdk.internal.jimage
- * WARNING: This API is experimental and temporary during JDK 9 development
- * cycle. It will not be supported in the eventual JDK 9 release.
- */
-
-JNIEXPORT jlong JNICALL
-JVM_ImageOpen(JNIEnv *env, const char *nativePath, jboolean big_endian);
-
-JNIEXPORT void JNICALL
-JVM_ImageClose(JNIEnv *env, jlong id);
-
-JNIEXPORT jlong JNICALL
-JVM_ImageGetIndexAddress(JNIEnv *env, jlong id);
-
-JNIEXPORT jlong JNICALL
-JVM_ImageGetDataAddress(JNIEnv *env,jlong id);
-
-JNIEXPORT jboolean JNICALL
-JVM_ImageRead(JNIEnv *env, jlong id, jlong offset,
-            unsigned char* uncompressedAddress, jlong uncompressed_size);
-
-
-JNIEXPORT jboolean JNICALL
-JVM_ImageReadCompressed(JNIEnv *env, jlong id, jlong offset,
-            unsigned char* compressedBuffer, jlong compressed_size,
-            unsigned char* uncompressedBuffer, jlong uncompressed_size);
-
-JNIEXPORT const char* JNICALL
-JVM_ImageGetStringBytes(JNIEnv *env, jlong id, jint offset);
-
-JNIEXPORT jlong* JNICALL
-JVM_ImageGetAttributes(JNIEnv *env, jlong* rawAttributes, jlong id, jint offset);
-
-JNIEXPORT jsize JNICALL
-JVM_ImageGetAttributesCount(JNIEnv *env);
-
-JNIEXPORT jlong* JNICALL
-JVM_ImageFindAttributes(JNIEnv *env, jlong* rawAttributes, jbyte* rawBytes, jsize size, jlong id);
-
-JNIEXPORT jint* JNICALL
-JVM_ImageAttributeOffsets(JNIEnv *env, jint* rawOffsets, unsigned int length, jlong id);
-
-JNIEXPORT unsigned int JNICALL
-JVM_ImageAttributeOffsetsLength(JNIEnv *env, jlong id);
-
 /*************************************************************************
  PART 2: Support for the Verifier and Class File Format Checker
  ************************************************************************/
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -3482,7 +3482,7 @@
 
   for (SystemProperty* p = Arguments::system_properties(); p != NULL; p = p->next()) {
     if (strcmp(property, p->key()) == 0) {
-      if (p->set_value((char *)value_ptr)) {
+      if (p->set_value(value_ptr)) {
         err =  JVMTI_ERROR_NONE;
       }
     }
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -2181,8 +2181,8 @@
   JvmtiVMObjectAllocEventCollector::oops_do_for_all_threads(f);
 }
 
-void JvmtiExport::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
-  JvmtiTagMap::weak_oops_do(is_alive, f);
+size_t JvmtiExport::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  return JvmtiTagMap::weak_oops_do(is_alive, f);
 }
 
 void JvmtiExport::gc_epilogue() {
--- a/hotspot/src/share/vm/prims/jvmtiExport.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/prims/jvmtiExport.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -366,7 +366,7 @@
   static void clear_detected_exception   (JavaThread* thread) NOT_JVMTI_RETURN;
 
   static void oops_do(OopClosure* f) NOT_JVMTI_RETURN;
-  static void weak_oops_do(BoolObjectClosure* b, OopClosure* f) NOT_JVMTI_RETURN;
+  static size_t weak_oops_do(BoolObjectClosure* b, OopClosure* f) NOT_JVMTI_RETURN_(0);
   static void gc_epilogue() NOT_JVMTI_RETURN;
 
   static void transition_pending_onload_raw_monitors() NOT_JVMTI_RETURN;
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -3284,32 +3284,35 @@
 }
 
 
-void JvmtiTagMap::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+size_t JvmtiTagMap::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
   // No locks during VM bring-up (0 threads) and no safepoints after main
   // thread creation and before VMThread creation (1 thread); initial GC
   // verification can happen in that window which gets to here.
   assert(Threads::number_of_threads() <= 1 ||
          SafepointSynchronize::is_at_safepoint(),
          "must be executed at a safepoint");
+  size_t count = 0;
   if (JvmtiEnv::environments_might_exist()) {
     JvmtiEnvIterator it;
     for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
       JvmtiTagMap* tag_map = env->tag_map();
       if (tag_map != NULL && !tag_map->is_empty()) {
-        tag_map->do_weak_oops(is_alive, f);
+        count += tag_map->do_weak_oops(is_alive, f);
       }
     }
   }
+  return count;
 }
 
-void JvmtiTagMap::do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f) {
+size_t JvmtiTagMap::do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f) {
 
   // does this environment have the OBJECT_FREE event enabled
   bool post_object_free = env()->is_enabled(JVMTI_EVENT_OBJECT_FREE);
 
   // counters used for trace message
-  int freed = 0;
-  int moved = 0;
+  size_t freed = 0;
+  size_t moved = 0;
+  size_t stayed = 0;
 
   JvmtiTagHashmap* hashmap = this->hashmap();
 
@@ -3318,7 +3321,7 @@
 
   // if the hashmap is empty then we can skip it
   if (hashmap->_entry_count == 0) {
-    return;
+    return 0;
   }
 
   // now iterate through each entry in the table
@@ -3380,6 +3383,7 @@
         } else {
           // object didn't move
           prev = entry;
+          stayed++;
         }
       }
 
@@ -3398,10 +3402,12 @@
 
   // stats
   if (TraceJVMTIObjectTagging) {
-    int post_total = hashmap->_entry_count;
-    int pre_total = post_total + freed;
-
-    tty->print_cr("(%d->%d, %d freed, %d total moves)",
-        pre_total, post_total, freed, moved);
+    size_t post_total = hashmap->_entry_count;
+    size_t pre_total = post_total + freed;
+
+    tty->print_cr("(" SIZE_FORMAT "->" SIZE_FORMAT ", " SIZE_FORMAT " freed, " SIZE_FORMAT " stayed, " SIZE_FORMAT " moved)",
+        pre_total, post_total, freed, stayed, moved);
   }
+
+  return (freed + stayed + moved);
 }
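
JvmtiTagMap::do_weak_oops now also counts entries that stayed in place, returning freed + stayed + moved, and weak_oops_do sums that total across all JVMTI environments so callers get the number of tagged entries processed. A compact model of the aggregation with illustrative container types:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Per-environment tag map: reports how many entries it processed.
    struct TagMapModel {
      std::size_t freed, stayed, moved;
      std::size_t do_weak_oops() { return freed + stayed + moved; }
    };

    // Mirrors the new weak_oops_do contract: sum the per-map counts.
    std::size_t weak_oops_do(std::vector<TagMapModel>& maps) {
      std::size_t count = 0;
      for (TagMapModel& m : maps) {
        count += m.do_weak_oops();
      }
      return count;
    }

    int main() {
      std::vector<TagMapModel> maps = { {2, 5, 1}, {0, 3, 0} };
      std::printf("weak oops processed: %zu\n", weak_oops_do(maps));  // 11
      return 0;
    }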
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -60,7 +60,7 @@
   inline Mutex* lock()                      { return &_lock; }
   inline JvmtiEnv* env() const              { return _env; }
 
-  void do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f);
+  size_t do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f);
 
   // iterate over all entries in this tag map
   void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
@@ -122,8 +122,8 @@
                                    jint* count_ptr, jobject** object_result_ptr,
                                    jlong** tag_result_ptr);
 
-  static void weak_oops_do(
-      BoolObjectClosure* is_alive, OopClosure* f) NOT_JVMTI_RETURN;
+  static size_t weak_oops_do(BoolObjectClosure* is_alive,
+                             OopClosure* f) NOT_JVMTI_RETURN_(0);
 };
 
 #endif // SHARE_VM_PRIMS_JVMTITAGMAP_HPP
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -27,7 +27,6 @@
 #include <new>
 
 #include "classfile/classLoaderData.hpp"
-#include "classfile/imageFile.hpp"
 #include "classfile/stringTable.hpp"
 #include "code/codeCache.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
@@ -56,6 +55,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
+#include "gc/parallel/adjoiningGenerations.hpp"
 #endif // INCLUDE_ALL_GCS
 #if INCLUDE_NMT
 #include "services/mallocSiteTable.hpp"
@@ -297,6 +297,11 @@
   return p->size() * HeapWordSize;
 WB_END
 
+WB_ENTRY(jlong, WB_GetHeapSpaceAlignment(JNIEnv* env, jobject o))
+  size_t alignment = Universe::heap()->collector_policy()->space_alignment();
+  return (jlong)alignment;
+WB_END
+
 #if INCLUDE_ALL_GCS
 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
@@ -335,6 +340,17 @@
   return (jint)HeapRegion::GrainBytes;
 WB_END
 
+WB_ENTRY(jlong, WB_PSVirtualSpaceAlignment(JNIEnv* env, jobject o))
+  ParallelScavengeHeap* ps = ParallelScavengeHeap::heap();
+  size_t alignment = ps->gens()->virtual_spaces()->alignment();
+  return (jlong)alignment;
+WB_END
+
+WB_ENTRY(jlong, WB_PSHeapGenerationAlignment(JNIEnv* env, jobject o))
+  size_t alignment = ParallelScavengeHeap::heap()->generation_alignment();
+  return (jlong)alignment;
+WB_END
+
 WB_ENTRY(jobject, WB_G1AuxiliaryMemoryUsage(JNIEnv* env))
   ResourceMark rm(THREAD);
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -1144,131 +1160,7 @@
   return (jlong) MetaspaceGC::capacity_until_GC();
 WB_END
 
-WB_ENTRY(jboolean, WB_ReadImageFile(JNIEnv* env, jobject wb, jstring imagefile))
-  const char* filename = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(imagefile));
-  return ImageFileReader::open(filename) != NULL;
-WB_END
 
-WB_ENTRY(jlong, WB_imageOpenImage(JNIEnv *env, jobject wb, jstring path, jboolean big_endian))
-  ThreadToNativeFromVM ttn(thread);
-  const char *nativePath = env->GetStringUTFChars(path, NULL);
-  jlong ret = JVM_ImageOpen(env, nativePath, big_endian);
-
-  env->ReleaseStringUTFChars(path, nativePath);
-  return ret;
-WB_END
-
-WB_ENTRY(void, WB_imageCloseImage(JNIEnv *env, jobject wb, jlong id))
-  ThreadToNativeFromVM ttn(thread);
-  JVM_ImageClose(env, id);
-WB_END
-
-WB_ENTRY(jlong, WB_imageGetIndexAddress(JNIEnv *env, jobject wb, jlong id))
-  ThreadToNativeFromVM ttn(thread);
-  return JVM_ImageGetIndexAddress(env, id);
-WB_END
-
-WB_ENTRY(jlong, WB_imageGetDataAddress(JNIEnv *env, jobject wb, jlong id))
-  ThreadToNativeFromVM ttn(thread);
-  return JVM_ImageGetDataAddress(env, id);
-WB_END
-
-WB_ENTRY(jboolean, WB_imageRead(JNIEnv *env, jobject wb, jlong id, jlong offset, jobject uncompressedBuffer, jlong uncompressed_size))
-  ThreadToNativeFromVM ttn(thread);
-  if (uncompressedBuffer == NULL) {
-    return JNI_FALSE;
-  }
-  unsigned char* uncompressedAddress =
-          (unsigned char*) env->GetDirectBufferAddress(uncompressedBuffer);
-  return JVM_ImageRead(env, id, offset, uncompressedAddress, uncompressed_size);
-WB_END
-
-WB_ENTRY(jboolean, WB_imageReadCompressed(JNIEnv *env, jobject wb, jlong id, jlong offset, jobject compressedBuffer, jlong compressed_size, jobject uncompressedBuffer, jlong uncompressed_size))
-  ThreadToNativeFromVM ttn(thread);
-  if (uncompressedBuffer == NULL || compressedBuffer == NULL) {
-    return false;
-  }
-  // Get address of read direct buffer.
-  unsigned char* compressedAddress =
-        (unsigned char*) env->GetDirectBufferAddress(compressedBuffer);
-  // Get address of decompression direct buffer.
-  unsigned char* uncompressedAddress =
-        (unsigned char*) env->GetDirectBufferAddress(uncompressedBuffer);
-  return JVM_ImageReadCompressed(env, id, offset, compressedAddress, compressed_size, uncompressedAddress, uncompressed_size);
-WB_END
-
-WB_ENTRY(jbyteArray, WB_imageGetStringBytes(JNIEnv *env, jobject wb, jlong id, jlong offset))
-  ThreadToNativeFromVM ttn(thread);
-  const char* data = JVM_ImageGetStringBytes(env, id, offset);
-  // Determine String length.
-  size_t size = strlen(data);
-  // Allocate byte array.
-  jbyteArray byteArray = env->NewByteArray((jsize) size);
-  // Get array base address.
-  jbyte* rawBytes = env->GetByteArrayElements(byteArray, NULL);
-  // Copy bytes from image string table.
-  memcpy(rawBytes, data, size);
-  // Release byte array base address.
-  env->ReleaseByteArrayElements(byteArray, rawBytes, 0);
-  return byteArray;
-WB_END
-
-WB_ENTRY(jlong, WB_imageGetStringsSize(JNIEnv *env, jobject wb, jlong id))
-  ImageFileReader* reader = ImageFileReader::idToReader(id);
-  return reader? reader->strings_size() : 0L;
-WB_END
-
-WB_ENTRY(jlongArray, WB_imageGetAttributes(JNIEnv *env, jobject wb, jlong id, jint offset))
-  ThreadToNativeFromVM ttn(thread);
-  // Allocate a jlong large enough for all location attributes.
-  jlongArray attributes = env->NewLongArray(JVM_ImageGetAttributesCount(env));
-  // Get base address for jlong array.
-  jlong* rawAttributes = env->GetLongArrayElements(attributes, NULL);
-  jlong* ret = JVM_ImageGetAttributes(env, rawAttributes, id, offset);
-  // Release jlong array base address.
-  env->ReleaseLongArrayElements(attributes, rawAttributes, 0);
-    return ret == NULL ? NULL : attributes;
-WB_END
-
-WB_ENTRY(jlongArray, WB_imageFindAttributes(JNIEnv *env, jobject wb, jlong id, jbyteArray utf8))
-  ThreadToNativeFromVM ttn(thread);
-  // Allocate a jlong large enough for all location attributes.
-  jlongArray attributes = env->NewLongArray(JVM_ImageGetAttributesCount(env));
-  // Get base address for jlong array.
-  jlong* rawAttributes = env->GetLongArrayElements(attributes, NULL);
-  jsize size = env->GetArrayLength(utf8);
-  jbyte* rawBytes = env->GetByteArrayElements(utf8, NULL);
-  jlong* ret = JVM_ImageFindAttributes(env, rawAttributes, rawBytes, size, id);
-  env->ReleaseByteArrayElements(utf8, rawBytes, 0);
-  env->ReleaseLongArrayElements(attributes, rawAttributes, 0);
-  return ret == NULL ? NULL : attributes;
-WB_END
-
-WB_ENTRY(jintArray, WB_imageAttributeOffsets(JNIEnv *env, jobject wb, jlong id))
-  ThreadToNativeFromVM ttn(thread);
-  unsigned int length = JVM_ImageAttributeOffsetsLength(env, id);
-  if (length == 0) {
-    return NULL;
-  }
-  jintArray offsets = env->NewIntArray(length);
-  // Get base address of result.
-  jint* rawOffsets = env->GetIntArrayElements(offsets, NULL);
-  jint* ret = JVM_ImageAttributeOffsets(env, rawOffsets, length, id);
-  // Release result base address.
-  env->ReleaseIntArrayElements(offsets, rawOffsets, 0);
-  return ret == NULL ? NULL : offsets;
-WB_END
-
-WB_ENTRY(jint, WB_imageGetIntAtAddress(JNIEnv *env, jobject wb, jlong address, jint offset, jboolean big_endian))
-  unsigned char* arr = (unsigned char*) address + offset;
-  jint uraw;
-  if (big_endian) {
-     uraw = arr[0] << 24 | arr[1]<<16 | (arr[2]<<8) | arr[3];
-  } else {
-      uraw = arr[0] | arr[1]<<8 | (arr[2]<<16) | arr[3]<<24;
-  }
-  return uraw;
-WB_END
 
 WB_ENTRY(void, WB_AssertMatchingSafepointCalls(JNIEnv* env, jobject o, jboolean mutexSafepointValue, jboolean attemptedNoSafepointValue))
   Monitor::SafepointCheckRequired sfpt_check_required = mutexSafepointValue ?
@@ -1457,6 +1349,7 @@
   {CC"getVMPageSize",                    CC"()I",                   (void*)&WB_GetVMPageSize     },
   {CC"getVMAllocationGranularity",       CC"()J",                   (void*)&WB_GetVMAllocationGranularity },
   {CC"getVMLargePageSize",               CC"()J",                   (void*)&WB_GetVMLargePageSize},
+  {CC"getHeapSpaceAlignment",            CC"()J",                   (void*)&WB_GetHeapSpaceAlignment},
   {CC"isClassAlive0",                    CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive      },
   {CC"parseCommandLine0",
       CC"(Ljava/lang/String;C[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
@@ -1481,6 +1374,8 @@
   {CC"g1StartConcMarkCycle",       CC"()Z",           (void*)&WB_G1StartMarkCycle  },
   {CC"g1AuxiliaryMemoryUsage", CC"()Ljava/lang/management/MemoryUsage;",
                                                       (void*)&WB_G1AuxiliaryMemoryUsage  },
+  {CC"psVirtualSpaceAlignment",CC"()J",               (void*)&WB_PSVirtualSpaceAlignment},
+  {CC"psHeapGenerationAlignment",CC"()J",             (void*)&WB_PSHeapGenerationAlignment},
 #endif // INCLUDE_ALL_GCS
 #if INCLUDE_NMT
   {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
@@ -1576,21 +1471,6 @@
   {CC"getCodeBlob",        CC"(J)[Ljava/lang/Object;",(void*)&WB_GetCodeBlob        },
   {CC"getThreadStackSize", CC"()J",                   (void*)&WB_GetThreadStackSize },
   {CC"getThreadRemainingStackSize", CC"()J",          (void*)&WB_GetThreadRemainingStackSize },
-  {CC"readImageFile",      CC"(Ljava/lang/String;)Z", (void*)&WB_ReadImageFile },
-  {CC"imageOpenImage",     CC"(Ljava/lang/String;Z)J",(void*)&WB_imageOpenImage },
-  {CC"imageCloseImage",    CC"(J)V",                  (void*)&WB_imageCloseImage },
-  {CC"imageGetIndexAddress",CC"(J)J",                 (void*)&WB_imageGetIndexAddress},
-  {CC"imageGetDataAddress",CC"(J)J",                  (void*)&WB_imageGetDataAddress},
-  {CC"imageRead",          CC"(JJLjava/nio/ByteBuffer;J)Z",
-                                                      (void*)&WB_imageRead    },
-  {CC"imageReadCompressed",CC"(JJLjava/nio/ByteBuffer;JLjava/nio/ByteBuffer;J)Z",
-                                                      (void*)&WB_imageReadCompressed},
-  {CC"imageGetStringBytes",CC"(JI)[B",                (void*)&WB_imageGetStringBytes},
-  {CC"imageGetStringsSize",CC"(J)J",                  (void*)&WB_imageGetStringsSize},
-  {CC"imageGetAttributes", CC"(JI)[J",                (void*)&WB_imageGetAttributes},
-  {CC"imageFindAttributes",CC"(J[B)[J",               (void*)&WB_imageFindAttributes},
-  {CC"imageAttributeOffsets",CC"(J)[I",               (void*)&WB_imageAttributeOffsets},
-  {CC"imageGetIntAtAddress",CC"(JIZ)I",                (void*)&WB_imageGetIntAtAddress},
   {CC"assertMatchingSafepointCalls", CC"(ZZ)V",       (void*)&WB_AssertMatchingSafepointCalls },
   {CC"isMonitorInflated0", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsMonitorInflated  },
   {CC"forceSafepoint",     CC"()V",                   (void*)&WB_ForceSafepoint     },
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -983,53 +983,61 @@
 
 bool Arguments::add_property(const char* prop) {
   const char* eq = strchr(prop, '=');
-  char* key;
-  // ns must be static--its address may be stored in a SystemProperty object.
-  const static char ns[1] = {0};
-  char* value = (char *)ns;
-
-  size_t key_len = (eq == NULL) ? strlen(prop) : (eq - prop);
-  key = AllocateHeap(key_len + 1, mtInternal);
-  strncpy(key, prop, key_len);
-  key[key_len] = '\0';
-
-  if (eq != NULL) {
-    size_t value_len = strlen(prop) - key_len - 1;
-    value = AllocateHeap(value_len + 1, mtInternal);
-    strncpy(value, &prop[key_len + 1], value_len + 1);
+  const char* key;
+  const char* value = "";
+
+  if (eq == NULL) {
+    // The property has no value, so use the passed string as the key.
+    key = prop;
+  } else {
+    // The property has a value, so extract the key into a newly
+    // allocated string.
+    size_t key_len = eq - prop;
+    char* tmp_key = AllocateHeap(key_len + 1, mtInternal);
+
+    strncpy(tmp_key, prop, key_len);
+    tmp_key[key_len] = '\0';
+    key = tmp_key;
+
+    value = &prop[key_len + 1];
   }
 
   if (strcmp(key, "java.compiler") == 0) {
     process_java_compiler_argument(value);
-    FreeHeap(key);
-    if (eq != NULL) {
-      FreeHeap(value);
-    }
-    return true;
-  } else if (strcmp(key, "sun.java.command") == 0) {
-    _java_command = value;
-
     // Record value in Arguments, but let it get passed to Java.
   } else if (strcmp(key, "sun.java.launcher.is_altjvm") == 0 ||
              strcmp(key, "sun.java.launcher.pid") == 0) {
     // sun.java.launcher.is_altjvm and sun.java.launcher.pid property are
     // private and are processed in process_sun_java_launcher_properties();
     // the sun.java.launcher property is passed on to the java application
-    FreeHeap(key);
-    if (eq != NULL) {
-      FreeHeap(value);
-    }
-    return true;
-  } else if (strcmp(key, "java.vendor.url.bug") == 0) {
-    // save it in _java_vendor_url_bug, so JVM fatal error handler can access
-    // its value without going through the property list or making a Java call.
-    _java_vendor_url_bug = value;
   } else if (strcmp(key, "sun.boot.library.path") == 0) {
     PropertyList_unique_add(&_system_properties, key, value, true);
-    return true;
-  }
-  // Create new property and add at the end of the list
-  PropertyList_unique_add(&_system_properties, key, value);
+  } else {
+    if (strcmp(key, "sun.java.command") == 0) {
+      if (_java_command != NULL) {
+        os::free(_java_command);
+      }
+      _java_command = os::strdup_check_oom(value, mtInternal);
+    } else if (strcmp(key, "java.vendor.url.bug") == 0) {
+      if (_java_vendor_url_bug != DEFAULT_VENDOR_URL_BUG) {
+        assert(_java_vendor_url_bug != NULL, "_java_vendor_url_bug is NULL");
+        os::free((void *)_java_vendor_url_bug);
+      }
+      // save it in _java_vendor_url_bug, so JVM fatal error handler can access
+      // its value without going through the property list or making a Java call.
+      _java_vendor_url_bug = os::strdup_check_oom(value, mtInternal);
+    }
+
+    // Create new property and add at the end of the list
+    PropertyList_unique_add(&_system_properties, key, value);
+  }
+
+  if (key != prop) {
+    // SystemProperty copies the passed value, so free the previously
+    // allocated memory.
+    FreeHeap((void *)key);
+  }
+
   return true;
 }
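
The reworked add_property() above splits each property string into a key and an
optional value: everything before the first '=' is the key (copied into a
heap-allocated buffer only when a value is present), and the text after '=' is
the value, defaulting to "". A minimal standalone sketch of that split,
assuming only the C and C++ standard libraries (split_property() is a
hypothetical helper, not part of HotSpot):

    #include <cstdio>
    #include <cstring>
    #include <string>

    // Split "key" or "key=value" the way add_property() does: the key is
    // everything before the first '=', and a property without '=' gets an
    // empty value.
    static void split_property(const char* prop, std::string* key, std::string* value) {
      const char* eq = strchr(prop, '=');
      if (eq == NULL) {
        *key = prop;                     // no value; the whole string is the key
        *value = "";
      } else {
        key->assign(prop, eq - prop);    // copy only the key portion
        *value = eq + 1;                 // value is whatever follows the '='
      }
    }

    int main() {
      std::string k, v;
      split_property("sun.java.command=MyApp arg1", &k, &v);
      printf("key='%s' value='%s'\n", k.c_str(), v.c_str());  // key='sun.java.command' value='MyApp arg1'
      split_property("java.compiler", &k, &v);
      printf("key='%s' value='%s'\n", k.c_str(), v.c_str());  // key='java.compiler' value=''
      return 0;
    }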
 
@@ -1046,7 +1054,7 @@
   // Ensure Agent_OnLoad has the correct initial values.
   // This may not be the final mode; mode may change later in onload phase.
   PropertyList_unique_add(&_system_properties, "java.vm.info",
-                          (char*)VM_Version::vm_info_string(), false);
+                          VM_Version::vm_info_string(), false);
 
   UseInterpreter             = true;
   UseCompiler                = true;
@@ -1583,9 +1591,6 @@
 #endif // _LP64
 #endif // !ZERO
 
-  // Set up runtime image flags.
-  set_runtime_image_flags();
-
   CodeCacheExtensions::set_ergonomics_flags();
 }
 
@@ -1840,16 +1845,6 @@
   }
 }
 
-  // Set up runtime image flags
-void Arguments::set_runtime_image_flags() {
-#ifdef _LP64
-  // Memory map image file by default on 64 bit machines.
-  if (FLAG_IS_DEFAULT(MemoryMapImage)) {
-    FLAG_SET_ERGO(bool, MemoryMapImage, true);
-  }
-#endif
-}
-
 // This must be called after ergonomics.
 void Arguments::set_bytecode_flags() {
   if (!RewriteBytecodes) {
@@ -1858,7 +1853,7 @@
 }
 
 // Aggressive optimization flags  -XX:+AggressiveOpts
-void Arguments::set_aggressive_opts_flags() {
+jint Arguments::set_aggressive_opts_flags() {
 #ifdef COMPILER2
   if (AggressiveUnboxing) {
     if (FLAG_IS_DEFAULT(EliminateAutoBox)) {
@@ -1885,7 +1880,9 @@
     // Feed the cache size setting into the JDK
     char buffer[1024];
     sprintf(buffer, "java.lang.Integer.IntegerCache.high=" INTX_FORMAT, AutoBoxCacheMax);
-    add_property(buffer);
+    if (!add_property(buffer)) {
+      return JNI_ENOMEM;
+    }
   }
   if (AggressiveOpts && FLAG_IS_DEFAULT(BiasedLockingStartupDelay)) {
     FLAG_SET_DEFAULT(BiasedLockingStartupDelay, 500);
@@ -1898,12 +1895,14 @@
 //      FLAG_SET_DEFAULT(EliminateZeroing, true);
 //    }
   }
+
+  return JNI_OK;
 }
 
 //===========================================================================================================
 // Parsing of java.compiler property
 
-void Arguments::process_java_compiler_argument(char* arg) {
+void Arguments::process_java_compiler_argument(const char* arg) {
   // For backwards compatibility, Djava.compiler=NONE or ""
   // causes us to switch to -Xint mode UNLESS -Xdebug
   // is also specified.
@@ -3130,8 +3129,10 @@
       jio_fprintf(defaultStream::output_stream(),
           "CreateMinidumpOnCrash is replaced by CreateCoredumpOnCrash: CreateCoredumpOnCrash is off\n");
     } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
-      // Skip -XX:Flags= since that case has already been handled
-      if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {
+      // Skip -XX:Flags= and -XX:VMOptionsFile= since those cases have
+      // already been handled
+      if ((strncmp(tail, "Flags=", strlen("Flags=")) != 0) &&
+          (strncmp(tail, "VMOptionsFile=", strlen("VMOptionsFile=")) != 0)) {
         if (!process_argument(tail, args->ignoreUnrecognized, origin)) {
           return JNI_EINVAL;
         }
@@ -3383,6 +3384,7 @@
 class ScopedVMInitArgs : public StackObj {
  private:
   JavaVMInitArgs _args;
+  bool           _is_set;
 
  public:
   ScopedVMInitArgs() {
@@ -3390,6 +3392,7 @@
     _args.nOptions = 0;
     _args.options = NULL;
     _args.ignoreUnrecognized = false;
+    _is_set = false;
   }
 
   // Populates the JavaVMInitArgs object represented by this
@@ -3398,6 +3401,7 @@
   // returns anything other than JNI_OK, then this object is in a
   // partially constructed state, and should be abandoned.
   jint set_args(GrowableArray<JavaVMOption>* options) {
+    _is_set = true;
     JavaVMOption* options_arr = NEW_C_HEAP_ARRAY_RETURN_NULL(
         JavaVMOption, options->length(), mtInternal);
     if (options_arr == NULL) {
@@ -3421,6 +3425,7 @@
   }
 
   JavaVMInitArgs* get() { return &_args; }
+  bool is_set()         { return _is_set; }
 
   ~ScopedVMInitArgs() {
     if (_args.options == NULL) return;
@@ -3429,6 +3434,35 @@
     }
     FREE_C_HEAP_ARRAY(JavaVMOption, _args.options);
   }
+
+  // Insert options into this option list, replacing the option at
+  // vm_options_file_pos (-XX:VMOptionsFile).
+  jint insert(const JavaVMInitArgs* args,
+              const JavaVMInitArgs* args_to_insert,
+              const int vm_options_file_pos) {
+    assert(_args.options == NULL, "shouldn't be set yet");
+    assert(args_to_insert->nOptions != 0, "there should be args to insert");
+    assert(vm_options_file_pos != -1, "vm_options_file_pos should be set");
+
+    int length = args->nOptions + args_to_insert->nOptions - 1;
+    GrowableArray<JavaVMOption> *options = new (ResourceObj::C_HEAP, mtInternal)
+              GrowableArray<JavaVMOption>(length, true);    // Construct new option array
+    for (int i = 0; i < args->nOptions; i++) {
+      if (i == vm_options_file_pos) {
+        // insert the new options starting at the same place as the
+        // -XX:VMOptionsFile option
+        for (int j = 0; j < args_to_insert->nOptions; j++) {
+          options->push(args_to_insert->options[j]);
+        }
+      } else {
+        options->push(args->options[i]);
+      }
+    }
+    // make into options array
+    jint result = set_args(options);
+    delete options;
+    return result;
+  }
 };
 
 jint Arguments::parse_java_options_environment_variable(ScopedVMInitArgs* args) {
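
ScopedVMInitArgs::insert() above performs a simple splice: the single
-XX:VMOptionsFile=... entry at vm_options_file_pos is replaced by all of the
options read from the file, and every other command-line option is kept in
order. A minimal standalone sketch of that splice over plain strings, assuming
only the C++ standard library (splice_options(), opts.txt and the option values
are hypothetical, not from this changeset):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Replace the element at 'pos' with the whole 'to_insert' sequence,
    // mirroring how the -XX:VMOptionsFile option is replaced by the options
    // read from the file.
    static std::vector<std::string> splice_options(const std::vector<std::string>& args,
                                                   const std::vector<std::string>& to_insert,
                                                   size_t pos) {
      std::vector<std::string> out;
      for (size_t i = 0; i < args.size(); i++) {
        if (i == pos) {
          out.insert(out.end(), to_insert.begin(), to_insert.end());
        } else {
          out.push_back(args[i]);
        }
      }
      return out;
    }

    int main() {
      std::vector<std::string> cmd_line  = { "-Xmx1g", "-XX:VMOptionsFile=opts.txt", "-Xint" };
      std::vector<std::string> from_file = { "-XX:+PrintVMOptions", "-Xss512k" };
      std::vector<std::string> merged = splice_options(cmd_line, from_file, 1);
      for (size_t i = 0; i < merged.size(); i++) {
        printf("%s\n", merged[i].c_str());  // -Xmx1g, -XX:+PrintVMOptions, -Xss512k, -Xint
      }
      return 0;
    }
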
@@ -3453,54 +3487,137 @@
     return JNI_ENOMEM;
   }
 
+  int retcode = parse_options_buffer(name, buffer, strlen(buffer), vm_args);
+
+  os::free(buffer);
+  return retcode;
+}
+
+const int OPTION_BUFFER_SIZE = 1024;
+
+jint Arguments::parse_vm_options_file(const char* file_name, ScopedVMInitArgs* vm_args) {
+  // read file into buffer
+  int fd = ::open(file_name, O_RDONLY);
+  if (fd < 0) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Could not open options file '%s'\n",
+                file_name);
+    return JNI_ERR;
+  }
+
+  // '+ 1' for NULL termination even with max bytes
+  int bytes_alloc = OPTION_BUFFER_SIZE + 1;
+
+  char *buf = NEW_C_HEAP_ARRAY_RETURN_NULL(char, bytes_alloc, mtInternal);
+  if (NULL == buf) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Could not allocate read buffer for options file parse\n");
+    os::close(fd);
+    return JNI_ENOMEM;
+  }
+
+  memset(buf, 0, (unsigned)bytes_alloc);
+
+  // Fill buffer
+  // Use ::read() instead of os::read() because os::read() might do a
+  // thread state transition, and it is too early in VM initialization
+  // for that here.
+
+  int bytes_read = ::read(fd, (void *)buf, (unsigned)bytes_alloc);
+  os::close(fd);
+  if (bytes_read < 0) {
+    FREE_C_HEAP_ARRAY(char, buf);
+    jio_fprintf(defaultStream::error_stream(),
+                "Could not read options file '%s'\n", file_name);
+    return JNI_ERR;
+  }
+
+  if (bytes_read == 0) {
+    // tell caller there is no option data and that is ok
+    FREE_C_HEAP_ARRAY(char, buf);
+    return JNI_OK;
+  }
+
+  // file is larger than OPTION_BUFFER_SIZE
+  if (bytes_read > bytes_alloc - 1) {
+    FREE_C_HEAP_ARRAY(char, buf);
+    jio_fprintf(defaultStream::error_stream(),
+                "Options file '%s' is larger than %d bytes.\n",
+                file_name, bytes_alloc - 1);
+    return JNI_EINVAL;
+  }
+
+  int retcode = parse_options_buffer(file_name, buf, bytes_read, vm_args);
+
+  FREE_C_HEAP_ARRAY(char, buf);
+  return retcode;
+}
+
+jint Arguments::parse_options_buffer(const char* name, char* buffer, const size_t buf_len, ScopedVMInitArgs* vm_args) {
   GrowableArray<JavaVMOption> *options = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JavaVMOption>(2, true);    // Construct option array
-  jio_fprintf(defaultStream::error_stream(),
-              "Picked up %s: %s\n", name, buffer);
-  char* rd = buffer;                        // pointer to the input string (rd)
-  while (true) {                            // repeat for all options in the input string
-    while (isspace(*rd)) rd++;              // skip whitespace
-    if (*rd == 0) break;                    // we re done when the input string is read completely
-
-    // The output, option string, overwrites the input string.
-    // Because of quoting, the pointer to the option string (wrt) may lag the pointer to
-    // input string (rd).
-    char* wrt = rd;
-
-    JavaVMOption option;
-    option.optionString = wrt;
-    options->append(option);                // Fill in option
-    while (*rd != 0 && !isspace(*rd)) {     // unquoted strings terminate with a space or NULL
+
+  // some pointers to help with parsing
+  char *buffer_end = buffer + buf_len;
+  char *opt_hd = buffer;
+  char *wrt = buffer;
+  char *rd = buffer;
+
+  // parse all options
+  while (rd < buffer_end) {
+    // skip leading white space from the input string
+    while (rd < buffer_end && isspace(*rd)) {
+      rd++;
+    }
+
+    if (rd >= buffer_end) {
+      break;
+    }
+
+    // Remember where the head of this token will be written.
+    opt_hd = wrt;
+
+    // Tokens are strings of non-white-space characters separated
+    // by one or more white-space characters.
+    while (rd < buffer_end && !isspace(*rd)) {
       if (*rd == '\'' || *rd == '"') {      // handle a quoted string
         int quote = *rd;                    // matching quote to look for
         rd++;                               // don't copy open quote
-        while (*rd != quote) {              // include everything (even spaces) up until quote
-          if (*rd == 0) {                   // string termination means unmatched string
-            jio_fprintf(defaultStream::error_stream(),
-                        "Unmatched quote in %s\n", name);
-            delete options;
-            os::free(buffer);
-            return JNI_ERR;
-          }
+        while (rd < buffer_end && *rd != quote) {
+                                            // include everything (even spaces)
+                                            // up until the close quote
           *wrt++ = *rd++;                   // copy to option string
         }
-        rd++;                               // don't copy close quote
+
+        if (rd < buffer_end) {
+          rd++;                             // don't copy close quote
+        } else {
+                                            // did not see closing quote
+          jio_fprintf(defaultStream::error_stream(),
+                      "Unmatched quote in %s\n", name);
+          delete options;
+          return JNI_ERR;
+        }
       } else {
         *wrt++ = *rd++;                     // copy to option string
       }
     }
-    if (*rd != 0) {
-      // In this case, the assignment to wrt below will make *rd nul,
-      // which will interfere with the next loop iteration.
-      rd++;
-    }
-    *wrt = 0;                               // Zero terminate option
+
+    // Steal a white space character and overwrite it with a NUL terminator.
+    *wrt++ = '\0';
+    // We now have a complete token
+
+    JavaVMOption option;
+    option.optionString = opt_hd;
+
+    options->append(option);                // Fill in option
+
+    rd++;  // Advance to next character
   }
 
   // Fill out JavaVMInitArgs structure.
   jint status = vm_args->set_args(options);
 
   delete options;
-  os::free(buffer);
   return status;
 }
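
The rewritten parse_options_buffer() above tokenizes the buffer on white space,
treats a single or double quote as the start of a run that may contain spaces,
and does not copy the quote characters into the token. A minimal standalone
sketch of the same tokenization rules, assuming only the C++ standard library
(tokenize() is a hypothetical helper; the unmatched-quote error path that the
real code handles with JNI_ERR is omitted here):

    #include <cctype>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Tokens are separated by white space; a ' or " starts a quoted run that
    // may contain spaces, and the quotes themselves are stripped.
    static std::vector<std::string> tokenize(const std::string& in) {
      std::vector<std::string> tokens;
      size_t i = 0;
      while (i < in.size()) {
        while (i < in.size() && isspace((unsigned char)in[i])) i++;   // skip separators
        if (i >= in.size()) break;
        std::string tok;
        while (i < in.size() && !isspace((unsigned char)in[i])) {
          if (in[i] == '\'' || in[i] == '"') {
            char quote = in[i++];                                     // don't copy the open quote
            while (i < in.size() && in[i] != quote) tok += in[i++];   // copy everything up to the close quote
            if (i < in.size()) i++;                                   // don't copy the close quote
          } else {
            tok += in[i++];
          }
        }
        tokens.push_back(tok);
      }
      return tokens;
    }

    int main() {
      std::vector<std::string> t = tokenize("-Xmx512m '-Dmsg=hello world' -Xint");
      for (size_t i = 0; i < t.size(); i++) {
        printf("[%s]\n", t[i].c_str());  // [-Xmx512m] [-Dmsg=hello world] [-Xint]
      }
      return 0;
    }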
 
@@ -3582,12 +3699,44 @@
 
   return false;
 }
+
 #endif // PRODUCT
 
-static jint match_special_option_and_act(const JavaVMInitArgs* args,
-                                        char** flags_file) {
+jint Arguments::insert_vm_options_file(const JavaVMInitArgs* args,
+                                       char** flags_file,
+                                       char** vm_options_file,
+                                       const int vm_options_file_pos,
+                                       ScopedVMInitArgs *vm_options_file_args,
+                                       ScopedVMInitArgs* args_out) {
+  jint code = parse_vm_options_file(*vm_options_file, vm_options_file_args);
+  if (code != JNI_OK) {
+    return code;
+  }
+
+  // Now set global settings from the vm options file, giving an error if
+  // it contains a VMOptionsFile option itself.
+  code = match_special_option_and_act(vm_options_file_args->get(), flags_file,
+                                      NULL, NULL, NULL);
+  if (code != JNI_OK) {
+    return code;
+  }
+
+  if (vm_options_file_args->get()->nOptions < 1) {
+    return 0;
+  }
+
+  return args_out->insert(args, vm_options_file_args->get(),
+                          vm_options_file_pos);
+}
+
+jint Arguments::match_special_option_and_act(const JavaVMInitArgs* args,
+                                             char ** flags_file,
+                                             char ** vm_options_file,
+                                             ScopedVMInitArgs* vm_options_file_args,
+                                             ScopedVMInitArgs* args_out) {
   // Remaining part of option string
   const char* tail;
+  int   vm_options_file_pos = -1;
 
   for (int index = 0; index < args->nOptions; index++) {
     const JavaVMOption* option = args->options + index;
@@ -3596,6 +3745,35 @@
     }
     if (match_option(option, "-XX:Flags=", &tail)) {
       *flags_file = (char *) tail;
+      if (*flags_file == NULL) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Cannot copy flags_file name.\n");
+        return JNI_ENOMEM;
+      }
+      continue;
+    }
+    if (match_option(option, "-XX:VMOptionsFile=", &tail)) {
+      if (vm_options_file != NULL) {
+        // The caller accepts -XX:VMOptionsFile
+        if (*vm_options_file != NULL) {
+          jio_fprintf(defaultStream::error_stream(),
+                      "Only one VM Options file is supported "
+                      "on the command line\n");
+          return JNI_EINVAL;
+        }
+
+        *vm_options_file = (char *) tail;
+        vm_options_file_pos = index;  // save position of -XX:VMOptionsFile
+        if (*vm_options_file == NULL) {
+          jio_fprintf(defaultStream::error_stream(),
+                      "Cannot copy vm_options_file name.\n");
+          return JNI_ENOMEM;
+        }
+      } else {
+        jio_fprintf(defaultStream::error_stream(),
+                    "VM options file is only supported on the command line\n");
+        return JNI_EINVAL;
+      }
       continue;
     }
     if (match_option(option, "-XX:+PrintVMOptions")) {
@@ -3649,6 +3827,12 @@
     }
 #endif
   }
+
+  // If there's a VMOptionsFile, parse that (also can set flags_file)
+  if ((vm_options_file != NULL) && (*vm_options_file != NULL)) {
+    return insert_vm_options_file(args, flags_file, vm_options_file,
+                                  vm_options_file_pos, vm_options_file_args, args_out);
+  }
   return JNI_OK;
 }
 
@@ -3673,10 +3857,15 @@
   // If flag "-XX:Flags=flags-file" is used it will be the first option to be processed.
   const char* hotspotrc = ".hotspotrc";
   char* flags_file = NULL;
+  char* vm_options_file = NULL;
   bool settings_file_specified = false;
   bool needs_hotspotrc_warning = false;
   ScopedVMInitArgs java_tool_options_args;
   ScopedVMInitArgs java_options_args;
+  ScopedVMInitArgs modified_cmd_line_args;
+  // Pass in vm_options_file_args to keep memory for flags_file from being
+  // deallocated if found in the vm options file.
+  ScopedVMInitArgs vm_options_file_args;
 
   jint code =
       parse_java_tool_options_environment_variable(&java_tool_options_args);
@@ -3689,18 +3878,27 @@
     return code;
   }
 
-  code =
-      match_special_option_and_act(java_tool_options_args.get(), &flags_file);
+  code = match_special_option_and_act(java_tool_options_args.get(),
+                                      &flags_file, NULL, NULL, NULL);
+  if (code != JNI_OK) {
+    return code;
+  }
+
+  code = match_special_option_and_act(args, &flags_file, &vm_options_file,
+                                      &vm_options_file_args,
+                                      &modified_cmd_line_args);
   if (code != JNI_OK) {
     return code;
   }
 
-  code = match_special_option_and_act(args, &flags_file);
-  if (code != JNI_OK) {
-    return code;
-  }
-
-  code = match_special_option_and_act(java_options_args.get(), &flags_file);
+
+  // The command line arguments have been modified to include VMOptionsFile arguments.
+  if (modified_cmd_line_args.is_set()) {
+    args = modified_cmd_line_args.get();
+  }
+
+  code = match_special_option_and_act(java_options_args.get(), &flags_file,
+                                      NULL, NULL, NULL);
   if (code != JNI_OK) {
     return code;
   }
@@ -3741,7 +3939,8 @@
 
   // Parse JavaVMInitArgs structure passed in, as well as JAVA_TOOL_OPTIONS and _JAVA_OPTIONS
   jint result = parse_vm_init_args(java_tool_options_args.get(),
-                                   java_options_args.get(), args);
+                                   java_options_args.get(),
+                                   args);   // command line arguments
 
   if (result != JNI_OK) {
     return result;
@@ -3870,7 +4069,10 @@
   set_bytecode_flags();
 
   // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled
-  set_aggressive_opts_flags();
+  jint code = set_aggressive_opts_flags();
+  if (code != JNI_OK) {
+    return code;
+  }
 
   // Turn off biased locking for locking debug mode flags,
   // which are subtly different from each other but neither works with
@@ -4036,7 +4238,7 @@
   }
 }
 
-void Arguments::PropertyList_add(SystemProperty** plist, const char* k, char* v) {
+void Arguments::PropertyList_add(SystemProperty** plist, const char* k, const char* v) {
   if (plist == NULL)
     return;
 
@@ -4049,7 +4251,7 @@
 }
 
 // This add maintains unique property key in the list.
-void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, char* v, jboolean append) {
+void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, const char* v, jboolean append) {
   if (plist == NULL)
     return;
 
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -60,7 +60,7 @@
   char* value() const                       { return _value; }
   SystemProperty* next() const              { return _next; }
   void set_next(SystemProperty* next)       { _next = next; }
-  bool set_value(char *value) {
+  bool set_value(const char *value) {
     if (writeable()) {
       if (_value != NULL) {
         FreeHeap(_value);
@@ -346,8 +346,6 @@
   static julong limit_by_allocatable_memory(julong size);
   // Setup heap size
   static void set_heap_size();
-  // Set up runtime image flags
-  static void set_runtime_image_flags();
   // Based on automatic selection criteria, should the
   // low pause collector be used.
   static bool should_auto_select_low_pause_collector();
@@ -364,17 +362,31 @@
   static bool add_property(const char* prop);
 
   // Aggressive optimization flags.
-  static void set_aggressive_opts_flags();
+  static jint set_aggressive_opts_flags();
 
   // Argument parsing
   static void do_pd_flag_adjustments();
   static bool parse_argument(const char* arg, Flag::Flags origin);
   static bool process_argument(const char* arg, jboolean ignore_unrecognized, Flag::Flags origin);
   static void process_java_launcher_argument(const char*, void*);
-  static void process_java_compiler_argument(char* arg);
+  static void process_java_compiler_argument(const char* arg);
   static jint parse_options_environment_variable(const char* name, ScopedVMInitArgs* vm_args);
   static jint parse_java_tool_options_environment_variable(ScopedVMInitArgs* vm_args);
   static jint parse_java_options_environment_variable(ScopedVMInitArgs* vm_args);
+  static jint parse_vm_options_file(const char* file_name, ScopedVMInitArgs* vm_args);
+  static jint parse_options_buffer(const char* name, char* buffer, const size_t buf_len, ScopedVMInitArgs* vm_args);
+  static jint insert_vm_options_file(const JavaVMInitArgs* args,
+                                     char** flags_file,
+                                     char** vm_options_file,
+                                     const int vm_options_file_pos,
+                                     ScopedVMInitArgs* vm_options_file_args,
+                                     ScopedVMInitArgs* args_out);
+  static jint match_special_option_and_act(const JavaVMInitArgs* args,
+                                           char** flags_file,
+                                           char** vm_options_file,
+                                           ScopedVMInitArgs* vm_options_file_args,
+                                           ScopedVMInitArgs* args_out);
+
   static jint parse_vm_init_args(const JavaVMInitArgs *java_tool_options_args,
                                  const JavaVMInitArgs *java_options_args,
                                  const JavaVMInitArgs *cmd_line_args);
@@ -561,22 +573,22 @@
   // Property List manipulation
   static void PropertyList_add(SystemProperty *element);
   static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
-  static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
-  static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v) {
+  static void PropertyList_add(SystemProperty** plist, const char* k, const char* v);
+  static void PropertyList_unique_add(SystemProperty** plist, const char* k, const char* v) {
     PropertyList_unique_add(plist, k, v, false);
   }
-  static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v, jboolean append);
+  static void PropertyList_unique_add(SystemProperty** plist, const char* k, const char* v, jboolean append);
   static const char* PropertyList_get_value(SystemProperty* plist, const char* key);
   static int  PropertyList_count(SystemProperty* pl);
   static const char* PropertyList_get_key_at(SystemProperty* pl,int index);
   static char* PropertyList_get_value_at(SystemProperty* pl,int index);
 
   // Miscellaneous System property value getter and setters.
-  static void set_dll_dir(char *value) { _sun_boot_library_path->set_value(value); }
-  static void set_java_home(char *value) { _java_home->set_value(value); }
-  static void set_library_path(char *value) { _java_library_path->set_value(value); }
+  static void set_dll_dir(const char *value) { _sun_boot_library_path->set_value(value); }
+  static void set_java_home(const char *value) { _java_home->set_value(value); }
+  static void set_library_path(const char *value) { _java_library_path->set_value(value); }
   static void set_ext_dirs(char *value)     { _ext_dirs = os::strdup_check_oom(value); }
-  static void set_sysclasspath(char *value) { _sun_boot_class_path->set_value(value); }
+  static void set_sysclasspath(const char *value) { _sun_boot_class_path->set_value(value); }
   static void append_sysclasspath(const char *value) { _sun_boot_class_path->append_value(value); }
 
   static char* get_java_home() { return _java_home->value(); }
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -60,8 +60,8 @@
   if ((UseConcMarkSweepGC || UseG1GC) && (value > PLAB::max_size())) {
     CommandLineError::print(verbose,
                             "%s (" SIZE_FORMAT ") must be "
-                            "less than ergonomic PLAB maximum size (" SIZE_FORMAT ")\n",
-                            name, value, PLAB::min_size());
+                            "less than or equal to ergonomic PLAB maximum size (" SIZE_FORMAT ")\n",
+                            name, value, PLAB::max_size());
     return Flag::VIOLATES_CONSTRAINT;
   }
 #endif // INCLUDE_ALL_GCS
--- a/hotspot/src/share/vm/runtime/globals.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -638,10 +638,6 @@
   experimental(bool, AlwaysSafeConstructors, false,                         \
           "Force safe construction, as if all fields are final.")           \
                                                                             \
-  /* Temporary: See 6948537 */                                              \
-  experimental(bool, UseMemSetInBOT, true,                                  \
-          "(Unstable) uses memset in BOT updates in GC code")               \
-                                                                            \
   diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug,                  \
           "Enable normal processing of flags relating to field diagnostics")\
                                                                             \
@@ -820,7 +816,7 @@
                                                                             \
   product(bool, UseSHA1Intrinsics, false,                                   \
           "Use intrinsics for SHA-1 crypto hash function. "                 \
-          "Requires that UseSHA is enabled.")                                \
+          "Requires that UseSHA is enabled.")                               \
                                                                             \
   product(bool, UseSHA256Intrinsics, false,                                 \
           "Use intrinsics for SHA-224 and SHA-256 crypto hash functions. "  \
@@ -1091,9 +1087,6 @@
   product(bool, AlwaysRestoreFPU, false,                                    \
           "Restore the FPU control word after every JNI call (expensive)")  \
                                                                             \
-  product(bool, MemoryMapImage, false,                                      \
-          "Memory map entire runtime image")                                \
-                                                                            \
   diagnostic(bool, PrintCompilation2, false,                                \
           "Print additional statistics per compilation")                    \
                                                                             \
@@ -1603,7 +1596,7 @@
           "(ParallelGC only)")                                              \
                                                                             \
   product(bool, ScavengeBeforeFullGC, true,                                 \
-          "Scavenge youngest generation before each full GC.")              \
+          "Scavenge young generation before each full GC.")                 \
                                                                             \
   develop(bool, ScavengeWithObjectsInToSpace, false,                        \
           "Allow scavenges to occur when to-space contains objects")        \
@@ -2101,11 +2094,11 @@
           "promotion failure")                                              \
                                                                             \
   notproduct(bool, PromotionFailureALot, false,                             \
-          "Use promotion failure handling on every youngest generation "    \
+          "Use promotion failure handling on every young generation "       \
           "collection")                                                     \
                                                                             \
   develop(uintx, PromotionFailureALotCount, 1000,                           \
-          "Number of promotion failures occurring at PLAB "     \
+          "Number of promotion failures occurring at PLAB "                 \
           "refill attempts (ParNew) or promotion attempts "                 \
           "(other young collectors)")                                       \
                                                                             \
--- a/hotspot/src/share/vm/runtime/init.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/init.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -83,7 +83,6 @@
 // during VM shutdown
 void perfMemory_exit();
 void ostream_exit();
-bool image_decompressor_init();
 
 void vm_init_globals() {
   check_ThreadShadow();
@@ -122,9 +121,6 @@
   templateTable_init();
   InterfaceSupport_init();
   SharedRuntime::generate_stubs();
-  if (!image_decompressor_init()) {
-    return JNI_ERR;
-  }
   universe2_init();  // dependent on codeCache_init and stubRoutines_init1
   referenceProcessor_init();
   jni_handles_init();
--- a/hotspot/src/share/vm/runtime/jniHandles.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/jniHandles.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -124,8 +124,8 @@
 }
 
 
-void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
-  _weak_global_handles->weak_oops_do(is_alive, f);
+size_t JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  return _weak_global_handles->weak_oops_do(is_alive, f);
 }
 
 
@@ -380,8 +380,9 @@
 }
 
 
-void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
-                                  OopClosure* f) {
+size_t JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
+                                    OopClosure* f) {
+  size_t count = 0;
   for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
     assert(current->pop_frame_link() == NULL,
       "blocks holding weak global JNI handles should not have pop frame link set");
@@ -390,6 +391,7 @@
       oop value = *root;
       // traverse heap pointers only, not deleted handles or free list pointers
       if (value != NULL && Universe::heap()->is_in_reserved(value)) {
+        count++;
         if (is_alive->do_object_b(value)) {
           // The weakly referenced object is alive, update pointer
           f->do_oop(root);
@@ -412,7 +414,9 @@
    * JVMTI data structures may also contain weak oops.  The iteration of them
    * is placed here so that we don't need to add it to each of the collectors.
    */
-  JvmtiExport::weak_oops_do(is_alive, f);
+  count += JvmtiExport::weak_oops_do(is_alive, f);
+
+  return count;
 }
 
 
--- a/hotspot/src/share/vm/runtime/jniHandles.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/jniHandles.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -85,7 +85,7 @@
   // Traversal of regular global handles
   static void oops_do(OopClosure* f);
   // Traversal of weak global handles. Unreachable oops are cleared.
-  static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+  static size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
 };
 
 
@@ -153,7 +153,7 @@
   // Traversal of regular handles
   void oops_do(OopClosure* f);
   // Traversal of weak handles. Unreachable oops are cleared.
-  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+  size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
 
   // Checked JNI support
   void set_planned_capacity(size_t planned_capacity) { _planned_capacity = planned_capacity; }
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -100,7 +100,6 @@
 Mutex*   ExceptionCache_lock          = NULL;
 Monitor* ObjAllocPost_lock            = NULL;
 Mutex*   OsrList_lock                 = NULL;
-Mutex*   ImageFileReaderTable_lock    = NULL;
 
 #ifndef PRODUCT
 Mutex*   FullGCALot_lock              = NULL;
@@ -228,7 +227,6 @@
   def(ProfilePrint_lock            , Mutex  , leaf,        false, Monitor::_safepoint_check_always);     // serial profile printing
   def(ExceptionCache_lock          , Mutex  , leaf,        false, Monitor::_safepoint_check_always);     // serial profile printing
   def(OsrList_lock                 , Mutex  , leaf,        true,  Monitor::_safepoint_check_never);
-  def(ImageFileReaderTable_lock    , Mutex  , nonleaf,     false, Monitor::_safepoint_check_always);     // synchronize image readers open/close
   def(Debug1_lock                  , Mutex  , leaf,        true,  Monitor::_safepoint_check_never);
 #ifndef PRODUCT
   def(FullGCALot_lock              , Mutex  , leaf,        false, Monitor::_safepoint_check_always);     // a lock to make FullGCALot MT safe
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -102,7 +102,6 @@
 extern Mutex*   ProfilePrint_lock;               // a lock used to serialize the printing of profiles
 extern Mutex*   ExceptionCache_lock;             // a lock used to synchronize exception cache updates
 extern Mutex*   OsrList_lock;                    // a lock used to serialize access to OSR queues
-extern Mutex*   ImageFileReaderTable_lock;       // a lock used to synchronize image readers open/close
 
 #ifndef PRODUCT
 extern Mutex*   FullGCALot_lock;                 // a lock to make FullGCALot MT safe
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -184,7 +184,8 @@
 //
 // * The monitor entry list operations avoid locks, but strictly speaking
 //   they're not lock-free.  Enter is lock-free, exit is not.
-//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
+//   For a description of 'Methods and apparatus providing non-blocking access
+//   to a resource,' see U.S. Pat. No. 7844973.
 //
 // * The cxq can have multiple concurrent "pushers" but only one concurrent
 //   detaching thread.  This mechanism is immune from the ABA corruption.
@@ -405,9 +406,7 @@
     event.commit();
   }
 
-  if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
-    ObjectMonitor::_sync_ContendedLockAttempts->inc();
-  }
+  OM_PERFDATA_OP(ContendedLockAttempts, inc());
 }
 
 
@@ -574,9 +573,9 @@
     // That is by design - we trade "lossy" counters which are exposed to
     // races during updates for a lower probe effect.
     TEVENT(Inflated enter - Futile wakeup);
-    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
-      ObjectMonitor::_sync_FutileWakeups->inc();
-    }
+    // This PerfData object can be used in parallel with a safepoint.
+    // See the workaround in PerfDataManager::destroy().
+    OM_PERFDATA_OP(FutileWakeups, inc());
     ++nWakeups;
 
     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
@@ -748,9 +747,9 @@
     // *must* retry  _owner before parking.
     OrderAccess::fence();
 
-    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
-      ObjectMonitor::_sync_FutileWakeups->inc();
-    }
+    // This PerfData object can be used in parallel with a safepoint.
+    // See the workaround in PerfDataManager::destroy().
+    OM_PERFDATA_OP(FutileWakeups, inc());
   }
 
   // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
@@ -1302,9 +1301,7 @@
   Trigger->unpark();
 
   // Maintain stats and report events to JVMTI
-  if (ObjectMonitor::_sync_Parks != NULL) {
-    ObjectMonitor::_sync_Parks->inc();
-  }
+  OM_PERFDATA_OP(Parks, inc());
 }
 
 
@@ -1765,9 +1762,7 @@
   }
   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
   INotify(THREAD);
-  if (ObjectMonitor::_sync_Notifications != NULL) {
-    ObjectMonitor::_sync_Notifications->inc(1);
-  }
+  OM_PERFDATA_OP(Notifications, inc(1));
 }
 
 
@@ -1792,9 +1787,7 @@
     INotify(THREAD);
   }
 
-  if (tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
-    ObjectMonitor::_sync_Notifications->inc(tally);
-  }
+  OM_PERFDATA_OP(Notifications, inc(tally));
 }
 
 // -----------------------------------------------------------------------------
@@ -1816,7 +1809,8 @@
 // (duration) or we can fix the count at approximately the duration of
 // a context switch and vary the frequency.   Of course we could also
 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
-// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
+// For a description of 'Adaptive spin-then-block mutual exclusion in
+// multi-threaded processing,' see U.S. Pat. No. 8046758.
 //
 // This implementation varies the duration "D", where D varies with
 // the success rate of recent spin attempts. (D is capped at approximately
--- a/hotspot/src/share/vm/runtime/objectMonitor.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/objectMonitor.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -177,6 +177,19 @@
 
  public:
   static void Initialize();
+
+  // Only perform a PerfData operation if the PerfData object has been
+  // allocated and if the PerfDataManager has not freed the PerfData
+  // objects, which can happen at normal VM shutdown.
+  //
+  #define OM_PERFDATA_OP(f, op_str)              \
+    do {                                         \
+      if (ObjectMonitor::_sync_ ## f != NULL &&  \
+          PerfDataManager::has_PerfData()) {     \
+        ObjectMonitor::_sync_ ## f->op_str;      \
+      }                                          \
+    } while (0)
+
   static PerfCounter * _sync_ContendedLockAttempts;
   static PerfCounter * _sync_FutileWakeups;
   static PerfCounter * _sync_Parks;
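
For illustration, OM_PERFDATA_OP(Parks, inc()) expands to a NULL check on
ObjectMonitor::_sync_Parks plus a PerfDataManager::has_PerfData() check before
the inc() call, so counter updates become no-ops both before the counters are
allocated and after shutdown has started tearing them down. A minimal
standalone sketch of the same guard pattern outside HotSpot (PerfCounterStub,
_sync_Parks, has_perf_data and PERFDATA_OP here are hypothetical stand-ins):

    #include <cstddef>
    #include <cstdio>

    // Stand-ins for a lazily allocated counter and a manager lifecycle flag.
    struct PerfCounterStub { long _v; void inc() { _v++; } };

    static PerfCounterStub* _sync_Parks   = NULL;   // may not be allocated yet
    static volatile bool    has_perf_data = false;  // cleared when shutdown begins

    // Same shape as OM_PERFDATA_OP: only touch the counter when it exists and
    // the manager has not started freeing the counters.
    #define PERFDATA_OP(f, op_str)          \
      do {                                  \
        if (_sync_ ## f != NULL &&          \
            has_perf_data) {                \
          _sync_ ## f->op_str;              \
        }                                   \
      } while (0)

    int main() {
      PERFDATA_OP(Parks, inc());            // no-op: counter not allocated yet
      static PerfCounterStub parks = { 0 };
      _sync_Parks = &parks;
      has_perf_data = true;
      PERFDATA_OP(Parks, inc());            // increments
      has_perf_data = false;                // the shutdown path clears the flag first
      PERFDATA_OP(Parks, inc());            // no-op again
      printf("parks = %ld\n", parks._v);    // parks = 1
      return 0;
    }
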
--- a/hotspot/src/share/vm/runtime/perfData.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/perfData.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
 PerfDataList*   PerfDataManager::_all = NULL;
 PerfDataList*   PerfDataManager::_sampled = NULL;
 PerfDataList*   PerfDataManager::_constants = NULL;
+volatile bool   PerfDataManager::_has_PerfData = 0;
 
 /*
  * The jvmstat global and subsystem jvmstat counter name spaces. The top
@@ -272,16 +273,22 @@
 }
 
 
-
-
-
-
 void PerfDataManager::destroy() {
 
   if (_all == NULL)
     // destroy already called, or initialization never happened
     return;
 
+  // Clear the flag before we free the PerfData counters. Thus begins
+  // the race between this thread and another thread that has just
+  // queried PerfDataManager::has_PerfData() and gotten back 'true'.
+  // The hope is that the other thread will finish its PerfData
+  // manipulation before we free the memory. The two alternatives are
+  // 1) leak the PerfData memory or 2) do some form of synchronized
+  // access or check before every PerfData operation.
+  _has_PerfData = false;
+  os::naked_short_sleep(1);  // 1ms sleep to let other thread(s) run
+
   for (int index = 0; index < _all->length(); index++) {
     PerfData* p = _all->at(index);
     delete p;
@@ -302,6 +309,7 @@
 
   if (_all == NULL) {
     _all = new PerfDataList(100);
+    _has_PerfData = true;
   }
 
   assert(!_all->contains(p->name()), "duplicate name added");
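
PerfDataManager::destroy() above orders the shutdown as: clear _has_PerfData,
sleep roughly one millisecond so that a thread which has already seen
has_PerfData() return true can finish its update, and only then free the
counters; the comment is explicit that this is best-effort rather than a hard
guarantee. A minimal single-threaded sketch of that ordering, assuming only the
C++ standard library (CounterStub, destroy_counters() and the globals are
hypothetical stand-ins; the real code uses os::naked_short_sleep() and races
with live VM threads):

    #include <chrono>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct CounterStub { long _v; void inc() { _v++; } };

    static std::vector<CounterStub*> all_counters;
    static volatile bool has_perf_data = false;

    // Shutdown ordering used by PerfDataManager::destroy(): publish "no more
    // PerfData" first, give in-flight users a brief window to drain, and only
    // then free the counters.
    static void destroy_counters() {
      has_perf_data = false;                                      // step 1: stop new uses
      std::this_thread::sleep_for(std::chrono::milliseconds(1));  // step 2: let racing users finish
      for (size_t i = 0; i < all_counters.size(); i++) {          // step 3: free the memory
        delete all_counters[i];
      }
      all_counters.clear();
    }

    int main() {
      all_counters.push_back(new CounterStub());
      has_perf_data = true;
      if (has_perf_data) all_counters[0]->inc();                  // guarded use, as in OM_PERFDATA_OP
      destroy_counters();
      printf("counters left: %zu\n", all_counters.size());        // counters left: 0
      return 0;
    }
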
--- a/hotspot/src/share/vm/runtime/perfData.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/perfData.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -668,6 +668,7 @@
     static PerfDataList* _sampled;
     static PerfDataList* _constants;
     static const char* _name_spaces[];
+    static volatile bool _has_PerfData;
 
     // add a PerfData item to the list(s) of know PerfData objects
     static void add_item(PerfData* p, bool sampled);
@@ -869,6 +870,7 @@
     }
 
     static void destroy();
+    static bool has_PerfData() { return _has_PerfData; }
 };
 
 // Useful macros to create the performance counters
--- a/hotspot/src/share/vm/runtime/perfMemory.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/perfMemory.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 #include "runtime/os.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/perfMemory.hpp"
+#include "runtime/safepoint.hpp"
 #include "runtime/statSampler.hpp"
 #include "utilities/globalDefinitions.hpp"
 
@@ -64,16 +65,20 @@
   if (!UsePerfData) return;
   if (!PerfMemory::is_initialized()) return;
 
-  // if the StatSampler is active, then we don't want to remove
-  // resources it may be dependent on. Typically, the StatSampler
-  // is disengaged from the watcher thread when this method is called,
-  // but it is not disengaged if this method is invoked during a
-  // VM abort.
+  // Only destroy PerfData objects if we're at a safepoint and the
+  // StatSampler is not active. Otherwise, we risk removing PerfData
+  // objects that are currently being used by running JavaThreads
+  // or the StatSampler. This method is invoked while we are not at
+  // a safepoint during a VM abort, so leaving the PerfData objects
+  // around may also help diagnose the failure. In rare cases,
+  // PerfData objects are used in parallel with a safepoint. See
+  // the workaround in PerfDataManager::destroy().
   //
-  if (!StatSampler::is_active())
+  if (SafepointSynchronize::is_at_safepoint() && !StatSampler::is_active()) {
     PerfDataManager::destroy();
+  }
 
-  // remove the persistent external resources, if any. this method
+  // Remove the persistent external resources, if any. This method
   // does not unmap or invalidate any virtual memory allocated during
   // initialization.
   //
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -189,9 +189,7 @@
         mon->INotify(self);
         ++tally;
       } while (mon->first_waiter() != NULL && all);
-      if (ObjectMonitor::_sync_Notifications != NULL) {
-        ObjectMonitor::_sync_Notifications->inc(tally);
-      }
+      OM_PERFDATA_OP(Notifications, inc(tally));
     }
     return true;
   }
@@ -1362,7 +1360,7 @@
       }
 
       // We've successfully installed INFLATING (0) into the mark-word.
-      // This is the only case where 0 will appear in a mark-work.
+      // This is the only case where 0 will appear in a mark-word.
       // Only the singular thread that successfully swings the mark-word
       // to 0 can perform (or more precisely, complete) inflation.
       //
@@ -1413,7 +1411,7 @@
 
       // Hopefully the performance counters are allocated on distinct cache lines
       // to avoid false sharing on MP systems ...
-      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
+      OM_PERFDATA_OP(Inflations, inc());
       TEVENT(Inflate: overwrite stacklock);
       if (TraceMonitorInflation) {
         if (object->is_instance()) {
@@ -1461,7 +1459,7 @@
 
     // Hopefully the performance counters are allocated on distinct
     // cache lines to avoid false sharing on MP systems ...
-    if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
+    OM_PERFDATA_OP(Inflations, inc());
     TEVENT(Inflate: overwrite neutral);
     if (TraceMonitorInflation) {
       if (object->is_instance()) {
@@ -1678,8 +1676,8 @@
   }
   Thread::muxRelease(&gListLock);
 
-  if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged);
-  if (ObjectMonitor::_sync_MonExtant  != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);
+  OM_PERFDATA_OP(Deflations, inc(nScavenged));
+  OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));
 
   // TODO: Add objectMonitor leak detection.
   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -283,17 +283,17 @@
   volatile_nonstatic_field(ArrayKlass,         _higher_dimension,                             Klass*)                                \
   volatile_nonstatic_field(ArrayKlass,         _lower_dimension,                              Klass*)                                \
   nonstatic_field(ArrayKlass,                  _vtable_len,                                   int)                                   \
-  nonstatic_field(CompiledICHolder,     _holder_method,                                Method*)                               \
-  nonstatic_field(CompiledICHolder,     _holder_klass,                                 Klass*)                                \
-  nonstatic_field(ConstantPool,         _tags,                                         Array<u1>*)                            \
-  nonstatic_field(ConstantPool,         _cache,                                        ConstantPoolCache*)                    \
-  nonstatic_field(ConstantPool,         _pool_holder,                                  InstanceKlass*)                        \
-  nonstatic_field(ConstantPool,         _operands,                                     Array<u2>*)                            \
-  nonstatic_field(ConstantPool,         _length,                                       int)                                   \
-  nonstatic_field(ConstantPool,         _resolved_references,                          jobject)                               \
-  nonstatic_field(ConstantPool,         _reference_map,                                Array<u2>*)                            \
-  nonstatic_field(ConstantPoolCache,    _length,                                       int)                                   \
-  nonstatic_field(ConstantPoolCache,    _constant_pool,                                ConstantPool*)                         \
+  nonstatic_field(CompiledICHolder,            _holder_method,                                Method*)                               \
+  nonstatic_field(CompiledICHolder,            _holder_klass,                                 Klass*)                                \
+  nonstatic_field(ConstantPool,                _tags,                                         Array<u1>*)                            \
+  nonstatic_field(ConstantPool,                _cache,                                        ConstantPoolCache*)                    \
+  nonstatic_field(ConstantPool,                _pool_holder,                                  InstanceKlass*)                        \
+  nonstatic_field(ConstantPool,                _operands,                                     Array<u2>*)                            \
+  nonstatic_field(ConstantPool,                _length,                                       int)                                   \
+  nonstatic_field(ConstantPool,                _resolved_references,                          jobject)                               \
+  nonstatic_field(ConstantPool,                _reference_map,                                Array<u2>*)                            \
+  nonstatic_field(ConstantPoolCache,           _length,                                       int)                                   \
+  nonstatic_field(ConstantPoolCache,           _constant_pool,                                ConstantPool*)                         \
   nonstatic_field(InstanceKlass,               _array_klasses,                                Klass*)                                \
   nonstatic_field(InstanceKlass,               _methods,                                      Array<Method*>*)                       \
   nonstatic_field(InstanceKlass,               _default_methods,                              Array<Method*>*)                       \
@@ -303,12 +303,12 @@
   nonstatic_field(InstanceKlass,               _java_fields_count,                            u2)                                    \
   nonstatic_field(InstanceKlass,               _constants,                                    ConstantPool*)                         \
   nonstatic_field(InstanceKlass,               _class_loader_data,                            ClassLoaderData*)                      \
-  nonstatic_field(InstanceKlass,               _source_file_name_index,                            u2)                               \
+  nonstatic_field(InstanceKlass,               _source_file_name_index,                       u2)                                    \
   nonstatic_field(InstanceKlass,               _source_debug_extension,                       char*)                                 \
-  nonstatic_field(InstanceKlass,               _inner_classes,                               Array<jushort>*)                       \
+  nonstatic_field(InstanceKlass,               _inner_classes,                                Array<jushort>*)                       \
   nonstatic_field(InstanceKlass,               _nonstatic_field_size,                         int)                                   \
   nonstatic_field(InstanceKlass,               _static_field_size,                            int)                                   \
-  nonstatic_field(InstanceKlass,               _static_oop_field_count,                       u2)                                   \
+  nonstatic_field(InstanceKlass,               _static_oop_field_count,                       u2)                                    \
   nonstatic_field(InstanceKlass,               _nonstatic_oop_map_size,                       int)                                   \
   nonstatic_field(InstanceKlass,               _is_marked_dependent,                          bool)                                  \
   nonstatic_field(InstanceKlass,               _minor_version,                                u2)                                    \
@@ -346,62 +346,62 @@
   nonstatic_field(Klass,                       _prototype_header,                             markOop)                               \
   nonstatic_field(Klass,                       _next_sibling,                                 Klass*)                                \
   nonstatic_field(vtableEntry,                 _method,                                       Method*)                               \
-  nonstatic_field(MethodData,           _size,                                         int)                                   \
-  nonstatic_field(MethodData,           _method,                                       Method*)                               \
-  nonstatic_field(MethodData,           _data_size,                                    int)                                   \
-  nonstatic_field(MethodData,           _data[0],                                      intptr_t)                              \
-  nonstatic_field(MethodData,           _parameters_type_data_di,                      int)                                   \
-  nonstatic_field(MethodData,           _nof_decompiles,                               uint)                                  \
-  nonstatic_field(MethodData,           _nof_overflow_recompiles,                      uint)                                  \
-  nonstatic_field(MethodData,           _nof_overflow_traps,                           uint)                                  \
-  nonstatic_field(MethodData,           _trap_hist._array[0],                          u1)                                    \
-  nonstatic_field(MethodData,           _eflags,                                       intx)                                  \
-  nonstatic_field(MethodData,           _arg_local,                                    intx)                                  \
-  nonstatic_field(MethodData,           _arg_stack,                                    intx)                                  \
-  nonstatic_field(MethodData,           _arg_returned,                                 intx)                                  \
-  nonstatic_field(MethodData,           _tenure_traps,                                 uint)                                  \
-  nonstatic_field(MethodData,           _invoke_mask,                                  int)                                   \
-  nonstatic_field(MethodData,           _backedge_mask,                                int)                                   \
-  nonstatic_field(DataLayout,           _header._struct._tag,                          u1)                                    \
-  nonstatic_field(DataLayout,           _header._struct._flags,                        u1)                                    \
-  nonstatic_field(DataLayout,           _header._struct._bci,                          u2)                                    \
-  nonstatic_field(DataLayout,           _cells[0],                                     intptr_t)                              \
-  nonstatic_field(MethodCounters,       _nmethod_age,                                  int)                                   \
-  nonstatic_field(MethodCounters,       _interpreter_invocation_limit,                 int)                                   \
-  nonstatic_field(MethodCounters,       _interpreter_backward_branch_limit,            int)                                   \
-  nonstatic_field(MethodCounters,       _interpreter_profile_limit,                    int)                                   \
-  nonstatic_field(MethodCounters,       _invoke_mask,                                  int)                                   \
-  nonstatic_field(MethodCounters,       _backedge_mask,                                int)                                   \
-  nonstatic_field(MethodCounters,       _interpreter_invocation_count,                 int)                                   \
-  nonstatic_field(MethodCounters,       _interpreter_throwout_count,                   u2)                                    \
-  nonstatic_field(MethodCounters,       _number_of_breakpoints,                        u2)                                    \
-  nonstatic_field(MethodCounters,       _invocation_counter,                           InvocationCounter)                     \
-  nonstatic_field(MethodCounters,       _backedge_counter,                             InvocationCounter)                     \
-  nonstatic_field(Method,               _constMethod,                                  ConstMethod*)                          \
-  nonstatic_field(Method,               _method_data,                                  MethodData*)                           \
-  nonstatic_field(Method,               _method_counters,                              MethodCounters*)                       \
-  nonstatic_field(Method,               _access_flags,                                 AccessFlags)                           \
-  nonstatic_field(Method,               _vtable_index,                                 int)                                   \
-  nonstatic_field(Method,               _method_size,                                  u2)                                    \
-  nonstatic_field(Method,               _intrinsic_id,                                 u1)                                    \
-  nonproduct_nonstatic_field(Method,    _compiled_invocation_count,                    int)                                   \
-  volatile_nonstatic_field(Method,      _code,                                         nmethod*)                              \
-  nonstatic_field(Method,               _i2i_entry,                                    address)                               \
-  nonstatic_field(Method,               _adapter,                                      AdapterHandlerEntry*)                  \
-  volatile_nonstatic_field(Method,      _from_compiled_entry,                          address)                               \
-  volatile_nonstatic_field(Method,      _from_interpreted_entry,                       address)                               \
-  volatile_nonstatic_field(ConstMethod, _fingerprint,                                  uint64_t)                              \
-  nonstatic_field(ConstMethod,          _constants,                                    ConstantPool*)                         \
-  nonstatic_field(ConstMethod,          _stackmap_data,                                Array<u1>*)                            \
-  nonstatic_field(ConstMethod,          _constMethod_size,                             int)                                   \
-  nonstatic_field(ConstMethod,          _flags,                                        u2)                                    \
-  nonstatic_field(ConstMethod,          _code_size,                                    u2)                                    \
-  nonstatic_field(ConstMethod,          _name_index,                                   u2)                                    \
-  nonstatic_field(ConstMethod,          _signature_index,                              u2)                                    \
-  nonstatic_field(ConstMethod,          _method_idnum,                                 u2)                                    \
-  nonstatic_field(ConstMethod,          _max_stack,                                    u2)                                    \
-  nonstatic_field(ConstMethod,          _max_locals,                                   u2)                                    \
-  nonstatic_field(ConstMethod,          _size_of_parameters,                           u2)                                    \
+  nonstatic_field(MethodData,                  _size,                                         int)                                   \
+  nonstatic_field(MethodData,                  _method,                                       Method*)                               \
+  nonstatic_field(MethodData,                  _data_size,                                    int)                                   \
+  nonstatic_field(MethodData,                  _data[0],                                      intptr_t)                              \
+  nonstatic_field(MethodData,                  _parameters_type_data_di,                      int)                                   \
+  nonstatic_field(MethodData,                  _nof_decompiles,                               uint)                                  \
+  nonstatic_field(MethodData,                  _nof_overflow_recompiles,                      uint)                                  \
+  nonstatic_field(MethodData,                  _nof_overflow_traps,                           uint)                                  \
+  nonstatic_field(MethodData,                  _trap_hist._array[0],                          u1)                                    \
+  nonstatic_field(MethodData,                  _eflags,                                       intx)                                  \
+  nonstatic_field(MethodData,                  _arg_local,                                    intx)                                  \
+  nonstatic_field(MethodData,                  _arg_stack,                                    intx)                                  \
+  nonstatic_field(MethodData,                  _arg_returned,                                 intx)                                  \
+  nonstatic_field(MethodData,                  _tenure_traps,                                 uint)                                  \
+  nonstatic_field(MethodData,                  _invoke_mask,                                  int)                                   \
+  nonstatic_field(MethodData,                  _backedge_mask,                                int)                                   \
+  nonstatic_field(DataLayout,                  _header._struct._tag,                          u1)                                    \
+  nonstatic_field(DataLayout,                  _header._struct._flags,                        u1)                                    \
+  nonstatic_field(DataLayout,                  _header._struct._bci,                          u2)                                    \
+  nonstatic_field(DataLayout,                  _cells[0],                                     intptr_t)                              \
+  nonstatic_field(MethodCounters,              _nmethod_age,                                  int)                                   \
+  nonstatic_field(MethodCounters,              _interpreter_invocation_limit,                 int)                                   \
+  nonstatic_field(MethodCounters,              _interpreter_backward_branch_limit,            int)                                   \
+  nonstatic_field(MethodCounters,              _interpreter_profile_limit,                    int)                                   \
+  nonstatic_field(MethodCounters,              _invoke_mask,                                  int)                                   \
+  nonstatic_field(MethodCounters,              _backedge_mask,                                int)                                   \
+  nonstatic_field(MethodCounters,              _interpreter_invocation_count,                 int)                                   \
+  nonstatic_field(MethodCounters,              _interpreter_throwout_count,                   u2)                                    \
+  nonstatic_field(MethodCounters,              _number_of_breakpoints,                        u2)                                    \
+  nonstatic_field(MethodCounters,              _invocation_counter,                           InvocationCounter)                     \
+  nonstatic_field(MethodCounters,              _backedge_counter,                             InvocationCounter)                     \
+  nonstatic_field(Method,                      _constMethod,                                  ConstMethod*)                          \
+  nonstatic_field(Method,                      _method_data,                                  MethodData*)                           \
+  nonstatic_field(Method,                      _method_counters,                              MethodCounters*)                       \
+  nonstatic_field(Method,                      _access_flags,                                 AccessFlags)                           \
+  nonstatic_field(Method,                      _vtable_index,                                 int)                                   \
+  nonstatic_field(Method,                      _method_size,                                  u2)                                    \
+  nonstatic_field(Method,                      _intrinsic_id,                                 u1)                                    \
+  nonproduct_nonstatic_field(Method,           _compiled_invocation_count,                    int)                                   \
+  volatile_nonstatic_field(Method,             _code,                                         nmethod*)                              \
+  nonstatic_field(Method,                      _i2i_entry,                                    address)                               \
+  nonstatic_field(Method,                      _adapter,                                      AdapterHandlerEntry*)                  \
+  volatile_nonstatic_field(Method,             _from_compiled_entry,                          address)                               \
+  volatile_nonstatic_field(Method,             _from_interpreted_entry,                       address)                               \
+  volatile_nonstatic_field(ConstMethod,        _fingerprint,                                  uint64_t)                              \
+  nonstatic_field(ConstMethod,                 _constants,                                    ConstantPool*)                         \
+  nonstatic_field(ConstMethod,                 _stackmap_data,                                Array<u1>*)                            \
+  nonstatic_field(ConstMethod,                 _constMethod_size,                             int)                                   \
+  nonstatic_field(ConstMethod,                 _flags,                                        u2)                                    \
+  nonstatic_field(ConstMethod,                 _code_size,                                    u2)                                    \
+  nonstatic_field(ConstMethod,                 _name_index,                                   u2)                                    \
+  nonstatic_field(ConstMethod,                 _signature_index,                              u2)                                    \
+  nonstatic_field(ConstMethod,                 _method_idnum,                                 u2)                                    \
+  nonstatic_field(ConstMethod,                 _max_stack,                                    u2)                                    \
+  nonstatic_field(ConstMethod,                 _max_locals,                                   u2)                                    \
+  nonstatic_field(ConstMethod,                 _size_of_parameters,                           u2)                                    \
   nonstatic_field(ObjArrayKlass,               _element_klass,                                Klass*)                                \
   nonstatic_field(ObjArrayKlass,               _bottom_klass,                                 Klass*)                                \
   volatile_nonstatic_field(Symbol,             _refcount,                                     short)                                 \
@@ -414,10 +414,10 @@
   /* Constant Pool Cache */                                                                                                          \
   /***********************/                                                                                                          \
                                                                                                                                      \
-  volatile_nonstatic_field(ConstantPoolCacheEntry,      _indices,                                      intx)                         \
-  nonstatic_field(ConstantPoolCacheEntry,               _f1,                                           volatile Metadata*)           \
-  volatile_nonstatic_field(ConstantPoolCacheEntry,      _f2,                                           intx)                         \
-  volatile_nonstatic_field(ConstantPoolCacheEntry,      _flags,                                        intx)                         \
+  volatile_nonstatic_field(ConstantPoolCacheEntry,      _indices,                             intx)                                  \
+  nonstatic_field(ConstantPoolCacheEntry,               _f1,                                  volatile Metadata*)                    \
+  volatile_nonstatic_field(ConstantPoolCacheEntry,      _f2,                                  intx)                                  \
+  volatile_nonstatic_field(ConstantPoolCacheEntry,      _flags,                               intx)                                  \
                                                                                                                                      \
   /********************************/                                                                                                 \
   /* MethodOop-related structures */                                                                                                 \
@@ -631,83 +631,83 @@
   /* SymbolTable */                                                                                                                  \
   /***************/                                                                                                                  \
                                                                                                                                      \
-     static_field(SymbolTable,                  _the_table,                                   SymbolTable*)                          \
-     static_field(SymbolTable,                  _shared_table,                                SymbolCompactHashTable)                \
+     static_field(SymbolTable,                 _the_table,                                    SymbolTable*)                          \
+     static_field(SymbolTable,                 _shared_table,                                 SymbolCompactHashTable)                \
                                                                                                                                      \
   /***************/                                                                                                                  \
   /* StringTable */                                                                                                                  \
   /***************/                                                                                                                  \
                                                                                                                                      \
-     static_field(StringTable,                  _the_table,                                   StringTable*)                          \
+     static_field(StringTable,                 _the_table,                                    StringTable*)                          \
                                                                                                                                      \
   /********************/                                                                                                             \
   /* CompactHashTable */                                                                                                             \
   /********************/                                                                                                             \
                                                                                                                                      \
-  nonstatic_field(SymbolCompactHashTable, _base_address, uintx)                                                                      \
-  nonstatic_field(SymbolCompactHashTable, _entry_count, juint)                                                                       \
-  nonstatic_field(SymbolCompactHashTable, _bucket_count, juint)                                                                      \
-  nonstatic_field(SymbolCompactHashTable, _table_end_offset, juint)                                                                  \
-  nonstatic_field(SymbolCompactHashTable, _buckets, juint*)                                                                          \
+  nonstatic_field(SymbolCompactHashTable,      _base_address,                                 uintx)                                 \
+  nonstatic_field(SymbolCompactHashTable,      _entry_count,                                  juint)                                 \
+  nonstatic_field(SymbolCompactHashTable,      _bucket_count,                                 juint)                                 \
+  nonstatic_field(SymbolCompactHashTable,      _table_end_offset,                             juint)                                 \
+  nonstatic_field(SymbolCompactHashTable,      _buckets,                                      juint*)                                \
                                                                                                                                      \
   /********************/                                                                                                             \
   /* SystemDictionary */                                                                                                             \
   /********************/                                                                                                             \
                                                                                                                                      \
-      static_field(SystemDictionary,            _dictionary,                                   Dictionary*)                          \
-      static_field(SystemDictionary,            _placeholders,                                 PlaceholderTable*)                    \
-      static_field(SystemDictionary,            _shared_dictionary,                            Dictionary*)                          \
-      static_field(SystemDictionary,            _system_loader_lock_obj,                       oop)                                  \
-      static_field(SystemDictionary,            _loader_constraints,                           LoaderConstraintTable*)               \
-      static_field(SystemDictionary,            WK_KLASS(Object_klass),                        Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(String_klass),                        Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Class_klass),                         Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Cloneable_klass),                     Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(ClassLoader_klass),                   Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Serializable_klass),                  Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(System_klass),                        Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Throwable_klass),                     Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(ThreadDeath_klass),                   Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Error_klass),                         Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Exception_klass),                     Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(RuntimeException_klass),              Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(ClassNotFoundException_klass),        Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(NoClassDefFoundError_klass),          Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(LinkageError_klass),                  Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(ClassCastException_klass),            Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(ArrayStoreException_klass),           Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(VirtualMachineError_klass),           Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(OutOfMemoryError_klass),              Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(StackOverflowError_klass),            Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(ProtectionDomain_klass),              Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(AccessControlContext_klass),          Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(SecureClassLoader_klass),             Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Reference_klass),                     Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(SoftReference_klass),                 Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(WeakReference_klass),                 Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(FinalReference_klass),                Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(PhantomReference_klass),              Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Cleaner_klass),                       Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Finalizer_klass),                     Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Thread_klass),                        Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(ThreadGroup_klass),                   Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(Properties_klass),                    Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(StringBuffer_klass),                  Klass*)                               \
-      static_field(SystemDictionary,            WK_KLASS(MethodHandle_klass),                  Klass*)                               \
-      static_field(SystemDictionary,            _box_klasses[0],                               Klass*)                               \
-      static_field(SystemDictionary,            _java_system_loader,                           oop)                                  \
+     static_field(SystemDictionary,            _dictionary,                                   Dictionary*)                           \
+     static_field(SystemDictionary,            _placeholders,                                 PlaceholderTable*)                     \
+     static_field(SystemDictionary,            _shared_dictionary,                            Dictionary*)                           \
+     static_field(SystemDictionary,            _system_loader_lock_obj,                       oop)                                   \
+     static_field(SystemDictionary,            _loader_constraints,                           LoaderConstraintTable*)                \
+     static_field(SystemDictionary,            WK_KLASS(Object_klass),                        Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(String_klass),                        Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Class_klass),                         Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Cloneable_klass),                     Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(ClassLoader_klass),                   Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Serializable_klass),                  Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(System_klass),                        Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Throwable_klass),                     Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(ThreadDeath_klass),                   Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Error_klass),                         Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Exception_klass),                     Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(RuntimeException_klass),              Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(ClassNotFoundException_klass),        Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(NoClassDefFoundError_klass),          Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(LinkageError_klass),                  Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(ClassCastException_klass),            Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(ArrayStoreException_klass),           Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(VirtualMachineError_klass),           Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(OutOfMemoryError_klass),              Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(StackOverflowError_klass),            Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(ProtectionDomain_klass),              Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(AccessControlContext_klass),          Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(SecureClassLoader_klass),             Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Reference_klass),                     Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(SoftReference_klass),                 Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(WeakReference_klass),                 Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(FinalReference_klass),                Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(PhantomReference_klass),              Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Cleaner_klass),                       Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Finalizer_klass),                     Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Thread_klass),                        Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(ThreadGroup_klass),                   Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(Properties_klass),                    Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(StringBuffer_klass),                  Klass*)                                \
+     static_field(SystemDictionary,            WK_KLASS(MethodHandle_klass),                  Klass*)                                \
+     static_field(SystemDictionary,            _box_klasses[0],                               Klass*)                                \
+     static_field(SystemDictionary,            _java_system_loader,                           oop)                                   \
                                                                                                                                      \
   /*************/                                                                                                                    \
   /* vmSymbols */                                                                                                                    \
   /*************/                                                                                                                    \
                                                                                                                                      \
-      static_field(vmSymbols,                   _symbols[0],                                  Symbol*)                               \
+     static_field(vmSymbols,                   _symbols[0],                                   Symbol*)                               \
                                                                                                                                      \
   /*******************/                                                                                                              \
   /* HashtableBucket */                                                                                                              \
   /*******************/                                                                                                              \
                                                                                                                                      \
-  nonstatic_field(HashtableBucket<mtInternal>,  _entry,                                        BasicHashtableEntry<mtInternal>*)     \
+  nonstatic_field(HashtableBucket<mtInternal>, _entry,                                        BasicHashtableEntry<mtInternal>*)      \
                                                                                                                                      \
   /******************/                                                                                                               \
   /* HashtableEntry */                                                                                                               \
@@ -721,12 +721,12 @@
   /* Hashtable */                                                                                                                    \
   /*************/                                                                                                                    \
                                                                                                                                      \
-  nonstatic_field(BasicHashtable<mtInternal>, _table_size,                                   int)                                   \
-  nonstatic_field(BasicHashtable<mtInternal>, _buckets,                                      HashtableBucket<mtInternal>*)          \
-  nonstatic_field(BasicHashtable<mtInternal>, _free_list,                                    BasicHashtableEntry<mtInternal>*)      \
-  nonstatic_field(BasicHashtable<mtInternal>, _first_free_entry,                             char*)                                 \
-  nonstatic_field(BasicHashtable<mtInternal>, _end_block,                                    char*)                                 \
-  nonstatic_field(BasicHashtable<mtInternal>, _entry_size,                                   int)                                   \
+  nonstatic_field(BasicHashtable<mtInternal>,  _table_size,                                   int)                                   \
+  nonstatic_field(BasicHashtable<mtInternal>,  _buckets,                                      HashtableBucket<mtInternal>*)          \
+  nonstatic_field(BasicHashtable<mtInternal>,  _free_list,                                    BasicHashtableEntry<mtInternal>*)      \
+  nonstatic_field(BasicHashtable<mtInternal>,  _first_free_entry,                             char*)                                 \
+  nonstatic_field(BasicHashtable<mtInternal>,  _end_block,                                    char*)                                 \
+  nonstatic_field(BasicHashtable<mtInternal>,  _entry_size,                                   int)                                   \
                                                                                                                                      \
   /*******************/                                                                                                              \
   /* DictionaryEntry */                                                                                                              \
@@ -764,7 +764,7 @@
   nonstatic_field(ClassLoaderData,             _class_loader,                                 oop)                                   \
   nonstatic_field(ClassLoaderData,             _next,                                         ClassLoaderData*)                      \
                                                                                                                                      \
-  static_field(ClassLoaderDataGraph,           _head,                                         ClassLoaderData*)                      \
+     static_field(ClassLoaderDataGraph,        _head,                                         ClassLoaderData*)                      \
                                                                                                                                      \
   /**********/                                                                                                                       \
   /* Arrays */                                                                                                                       \
@@ -786,8 +786,8 @@
   /* CodeCache (NOTE: incomplete) */                                                                                                 \
   /********************************/                                                                                                 \
                                                                                                                                      \
-  static_field(CodeCache,                      _heaps,                                        GrowableArray<CodeHeap*>*)             \
-  static_field(CodeCache,                      _scavenge_root_nmethods,                       nmethod*)                              \
+     static_field(CodeCache,                   _heaps,                                        GrowableArray<CodeHeap*>*)             \
+     static_field(CodeCache,                   _scavenge_root_nmethods,                       nmethod*)                              \
                                                                                                                                      \
   /*******************************/                                                                                                  \
   /* CodeHeap (NOTE: incomplete) */                                                                                                  \
@@ -873,37 +873,37 @@
   /* NMethods (NOTE: incomplete, but only a little) */                                                                               \
   /**************************************************/                                                                               \
                                                                                                                                      \
-  nonstatic_field(nmethod,             _method,                                       Method*)                        \
-  nonstatic_field(nmethod,             _entry_bci,                                    int)                                   \
-  nonstatic_field(nmethod,             _osr_link,                                     nmethod*)                              \
-  nonstatic_field(nmethod,             _scavenge_root_link,                           nmethod*)                              \
-  nonstatic_field(nmethod,             _scavenge_root_state,                          jbyte)                                 \
-  nonstatic_field(nmethod,             _state,                                        volatile unsigned char)                \
-  nonstatic_field(nmethod,             _exception_offset,                             int)                                   \
-  nonstatic_field(nmethod,             _deoptimize_offset,                            int)                                   \
-  nonstatic_field(nmethod,             _deoptimize_mh_offset,                         int)                                   \
-  nonstatic_field(nmethod,             _orig_pc_offset,                               int)                                   \
-  nonstatic_field(nmethod,             _stub_offset,                                  int)                                   \
-  nonstatic_field(nmethod,             _consts_offset,                                int)                                   \
-  nonstatic_field(nmethod,             _oops_offset,                                  int)                                   \
-  nonstatic_field(nmethod,             _metadata_offset,                              int)                                   \
-  nonstatic_field(nmethod,             _scopes_data_offset,                           int)                                   \
-  nonstatic_field(nmethod,             _scopes_pcs_offset,                            int)                                   \
-  nonstatic_field(nmethod,             _dependencies_offset,                          int)                                   \
-  nonstatic_field(nmethod,             _handler_table_offset,                         int)                                   \
-  nonstatic_field(nmethod,             _nul_chk_table_offset,                         int)                                   \
-  nonstatic_field(nmethod,             _nmethod_end_offset,                           int)                                   \
-  nonstatic_field(nmethod,             _entry_point,                                  address)                               \
-  nonstatic_field(nmethod,             _verified_entry_point,                         address)                               \
-  nonstatic_field(nmethod,             _osr_entry_point,                              address)                               \
-  volatile_nonstatic_field(nmethod,    _lock_count,                                   jint)                                  \
-  nonstatic_field(nmethod,             _stack_traversal_mark,                         long)                                  \
-  nonstatic_field(nmethod,             _compile_id,                                   int)                                   \
-  nonstatic_field(nmethod,             _comp_level,                                   int)                                   \
-  nonstatic_field(nmethod,             _exception_cache,                              ExceptionCache*)                       \
-  nonstatic_field(nmethod,             _marked_for_deoptimization,                    bool)                                  \
-                                                                                                                             \
-  unchecked_c2_static_field(Deoptimization,         _trap_reason_name,                   void*)                              \
+  nonstatic_field(nmethod,                     _method,                                       Method*)                               \
+  nonstatic_field(nmethod,                     _entry_bci,                                    int)                                   \
+  nonstatic_field(nmethod,                     _osr_link,                                     nmethod*)                              \
+  nonstatic_field(nmethod,                     _scavenge_root_link,                           nmethod*)                              \
+  nonstatic_field(nmethod,                     _scavenge_root_state,                          jbyte)                                 \
+  nonstatic_field(nmethod,                     _state,                                        volatile unsigned char)                \
+  nonstatic_field(nmethod,                     _exception_offset,                             int)                                   \
+  nonstatic_field(nmethod,                     _deoptimize_offset,                            int)                                   \
+  nonstatic_field(nmethod,                     _deoptimize_mh_offset,                         int)                                   \
+  nonstatic_field(nmethod,                     _orig_pc_offset,                               int)                                   \
+  nonstatic_field(nmethod,                     _stub_offset,                                  int)                                   \
+  nonstatic_field(nmethod,                     _consts_offset,                                int)                                   \
+  nonstatic_field(nmethod,                     _oops_offset,                                  int)                                   \
+  nonstatic_field(nmethod,                     _metadata_offset,                              int)                                   \
+  nonstatic_field(nmethod,                     _scopes_data_offset,                           int)                                   \
+  nonstatic_field(nmethod,                     _scopes_pcs_offset,                            int)                                   \
+  nonstatic_field(nmethod,                     _dependencies_offset,                          int)                                   \
+  nonstatic_field(nmethod,                     _handler_table_offset,                         int)                                   \
+  nonstatic_field(nmethod,                     _nul_chk_table_offset,                         int)                                   \
+  nonstatic_field(nmethod,                     _nmethod_end_offset,                           int)                                   \
+  nonstatic_field(nmethod,                     _entry_point,                                  address)                               \
+  nonstatic_field(nmethod,                     _verified_entry_point,                         address)                               \
+  nonstatic_field(nmethod,                     _osr_entry_point,                              address)                               \
+  volatile_nonstatic_field(nmethod,            _lock_count,                                   jint)                                  \
+  nonstatic_field(nmethod,                     _stack_traversal_mark,                         long)                                  \
+  nonstatic_field(nmethod,                     _compile_id,                                   int)                                   \
+  nonstatic_field(nmethod,                     _comp_level,                                   int)                                   \
+  nonstatic_field(nmethod,                     _exception_cache,                              ExceptionCache*)                       \
+  nonstatic_field(nmethod,                     _marked_for_deoptimization,                    bool)                                  \
+                                                                                                                                     \
+  unchecked_c2_static_field(Deoptimization,    _trap_reason_name,                             void*)                                 \
                                                                                                                                      \
   /********************************/                                                                                                 \
   /* JavaCalls (NOTE: incomplete) */                                                                                                 \
@@ -928,7 +928,7 @@
   nonstatic_field(ThreadShadow,                _pending_exception,                            oop)                                   \
   nonstatic_field(ThreadShadow,                _exception_file,                               const char*)                           \
   nonstatic_field(ThreadShadow,                _exception_line,                               int)                                   \
-   volatile_nonstatic_field(Thread,            _suspend_flags,                                uint32_t)                              \
+  volatile_nonstatic_field(Thread,             _suspend_flags,                                uint32_t)                              \
   nonstatic_field(Thread,                      _active_handles,                               JNIHandleBlock*)                       \
   nonstatic_field(Thread,                      _tlab,                                         ThreadLocalAllocBuffer)                \
   nonstatic_field(Thread,                      _allocated_bytes,                              jlong)                                 \
@@ -948,7 +948,7 @@
   volatile_nonstatic_field(JavaThread,         _is_method_handle_return,                      int)                                   \
   nonstatic_field(JavaThread,                  _special_runtime_exit_condition,               JavaThread::AsyncRequests)             \
   nonstatic_field(JavaThread,                  _saved_exception_pc,                           address)                               \
-   volatile_nonstatic_field(JavaThread,        _thread_state,                                 JavaThreadState)                       \
+  volatile_nonstatic_field(JavaThread,         _thread_state,                                 JavaThreadState)                       \
   nonstatic_field(JavaThread,                  _osthread,                                     OSThread*)                             \
   nonstatic_field(JavaThread,                  _stack_base,                                   address)                               \
   nonstatic_field(JavaThread,                  _stack_size,                                   size_t)                                \
@@ -991,7 +991,7 @@
      static_field(JNIHandles,                  _weak_global_handles,                          JNIHandleBlock*)                       \
      static_field(JNIHandles,                  _deleted_handle,                               oop)                                   \
                                                                                                                                      \
-  unchecked_nonstatic_field(JNIHandleBlock,    _handles,                                      JNIHandleBlock::block_size_in_oops * sizeof(Oop)) /* Note: no type */ \
+  unchecked_nonstatic_field(JNIHandleBlock,    _handles,       JNIHandleBlock::block_size_in_oops * sizeof(Oop)) /* Note: no type */ \
   nonstatic_field(JNIHandleBlock,              _top,                                          int)                                   \
   nonstatic_field(JNIHandleBlock,              _next,                                         JNIHandleBlock*)                       \
                                                                                                                                      \
@@ -1019,80 +1019,80 @@
   /* allocation */                                                                                                                   \
   /**************/                                                                                                                   \
                                                                                                                                      \
-  nonstatic_field(Chunk, _next, Chunk*)                                                                                              \
-  nonstatic_field(Chunk, _len, const size_t)                                                                                         \
+  nonstatic_field(Chunk,                       _next,                                         Chunk*)                                \
+  nonstatic_field(Chunk,                       _len,                                          const size_t)                          \
                                                                                                                                      \
-  nonstatic_field(Arena, _first, Chunk*)                                                                                             \
-  nonstatic_field(Arena, _chunk, Chunk*)                                                                                             \
-  nonstatic_field(Arena, _hwm, char*)                                                                                                \
-  nonstatic_field(Arena, _max, char*)                                                                                                \
+  nonstatic_field(Arena,                       _first,                                        Chunk*)                                \
+  nonstatic_field(Arena,                       _chunk,                                        Chunk*)                                \
+  nonstatic_field(Arena,                       _hwm,                                          char*)                                 \
+  nonstatic_field(Arena,                       _max,                                          char*)                                 \
                                                                                                                                      \
   /************/                                                                                                                     \
   /* CI */                                                                                                                           \
   /************/                                                                                                                     \
                                                                                                                                      \
- nonstatic_field(ciEnv,               _system_dictionary_modification_counter, int)                                                  \
- nonstatic_field(ciEnv,               _compiler_data, void*)                                                                         \
- nonstatic_field(ciEnv,               _failure_reason, const char*)                                                                  \
- nonstatic_field(ciEnv,               _factory, ciObjectFactory*)                                                                    \
- nonstatic_field(ciEnv,               _dependencies, Dependencies*)                                                                  \
- nonstatic_field(ciEnv,               _task, CompileTask*)                                                                           \
- nonstatic_field(ciEnv,               _arena, Arena*)                                                                                \
+  nonstatic_field(ciEnv,                       _system_dictionary_modification_counter,       int)                                   \
+  nonstatic_field(ciEnv,                       _compiler_data,                                void*)                                 \
+  nonstatic_field(ciEnv,                       _failure_reason,                               const char*)                           \
+  nonstatic_field(ciEnv,                       _factory,                                      ciObjectFactory*)                      \
+  nonstatic_field(ciEnv,                       _dependencies,                                 Dependencies*)                         \
+  nonstatic_field(ciEnv,                       _task,                                         CompileTask*)                          \
+  nonstatic_field(ciEnv,                       _arena,                                        Arena*)                                \
                                                                                                                                      \
- nonstatic_field(ciBaseObject,    _ident, uint)                                                                                      \
+  nonstatic_field(ciBaseObject,                _ident,                                        uint)                                  \
                                                                                                                                      \
- nonstatic_field(ciObject,    _handle, jobject)                                                                                      \
- nonstatic_field(ciObject,    _klass, ciKlass*)                                                                                      \
+  nonstatic_field(ciObject,                    _handle,                                       jobject)                               \
+  nonstatic_field(ciObject,                    _klass,                                        ciKlass*)                              \
                                                                                                                                      \
- nonstatic_field(ciMetadata,  _metadata, Metadata*)                                                                           \
+  nonstatic_field(ciMetadata,                  _metadata,                                     Metadata*)                             \
                                                                                                                                      \
- nonstatic_field(ciSymbol,    _symbol, Symbol*)                                                                                      \
+  nonstatic_field(ciSymbol,                    _symbol,                                       Symbol*)                               \
                                                                                                                                      \
- nonstatic_field(ciType,    _basic_type, BasicType)                                                                                  \
+  nonstatic_field(ciType,                      _basic_type,                                   BasicType)                             \
                                                                                                                                      \
- nonstatic_field(ciKlass,   _name, ciSymbol*)                                                                                        \
+  nonstatic_field(ciKlass,                     _name,                                         ciSymbol*)                             \
                                                                                                                                      \
- nonstatic_field(ciArrayKlass,   _dimension, jint)                                                                                   \
+  nonstatic_field(ciArrayKlass,                _dimension,                                    jint)                                  \
                                                                                                                                      \
- nonstatic_field(ciObjArrayKlass, _element_klass, ciKlass*)                                                                          \
- nonstatic_field(ciObjArrayKlass, _base_element_klass, ciKlass*)                                                                     \
+  nonstatic_field(ciObjArrayKlass,             _element_klass,                                ciKlass*)                              \
+  nonstatic_field(ciObjArrayKlass,             _base_element_klass,                           ciKlass*)                              \
                                                                                                                                      \
- nonstatic_field(ciInstanceKlass,   _init_state, InstanceKlass::ClassState)                                                          \
- nonstatic_field(ciInstanceKlass,   _is_shared,  bool)                                                                               \
+  nonstatic_field(ciInstanceKlass,             _init_state,                                   InstanceKlass::ClassState)             \
+  nonstatic_field(ciInstanceKlass,             _is_shared,                                    bool)                                  \
                                                                                                                                      \
- nonstatic_field(ciMethod,     _interpreter_invocation_count, int)                                                                   \
- nonstatic_field(ciMethod,     _interpreter_throwout_count, int)                                                                     \
- nonstatic_field(ciMethod,     _instructions_size, int)                                                                              \
+  nonstatic_field(ciMethod,                    _interpreter_invocation_count,                 int)                                   \
+  nonstatic_field(ciMethod,                    _interpreter_throwout_count,                   int)                                   \
+  nonstatic_field(ciMethod,                    _instructions_size,                            int)                                   \
                                                                                                                                      \
- nonstatic_field(ciMethodData, _data_size, int)                                                                                      \
- nonstatic_field(ciMethodData, _state, u_char)                                                                                       \
- nonstatic_field(ciMethodData, _extra_data_size, int)                                                                                \
- nonstatic_field(ciMethodData, _data, intptr_t*)                                                                                     \
- nonstatic_field(ciMethodData, _hint_di, int)                                                                                        \
- nonstatic_field(ciMethodData, _eflags, intx)                                                                                        \
- nonstatic_field(ciMethodData, _arg_local, intx)                                                                                     \
- nonstatic_field(ciMethodData, _arg_stack, intx)                                                                                     \
- nonstatic_field(ciMethodData, _arg_returned, intx)                                                                                  \
- nonstatic_field(ciMethodData, _current_mileage, int)                                                                                \
- nonstatic_field(ciMethodData, _orig, MethodData)                                                                             \
+  nonstatic_field(ciMethodData,                _data_size,                                    int)                                   \
+  nonstatic_field(ciMethodData,                _state,                                        u_char)                                \
+  nonstatic_field(ciMethodData,                _extra_data_size,                              int)                                   \
+  nonstatic_field(ciMethodData,                _data,                                         intptr_t*)                             \
+  nonstatic_field(ciMethodData,                _hint_di,                                      int)                                   \
+  nonstatic_field(ciMethodData,                _eflags,                                       intx)                                  \
+  nonstatic_field(ciMethodData,                _arg_local,                                    intx)                                  \
+  nonstatic_field(ciMethodData,                _arg_stack,                                    intx)                                  \
+  nonstatic_field(ciMethodData,                _arg_returned,                                 intx)                                  \
+  nonstatic_field(ciMethodData,                _current_mileage,                              int)                                   \
+  nonstatic_field(ciMethodData,                _orig,                                         MethodData)                            \
                                                                                                                                      \
- nonstatic_field(ciField,     _holder, ciInstanceKlass*)                                                                             \
- nonstatic_field(ciField,     _name, ciSymbol*)                                                                                      \
- nonstatic_field(ciField,     _signature, ciSymbol*)                                                                                 \
- nonstatic_field(ciField,     _offset, int)                                                                                          \
- nonstatic_field(ciField,     _is_constant, bool)                                                                                    \
- nonstatic_field(ciField,     _constant_value, ciConstant)                                                                           \
+  nonstatic_field(ciField,                     _holder,                                       ciInstanceKlass*)                      \
+  nonstatic_field(ciField,                     _name,                                         ciSymbol*)                             \
+  nonstatic_field(ciField,                     _signature,                                    ciSymbol*)                             \
+  nonstatic_field(ciField,                     _offset,                                       int)                                   \
+  nonstatic_field(ciField,                     _is_constant,                                  bool)                                  \
+  nonstatic_field(ciField,                     _constant_value,                               ciConstant)                            \
                                                                                                                                      \
- nonstatic_field(ciObjectFactory,     _ci_metadata, GrowableArray<ciMetadata*>*)                                                     \
- nonstatic_field(ciObjectFactory,     _symbols, GrowableArray<ciSymbol*>*)                                                           \
- nonstatic_field(ciObjectFactory,     _unloaded_methods, GrowableArray<ciMethod*>*)                                                  \
+  nonstatic_field(ciObjectFactory,             _ci_metadata,                                  GrowableArray<ciMetadata*>*)           \
+  nonstatic_field(ciObjectFactory,             _symbols,                                      GrowableArray<ciSymbol*>*)             \
+  nonstatic_field(ciObjectFactory,             _unloaded_methods,                             GrowableArray<ciMethod*>*)             \
                                                                                                                                      \
- nonstatic_field(ciConstant,     _type, BasicType)                                                                                   \
- nonstatic_field(ciConstant,     _value._int, jint)                                                                                  \
- nonstatic_field(ciConstant,     _value._long, jlong)                                                                                \
- nonstatic_field(ciConstant,     _value._float, jfloat)                                                                              \
- nonstatic_field(ciConstant,     _value._double, jdouble)                                                                            \
- nonstatic_field(ciConstant,     _value._object, ciObject*)                                                                          \
+  nonstatic_field(ciConstant,                  _type,                                         BasicType)                             \
+  nonstatic_field(ciConstant,                  _value._int,                                   jint)                                  \
+  nonstatic_field(ciConstant,                  _value._long,                                  jlong)                                 \
+  nonstatic_field(ciConstant,                  _value._float,                                 jfloat)                                \
+  nonstatic_field(ciConstant,                  _value._double,                                jdouble)                               \
+  nonstatic_field(ciConstant,                  _value._object,                                ciObject*)                             \
                                                                                                                                      \
   /************/                                                                                                                     \
   /* Monitors */                                                                                                                     \
@@ -1108,7 +1108,7 @@
   volatile_nonstatic_field(BasicLock,          _displaced_header,                             markOop)                               \
   nonstatic_field(BasicObjectLock,             _lock,                                         BasicLock)                             \
   nonstatic_field(BasicObjectLock,             _obj,                                          oop)                                   \
-  static_field(ObjectSynchronizer,             gBlockList,                                    ObjectMonitor*)                        \
+     static_field(ObjectSynchronizer,          gBlockList,                                    ObjectMonitor*)                        \
                                                                                                                                      \
   /*********************/                                                                                                            \
   /* Matcher (C2 only) */                                                                                                            \
@@ -1116,111 +1116,111 @@
                                                                                                                                      \
   unchecked_c2_static_field(Matcher,           _regEncode,                          sizeof(Matcher::_regEncode)) /* NOTE: no type */ \
                                                                                                                                      \
-  c2_nonstatic_field(Node,               _in,                      Node**)                                                           \
-  c2_nonstatic_field(Node,               _out,                     Node**)                                                           \
-  c2_nonstatic_field(Node,               _cnt,                     node_idx_t)                                                       \
-  c2_nonstatic_field(Node,               _max,                     node_idx_t)                                                       \
-  c2_nonstatic_field(Node,               _outcnt,                  node_idx_t)                                                       \
-  c2_nonstatic_field(Node,               _outmax,                  node_idx_t)                                                       \
-  c2_nonstatic_field(Node,               _idx,                     const node_idx_t)                                                 \
-  c2_nonstatic_field(Node,               _class_id,                jushort)                                                          \
-  c2_nonstatic_field(Node,               _flags,                   jushort)                                                          \
+  c2_nonstatic_field(Node,                     _in,                                           Node**)                                \
+  c2_nonstatic_field(Node,                     _out,                                          Node**)                                \
+  c2_nonstatic_field(Node,                     _cnt,                                          node_idx_t)                            \
+  c2_nonstatic_field(Node,                     _max,                                          node_idx_t)                            \
+  c2_nonstatic_field(Node,                     _outcnt,                                       node_idx_t)                            \
+  c2_nonstatic_field(Node,                     _outmax,                                       node_idx_t)                            \
+  c2_nonstatic_field(Node,                     _idx,                                          const node_idx_t)                      \
+  c2_nonstatic_field(Node,                     _class_id,                                     jushort)                               \
+  c2_nonstatic_field(Node,                     _flags,                                        jushort)                               \
                                                                                                                                      \
-  c2_nonstatic_field(Compile,            _root,                    RootNode*)                                                        \
-  c2_nonstatic_field(Compile,            _unique,                  uint)                                                             \
-  c2_nonstatic_field(Compile,            _entry_bci,               int)                                                              \
-  c2_nonstatic_field(Compile,            _top,                     Node*)                                                            \
-  c2_nonstatic_field(Compile,            _cfg,                     PhaseCFG*)                                                        \
-  c2_nonstatic_field(Compile,            _regalloc,                PhaseRegAlloc*)                                                   \
-  c2_nonstatic_field(Compile,            _method,                  ciMethod*)                                                        \
-  c2_nonstatic_field(Compile,            _compile_id,              const int)                                                        \
-  c2_nonstatic_field(Compile,            _save_argument_registers, const bool)                                                       \
-  c2_nonstatic_field(Compile,            _subsume_loads,           const bool)                                                       \
-  c2_nonstatic_field(Compile,            _do_escape_analysis,      const bool)                                                       \
-  c2_nonstatic_field(Compile,            _eliminate_boxing,        const bool)                                                       \
-  c2_nonstatic_field(Compile,            _ilt,                     InlineTree*)                                                      \
+  c2_nonstatic_field(Compile,                  _root,                                         RootNode*)                             \
+  c2_nonstatic_field(Compile,                  _unique,                                       uint)                                  \
+  c2_nonstatic_field(Compile,                  _entry_bci,                                    int)                                   \
+  c2_nonstatic_field(Compile,                  _top,                                          Node*)                                 \
+  c2_nonstatic_field(Compile,                  _cfg,                                          PhaseCFG*)                             \
+  c2_nonstatic_field(Compile,                  _regalloc,                                     PhaseRegAlloc*)                        \
+  c2_nonstatic_field(Compile,                  _method,                                       ciMethod*)                             \
+  c2_nonstatic_field(Compile,                  _compile_id,                                   const int)                             \
+  c2_nonstatic_field(Compile,                  _save_argument_registers,                      const bool)                            \
+  c2_nonstatic_field(Compile,                  _subsume_loads,                                const bool)                            \
+  c2_nonstatic_field(Compile,                  _do_escape_analysis,                           const bool)                            \
+  c2_nonstatic_field(Compile,                  _eliminate_boxing,                             const bool)                            \
+  c2_nonstatic_field(Compile,                  _ilt,                                          InlineTree*)                           \
                                                                                                                                      \
-  c2_nonstatic_field(InlineTree,         _caller_jvms,             JVMState*)                                                        \
-  c2_nonstatic_field(InlineTree,         _method,                  ciMethod*)                                                        \
-  c2_nonstatic_field(InlineTree,         _caller_tree,             InlineTree*)                                                      \
-  c2_nonstatic_field(InlineTree,         _subtrees,                GrowableArray<InlineTree*>)                                       \
+  c2_nonstatic_field(InlineTree,               _caller_jvms,                                  JVMState*)                             \
+  c2_nonstatic_field(InlineTree,               _method,                                       ciMethod*)                             \
+  c2_nonstatic_field(InlineTree,               _caller_tree,                                  InlineTree*)                           \
+  c2_nonstatic_field(InlineTree,               _subtrees,                                     GrowableArray<InlineTree*>)            \
                                                                                                                                      \
-  c2_nonstatic_field(OptoRegPair,        _first,                   short)                                                            \
-  c2_nonstatic_field(OptoRegPair,        _second,                  short)                                                            \
+  c2_nonstatic_field(OptoRegPair,              _first,                                        short)                                 \
+  c2_nonstatic_field(OptoRegPair,              _second,                                       short)                                 \
                                                                                                                                      \
-  c2_nonstatic_field(JVMState,           _caller,                  JVMState*)                                                        \
-  c2_nonstatic_field(JVMState,           _depth,                   uint)                                                             \
-  c2_nonstatic_field(JVMState,           _locoff,                  uint)                                                             \
-  c2_nonstatic_field(JVMState,           _stkoff,                  uint)                                                             \
-  c2_nonstatic_field(JVMState,           _monoff,                  uint)                                                             \
-  c2_nonstatic_field(JVMState,           _scloff,                  uint)                                                             \
-  c2_nonstatic_field(JVMState,           _endoff,                  uint)                                                             \
-  c2_nonstatic_field(JVMState,           _sp,                      uint)                                                             \
-  c2_nonstatic_field(JVMState,           _bci,                     int)                                                              \
-  c2_nonstatic_field(JVMState,           _method,                  ciMethod*)                                                        \
-  c2_nonstatic_field(JVMState,           _map,                     SafePointNode*)                                                   \
+  c2_nonstatic_field(JVMState,                 _caller,                                       JVMState*)                             \
+  c2_nonstatic_field(JVMState,                 _depth,                                        uint)                                  \
+  c2_nonstatic_field(JVMState,                 _locoff,                                       uint)                                  \
+  c2_nonstatic_field(JVMState,                 _stkoff,                                       uint)                                  \
+  c2_nonstatic_field(JVMState,                 _monoff,                                       uint)                                  \
+  c2_nonstatic_field(JVMState,                 _scloff,                                       uint)                                  \
+  c2_nonstatic_field(JVMState,                 _endoff,                                       uint)                                  \
+  c2_nonstatic_field(JVMState,                 _sp,                                           uint)                                  \
+  c2_nonstatic_field(JVMState,                 _bci,                                          int)                                   \
+  c2_nonstatic_field(JVMState,                 _method,                                       ciMethod*)                             \
+  c2_nonstatic_field(JVMState,                 _map,                                          SafePointNode*)                        \
                                                                                                                                      \
-  c2_nonstatic_field(SafePointNode,      _jvms,                    JVMState* const)                                                  \
+  c2_nonstatic_field(SafePointNode,            _jvms,                                         JVMState* const)                       \
                                                                                                                                      \
-  c2_nonstatic_field(MachSafePointNode,  _jvms,                    JVMState*)                                                        \
-  c2_nonstatic_field(MachSafePointNode,  _jvmadj,                  uint)                                                             \
+  c2_nonstatic_field(MachSafePointNode,        _jvms,                                         JVMState*)                             \
+  c2_nonstatic_field(MachSafePointNode,        _jvmadj,                                       uint)                                  \
                                                                                                                                      \
-  c2_nonstatic_field(MachIfNode,         _prob,                    jfloat)                                                           \
-  c2_nonstatic_field(MachIfNode,         _fcnt,                    jfloat)                                                           \
+  c2_nonstatic_field(MachIfNode,               _prob,                                         jfloat)                                \
+  c2_nonstatic_field(MachIfNode,               _fcnt,                                         jfloat)                                \
                                                                                                                                      \
-  c2_nonstatic_field(CallNode,           _entry_point,             address)                                                          \
+  c2_nonstatic_field(CallNode,                 _entry_point,                                  address)                               \
                                                                                                                                      \
-  c2_nonstatic_field(CallJavaNode,       _method,                  ciMethod*)                                                        \
+  c2_nonstatic_field(CallJavaNode,             _method,                                       ciMethod*)                             \
                                                                                                                                      \
-  c2_nonstatic_field(CallRuntimeNode,    _name,                    const char*)                                                      \
+  c2_nonstatic_field(CallRuntimeNode,          _name,                                         const char*)                           \
                                                                                                                                      \
-  c2_nonstatic_field(CallStaticJavaNode, _name,                    const char*)                                                      \
+  c2_nonstatic_field(CallStaticJavaNode,       _name,                                         const char*)                           \
                                                                                                                                      \
-  c2_nonstatic_field(MachCallJavaNode,   _method,                  ciMethod*)                                                        \
-  c2_nonstatic_field(MachCallJavaNode,   _bci,                     int)                                                              \
+  c2_nonstatic_field(MachCallJavaNode,         _method,                                       ciMethod*)                             \
+  c2_nonstatic_field(MachCallJavaNode,         _bci,                                          int)                                   \
                                                                                                                                      \
-  c2_nonstatic_field(MachCallStaticJavaNode, _name,                const char*)                                                      \
+  c2_nonstatic_field(MachCallStaticJavaNode,   _name,                                         const char*)                           \
                                                                                                                                      \
-  c2_nonstatic_field(MachCallRuntimeNode,  _name,                  const char*)                                                      \
+  c2_nonstatic_field(MachCallRuntimeNode,      _name,                                         const char*)                           \
                                                                                                                                      \
-  c2_nonstatic_field(PhaseCFG,           _number_of_blocks,        uint)                                                             \
-  c2_nonstatic_field(PhaseCFG,           _blocks,                  Block_List)                                                       \
-  c2_nonstatic_field(PhaseCFG,           _node_to_block_mapping,   Block_Array)                                                      \
-  c2_nonstatic_field(PhaseCFG,           _root_block,              Block*)                                                           \
+  c2_nonstatic_field(PhaseCFG,                 _number_of_blocks,                             uint)                                  \
+  c2_nonstatic_field(PhaseCFG,                 _blocks,                                       Block_List)                            \
+  c2_nonstatic_field(PhaseCFG,                 _node_to_block_mapping,                        Block_Array)                           \
+  c2_nonstatic_field(PhaseCFG,                 _root_block,                                   Block*)                                \
                                                                                                                                      \
-  c2_nonstatic_field(PhaseRegAlloc,      _node_regs,               OptoRegPair*)                                                     \
-  c2_nonstatic_field(PhaseRegAlloc,      _node_regs_max_index,     uint)                                                             \
-  c2_nonstatic_field(PhaseRegAlloc,      _framesize,               uint)                                                             \
-  c2_nonstatic_field(PhaseRegAlloc,      _max_reg,                 OptoReg::Name)                                                    \
+  c2_nonstatic_field(PhaseRegAlloc,            _node_regs,                                    OptoRegPair*)                          \
+  c2_nonstatic_field(PhaseRegAlloc,            _node_regs_max_index,                          uint)                                  \
+  c2_nonstatic_field(PhaseRegAlloc,            _framesize,                                    uint)                                  \
+  c2_nonstatic_field(PhaseRegAlloc,            _max_reg,                                      OptoReg::Name)                         \
                                                                                                                                      \
-  c2_nonstatic_field(PhaseChaitin,       _trip_cnt,                int)                                                              \
-  c2_nonstatic_field(PhaseChaitin,       _alternate,               int)                                                              \
-  c2_nonstatic_field(PhaseChaitin,       _lo_degree,               uint)                                                             \
-  c2_nonstatic_field(PhaseChaitin,       _lo_stk_degree,           uint)                                                             \
-  c2_nonstatic_field(PhaseChaitin,       _hi_degree,               uint)                                                             \
-  c2_nonstatic_field(PhaseChaitin,       _simplified,              uint)                                                             \
+  c2_nonstatic_field(PhaseChaitin,             _trip_cnt,                                     int)                                   \
+  c2_nonstatic_field(PhaseChaitin,             _alternate,                                    int)                                   \
+  c2_nonstatic_field(PhaseChaitin,             _lo_degree,                                    uint)                                  \
+  c2_nonstatic_field(PhaseChaitin,             _lo_stk_degree,                                uint)                                  \
+  c2_nonstatic_field(PhaseChaitin,             _hi_degree,                                    uint)                                  \
+  c2_nonstatic_field(PhaseChaitin,             _simplified,                                   uint)                                  \
                                                                                                                                      \
-  c2_nonstatic_field(Block,              _nodes,                   Node_List)                                                        \
-  c2_nonstatic_field(Block,              _succs,                   Block_Array)                                                      \
-  c2_nonstatic_field(Block,              _num_succs,               uint)                                                             \
-  c2_nonstatic_field(Block,              _pre_order,               uint)                                                             \
-  c2_nonstatic_field(Block,              _dom_depth,               uint)                                                             \
-  c2_nonstatic_field(Block,              _idom,                    Block*)                                                           \
-  c2_nonstatic_field(Block,              _freq,                    jdouble)                                                          \
+  c2_nonstatic_field(Block,                    _nodes,                                        Node_List)                             \
+  c2_nonstatic_field(Block,                    _succs,                                        Block_Array)                           \
+  c2_nonstatic_field(Block,                    _num_succs,                                    uint)                                  \
+  c2_nonstatic_field(Block,                    _pre_order,                                    uint)                                  \
+  c2_nonstatic_field(Block,                    _dom_depth,                                    uint)                                  \
+  c2_nonstatic_field(Block,                    _idom,                                         Block*)                                \
+  c2_nonstatic_field(Block,                    _freq,                                         jdouble)                               \
                                                                                                                                      \
-  c2_nonstatic_field(CFGElement,         _freq,                    jdouble)                                                          \
+  c2_nonstatic_field(CFGElement,               _freq,                                         jdouble)                               \
                                                                                                                                      \
-  c2_nonstatic_field(Block_List,         _cnt,                     uint)                                                             \
+  c2_nonstatic_field(Block_List,               _cnt,                                          uint)                                  \
                                                                                                                                      \
-  c2_nonstatic_field(Block_Array,        _size,                    uint)                                                             \
-  c2_nonstatic_field(Block_Array,        _blocks,                  Block**)                                                          \
-  c2_nonstatic_field(Block_Array,        _arena,                   Arena*)                                                           \
+  c2_nonstatic_field(Block_Array,              _size,                                         uint)                                  \
+  c2_nonstatic_field(Block_Array,              _blocks,                                       Block**)                               \
+  c2_nonstatic_field(Block_Array,              _arena,                                        Arena*)                                \
                                                                                                                                      \
-  c2_nonstatic_field(Node_List,          _cnt,                     uint)                                                             \
+  c2_nonstatic_field(Node_List,                _cnt,                                          uint)                                  \
                                                                                                                                      \
-  c2_nonstatic_field(Node_Array,         _max,                     uint)                                                             \
-  c2_nonstatic_field(Node_Array,         _nodes,                   Node**)                                                           \
-  c2_nonstatic_field(Node_Array,         _a,                       Arena*)                                                           \
+  c2_nonstatic_field(Node_Array,               _max,                                          uint)                                  \
+  c2_nonstatic_field(Node_Array,               _nodes,                                        Node**)                                \
+  c2_nonstatic_field(Node_Array,               _a,                                            Arena*)                                \
                                                                                                                                      \
                                                                                                                                      \
   /*********************/                                                                                                            \
@@ -1231,22 +1231,22 @@
   nonstatic_field(Flag,                        _name,                                         const char*)                           \
   unchecked_nonstatic_field(Flag,              _addr,                                         sizeof(void*)) /* NOTE: no type */     \
   nonstatic_field(Flag,                        _flags,                                        Flag::Flags)                           \
-  static_field(Flag,                           flags,                                         Flag*)                                 \
-  static_field(Flag,                           numFlags,                                      size_t)                                \
+     static_field(Flag,                        flags,                                         Flag*)                                 \
+     static_field(Flag,                        numFlags,                                      size_t)                                \
                                                                                                                                      \
   /*************************/                                                                                                        \
   /* JDK / VM version info */                                                                                                        \
   /*************************/                                                                                                        \
                                                                                                                                      \
-  static_field(Abstract_VM_Version,            _s_vm_release,                                 const char*)                           \
-  static_field(Abstract_VM_Version,            _s_internal_vm_info_string,                    const char*)                           \
-  static_field(Abstract_VM_Version,            _vm_major_version,                             int)                                   \
-  static_field(Abstract_VM_Version,            _vm_minor_version,                             int)                                   \
-  static_field(Abstract_VM_Version,            _vm_micro_version,                             int)                                   \
-  static_field(Abstract_VM_Version,            _vm_build_number,                              int)                                   \
-  static_field(Abstract_VM_Version,            _reserve_for_allocation_prefetch,              int)                                   \
+     static_field(Abstract_VM_Version,         _s_vm_release,                                 const char*)                           \
+     static_field(Abstract_VM_Version,         _s_internal_vm_info_string,                    const char*)                           \
+     static_field(Abstract_VM_Version,         _vm_major_version,                             int)                                   \
+     static_field(Abstract_VM_Version,         _vm_minor_version,                             int)                                   \
+     static_field(Abstract_VM_Version,         _vm_micro_version,                             int)                                   \
+     static_field(Abstract_VM_Version,         _vm_build_number,                              int)                                   \
+     static_field(Abstract_VM_Version,         _reserve_for_allocation_prefetch,              int)                                   \
                                                                                                                                      \
-  static_field(JDK_Version,                    _current,                                      JDK_Version)                           \
+     static_field(JDK_Version,                 _current,                                      JDK_Version)                           \
   nonstatic_field(JDK_Version,                 _partially_initialized,                        bool)                                  \
   nonstatic_field(JDK_Version,                 _major,                                        unsigned char)                         \
                                                                                                                                      \
@@ -1260,65 +1260,65 @@
   /* Arguments */                                                                                                                    \
   /*************/                                                                                                                    \
                                                                                                                                      \
-  static_field(Arguments,                      _jvm_flags_array,                              char**)                                \
-  static_field(Arguments,                      _num_jvm_flags,                                int)                                   \
-  static_field(Arguments,                      _jvm_args_array,                               char**)                                \
-  static_field(Arguments,                      _num_jvm_args,                                 int)                                   \
-  static_field(Arguments,                      _java_command,                                 char*)                                 \
+     static_field(Arguments,                   _jvm_flags_array,                              char**)                                \
+     static_field(Arguments,                   _num_jvm_flags,                                int)                                   \
+     static_field(Arguments,                   _jvm_args_array,                               char**)                                \
+     static_field(Arguments,                   _num_jvm_args,                                 int)                                   \
+     static_field(Arguments,                   _java_command,                                 char*)                                 \
                                                                                                                                      \
   /************/                                                                                                                     \
   /* Array<T> */                                                                                                                     \
   /************/                                                                                                                     \
                                                                                                                                      \
-  nonstatic_field(Array<int>,                      _length,                                   int)                                   \
-  unchecked_nonstatic_field(Array<int>,            _data,                                     sizeof(int))                           \
-  unchecked_nonstatic_field(Array<u1>,             _data,                                     sizeof(u1))                            \
-  unchecked_nonstatic_field(Array<u2>,             _data,                                     sizeof(u2))                            \
-  unchecked_nonstatic_field(Array<Method*>,        _data,                                     sizeof(Method*))                       \
-  unchecked_nonstatic_field(Array<Klass*>,         _data,                                     sizeof(Klass*))                        \
+  nonstatic_field(Array<int>,                  _length,                                       int)                                   \
+  unchecked_nonstatic_field(Array<int>,        _data,                                         sizeof(int))                           \
+  unchecked_nonstatic_field(Array<u1>,         _data,                                         sizeof(u1))                            \
+  unchecked_nonstatic_field(Array<u2>,         _data,                                         sizeof(u2))                            \
+  unchecked_nonstatic_field(Array<Method*>,    _data,                                         sizeof(Method*))                       \
+  unchecked_nonstatic_field(Array<Klass*>,     _data,                                         sizeof(Klass*))                        \
                                                                                                                                      \
   /*********************************/                                                                                                \
   /* java_lang_Class fields        */                                                                                                \
   /*********************************/                                                                                                \
                                                                                                                                      \
-  static_field(java_lang_Class,                _klass_offset,                                 int)                                   \
-  static_field(java_lang_Class,                _array_klass_offset,                           int)                                   \
-  static_field(java_lang_Class,                _oop_size_offset,                              int)                                   \
-  static_field(java_lang_Class,                _static_oop_field_count_offset,                int)                                   \
+     static_field(java_lang_Class,             _klass_offset,                                 int)                                   \
+     static_field(java_lang_Class,             _array_klass_offset,                           int)                                   \
+     static_field(java_lang_Class,             _oop_size_offset,                              int)                                   \
+     static_field(java_lang_Class,             _static_oop_field_count_offset,                int)                                   \
                                                                                                                                      \
   /************************/                                                                                                         \
   /* Miscellaneous fields */                                                                                                         \
   /************************/                                                                                                         \
                                                                                                                                      \
-  nonstatic_field(CompileTask,                 _method,                                      Method*)                                \
-  nonstatic_field(CompileTask,                 _osr_bci,                                     int)                                    \
-  nonstatic_field(CompileTask,                 _comp_level,                                  int)                                    \
-  nonstatic_field(CompileTask,                 _compile_id,                                  uint)                                   \
-  nonstatic_field(CompileTask,                 _next,                                        CompileTask*)                           \
-  nonstatic_field(CompileTask,                 _prev,                                        CompileTask*)                           \
+  nonstatic_field(CompileTask,                 _method,                                       Method*)                               \
+  nonstatic_field(CompileTask,                 _osr_bci,                                      int)                                   \
+  nonstatic_field(CompileTask,                 _comp_level,                                   int)                                   \
+  nonstatic_field(CompileTask,                 _compile_id,                                   uint)                                  \
+  nonstatic_field(CompileTask,                 _next,                                         CompileTask*)                          \
+  nonstatic_field(CompileTask,                 _prev,                                         CompileTask*)                          \
                                                                                                                                      \
-  nonstatic_field(vframeArray,                 _next,                                        vframeArray*)                           \
-  nonstatic_field(vframeArray,                 _original,                                    frame)                                  \
-  nonstatic_field(vframeArray,                 _caller,                                      frame)                                  \
-  nonstatic_field(vframeArray,                 _frames,                                      int)                                    \
+  nonstatic_field(vframeArray,                 _next,                                         vframeArray*)                          \
+  nonstatic_field(vframeArray,                 _original,                                     frame)                                 \
+  nonstatic_field(vframeArray,                 _caller,                                       frame)                                 \
+  nonstatic_field(vframeArray,                 _frames,                                       int)                                   \
                                                                                                                                      \
-  nonstatic_field(vframeArrayElement,          _frame,                                       frame)                                  \
-  nonstatic_field(vframeArrayElement,          _bci,                                         int)                                    \
-  nonstatic_field(vframeArrayElement,          _method,                                      Method*)                                \
+  nonstatic_field(vframeArrayElement,          _frame,                                        frame)                                 \
+  nonstatic_field(vframeArrayElement,          _bci,                                          int)                                   \
+  nonstatic_field(vframeArrayElement,          _method,                                       Method*)                               \
                                                                                                                                      \
-  nonstatic_field(PtrQueue,                    _active,                                      bool)                                   \
-  nonstatic_field(PtrQueue,                    _buf,                                         void**)                                 \
-  nonstatic_field(PtrQueue,                    _index,                                       size_t)                                 \
+  nonstatic_field(PtrQueue,                    _active,                                       bool)                                  \
+  nonstatic_field(PtrQueue,                    _buf,                                          void**)                                \
+  nonstatic_field(PtrQueue,                    _index,                                        size_t)                                \
                                                                                                                                      \
-  nonstatic_field(AccessFlags,                 _flags,                                       jint)                                   \
-  nonstatic_field(elapsedTimer,                _counter,                                     jlong)                                  \
-  nonstatic_field(elapsedTimer,                _active,                                      bool)                                   \
-  nonstatic_field(InvocationCounter,           _counter,                                     unsigned int)                           \
-  volatile_nonstatic_field(FreeChunk,          _size,                                        size_t)                                 \
-  nonstatic_field(FreeChunk,                   _next,                                        FreeChunk*)                             \
-  nonstatic_field(FreeChunk,                   _prev,                                        FreeChunk*)                             \
-  nonstatic_field(AdaptiveFreeList<FreeChunk>, _size,                                        size_t)                                 \
-  nonstatic_field(AdaptiveFreeList<FreeChunk>, _count,                                       ssize_t)
+  nonstatic_field(AccessFlags,                 _flags,                                        jint)                                  \
+  nonstatic_field(elapsedTimer,                _counter,                                      jlong)                                 \
+  nonstatic_field(elapsedTimer,                _active,                                       bool)                                  \
+  nonstatic_field(InvocationCounter,           _counter,                                      unsigned int)                          \
+  volatile_nonstatic_field(FreeChunk,          _size,                                         size_t)                                \
+  nonstatic_field(FreeChunk,                   _next,                                         FreeChunk*)                            \
+  nonstatic_field(FreeChunk,                   _prev,                                         FreeChunk*)                            \
+  nonstatic_field(AdaptiveFreeList<FreeChunk>, _size,                                         size_t)                                \
+  nonstatic_field(AdaptiveFreeList<FreeChunk>, _count,                                        ssize_t)
 
 
 //--------------------------------------------------------------------------------
@@ -2253,6 +2253,7 @@
                                                                           \
   declare_constant(BarrierSet::ModRef)                                    \
   declare_constant(BarrierSet::CardTableModRef)                           \
+  declare_constant(BarrierSet::CardTableForRS)                            \
   declare_constant(BarrierSet::CardTableExtension)                        \
   declare_constant(BarrierSet::G1SATBCT)                                  \
   declare_constant(BarrierSet::G1SATBCTLogging)                           \
--- a/hotspot/src/share/vm/services/memoryPool.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/services/memoryPool.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -204,21 +204,21 @@
   return MemoryUsage(initial_size(), used, committed, maxSize);
 }
 
-SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* gen,
+SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* young_gen,
                                                          const char* name,
                                                          PoolType type,
                                                          size_t max_size,
                                                          bool support_usage_threshold) :
-  CollectedMemoryPool(name, type, gen->from()->capacity(), max_size,
-                      support_usage_threshold), _gen(gen) {
+  CollectedMemoryPool(name, type, young_gen->from()->capacity(), max_size,
+                      support_usage_threshold), _young_gen(young_gen) {
 }
 
 size_t SurvivorContiguousSpacePool::used_in_bytes() {
-  return _gen->from()->used();
+  return _young_gen->from()->used();
 }
 
 size_t SurvivorContiguousSpacePool::committed_in_bytes() {
-  return _gen->from()->capacity();
+  return _young_gen->from()->capacity();
 }
 
 MemoryUsage SurvivorContiguousSpacePool::get_memory_usage() {
--- a/hotspot/src/share/vm/services/memoryPool.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/services/memoryPool.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -163,10 +163,10 @@
 
 class SurvivorContiguousSpacePool : public CollectedMemoryPool {
 private:
-  DefNewGeneration* _gen;
+  DefNewGeneration* _young_gen;
 
 public:
-  SurvivorContiguousSpacePool(DefNewGeneration* gen,
+  SurvivorContiguousSpacePool(DefNewGeneration* young_gen,
                               const char* name,
                               PoolType type,
                               size_t max_size,
--- a/hotspot/src/share/vm/services/memoryService.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/services/memoryService.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -212,13 +212,13 @@
   return (MemoryPool*) pool;
 }
 
-MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* gen,
+MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* young_gen,
                                                const char* name,
                                                bool is_heap,
                                                size_t max_size,
                                                bool support_usage_threshold) {
   MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
-  SurvivorContiguousSpacePool* pool = new SurvivorContiguousSpacePool(gen, name, type, max_size, support_usage_threshold);
+  SurvivorContiguousSpacePool* pool = new SurvivorContiguousSpacePool(young_gen, name, type, max_size, support_usage_threshold);
 
   _pools_list->append(pool);
   return (MemoryPool*) pool;
@@ -328,18 +328,18 @@
 
 
 #if INCLUDE_ALL_GCS
-void MemoryService::add_psYoung_memory_pool(PSYoungGen* gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) {
+void MemoryService::add_psYoung_memory_pool(PSYoungGen* young_gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) {
   assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
 
   // Add a memory pool for each space and young gen doesn't
   // support low memory detection as it is expected to get filled up.
-  EdenMutableSpacePool* eden = new EdenMutableSpacePool(gen,
-                                                        gen->eden_space(),
+  EdenMutableSpacePool* eden = new EdenMutableSpacePool(young_gen,
+                                                        young_gen->eden_space(),
                                                         "PS Eden Space",
                                                         MemoryPool::Heap,
                                                         false /* support_usage_threshold */);
 
-  SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(gen,
+  SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(young_gen,
                                                                     "PS Survivor Space",
                                                                     MemoryPool::Heap,
                                                                     false /* support_usage_threshold */);
@@ -352,13 +352,13 @@
   _pools_list->append(survivor);
 }
 
-void MemoryService::add_psOld_memory_pool(PSOldGen* gen, MemoryManager* mgr) {
-  PSGenerationPool* old_gen = new PSGenerationPool(gen,
-                                                   "PS Old Gen",
-                                                   MemoryPool::Heap,
-                                                   true /* support_usage_threshold */);
-  mgr->add_pool(old_gen);
-  _pools_list->append(old_gen);
+void MemoryService::add_psOld_memory_pool(PSOldGen* old_gen, MemoryManager* mgr) {
+  PSGenerationPool* old_gen_pool = new PSGenerationPool(old_gen,
+                                                       "PS Old Gen",
+                                                       MemoryPool::Heap,
+                                                       true /* support_usage_threshold */);
+  mgr->add_pool(old_gen_pool);
+  _pools_list->append(old_gen_pool);
 }
 
 void MemoryService::add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
@@ -548,7 +548,7 @@
 }
 //
 // GC manager type depends on the type of Generation. Depending on the space
-// availablity and vm options the gc uses major gc manager or minor gc
+// availability and vm options the gc uses major gc manager or minor gc
 // manager or both. The type of gc manager depends on the generation kind.
 // For DefNew and ParNew generation doing scavenge gc uses minor gc manager (so
 // _fullGC is set to false ) and for other generation kinds doing
--- a/hotspot/src/share/vm/services/memoryService.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/services/memoryService.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -80,10 +80,10 @@
   }
 
 
-  static void add_psYoung_memory_pool(PSYoungGen* gen,
+  static void add_psYoung_memory_pool(PSYoungGen* young_gen,
                                       MemoryManager* major_mgr,
                                       MemoryManager* minor_mgr);
-  static void add_psOld_memory_pool(PSOldGen* gen,
+  static void add_psOld_memory_pool(PSOldGen* old_gen,
                                     MemoryManager* mgr);
 
   static void add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
@@ -97,7 +97,7 @@
                                bool is_heap,
                                size_t max_size,
                                bool support_usage_threshold);
-  static MemoryPool* add_survivor_spaces(DefNewGeneration* gen,
+  static MemoryPool* add_survivor_spaces(DefNewGeneration* young_gen,
                                          const char* name,
                                          bool is_heap,
                                          size_t max_size,
@@ -162,7 +162,6 @@
                      bool recordGCEndTime, bool countCollection,
                      GCCause::Cause cause);
 
-
   static void oops_do(OopClosure* f);
 
   static bool get_verbose() { return PrintGC; }
--- a/hotspot/src/share/vm/services/psMemoryPool.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/services/psMemoryPool.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,18 +33,18 @@
 #include "services/memoryManager.hpp"
 #include "services/psMemoryPool.hpp"
 
-PSGenerationPool::PSGenerationPool(PSOldGen* gen,
+PSGenerationPool::PSGenerationPool(PSOldGen* old_gen,
                                    const char* name,
                                    PoolType type,
                                    bool support_usage_threshold) :
-  CollectedMemoryPool(name, type, gen->capacity_in_bytes(),
-                      gen->reserved().byte_size(), support_usage_threshold), _gen(gen) {
+  CollectedMemoryPool(name, type, old_gen->capacity_in_bytes(),
+                      old_gen->reserved().byte_size(), support_usage_threshold), _old_gen(old_gen) {
 }
 
 MemoryUsage PSGenerationPool::get_memory_usage() {
   size_t maxSize   = (available_for_allocation() ? max_size() : 0);
   size_t used      = used_in_bytes();
-  size_t committed = _gen->capacity_in_bytes();
+  size_t committed = _old_gen->capacity_in_bytes();
 
   return MemoryUsage(initial_size(), used, committed, maxSize);
 }
@@ -55,15 +55,16 @@
 // Max size of PS eden space is changing due to ergonomic.
 // PSYoungGen, PSOldGen, Eden, Survivor spaces are all resizable.
 //
-EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* gen,
+EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* young_gen,
                                            MutableSpace* space,
                                            const char* name,
                                            PoolType type,
                                            bool support_usage_threshold) :
   CollectedMemoryPool(name, type, space->capacity_in_bytes(),
-                      (gen->max_size() - gen->from_space()->capacity_in_bytes() - gen->to_space()->capacity_in_bytes()),
+                      (young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes()),
                        support_usage_threshold),
-  _gen(gen), _space(space) {
+  _young_gen(young_gen),
+  _space(space) {
 }
 
 MemoryUsage EdenMutableSpacePool::get_memory_usage() {
@@ -79,13 +80,13 @@
 //
 // PS from and to survivor spaces could have different sizes.
 //
-SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* gen,
+SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* young_gen,
                                                    const char* name,
                                                    PoolType type,
                                                    bool support_usage_threshold) :
-  CollectedMemoryPool(name, type, gen->from_space()->capacity_in_bytes(),
-                      gen->from_space()->capacity_in_bytes(),
-                      support_usage_threshold), _gen(gen) {
+  CollectedMemoryPool(name, type, young_gen->from_space()->capacity_in_bytes(),
+                      young_gen->from_space()->capacity_in_bytes(),
+                      support_usage_threshold), _young_gen(young_gen) {
 }
 
 MemoryUsage SurvivorMutableSpacePool::get_memory_usage() {
--- a/hotspot/src/share/vm/services/psMemoryPool.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/services/psMemoryPool.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -39,23 +39,23 @@
 
 class PSGenerationPool : public CollectedMemoryPool {
 private:
-  PSOldGen* _gen;
+  PSOldGen* _old_gen;
 
 public:
   PSGenerationPool(PSOldGen* pool, const char* name, PoolType type, bool support_usage_threshold);
 
   MemoryUsage get_memory_usage();
-  size_t used_in_bytes()              { return _gen->used_in_bytes(); }
-  size_t max_size() const             { return _gen->reserved().byte_size(); }
+  size_t used_in_bytes()              { return _old_gen->used_in_bytes(); }
+  size_t max_size() const             { return _old_gen->reserved().byte_size(); }
 };
 
 class EdenMutableSpacePool : public CollectedMemoryPool {
 private:
-  PSYoungGen*   _gen;
+  PSYoungGen*   _young_gen;
   MutableSpace* _space;
 
 public:
-  EdenMutableSpacePool(PSYoungGen* gen,
+  EdenMutableSpacePool(PSYoungGen* young_gen,
                        MutableSpace* space,
                        const char* name,
                        PoolType type,
@@ -66,16 +66,16 @@
   size_t used_in_bytes()                    { return space()->used_in_bytes(); }
   size_t max_size() const {
     // Eden's max_size = max_size of Young Gen - the current committed size of survivor spaces
-    return _gen->max_size() - _gen->from_space()->capacity_in_bytes() - _gen->to_space()->capacity_in_bytes();
+    return _young_gen->max_size() - _young_gen->from_space()->capacity_in_bytes() - _young_gen->to_space()->capacity_in_bytes();
   }
 };
 
 class SurvivorMutableSpacePool : public CollectedMemoryPool {
 private:
-  PSYoungGen*   _gen;
+  PSYoungGen*   _young_gen;
 
 public:
-  SurvivorMutableSpacePool(PSYoungGen* gen,
+  SurvivorMutableSpacePool(PSYoungGen* young_gen,
                            const char* name,
                            PoolType type,
                            bool support_usage_threshold);
@@ -83,14 +83,14 @@
   MemoryUsage get_memory_usage();
 
   size_t used_in_bytes() {
-    return _gen->from_space()->used_in_bytes();
+    return _young_gen->from_space()->used_in_bytes();
   }
   size_t committed_in_bytes() {
-    return _gen->from_space()->capacity_in_bytes();
+    return _young_gen->from_space()->capacity_in_bytes();
   }
   size_t max_size() const {
     // Return current committed size of the from-space
-    return _gen->from_space()->capacity_in_bytes();
+    return _young_gen->from_space()->capacity_in_bytes();
   }
 };
 
--- a/hotspot/src/share/vm/shark/sharkBuilder.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/shark/sharkBuilder.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -440,8 +440,10 @@
 // HotSpot memory barriers
 
 void SharkBuilder::CreateUpdateBarrierSet(BarrierSet* bs, Value* field) {
-  if (bs->kind() != BarrierSet::CardTableModRef)
+  if (bs->kind() != BarrierSet::CardTableForRS &&
+      bs->kind() != BarrierSet::CardTableExtension) {
     Unimplemented();
+  }
 
   CreateStore(
     LLVMValue::jbyte_constant(CardTableModRefBS::dirty_card_val()),
--- a/hotspot/src/share/vm/utilities/endian.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "utilities/endian.hpp"
-#include "utilities/bytes.hpp"
-
-#ifndef bswap_16
-extern "C" inline u2 bswap_16(u2 x) {
-  return ((x & 0xFF) << 8) |
-         ((x >> 8) & 0xFF);
-}
-#endif
-
-#ifndef bswap_32
-extern "C" inline u4 bswap_32(u4 x) {
-  return ((x & 0xFF) << 24) |
-       ((x & 0xFF00) << 8) |
-       ((x >> 8) & 0xFF00) |
-       ((x >> 24) & 0xFF);
-}
-#endif
-
-#ifndef bswap_64
-extern "C" inline u8 bswap_64(u8 x) {
-  return (u8)bswap_32((u4)x) << 32 |
-         (u8)bswap_32((u4)(x >> 32));
-}
-#endif
-
-u2 NativeEndian::get(u2 x) { return x; }
-u4 NativeEndian::get(u4 x) { return x; }
-u8 NativeEndian::get(u8 x) { return x; }
-s2 NativeEndian::get(s2 x) { return x; }
-s4 NativeEndian::get(s4 x) { return x; }
-s8 NativeEndian::get(s8 x) { return x; }
-
-void NativeEndian::set(u2& x, u2 y) { x = y; }
-void NativeEndian::set(u4& x, u4 y) { x = y; }
-void NativeEndian::set(u8& x, u8 y) { x = y; }
-void NativeEndian::set(s2& x, s2 y) { x = y; }
-void NativeEndian::set(s4& x, s4 y) { x = y; }
-void NativeEndian::set(s8& x, s8 y) { x = y; }
-
-NativeEndian NativeEndian::_native;
-
-u2 SwappingEndian::get(u2 x) { return bswap_16(x); }
-u4 SwappingEndian::get(u4 x) { return bswap_32(x); }
-u8 SwappingEndian::get(u8 x) { return bswap_64(x); }
-s2 SwappingEndian::get(s2 x) { return bswap_16(x); }
-s4 SwappingEndian::get(s4 x) { return bswap_32(x); }
-s8 SwappingEndian::get(s8 x) { return bswap_64(x); }
-
-void SwappingEndian::set(u2& x, u2 y) { x = bswap_16(y); }
-void SwappingEndian::set(u4& x, u4 y) { x = bswap_32(y); }
-void SwappingEndian::set(u8& x, u8 y) { x = bswap_64(y); }
-void SwappingEndian::set(s2& x, s2 y) { x = bswap_16(y); }
-void SwappingEndian::set(s4& x, s4 y) { x = bswap_32(y); }
-void SwappingEndian::set(s8& x, s8 y) { x = bswap_64(y); }
-
-SwappingEndian SwappingEndian::_swapping;
-
-Endian* Endian::get_handler(bool big_endian) {
-  // If requesting little endian on a little endian machine or
-  // big endian on a big endian machine use native handler
-  if (big_endian == is_big_endian()) {
-    return NativeEndian::get_native();
-  } else {
-    // Use swapping handler.
-    return SwappingEndian::get_swapping();
-  }
-}
-
-Endian* Endian::get_native_handler() {
-  return NativeEndian::get_native();
-}
--- a/hotspot/src/share/vm/utilities/endian.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_UTILITIES_ENDIAN_HPP
-#define SHARE_VM_UTILITIES_ENDIAN_HPP
-
-#include "utilities/globalDefinitions.hpp"
-
-// Selectable endian handling. Endian handlers are used when accessing values
-// that are of unknown (until runtime) endian.  The only requirement of the values
-// accessed are that they are aligned to proper size boundaries (no misalignment.)
-// To select an endian handler, one should call Endian::get_handler(big_endian);
-// Where big_endian is true if big endian is required and false otherwise.  The
-// native endian handler can be fetched with Endian::get_native_handler();
-// To retrieve a value using the approprate endian, use one of the overloaded
-// calls to get. To set a value, then use one of the overloaded set calls.
-// Ex.
-//      s4 value; // Imported value;
-//      ...
-//      Endian* endian = Endian::get_handler(true);  // Use big endian
-//      s4 corrected = endian->get(value);
-//      endian->set(value, 1);
-//
-class Endian {
-public:
-  virtual u2 get(u2 x) = 0;
-  virtual u4 get(u4 x) = 0;
-  virtual u8 get(u8 x) = 0;
-  virtual s2 get(s2 x) = 0;
-  virtual s4 get(s4 x) = 0;
-  virtual s8 get(s8 x) = 0;
-
-  virtual void set(u2& x, u2 y) = 0;
-  virtual void set(u4& x, u4 y) = 0;
-  virtual void set(u8& x, u8 y) = 0;
-  virtual void set(s2& x, s2 y) = 0;
-  virtual void set(s4& x, s4 y) = 0;
-  virtual void set(s8& x, s8 y) = 0;
-
-  // Quick little endian test.
-  static bool is_little_endian() {  u4 x = 1; return *(u1 *)&x != 0; }
-
-  // Quick big endian test.
-  static bool is_big_endian() { return !is_little_endian(); }
-
-  // Select an appropriate endian handler.
-  static Endian* get_handler(bool big_endian);
-
-  // Return the native endian handler.
-  static Endian* get_native_handler();
-};
-
-// Normal endian handling.
-class NativeEndian : public Endian {
-private:
-  static NativeEndian _native;
-
-public:
-  u2 get(u2 x);
-  u4 get(u4 x);
-  u8 get(u8 x);
-  s2 get(s2 x);
-  s4 get(s4 x);
-  s8 get(s8 x);
-
-  void set(u2& x, u2 y);
-  void set(u4& x, u4 y);
-  void set(u8& x, u8 y);
-  void set(s2& x, s2 y);
-  void set(s4& x, s4 y);
-  void set(s8& x, s8 y);
-
-  static Endian* get_native() { return &_native; }
-};
-
-// Swapping endian handling.
-class SwappingEndian : public Endian {
-private:
-  static SwappingEndian _swapping;
-
-public:
-  u2 get(u2 x);
-  u4 get(u4 x);
-  u8 get(u8 x);
-  s2 get(s2 x);
-  s4 get(s4 x);
-  s8 get(s8 x);
-
-  void set(u2& x, u2 y);
-  void set(u4& x, u4 y);
-  void set(u8& x, u8 y);
-  void set(s2& x, s2 y);
-  void set(s4& x, s4 y);
-  void set(s8& x, s8 y);
-
-  static Endian* get_swapping() { return &_swapping; }
-};
-#endif // SHARE_VM_UTILITIES_ENDIAN_HPP
--- a/hotspot/src/share/vm/utilities/stack.inline.hpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/utilities/stack.inline.hpp	Tue Sep 08 16:10:37 2015 +0200
@@ -27,6 +27,17 @@
 
 #include "utilities/stack.hpp"
 
+// Stack is used by the GC code and in some hot paths a lot of the Stack
+// code gets inlined. This is generally good, but when too much code has
+// been inlined, no further inlining is allowed by GCC. Therefore we need
+// to prevent parts of the slow path in Stack from being inlined so that other
+// code can be.
+#if defined(TARGET_COMPILER_gcc)
+#define NOINLINE __attribute__((noinline))
+#else
+#define NOINLINE
+#endif
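+// NOINLINE is applied below to the slow-path segment allocation (push_segment)
+// and is #undef'd again at the end of this file.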
+
 template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
                      size_t max_size):
   _seg_size(segment_size),
@@ -141,7 +152,7 @@
 }
 
 template <class E, MEMFLAGS F>
-void Stack<E, F>::push_segment()
+NOINLINE void Stack<E, F>::push_segment()
 {
   assert(this->_cur_seg_size == this->_seg_size, "current segment is not full");
   E* next;
@@ -269,4 +280,6 @@
   return _cur_seg + --_cur_seg_size;
 }
 
+#undef NOINLINE
+
 #endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Tue Sep 08 16:10:37 2015 +0200
@@ -231,7 +231,7 @@
 
   if (signame) {
     jio_snprintf(buf, buflen,
-                 "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" INTPTR_FORMAT,
+                 "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" UINTX_FORMAT,
                  signame, _id, _pc,
                  os::current_process_id(), os::current_thread_id());
   } else if (_filename != NULL && _lineno > 0) {
@@ -239,7 +239,7 @@
     char separator = os::file_separator()[0];
     const char *p = strrchr(_filename, separator);
     int n = jio_snprintf(buf, buflen,
-                         "Internal Error at %s:%d, pid=%d, tid=" INTPTR_FORMAT,
+                         "Internal Error at %s:%d, pid=%d, tid=" UINTX_FORMAT,
                          p ? p + 1 : _filename, _lineno,
                          os::current_process_id(), os::current_thread_id());
     if (n >= 0 && n < buflen && _message) {
@@ -253,7 +253,7 @@
     }
   } else {
     jio_snprintf(buf, buflen,
-                 "Internal Error (0x%x), pid=%d, tid=" INTPTR_FORMAT,
+                 "Internal Error (0x%x), pid=%d, tid=" UINTX_FORMAT,
                  _id, os::current_process_id(), os::current_thread_id());
   }
 
@@ -486,7 +486,7 @@
 
      // process id, thread id
      st->print(", pid=%d", os::current_process_id());
-     st->print(", tid=" INTPTR_FORMAT, os::current_thread_id());
+     st->print(", tid=" UINTX_FORMAT, os::current_thread_id());
      st->cr();
 
   STEP(80, "(printing error message)")
--- a/hotspot/test/compiler/arguments/CheckCICompilerCount.java	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/test/compiler/arguments/CheckCICompilerCount.java	Tue Sep 08 16:10:37 2015 +0200
@@ -75,7 +75,7 @@
             "intx CICompilerCount                          := 1                                   {product}"
         },
         {
-            "CICompilerCount=0 must be at least 1",
+            "CICompilerCount (0) must be at least 1",
             "Improperly specified VM option 'CICompilerCount=0'"
         },
         {
@@ -130,7 +130,7 @@
             "intx CICompilerCount                          := 2                                   {product}"
         },
         {
-            "CICompilerCount=1 must be at least 2",
+            "CICompilerCount (1) must be at least 2",
             "Improperly specified VM option 'CICompilerCount=1'"
         },
         {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/GCTypes.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,115 @@
+/*
+* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
+import java.util.Arrays;
+import java.util.Objects;
+
+/**
+ * Helper class with enum representation of GC types.
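+ *
+ * A minimal usage sketch (both accessors are defined in this class):
+ * <pre>
+ *     GCTypes.YoungGCType youngType = GCTypes.YoungGCType.getYoungGCType();
+ *     GCTypes.OldGCType oldType = GCTypes.OldGCType.getOldGCType();
+ * </pre>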
+ */
+public final class GCTypes {
+
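+    /**
+     * Returns the constant of the given GCType enum whose GC name matches one of
+     * the currently available GarbageCollectorMXBeans, or null if none matches.
+     */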
+    private static <T extends GCType> T getCurrentGCType(Class<T> type) {
+        return ManagementFactory.getGarbageCollectorMXBeans().stream()
+                .map(bean -> getGCTypeByName(type, bean.getName()))
+                .filter(Objects::nonNull)
+                .findFirst()
+                .orElse(null);
+    }
+
+    private static <T extends GCType> T getGCTypeByName(Class<T> type, String name) {
+        return Arrays.stream(type.getEnumConstants())
+                .filter(e -> e.getGCName().equals(name))
+                .findFirst()
+                .orElse(null);
+    }
+
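+    /**
+     * Returns the GarbageCollectorMXBean whose name matches one of the constants
+     * of the given GCType enum, or null if no such bean is found.
+     */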
+    private static <T extends GCType> GarbageCollectorMXBean getGCBeanByType(Class<T> type) {
+        return ManagementFactory.getGarbageCollectorMXBeans().stream()
+                .filter(bean -> Arrays.stream(type.getEnumConstants())
+                        .filter(enumName -> enumName.getGCName().equals(bean.getName()))
+                        .findFirst()
+                        .isPresent()
+                )
+                .findFirst()
+                .orElse(null);
+    }
+
+    /**
+     * Helper interface used by GCTypes static methods
+     * to get gcTypeName field of *GCType classes.
+     */
+    private interface GCType {
+
+        String getGCName();
+    }
+
+    public static enum YoungGCType implements GCType {
+        DefNew("Copy"),
+        ParNew("ParNew"),
+        PSNew("PS Scavenge"),
+        G1("G1 Young Generation");
+
+        @Override
+        public String getGCName() {
+            return gcTypeName;
+        }
+        private final String gcTypeName;
+
+        private YoungGCType(String name) {
+            gcTypeName = name;
+        }
+
+        public static YoungGCType getYoungGCType() {
+            return GCTypes.getCurrentGCType(YoungGCType.class);
+        }
+
+        public static GarbageCollectorMXBean getYoungGCBean() {
+            return GCTypes.getGCBeanByType(YoungGCType.class);
+        }
+    }
+
+    public static enum OldGCType implements GCType {
+        Serial("MarkSweepCompact"),
+        CMS("ConcurrentMarkSweep"),
+        PSOld("PS MarkSweep"),
+        G1("G1 Old Generation");
+
+        private final String gcTypeName;
+
+        private OldGCType(String name) {
+            gcTypeName = name;
+        }
+
+        public static OldGCType getOldGCType() {
+            return GCTypes.getCurrentGCType(OldGCType.class);
+        }
+
+        @Override
+        public String getGCName() {
+            return gcTypeName;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestMaxMinHeapFreeRatioFlags.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,299 @@
+/*
+* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestMaxMinHeapFreeRatioFlags
+ * @key gc
+ * @summary Verify that heap size changes according to max and min heap free ratios.
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management
+ * @build TestMaxMinHeapFreeRatioFlags
+ * @run driver/timeout=240 TestMaxMinHeapFreeRatioFlags
+ */
+
+import java.util.LinkedList;
+import java.util.Arrays;
+import java.util.Collections;
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
+import jdk.test.lib.Utils;
+import jdk.test.lib.HeapRegionUsageTool;
+import sun.misc.Unsafe;
+
+public class TestMaxMinHeapFreeRatioFlags {
+
+    public static final long M = 1024 * 1024;
+    public static final long MAX_HEAP_SIZE = 200 * M;
+    public static final long HEAP_SIZE = 10 * M;
+    public static final long MAX_NEW_SIZE = 20 * M;
+    public static final long NEW_SIZE = 5 * M;
+
+    public static void main(String args[]) throws Exception {
+        LinkedList<String> options = new LinkedList<>(
+                Arrays.asList(Utils.getFilteredTestJavaOpts("-XX:[^ ]*HeapFreeRatio","-XX:\\+ExplicitGCInvokesConcurrent"))
+        );
+
+        negativeTest(20, false, 10, true, options);
+        negativeTest(100, true, 0, false, options);
+        negativeTest(101, false, 50, false, options);
+        negativeTest(49, true, 102, true, options);
+        negativeTest(-1, false, 50, false, options);
+        negativeTest(50, true, -1, true, options);
+
+        positiveTest(10, false, 90, false, options);
+        positiveTest(10, true, 80, false, options);
+        positiveTest(20, false, 70, true, options);
+        positiveTest(25, true, 65, true, options);
+        positiveTest(40, false, 50, false, options);
+    }
+
+    /**
+     * Verify that the heap size is changed to conform to the
+     * min and max heap free ratios.
+     *
+     * @param minRatio value of MinHeapFreeRatio option
+     * @param useXminf if true, use the -Xminf option instead of MinHeapFreeRatio
+     * @param maxRatio value of MaxHeapFreeRatio option
+     * @param useXmaxf if true, use the -Xmaxf option instead of MaxHeapFreeRatio
+     * @param options additional options for JVM
+     */
+    public static void positiveTest(int minRatio, boolean useXminf,
+            int maxRatio, boolean useXmaxf,
+            LinkedList<String> options) throws Exception {
+
+        LinkedList<String> vmOptions = new LinkedList<>(options);
+        Collections.addAll(vmOptions,
+                (useXminf ? "-Xminf" + minRatio / 100.0 : "-XX:MinHeapFreeRatio=" + minRatio),
+                (useXmaxf ? "-Xmaxf" + maxRatio / 100.0 : "-XX:MaxHeapFreeRatio=" + maxRatio),
+                "-Xmx" + MAX_HEAP_SIZE,
+                "-Xms" + HEAP_SIZE,
+                "-XX:NewSize=" + NEW_SIZE,
+                "-XX:MaxNewSize=" + MAX_NEW_SIZE,
+                RatioVerifier.class.getName(),
+                Integer.toString(minRatio),
+                Integer.toString(maxRatio)
+        );
+
+        ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[vmOptions.size()]));
+        OutputAnalyzer analyzer = new OutputAnalyzer(procBuilder.start());
+        analyzer.shouldHaveExitValue(0);
+    }
+
+    /**
+     * Verify that the VM fails to start with the specified ratios.
+     *
+     * @param minRatio value of MinHeapFreeRatio option
+     * @param useXminf if true, use the -Xminf option instead of MinHeapFreeRatio
+     * @param maxRatio value of MaxHeapFreeRatio option
+     * @param useXmaxf if true, use the -Xmaxf option instead of MaxHeapFreeRatio
+     * @param options additional options for JVM
+     */
+    public static void negativeTest(int minRatio, boolean useXminf,
+            int maxRatio, boolean useXmaxf,
+            LinkedList<String> options) throws Exception {
+
+        LinkedList<String> vmOptions = new LinkedList<>(options);
+        Collections.addAll(vmOptions,
+                (useXminf ? "-Xminf" + minRatio / 100.0 : "-XX:MinHeapFreeRatio=" + minRatio),
+                (useXmaxf ? "-Xmaxf" + maxRatio / 100.0 : "-XX:MaxHeapFreeRatio=" + maxRatio),
+                "-version"
+        );
+        ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[vmOptions.size()]));
+        OutputAnalyzer analyzer = new OutputAnalyzer(procBuilder.start());
+        analyzer.shouldHaveExitValue(1);
+        analyzer.shouldContain("Error: Could not create the Java Virtual Machine.");
+    }
+
+    /**
+     * RatioVerifier will be executed in the tested VM.
+     * It checks that the actual heap free ratio after collection lies between MinHeapFreeRatio and MaxHeapFreeRatio.
+     */
+    public static class RatioVerifier {
+
+        private static final Unsafe unsafe = Utils.getUnsafe();
+
+        // Size of byte array that will be allocated
+        public static final int CHUNK_SIZE = 1024;
+        // Length of each byte array that will be added to the "garbage" list.
+        public static final int ARRAY_LENGTH = CHUNK_SIZE - Unsafe.ARRAY_BYTE_BASE_OFFSET;
+        // Number of attempts to force heap shrinking/expansion using GC
+        public static final int GC_TRIES = 10;
+
+        // Value that will be added to/subtracted from the expected min/max heap free ratio
+        // during memory allocation to make sure that the specified limit will be exceeded.
+        public static final double OVERLOAD = 0.05;
+        // Acceptable heap free ratio limit exceedance: verification will fail if
+        // actual ratio is lower than expected min heap free ratio - VARIANCE or
+        // higher than expected max heap free ratio + VARIANCE.
+        public static final double VARIANCE = 0.025;
+
+        public static LinkedList<Object> garbage = new LinkedList<>();
+
+        public static void main(String args[]) throws Exception {
+            if (args.length != 2) {
+                throw new IllegalArgumentException("Expected 2 args: <minRatio> <maxRatio>");
+            }
+            if (GCTypes.OldGCType.getOldGCType() == GCTypes.OldGCType.PSOld) {
+                System.out.println("Test is not applicable to parallel GC");
+                return;
+            }
+
+            double minRatio = Integer.valueOf(args[0]) / 100.0;
+            double maxRatio = Integer.valueOf(args[1]) / 100.0;
+
+            long maxHeapSize = getMax();
+
+            // Commit half of the total heap size to have enough space
+            // to both shrink and expand
+            while (getCommitted() < maxHeapSize / 2) {
+                garbage.add(new byte[ARRAY_LENGTH]);
+            }
+
+            forceGC();
+            // Verify that current heap free ratio lies between specified limits
+            verifyRatio(minRatio, maxRatio);
+
+            // Estimate how much memory we have to allocate to force expansion
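+            // (target used = committed * (1 - minRatio + OVERLOAD), i.e. enough to push
+            // the free ratio OVERLOAD below minRatio so that the heap has to expand)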
+            long memoryToFill = (long) (getCommitted() * (1 - minRatio + OVERLOAD))
+                    - getUsed();
+
+            long previouslyCommitted = getCommitted();
+
+            while (memoryToFill > 0) {
+                garbage.add(new byte[CHUNK_SIZE]);
+                memoryToFill -= CHUNK_SIZE;
+            }
+
+            forceGC();
+            // Verify that after the memory allocation the heap free ratio still conforms to the specified limits
+            verifyRatio(minRatio, maxRatio);
+            // Verify that heap was actually expanded
+            if (previouslyCommitted >= getCommitted()) {
+                throw new RuntimeException("Heap was not expanded.");
+            }
+
+            // Estimate how much memory we have to free to force shrinking
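+            // (target used = committed * (1 - maxRatio - OVERLOAD), i.e. enough to push
+            // the free ratio OVERLOAD above maxRatio so that the heap has to shrink)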
+            long memoryToFree = getUsed()
+                    - (long) (getCommitted() * (1 - maxRatio - OVERLOAD));
+
+            previouslyCommitted = getCommitted();
+
+            while (memoryToFree > 0 && garbage.size() > 0) {
+                garbage.remove(garbage.size() - 1);
+                memoryToFree -= CHUNK_SIZE;
+            }
+
+            forceGC();
+            // Verify that the heap free ratio still conforms to the specified limits
+            verifyRatio(minRatio, maxRatio);
+            // Verify that heap was actually shrunk
+            if (previouslyCommitted <= getCommitted()) {
+                throw new RuntimeException("Heap was not shrunk.");
+            }
+
+        }
+
+        public static void forceGC() {
+            for (int i = 0; i < GC_TRIES; i++) {
+                System.gc();
+                try {
+                    Thread.sleep(10);
+                } catch (InterruptedException ie) {
+                }
+            }
+        }
+
+        /**
+         * Verify that the heap free ratio conforms to the specified limits.
+         * The actual heap free ratio may be very close to one of the specified limits,
+         * but must not exceed it by more than VARIANCE.
+         * Verification will also pass if the actual ratio does not conform to the limits,
+         * but it is not possible to shrink/expand the heap any further.
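+         * For example, with minRatio = 0.10, maxRatio = 0.70 and VARIANCE = 0.025,
+         * an observed free ratio of 0.08 still passes, while 0.07 fails (assuming
+         * the heap could still be expanded).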
+         */
+        public static void verifyRatio(double minRatio, double maxRatio) {
+            double ratio = getHeapFreeRatio();
+            System.out.println(minRatio + " " + ratio + " " + maxRatio);
+            if (minRatio - ratio > VARIANCE
+                    && getCommitted() < getMax()) {
+                throw new RuntimeException("Current heap free ratio is lower than "
+                        + "MinHeapFreeRatio (" + ratio + " vs " + minRatio + ").");
+            }
+            if (ratio - maxRatio > VARIANCE
+                    && getUsed() > getInit()) {
+                throw new RuntimeException("Current heap free ratio is higher than "
+                        + "MaxHeapFreeRatio (" + ratio + " vs " + maxRatio + ").");
+            }
+        }
+
+        /*
+         * Obtain information about heap size.
+         *
+         * For G1, the information is summed up for all types of regions,
+         * because the tested options affect overall heap sizing.
+         *
+         * For all other GCs, information is returned only for the old gen.
+         */
+        public static long getMax() {
+            return HeapRegionUsageTool.getOldUsage().getMax();
+        }
+
+        public static long getInit() {
+            if (GCTypes.OldGCType.getOldGCType() == GCTypes.OldGCType.G1) {
+                return HeapRegionUsageTool.getEdenUsage().getInit()
+                        + HeapRegionUsageTool.getSurvivorUsage().getInit()
+                        + HeapRegionUsageTool.getOldUsage().getInit();
+            } else {
+                return HeapRegionUsageTool.getOldUsage().getInit();
+            }
+        }
+
+        public static long getUsed() {
+            if (GCTypes.OldGCType.getOldGCType() == GCTypes.OldGCType.G1) {
+                return HeapRegionUsageTool.getEdenUsage().getUsed()
+                        + HeapRegionUsageTool.getSurvivorUsage().getUsed()
+                        + HeapRegionUsageTool.getOldUsage().getUsed();
+            } else {
+                return HeapRegionUsageTool.getOldUsage().getUsed();
+            }
+        }
+
+        public static long getCommitted() {
+            if (GCTypes.OldGCType.getOldGCType() == GCTypes.OldGCType.G1) {
+                return HeapRegionUsageTool.getEdenUsage().getCommitted()
+                        + HeapRegionUsageTool.getSurvivorUsage().getCommitted()
+                        + HeapRegionUsageTool.getOldUsage().getCommitted();
+            } else {
+                return HeapRegionUsageTool.getOldUsage().getCommitted();
+            }
+        }
+
+        public static long getFree() {
+            return getCommitted() - getUsed();
+        }
+
+        public static double getHeapFreeRatio() {
+            return getFree() / (double) getCommitted();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestMinAndInitialSurvivorRatioFlags.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,206 @@
+/*
+* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestMinAndInitialSurvivorRatioFlags
+ * @key gc
+ * @summary Verify that MinSurvivorRatio and InitialSurvivorRatio flags work
+ * @library /testlibrary /../../test/lib
+ * @modules java.base/sun.misc
+ *          java.management
+ * @build TestMinAndInitialSurvivorRatioFlags
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver TestMinAndInitialSurvivorRatioFlags
+ */
+
+import jdk.test.lib.AllocationHelper;
+import java.lang.management.MemoryUsage;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import jdk.test.lib.HeapRegionUsageTool;
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
+import jdk.test.lib.Utils;
+import sun.hotspot.WhiteBox;
+
+/* The test verifies that the VM can start with any GC when the MinSurvivorRatio and
+ * InitialSurvivorRatio flags are passed. For Parallel GC it additionally verifies that
+ * after start-up the survivor ratio is equal to the InitialSurvivorRatio value and
+ * that the actual survivor ratio is never less than MinSurvivorRatio.
+ */
+public class TestMinAndInitialSurvivorRatioFlags {
+
+    public static final long M = 1024 * 1024;
+    public static final long HEAP_SIZE = 200 * M;
+    public static final long NEW_SIZE = 100 * M;
+
+    public static void main(String args[]) throws Exception {
+        LinkedList<String> options = new LinkedList<>(
+                Arrays.asList(Utils.getFilteredTestJavaOpts("-XX:[^ ]*SurvivorRatio=[^ ]+"))
+        );
+
+        testSurvivorRatio(5, -1, -1, options, true);
+        testSurvivorRatio(10, -1, -1, options, true);
+        testSurvivorRatio(-1, 5, 3, options, true);
+        testSurvivorRatio(-1, 15, 3, options, true);
+        testSurvivorRatio(-1, 15, 3, options, false);
+        testSurvivorRatio(-1, 10, 10, options, true);
+        testSurvivorRatio(-1, 3, 15, options, true);
+        testSurvivorRatio(-1, 3, 15, options, false);
+    }
+
+    /**
+     * Test that MinSurvivorRatio and InitialSurvivorRatio flags work.
+     *
+     * @param survivorRatio value for -XX:SurvivorRatio option, omitted if negative
+     * @param initRatio value for -XX:InitialSurvivorRatio option, omitted if negative
+     * @param minRatio value for -XX:MinSurvivorRatio option, omitted if negative
+     * @param options additional options for VM
+     * @param useAdaptiveSizePolicy turn on or off UseAdaptiveSizePolicy option
+     */
+    public static void testSurvivorRatio(int survivorRatio,
+            int initRatio,
+            int minRatio,
+            LinkedList<String> options,
+            boolean useAdaptiveSizePolicy) throws Exception {
+
+        LinkedList<String> vmOptions = new LinkedList<>(options);
+        Collections.addAll(vmOptions,
+                "-Xbootclasspath/a:.",
+                "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:+WhiteBoxAPI",
+                "-XX:MaxNewSize=" + NEW_SIZE, "-XX:NewSize=" + NEW_SIZE,
+                "-Xmx" + HEAP_SIZE, "-Xms" + HEAP_SIZE,
+                (survivorRatio >= 0 ? "-XX:SurvivorRatio=" + survivorRatio : ""),
+                (initRatio >= 0 ? "-XX:InitialSurvivorRatio=" + initRatio : ""),
+                (minRatio >= 0 ? "-XX:MinSurvivorRatio=" + minRatio : ""),
+                (useAdaptiveSizePolicy ? "-XX:+UseAdaptiveSizePolicy" : "-XX:-UseAdaptiveSizePolicy"),
+                SurvivorRatioVerifier.class.getName(),
+                Integer.toString(survivorRatio),
+                Integer.toString(initRatio),
+                Integer.toString(minRatio),
+                Boolean.toString(useAdaptiveSizePolicy)
+        );
+        vmOptions.removeIf((String p) -> p.isEmpty());
+        ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[vmOptions.size()]));
+        OutputAnalyzer analyzer = new OutputAnalyzer(procBuilder.start());
+        analyzer.shouldHaveExitValue(0);
+    }
+
+    /**
+     * Class that verifies the survivor ratio.
+     * Will be executed in the tested VM. Checks the initial sizes of the eden and survivor spaces with alignment.
+     */
+    public static class SurvivorRatioVerifier {
+
+        public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+        public static final int MAX_ITERATIONS = 10;
+        public static final int ARRAY_LENGTH = 10000;
+        public static final int CHUNK_SIZE = 10000;
+
+        public static byte garbage[][] = new byte[ARRAY_LENGTH][];
+
+        public static void main(String args[]) throws Exception {
+            if (args.length != 4) {
+                throw new IllegalArgumentException("Expected 4 args: <survivorRatio> <initRatio> <minRatio> <useAdaptiveSizePolicy>");
+            }
+            final int survivorRatio = Integer.valueOf(args[0]);
+            final int initRatio = Integer.valueOf(args[1]);
+            final int minRatio = Integer.valueOf(args[2]);
+            final boolean useAdaptiveSizePolicy = Boolean.valueOf(args[3]);
+
+            // We stop testing only here to ensure that the JVM accepts
+            // both MinSurvivorRatio and InitialSurvivorRatio regardless of the GC
+            if (GCTypes.YoungGCType.getYoungGCType() != GCTypes.YoungGCType.PSNew) {
+                System.out.println("Test is only applicable to Parallel GC");
+                return;
+            }
+
+            // verify initial survivor ratio
+            verifySurvivorRatio(survivorRatio, initRatio, minRatio, useAdaptiveSizePolicy, true);
+
+            // force GC
+            AllocationHelper allocator = new AllocationHelper(MAX_ITERATIONS, ARRAY_LENGTH, CHUNK_SIZE,
+                    () -> (verifySurvivorRatio(survivorRatio, initRatio, minRatio, useAdaptiveSizePolicy, false)));
+            allocator.allocateMemoryAndVerify();
+        }
+
+        /**
+         * Verify actual survivor ratio.
+         *
+         * @param survivorRatio value of SurvivorRatio option, omitted if negative
+         * @param initRatio value of InitialSurvivorRatio option, omitted if negative
+         * @param minRatio value of MinSurvivorRatio option, omitted if negative
+         * @param useAdaptiveSizePolicy value of UseAdaptiveSizePolicy option
+         * @param verifyInitialRatio true if we are going to verify initial ratio
+         */
+        public static Void verifySurvivorRatio(int survivorRatio,
+                int initRatio,
+                int minRatio,
+                boolean useAdaptiveSizePolicy,
+                boolean verifyInitialRatio) {
+
+            MemoryUsage edenUsage = HeapRegionUsageTool.getEdenUsage();
+            MemoryUsage survivorUsage = HeapRegionUsageTool.getSurvivorUsage();
+
+            long alignedNewSize = edenUsage.getMax() + 2 * survivorUsage.getMax();
+            long generationAlignment = wb.psHeapGenerationAlignment();
+
+            if (survivorRatio >= 0) {
+                // -XX:SurvivorRatio was passed to the JVM, so the actual ratio should be SurvivorRatio + 2
+                long expectedSize = HeapRegionUsageTool.alignDown(alignedNewSize / (survivorRatio + 2),
+                        generationAlignment);
+
+                if (survivorUsage.getCommitted() != expectedSize) {
+                    throw new RuntimeException("Expected survivor size is: " + expectedSize
+                            + ", but observed size is: " + survivorUsage.getCommitted());
+                }
+            } else if (verifyInitialRatio || !useAdaptiveSizePolicy) {
+                // In case of initial ratio verification or disabled adaptive size policy,
+                // the ratio should be equal to the InitialSurvivorRatio value
+                long expectedSize = HeapRegionUsageTool.alignDown(alignedNewSize / initRatio,
+                        generationAlignment);
+                if (survivorUsage.getCommitted() != expectedSize) {
+                    throw new RuntimeException("Expected survivor size is: " + expectedSize
+                            + ", but observed size is: " + survivorUsage.getCommitted());
+                }
+            } else {
+                // In any other case the actual survivor ratio should not be lower than MinSurvivorRatio,
+                // or it should be equal to InitialSurvivorRatio
+                long expectedMinSize = HeapRegionUsageTool.alignDown(alignedNewSize / minRatio,
+                        generationAlignment);
+                long expectedInitSize = HeapRegionUsageTool.alignDown(alignedNewSize / initRatio,
+                        generationAlignment);
+                if (survivorUsage.getCommitted() != expectedInitSize
+                        && survivorUsage.getCommitted() < expectedMinSize) {
+                    throw new RuntimeException("Expected survivor size should be " + expectedMinSize
+                            + " or should be greater then " + expectedMinSize
+                            + ", but observer survivor size is " + survivorUsage.getCommitted());
+                }
+            }
+            return null;
+        }
+    }
+}
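For readers unfamiliar with the Parallel GC sizing convention checked by the verifier above, here is a minimal, self-contained sketch of the expected-size arithmetic (divide the new size by SurvivorRatio + 2, then align down). It is illustrative only; the constants are hypothetical stand-ins for the values the test obtains from MemoryPoolMXBeans and WhiteBox.

    // Illustrative sketch only; not part of this changeset.
    public class SurvivorSizeMath {
        // Mirrors the assumed semantics of HeapRegionUsageTool.alignDown: round down to a multiple of alignment.
        static long alignDown(long value, long alignment) {
            return (value / alignment) * alignment;
        }

        public static void main(String[] args) {
            long newSize = 100L * 1024 * 1024;      // hypothetical -XX:NewSize/-XX:MaxNewSize
            long generationAlignment = 512L * 1024; // hypothetical psHeapGenerationAlignment()
            int survivorRatio = 10;                 // hypothetical -XX:SurvivorRatio
            // For Parallel GC the effective divisor is SurvivorRatio + 2.
            long expectedSurvivorSize = alignDown(newSize / (survivorRatio + 2), generationAlignment);
            System.out.println("expected survivor size = " + expectedSurvivorSize);
        }
    }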
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestNewRatioFlag.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,182 @@
+/*
+* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestNewRatioFlag
+ * @key gc
+ * @bug 8025166
+ * @summary Verify that the heap is divided among generations according to NewRatio
+ * @library /testlibrary /../../test/lib
+ * @modules java.base/sun.misc
+ *          java.management
+ * @build TestNewRatioFlag
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver TestNewRatioFlag
+ */
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import jdk.test.lib.HeapRegionUsageTool;
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
+import jdk.test.lib.Utils;
+import sun.hotspot.WhiteBox;
+
+public class TestNewRatioFlag {
+
+    public static final long M = 1024 * 1024;
+    public static final long HEAP_SIZE = 100 * M;
+
+    public static void main(String args[]) throws Exception {
+        LinkedList<String> options = new LinkedList<>(
+                Arrays.asList(Utils.getFilteredTestJavaOpts("(-XX:[^ ]*NewSize=[^ ]+)|(-Xm[ns][^ ]+)"))
+        );
+
+        testNewRatio(4, options);
+        testNewRatio(6, options);
+        testNewRatio(10, options);
+        testNewRatio(15, options);
+        testNewRatio(20, options);
+    }
+
+    /**
+     * Verify that the actual young gen size conforms to the specified NewRatio
+     *
+     * @param ratio value of NewRatio option
+     * @param options additional options for VM
+     */
+    public static void testNewRatio(int ratio, LinkedList<String> options) throws Exception {
+        LinkedList<String> vmOptions = new LinkedList<>(options);
+        Collections.addAll(vmOptions,
+                "-Xbootclasspath/a:.",
+                "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:+WhiteBoxAPI",
+                "-XX:GCLockerEdenExpansionPercent=0",
+                "-Xmx" + HEAP_SIZE,
+                "-Xms" + HEAP_SIZE,
+                "-XX:NewRatio=" + ratio,
+                "-XX:-UseLargePages",
+                NewRatioVerifier.class.getName(),
+                Integer.toString(ratio)
+        );
+
+        ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[vmOptions.size()]));
+        OutputAnalyzer analyzer = new OutputAnalyzer(procBuilder.start());
+        analyzer.shouldHaveExitValue(0);
+        System.out.println(analyzer.getOutput());
+    }
+
+    public static class NewRatioVerifier {
+
+        static WhiteBox wb = WhiteBox.getWhiteBox();
+
+        public static void main(String args[]) {
+            if (args.length != 1) {
+                throw new IllegalArgumentException("Expected 1 arg: <expectedRatio>");
+            }
+            int expectedRatio = Integer.valueOf(args[0]);
+            switch (GCTypes.YoungGCType.getYoungGCType()) {
+                case DefNew:
+                case ParNew:
+                    verifyDefNewNewRatio(expectedRatio);
+                    break;
+                case PSNew:
+                    verifyPSNewRatio(expectedRatio);
+                    break;
+                case G1:
+                    verifyG1NewRatio(expectedRatio);
+                    break;
+                default:
+                    throw new RuntimeException("Unexpected young GC type");
+            }
+        }
+
+        /**
+         * Verify NewSize for DefNew and ParNew collectors.
+         *
+         * Compares the expected NewSize, calculated according to the sizing policies used by DefNew,
+         * with the NewSize value reported by MemoryPoolMXBeans.
+         */
+        public static void verifyDefNewNewRatio(int expectedRatio) {
+            long initEden = HeapRegionUsageTool.getEdenUsage().getInit();
+            long initSurv = HeapRegionUsageTool.getSurvivorUsage().getInit();
+            long initOld = HeapRegionUsageTool.getOldUsage().getInit();
+
+            long newSize = initEden + 2 * initSurv;
+
+            long expectedNewSize = HeapRegionUsageTool.alignDown(initOld / expectedRatio,
+                    wb.getHeapSpaceAlignment());
+
+            if (expectedNewSize != newSize) {
+                throw new RuntimeException("Expected young gen size is: " + expectedNewSize
+                        + ", but observed new size is: " + newSize);
+            }
+        }
+
+        /**
+         * Verify NewSize for PS collector.
+         * The expected NewSize is calculated according to the alignment policies used by PS
+         * and then compared with the actual NewSize obtained from MemoryPoolMXBeans.
+         */
+        public static void verifyPSNewRatio(int expectedRatio) {
+            long initEden = HeapRegionUsageTool.getEdenUsage().getInit();
+            long initSurv = HeapRegionUsageTool.getSurvivorUsage().getInit();
+            long initOld = HeapRegionUsageTool.getOldUsage().getInit();
+
+            long newSize = initEden + 2 * initSurv;
+
+            long alignedDownNewSize = HeapRegionUsageTool.alignDown(initOld / expectedRatio,
+                    wb.getHeapSpaceAlignment());
+            long expectedNewSize = HeapRegionUsageTool.alignUp(alignedDownNewSize,
+                    wb.psVirtualSpaceAlignment());
+
+            if (expectedNewSize != newSize) {
+                throw new RuntimeException("Expected young gen size is: " + expectedNewSize
+                        + ", but observed new size is: " + newSize);
+            }
+        }
+
+        /**
+         * Verify NewSize for G1 GC.
+         * The number of young regions is calculated according to the sizing policies used by G1
+         * and then compared with the actual number of young regions derived from
+         * the values reported by MemoryPoolMXBeans and the region size.
+         */
+        public static void verifyG1NewRatio(int expectedRatio) {
+            long initEden = HeapRegionUsageTool.getEdenUsage().getInit();
+            long initSurv = HeapRegionUsageTool.getSurvivorUsage().getInit();
+            long maxOld = HeapRegionUsageTool.getOldUsage().getMax();
+
+            int regionSize = wb.g1RegionSize();
+            int youngListLength = (int) ((initEden + initSurv) / regionSize);
+            int maxRegions = (int) (maxOld / regionSize);
+            int expectedYoungListLength = (int) (maxRegions / (double) (expectedRatio + 1));
+
+            if (youngListLength != expectedYoungListLength) {
+                throw new RuntimeException("Expected G1 young list length is: " + expectedYoungListLength
+                        + ", but observed young list length is: " + youngListLength);
+            }
+        }
+    }
+}
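A minimal sketch of the NewRatio arithmetic checked by verifyDefNewNewRatio above: the young gen size (eden plus two survivors) should equal the old gen size divided by NewRatio, aligned down to the heap space alignment. All values below are hypothetical stand-ins for what the test reads from MemoryPoolMXBeans and WhiteBox.

    // Illustrative sketch only; not part of this changeset.
    public class NewRatioMath {
        static long alignDown(long value, long alignment) {
            return (value / alignment) * alignment;
        }

        public static void main(String[] args) {
            long initEden = 24L * 1024 * 1024;     // hypothetical eden init size
            long initSurv = 3L * 1024 * 1024;      // hypothetical survivor init size
            long initOld = 120L * 1024 * 1024;     // hypothetical old gen init size
            long heapAlignment = 2L * 1024 * 1024; // hypothetical getHeapSpaceAlignment()
            int newRatio = 4;                      // hypothetical -XX:NewRatio

            long youngSize = initEden + 2 * initSurv;
            long expectedYoungSize = alignDown(initOld / newRatio, heapAlignment);
            System.out.println(youngSize == expectedYoungSize
                    ? "young gen conforms to NewRatio"
                    : "mismatch: " + youngSize + " != " + expectedYoungSize);
        }
    }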
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestNewSizeFlags.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestNewSizeFlags
+ * @key gc
+ * @bug 8025166
+ * @summary Verify that the young gen size conforms to the values specified by the NewSize, MaxNewSize and -Xmn options
+ * @library /testlibrary /../../test/lib
+ * @modules java.base/sun.misc
+ *          java.management
+ * @build TestNewSizeFlags
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver/timeout=240  TestNewSizeFlags
+ */
+
+import jdk.test.lib.AllocationHelper;
+import java.io.IOException;
+import java.lang.management.MemoryUsage;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import jdk.test.lib.HeapRegionUsageTool;
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
+import jdk.test.lib.Utils;
+import sun.hotspot.WhiteBox;
+
+public class TestNewSizeFlags {
+
+    public static final long M = 1024 * 1024;
+
+    public static void main(String args[]) throws Exception {
+        LinkedList<String> options = new LinkedList<>(
+                Arrays.asList(Utils.getFilteredTestJavaOpts("(-Xm[nsx][^ ]+)|"
+                                + "(-XX:(Max)?((New)|"
+                                + "(Heap))((Size)|"
+                                + "(Ratio))=[^ ]+)"))
+        );
+
+        // Test NewSize and MaxNewSize
+        testNewSizeFlags(20 * M, 10 * M, 30 * M, 40 * M, options, false);
+        testNewSizeFlags(10 * M, 20 * M, 30 * M, 40 * M, options, false);
+        testNewSizeFlags(-1, 20 * M, 30 * M, 40 * M, options, false);
+        testNewSizeFlags(10 * M, -1, 30 * M, 40 * M, options, false);
+        testNewSizeFlags(20 * M, 20 * M, 30 * M, 40 * M, options, false);
+        testNewSizeFlags(20 * M, 30 * M, 40 * M, 50 * M, options, false);
+        testNewSizeFlags(30 * M, 100 * M, 150 * M, 200 * M, options, false);
+        testNewSizeFlags(0, -1, 30 * M, 40 * M, options, false);
+
+        // Test -Xmn
+        testXmnFlags(0, 30 * M, 40 * M, options, true);
+        testXmnFlags(20 * M, 30 * M, 40 * M, options, false);
+        testXmnFlags(50 * M, 70 * M, 100 * M, options, false);
+    }
+
+    /**
+     * Verify that NewSize and MaxNewSize flags affect young gen size.
+     *
+     * @param newSize value of NewSize option, omitted if negative
+     * @param maxNewSize value of MaxNewSize option, omitted if negative
+     * @param heapSize value of HeapSize option
+     * @param maxHeapSize value of MaxHeapSize option
+     * @param options additional options for JVM
+     * @param failureExpected true if JVM should fail with passed heap size options
+     */
+    public static void testNewSizeFlags(long newSize, long maxNewSize,
+            long heapSize, long maxHeapSize,
+            LinkedList<String> options,
+            boolean failureExpected) throws Exception {
+        testVMOptions(newSize, maxNewSize,
+                heapSize, maxHeapSize,
+                newSize, (maxNewSize >= 0 ? Math.max(maxNewSize, newSize) : maxNewSize),
+                options, failureExpected);
+    }
+
+    /**
+     * Verify that the -Xmn flag affects young gen size.
+     *
+     * @param mnValue value of -Xmn option
+     * @param heapSize value of HeapSize option
+     * @param maxHeapSize value of MaxHeapSize option
+     * @param options additional options for JVM
+     * @param failureExpected true if JVM should fail with passed heap size options
+     */
+    public static void testXmnFlags(long mnValue,
+            long heapSize, long maxHeapSize,
+            LinkedList<String> options,
+            boolean failureExpected) throws Exception {
+        LinkedList<String> newOptions = new LinkedList<>(options);
+        newOptions.add("-Xmn" + mnValue);
+        testVMOptions(-1, -1,
+                heapSize, maxHeapSize,
+                mnValue, mnValue,
+                newOptions, failureExpected);
+    }
+
+    /**
+     * Verify that NewSize and MaxNewSize flags affect young gen size.
+     *
+     * @param newSize value of NewSize option, omitted if negative
+     * @param maxNewSize value of MaxNewSize option, omitted if negative
+     * @param heapSize value of HeapSize option
+     * @param maxHeapSize value of MaxHeapSize option
+     * @param expectedNewSize expected initial young gen size
+     * @param expectedMaxNewSize expected max young gen size
+     * @param options additional options for JVM
+     * @param failureExpected true if JVM should fail with passed heap size options
+     */
+    public static void testVMOptions(long newSize, long maxNewSize,
+            long heapSize, long maxHeapSize,
+            long expectedNewSize, long expectedMaxNewSize,
+            LinkedList<String> options, boolean failureExpected) throws Exception {
+        OutputAnalyzer analyzer = startVM(options, newSize, maxNewSize, heapSize, maxHeapSize, expectedNewSize, expectedMaxNewSize);
+
+        if (failureExpected) {
+            analyzer.shouldHaveExitValue(1);
+            analyzer.shouldMatch("(Error occurred during initialization of VM)|"
+                    + "(Error: Could not create the Java Virtual Machine.)");
+        } else {
+            analyzer.shouldHaveExitValue(0);
+        }
+    }
+
+    private static OutputAnalyzer startVM(LinkedList<String> options,
+            long newSize, long maxNewSize,
+            long heapSize, long maxHeapSize,
+            long expectedNewSize, long expectedMaxNewSize) throws Exception, IOException {
+        LinkedList<String> vmOptions = new LinkedList<>(options);
+        Collections.addAll(vmOptions,
+                "-Xbootclasspath/a:.",
+                "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:+WhiteBoxAPI",
+                (newSize >= 0 ? "-XX:NewSize=" + newSize : ""),
+                (maxNewSize >= 0 ? "-XX:MaxNewSize=" + maxNewSize : ""),
+                "-Xmx" + maxHeapSize,
+                "-Xms" + heapSize,
+                "-XX:GCLockerEdenExpansionPercent=0",
+                "-XX:-UseLargePages",
+                NewSizeVerifier.class.getName(),
+                Long.toString(expectedNewSize),
+                Long.toString(expectedMaxNewSize)
+        );
+        vmOptions.removeIf(String::isEmpty);
+        ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[vmOptions.size()]));
+        OutputAnalyzer analyzer = new OutputAnalyzer(procBuilder.start());
+        return analyzer;
+    }
+
+    /**
+     * NewSizeVerifier checks that the initial young gen size is equal to the expected
+     * size (taking alignment into account) and that the young gen size does not grow
+     * beyond the expected max size.
+     * In order to verify that the young gen size will not be greater than the expected
+     * max size, NewSizeVerifier does some object allocation to force garbage
+     * collection and heap expansion.
+     */
+    public static class NewSizeVerifier {
+
+        static WhiteBox wb = WhiteBox.getWhiteBox();
+
+        public static final int ARRAY_LENGTH = 100;
+        public static final int CHUNK_SIZE = 1024;
+        public static final int MAX_ITERATIONS = 10;
+        public static byte garbage[][] = new byte[ARRAY_LENGTH][];
+
+        public static void main(String args[]) throws Exception {
+            if (args.length != 2) {
+                throw new IllegalArgumentException("Expected 2 args: <expectedNewSize> <expectedMaxNewSize>");
+            }
+            final long newSize = Long.valueOf(args[0]);
+            final long maxNewSize = Long.valueOf(args[1]);
+
+            // verify initial size
+            verifyNewSize(newSize, maxNewSize);
+
+            // force GC and verify that size is still correct
+            AllocationHelper allocator = new AllocationHelper(MAX_ITERATIONS, ARRAY_LENGTH, CHUNK_SIZE, () -> (verifyNewSize(newSize, maxNewSize)));
+            allocator.allocateMemoryAndVerifyNoOOME();
+        }
+
+        /**
+         * Verify that the actual young gen size conforms to the NewSize and MaxNewSize values.
+         */
+        public static Void verifyNewSize(long newSize, long maxNewSize) {
+            long alignedNewSize = alignNewSize(newSize);
+            long alignedMaxNewSize = alignNewSize(maxNewSize);
+
+            MemoryUsage youngGenUsage = getYoungGenUsage();
+
+            if (newSize != -1) {
+                if (youngGenUsage.getInit() < alignedNewSize) {
+                    throw new RuntimeException("initial new size < NewSize value: "
+                            + youngGenUsage.getInit() + " < " + alignedNewSize);
+                }
+
+                if (youngGenUsage.getCommitted() < alignedNewSize) {
+                    throw new RuntimeException("actual new size < NewSize value: "
+                            + youngGenUsage.getCommitted() + " < " + alignedNewSize);
+                }
+
+                // for G1 max new size == committed new size
+                if (GCTypes.YoungGCType.getYoungGCType() != GCTypes.YoungGCType.G1
+                        && youngGenUsage.getMax() < alignedNewSize) {
+                    throw new RuntimeException("max new size < NewSize value: "
+                            + youngGenUsage.getMax() + " < " + alignedNewSize);
+                }
+            }
+
+            if (maxNewSize != -1) {
+                if (youngGenUsage.getInit() > alignedMaxNewSize) {
+                    throw new RuntimeException("initial new size > MaxNewSize value: "
+                            + youngGenUsage.getInit() + " > " + alignedMaxNewSize);
+                }
+
+                if (youngGenUsage.getCommitted() > alignedMaxNewSize) {
+                    throw new RuntimeException("actual new size > MaxNewSize value: "
+                            + youngGenUsage.getCommitted() + " > " + alignedMaxNewSize);
+                }
+
+                if (GCTypes.YoungGCType.getYoungGCType() != GCTypes.YoungGCType.G1
+                        && youngGenUsage.getMax() != alignedMaxNewSize) {
+                    throw new RuntimeException("max new size != MaxNewSize value: "
+                            + youngGenUsage.getMax() + " != " + alignedMaxNewSize);
+                }
+            }
+            return null;
+        }
+
+        /**
+         *  Get young gen memory usage.
+         *
+         *  For G1 it is EdenUsage + SurvivorUsage,
+         *  for other GCs it is EdenUsage + 2 * SurvivorUsage.
+         *  For G1 the max value is simply Long.MAX_VALUE.
+         *  For all GCs the used value is 0.
+         */
+        private static MemoryUsage getYoungGenUsage() {
+            if (GCTypes.YoungGCType.getYoungGCType() == GCTypes.YoungGCType.G1) {
+                return new MemoryUsage(HeapRegionUsageTool.getEdenUsage().getInit()
+                        + HeapRegionUsageTool.getSurvivorUsage().getInit(),
+                        0,
+                        HeapRegionUsageTool.getEdenUsage().getCommitted()
+                        + HeapRegionUsageTool.getSurvivorUsage().getCommitted(),
+                        Long.MAX_VALUE);
+            } else {
+                return new MemoryUsage(HeapRegionUsageTool.getEdenUsage().getInit()
+                        + HeapRegionUsageTool.getSurvivorUsage().getInit() * 2,
+                        0,
+                        HeapRegionUsageTool.getEdenUsage().getCommitted()
+                        + HeapRegionUsageTool.getSurvivorUsage().getCommitted() * 2,
+                        HeapRegionUsageTool.getEdenUsage().getMax()
+                        + HeapRegionUsageTool.getSurvivorUsage().getMax() * 2);
+            }
+        }
+
+        /**
+         * Align the value according to the young GC in use.
+         */
+        public static long alignNewSize(long value) {
+            switch (GCTypes.YoungGCType.getYoungGCType()) {
+                case DefNew:
+                case ParNew:
+                    return HeapRegionUsageTool.alignDown(value, wb.getHeapSpaceAlignment());
+                case PSNew:
+                    return HeapRegionUsageTool.alignUp(HeapRegionUsageTool.alignDown(value,
+                            wb.getHeapSpaceAlignment()),
+                            wb.psVirtualSpaceAlignment());
+                case G1:
+                    return HeapRegionUsageTool.alignUp(value, wb.g1RegionSize());
+                default:
+                    throw new RuntimeException("Unexpected young GC type");
+            }
+        }
+    }
+}
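The alignment step in NewSizeVerifier.alignNewSize above differs per collector; the sketch below spells out the align-down/align-up arithmetic it relies on, with hypothetical alignment values standing in for the WhiteBox queries.

    // Illustrative sketch only; not part of this changeset.
    public class AlignmentMath {
        static long alignDown(long value, long alignment) {
            return (value / alignment) * alignment;
        }
        static long alignUp(long value, long alignment) {
            return ((value + alignment - 1) / alignment) * alignment;
        }

        public static void main(String[] args) {
            long newSize = 30L * 1024 * 1024;      // hypothetical -XX:NewSize
            long heapAlignment = 2L * 1024 * 1024; // hypothetical getHeapSpaceAlignment()
            long psAlignment = 512L * 1024;        // hypothetical psVirtualSpaceAlignment()
            long g1RegionSize = 1L * 1024 * 1024;  // hypothetical g1RegionSize()

            System.out.println("DefNew/ParNew: " + alignDown(newSize, heapAlignment));
            System.out.println("PS:            " + alignUp(alignDown(newSize, heapAlignment), psAlignment));
            System.out.println("G1:            " + alignUp(newSize, g1RegionSize));
        }
    }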
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestSurvivorRatioFlag.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,182 @@
+/*
+* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestSurvivorRatioFlag
+ * @key gc
+ * @summary Verify that actual survivor ratio is equal to specified SurvivorRatio value
+ * @library /testlibrary /../../test/lib
+ * @modules java.base/sun.misc
+ *          java.management
+ * @build TestSurvivorRatioFlag
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver TestSurvivorRatioFlag
+ */
+
+import jdk.test.lib.AllocationHelper;
+import java.lang.management.MemoryUsage;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import jdk.test.lib.HeapRegionUsageTool;
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
+import jdk.test.lib.Utils;
+import sun.hotspot.WhiteBox;
+
+public class TestSurvivorRatioFlag {
+
+    public static final long M = 1024 * 1024;
+    public static final long HEAP_SIZE = 200 * M;
+    public static final long NEW_SIZE = 100 * M;
+
+    public static void main(String args[]) throws Exception {
+        LinkedList<String> options = new LinkedList<>(
+                Arrays.asList(Utils.getFilteredTestJavaOpts("-XX:[^ ]*SurvivorRatio=[^ ]+"))
+        );
+
+        testSurvivorRatio(3, options);
+        testSurvivorRatio(6, options);
+        testSurvivorRatio(10, options);
+        testSurvivorRatio(15, options);
+        testSurvivorRatio(20, options);
+    }
+
+    /**
+     * Verify that the actual survivor ratio is equal to the specified one.
+     *
+     * @param ratio survivor ratio to be verified
+     * @param options additional options for the JVM
+     */
+    public static void testSurvivorRatio(int ratio, LinkedList<String> options) throws Exception {
+
+        LinkedList<String> vmOptions = new LinkedList<>(options);
+
+        Collections.addAll(vmOptions,
+                "-Xbootclasspath/a:.",
+                "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:+WhiteBoxAPI",
+                "-XX:GCLockerEdenExpansionPercent=0",
+                "-XX:MaxNewSize=" + NEW_SIZE,
+                "-XX:NewSize=" + NEW_SIZE,
+                "-Xmx" + HEAP_SIZE,
+                "-Xms" + HEAP_SIZE,
+                "-XX:SurvivorRatio=" + ratio,
+                SurvivorRatioVerifier.class.getName(),
+                Integer.toString(ratio)
+        );
+
+        ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[vmOptions.size()]));
+        OutputAnalyzer analyzer = new OutputAnalyzer(procBuilder.start());
+        analyzer.shouldHaveExitValue(0);
+    }
+
+    /**
+     * Class that verifies survivor ratio.
+     */
+    public static class SurvivorRatioVerifier {
+
+        static WhiteBox wb = WhiteBox.getWhiteBox();
+
+        public static final int MAX_ITERATIONS = 10;
+        public static final int ARRAY_LENGTH = 10000;
+        public static final int CHUNK_SIZE = 10000;
+
+        public static void main(String args[]) throws Exception {
+            if (args.length != 1) {
+                throw new IllegalArgumentException("Expected 1 arg: <ratio>");
+            }
+            final int ratio = Integer.valueOf(args[0]);
+
+            AllocationHelper allocator = new AllocationHelper(MAX_ITERATIONS, ARRAY_LENGTH, CHUNK_SIZE, () -> (verifySurvivorRatio(ratio)));
+            allocator.allocateMemoryAndVerify();
+        }
+
+        /**
+         * Verify that actual survivor ratio is equal to expected.
+         * Depending on selected young GC we verify that:
+         * - for DefNew and ParNew: eden_size / survivor_size is close to expectedRatio;
+         * - for PSNew:             survivor_size is equal to young_gen_size / (expectedRatio + 2);
+         * - for G1:                survivor_regions <= young_list_length / expectedRatio.
+         */
+        public static Void verifySurvivorRatio(int expectedRatio) {
+            GCTypes.YoungGCType type = GCTypes.YoungGCType.getYoungGCType();
+            switch (type) {
+                case DefNew:
+                case ParNew:
+                    verifyDefNewSurvivorRatio(expectedRatio);
+                    break;
+                case PSNew:
+                    verifyPSSurvivorRatio(expectedRatio);
+                    break;
+                case G1:
+                    verifyG1SurvivorRatio(expectedRatio);
+                    break;
+                default:
+                    throw new RuntimeException("Unexpected young GC type");
+            }
+            return null;
+        }
+
+        private static void verifyDefNewSurvivorRatio(int expectedRatio) {
+            MemoryUsage edenUsage = HeapRegionUsageTool.getEdenUsage();
+            MemoryUsage survivorUsage = HeapRegionUsageTool.getSurvivorUsage();
+
+            int actualRatio = (int) (edenUsage.getCommitted() / survivorUsage.getCommitted());
+            if (Math.abs(actualRatio - expectedRatio) > 1) {
+                throw new RuntimeException("Expected survivor ratio is: " + expectedRatio
+                        + ", but observed ratio is: " + actualRatio);
+            }
+        }
+
+        private static void verifyPSSurvivorRatio(int expectedRatio) {
+            MemoryUsage edenUsage = HeapRegionUsageTool.getEdenUsage();
+            MemoryUsage survivorUsage = HeapRegionUsageTool.getSurvivorUsage();
+
+            long youngGenSize = edenUsage.getMax() + 2 * survivorUsage.getMax();
+            // for Parallel GC the effective Min/InitialSurvivorRatio = SurvivorRatio + 2
+            long expectedSize = HeapRegionUsageTool.alignDown(youngGenSize / (expectedRatio + 2),
+                    wb.psHeapGenerationAlignment());
+
+            if (expectedSize != survivorUsage.getCommitted()) {
+                throw new RuntimeException("Expected survivor size is: " + expectedSize
+                        + ", but observed size is: " + survivorUsage.getCommitted());
+            }
+        }
+
+        private static void verifyG1SurvivorRatio(int expectedRatio) {
+            MemoryUsage survivorUsage = HeapRegionUsageTool.getSurvivorUsage();
+
+            int regionSize = wb.g1RegionSize();
+            int youngListLength = (int) Math.max(NEW_SIZE / regionSize, 1);
+            int expectedSurvivorRegions = (int) Math.ceil(youngListLength / (double) expectedRatio);
+            int observedSurvivorRegions = (int) (survivorUsage.getCommitted() / regionSize);
+
+            if (expectedSurvivorRegions < observedSurvivorRegions) {
+                throw new RuntimeException("Expected amount of G1 survivor regions is "
+                        + expectedSurvivorRegions + ", but observed "
+                        + observedSurvivorRegions);
+            }
+        }
+    }
+}
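The G1 branch of the verifier above bounds the number of survivor regions rather than comparing exact sizes; the sketch below restates the same ceiling arithmetic with a hypothetical region size in place of the WhiteBox query.

    // Illustrative sketch only; not part of this changeset.
    public class G1SurvivorRegionsMath {
        public static void main(String[] args) {
            long newSize = 100L * 1024 * 1024; // NEW_SIZE used by the test
            int regionSize = 1024 * 1024;      // hypothetical g1RegionSize()
            int survivorRatio = 10;            // hypothetical -XX:SurvivorRatio

            int youngListLength = (int) Math.max(newSize / regionSize, 1);
            int expectedSurvivorRegions = (int) Math.ceil(youngListLength / (double) survivorRatio);
            // The verifier requires the observed survivor region count not to exceed this bound.
            System.out.println("expected at most " + expectedSurvivorRegions + " survivor regions");
        }
    }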
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestTargetSurvivorRatioFlag.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,319 @@
+/*
+* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestTargetSurvivorRatioFlag
+ * @key gc
+ * @summary Verify that option TargetSurvivorRatio affects survivor space occupancy after minor GC.
+ * @library /testlibrary /../../test/lib
+ * @modules java.base/sun.misc
+ *          java.management
+ * @build TestTargetSurvivorRatioFlag
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver TestTargetSurvivorRatioFlag
+ */
+
+import jdk.test.lib.AllocationHelper;
+import java.lang.management.GarbageCollectorMXBean;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import jdk.test.lib.HeapRegionUsageTool;
+import sun.misc.Unsafe;
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
+import jdk.test.lib.Utils;
+import sun.hotspot.WhiteBox;
+
+/* In order to test that TargetSurvivorRatio affects survivor space occupancy
+ * we set a fixed MaxTenuringThreshold and then verify that if the size of the allocated
+ * objects is lower than (survivor_size * TargetSurvivorRatio / 100), the objects
+ * stay in the survivor space for MaxTenuringThreshold minor GC cycles.
+ * If more than (survivor_size * TargetSurvivorRatio / 100) bytes of objects were allocated,
+ * we verify that after MaxTenuringThreshold minor GC cycles the survivor space
+ * is almost empty.
+ */
+public class TestTargetSurvivorRatioFlag {
+
+    public static final long M = 1024 * 1024;
+
+    // VM option values
+    public static final long MAX_NEW_SIZE = 40 * M;
+    public static final int SURVIVOR_RATIO = 8;
+    public static final int MAX_TENURING_THRESHOLD = 15;
+
+    // Value used to estimate amount of memory that should be allocated
+    // and placed in survivor space.
+    public static final double DELTA = 0.25;
+
+    // Max variance of observed ratio
+    public static double VARIANCE = 1;
+
+    // Messages used by debuggee
+    public static final String UNSUPPORTED_GC = "Unsupported GC";
+    public static final String START_TEST = "Start test";
+    public static final String END_TEST = "End test";
+
+    // Patterns used during log parsing
+    public static final String TENURING_DISTRIBUTION = "Desired survivor size";
+    public static final String AGE_TABLE_ENTRY = "-[\\s]+age[\\s]+([0-9]+):[\\s]+([0-9]+)[\\s]+bytes,[\\s]+([0-9]+)[\\s]+total";
+    public static final String MAX_SURVIVOR_SIZE = "Max survivor size: ([0-9]+)";
+
+    public static void main(String args[]) throws Exception {
+
+        LinkedList<String> options = new LinkedList<>(Arrays.asList(Utils.getTestJavaOpts()));
+
+        // Need to consider the effect of TargetPLABWastePct=1 for G1 GC
+        if (options.contains("-XX:+UseG1GC")) {
+            VARIANCE = 2;
+        } else {
+            VARIANCE = 1;
+        }
+
+        negativeTest(-1, options);
+        negativeTest(101, options);
+
+        positiveTest(20, options);
+        positiveTest(30, options);
+        positiveTest(55, options);
+        positiveTest(70, options);
+    }
+
+    /**
+     * Verify that the VM fails to start with the specified TargetSurvivorRatio
+     *
+     * @param ratio value of TargetSurvivorRatio
+     * @param options additional VM options
+     */
+    public static void negativeTest(int ratio, LinkedList<String> options) throws Exception {
+        LinkedList<String> vmOptions = new LinkedList<>(options);
+        vmOptions.add("-XX:TargetSurvivorRatio=" + ratio);
+        vmOptions.add("-version");
+
+        ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[vmOptions.size()]));
+        OutputAnalyzer analyzer = new OutputAnalyzer(procBuilder.start());
+
+        analyzer.shouldHaveExitValue(1);
+        analyzer.shouldContain("Error: Could not create the Java Virtual Machine.");
+    }
+
+    /**
+     * Verify that the actual survivor space usage ratio conforms to the specified TargetSurvivorRatio
+     *
+     * @param ratio value of TargetSurvivorRatio
+     * @param options additional VM options
+     */
+    public static void positiveTest(int ratio, LinkedList<String> options) throws Exception {
+        LinkedList<String> vmOptions = new LinkedList<>(options);
+        Collections.addAll(vmOptions,
+                "-Xbootclasspath/a:.",
+                "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:+WhiteBoxAPI",
+                "-XX:+UseAdaptiveSizePolicy",
+                "-XX:+PrintTenuringDistribution",
+                "-XX:MaxTenuringThreshold=" + MAX_TENURING_THRESHOLD,
+                "-XX:NewSize=" + MAX_NEW_SIZE,
+                "-XX:MaxNewSize=" + MAX_NEW_SIZE,
+                "-XX:InitialHeapSize=" + 2 * MAX_NEW_SIZE,
+                "-XX:MaxHeapSize=" + 2 * MAX_NEW_SIZE,
+                "-XX:SurvivorRatio=" + SURVIVOR_RATIO,
+                "-XX:TargetSurvivorRatio=" + ratio,
+                // For reducing variance of survivor size.
+                "-XX:TargetPLABWastePct=" + 1,
+                TargetSurvivorRatioVerifier.class.getName(),
+                Integer.toString(ratio)
+        );
+
+        ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[vmOptions.size()]));
+        OutputAnalyzer analyzer = new OutputAnalyzer(procBuilder.start());
+
+        analyzer.shouldHaveExitValue(0);
+
+        String output = analyzer.getOutput();
+
+        // Test avoids verification for parallel GC
+        if (!output.contains(UNSUPPORTED_GC)) {
+            // Two tests should be done - when the actual ratio is lower than TargetSurvivorRatio
+            // and when it is higher. We check that the output contains results for exactly two tests.
+            List<Double> ratios = parseTestOutput(output);
+
+            if (ratios.size() != 2) {
+                System.out.println(output);
+                throw new RuntimeException("Expected number of ratios extraced for output is 2,"
+                        + " but " + ratios.size() + " ratios were extracted");
+            }
+
+            // At the end of the first test the survivor space usage ratio should lie between
+            // TargetSurvivorRatio and TargetSurvivorRatio - VARIANCE
+            if (ratio < ratios.get(0) || ratio - ratios.get(0) > VARIANCE) {
+                System.out.println(output);
+                throw new RuntimeException("Survivor space usage ratio expected to be close to "
+                        + ratio + ", but observed ratio is: " + ratios.get(0));
+            }
+
+            // After second test survivor space should be almost empty.
+            if (ratios.get(1) > VARIANCE) {
+                System.out.println(output);
+                throw new RuntimeException("Survivor space expected to be empty due to "
+                        + "TargetSurvivorRatio overlimit, however observed "
+                        + "survivor space usage ratio is: " + ratios.get(1));
+            }
+        } else {
+            System.out.println("Selected GC does not support TargetSurvivorRatio option.");
+        }
+    }
+
+    /**
+     * Parse output produced by TargetSurvivorRatioVerifier.
+     *
+     * @param output output obtained from TargetSurvivorRatioVerifier
+     * @return list of parsed test results, where each result is an actual
+     *         survivor ratio after MaxTenuringThreshold minor GC cycles.
+     */
+    public static List<Double> parseTestOutput(String output) {
+        List<Double> ratios = new LinkedList<Double>();
+        String lines[] = output.split("[\n\r]");
+        boolean testStarted = false;
+        long survivorSize = 0;
+        long survivorOccupancy = 0;
+        int gcCount = 0;
+        Pattern ageTableEntry = Pattern.compile(AGE_TABLE_ENTRY);
+        Pattern maxSurvivorSize = Pattern.compile(MAX_SURVIVOR_SIZE);
+        for (String line : lines) {
+            if (Pattern.matches(MAX_SURVIVOR_SIZE, line)) {
+                // We found estimated survivor space size
+                Matcher m = maxSurvivorSize.matcher(line);
+                m.find();
+                survivorSize = Long.valueOf(m.group(1));
+            } else if (line.contains(START_TEST) && !testStarted) {
+                // Start collecting test results
+                testStarted = true;
+                gcCount = 0;
+            } else if (testStarted) {
+                if (line.contains(TENURING_DISTRIBUTION)) {
+                    // We found the start of the output emitted by -XX:+PrintTenuringDistribution.
+                    // If it is associated with the "MaxTenuringThreshold" GC cycle, then it is
+                    // time to report the observed survivor usage ratio
+                    gcCount++;
+                    double survivorRatio = survivorOccupancy / (double) survivorSize;
+                    if (gcCount == MAX_TENURING_THRESHOLD || gcCount == MAX_TENURING_THRESHOLD * 2) {
+                        ratios.add(survivorRatio * 100.0);
+                        testStarted = false;
+                    }
+                    survivorOccupancy = 0;
+                } else if (Pattern.matches(AGE_TABLE_ENTRY, line)) {
+                    // Obtain survivor space usage from "total" age table log entry
+                    Matcher m = ageTableEntry.matcher(line);
+                    m.find();
+                    survivorOccupancy = Long.valueOf(m.group(3));
+                } else if (line.contains(END_TEST)) {
+                    // It is expected to find at least MaxTenuringThreshold GC events
+                    // before the test ends
+                    if (gcCount < MAX_TENURING_THRESHOLD) {
+                        throw new RuntimeException("Observed " + gcCount + " GC events, "
+                                + "while it is expected to see at least "
+                                + MAX_TENURING_THRESHOLD);
+                    }
+                    testStarted = false;
+                }
+            }
+        }
+        return ratios;
+    }
+
+    public static class TargetSurvivorRatioVerifier {
+
+        static final WhiteBox wb = WhiteBox.getWhiteBox();
+        static final Unsafe unsafe = Utils.getUnsafe();
+
+        // Desired size of memory allocated at once
+        public static final int CHUNK_SIZE = 1024;
+        // Length of a byte[] array that will occupy CHUNK_SIZE bytes in the heap
+        public static final int ARRAY_LENGTH = CHUNK_SIZE - Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+        public static void main(String args[]) throws Exception {
+            if (args.length != 1) {
+                throw new IllegalArgumentException("Expected 1 arg: <ratio>");
+            }
+            if (GCTypes.YoungGCType.getYoungGCType() == GCTypes.YoungGCType.PSNew) {
+                System.out.println(UNSUPPORTED_GC);
+                return;
+            }
+
+            int ratio = Integer.valueOf(args[0]);
+            long maxSurvivorSize = getMaxSurvivorSize();
+            System.out.println("Max survivor size: " + maxSurvivorSize);
+
+            allocateMemory(ratio - DELTA, maxSurvivorSize);
+            allocateMemory(ratio + DELTA, maxSurvivorSize);
+        }
+
+        /**
+         * Allocate (<b>ratio</b> * <b>maxSize</b> / 100) bytes of objects
+         * and force at least "MaxTenuringThreshold" minor GCs.
+         *
+         * @param ratio ratio used to calculate how many objects should be allocated
+         * @param maxSize estimated max survivor space size
+         */
+        public static void allocateMemory(double ratio, long maxSize) throws Exception {
+            GarbageCollectorMXBean youngGCBean = GCTypes.YoungGCType.getYoungGCBean();
+            long garbageSize = (long) (maxSize * (ratio / 100.0));
+            int arrayLength = (int) (garbageSize / CHUNK_SIZE);
+            AllocationHelper allocator = new AllocationHelper(1, arrayLength, ARRAY_LENGTH, null);
+
+            System.out.println(START_TEST);
+            System.gc();
+            final long initialGcId = youngGCBean.getCollectionCount();
+            // allocate memory
+            allocator.allocateMemoryAndVerify();
+
+            // force minor GC
+            while (youngGCBean.getCollectionCount() <= initialGcId + MAX_TENURING_THRESHOLD * 2) {
+                byte b[] = new byte[ARRAY_LENGTH];
+            }
+
+            allocator.release();
+            System.out.println(END_TEST);
+        }
+
+        /**
+         * Estimate max survivor space size.
+         *
+         * For non-G1 GCs it returns the value reported by the MemoryPoolMXBean
+         * associated with the survivor space.
+         * For G1 it returns the max number of survivor regions * region size.
+         * The number of survivor regions is estimated from MaxNewSize and SurvivorRatio.
+         */
+        public static long getMaxSurvivorSize() {
+            if (GCTypes.YoungGCType.getYoungGCType() == GCTypes.YoungGCType.G1) {
+                int youngLength = (int) Math.max(MAX_NEW_SIZE / wb.g1RegionSize(), 1);
+                return (long) Math.ceil(youngLength / (double) SURVIVOR_RATIO) * wb.g1RegionSize();
+            } else {
+                return HeapRegionUsageTool.getSurvivorUsage().getMax();
+            }
+        }
+    }
+}
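Parsing of the -XX:+PrintTenuringDistribution output in parseTestOutput above hinges on the AGE_TABLE_ENTRY pattern; a standalone sketch of that extraction follows. The sample log line is hypothetical but follows the format the pattern expects.

    // Illustrative sketch only; not part of this changeset.
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class AgeTableParseDemo {
        public static void main(String[] args) {
            String ageTableEntry = "-[\\s]+age[\\s]+([0-9]+):[\\s]+([0-9]+)[\\s]+bytes,[\\s]+([0-9]+)[\\s]+total";
            String line = "- age   1:     524288 bytes,     524288 total"; // hypothetical log line
            Matcher m = Pattern.compile(ageTableEntry).matcher(line);
            if (m.find()) {
                // Group 3 is the running "total" column, which the test uses as survivor occupancy.
                long survivorOccupancy = Long.parseLong(m.group(3));
                System.out.println("survivor occupancy = " + survivorOccupancy + " bytes");
            }
        }
    }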
--- a/hotspot/test/gc/g1/TestHumongousShrinkHeap.java	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/test/gc/g1/TestHumongousShrinkHeap.java	Tue Sep 08 16:10:37 2015 +0200
@@ -58,6 +58,10 @@
 
 
     public static void main(String[] args) {
+        if (HUMON_COUNT == 0) {
+            System.out.println("Skipped. Heap is too small");
+            return;
+        }
         System.out.format("Running with %s max heap size. "
                 + "Will allocate humongous object of %s size %d times.%n",
                 MemoryUsagePrinter.humanReadableByteCount(AVAILABLE_MEMORY, false),
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestPLABSizeBounds.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestPLABSizeBounds
+ * @bug 8134857
+ * @summary Regression test to ensure that G1 supports PLAB sizes of half a region size.
+ * @requires vm.gc=="G1" | vm.gc=="null"
+ * @key gc
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management
+ */
+
+import java.util.ArrayList;
+
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.Platform;
+import jdk.test.lib.ProcessTools;
+
+public class TestPLABSizeBounds {
+
+    public static final int M = 1024 * 1024;
+
+    /**
+     * Starts the VM with the given region size and the given PLAB size arguments. The VM start should
+     * succeed if shouldSucceed is true, otherwise it should fail.
+     *
+     * @param regionSize The region size the VM should be started with in bytes.
+     * @param plabSize The young and old gen PLAB sizes the VM should be started with in machine words.
+     * @param shouldSucceed The expected result of the VM invocation.
+     */
+    public static void runTest(int regionSize, int plabSize, boolean shouldSucceed) throws Exception {
+        ArrayList<String> testArguments = new ArrayList<String>();
+
+        testArguments.add("-XX:+UseG1GC");
+        testArguments.add("-Xmx256M");
+        testArguments.add("-XX:G1HeapRegionSize=" + regionSize);
+        testArguments.add("-XX:YoungPLABSize=" + plabSize);
+        testArguments.add("-XX:OldPLABSize=" + plabSize);
+        testArguments.add(GCTest.class.getName());
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(testArguments.toArray(new String[0]));
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        if (shouldSucceed) {
+            output.shouldHaveExitValue(0);
+        } else {
+            output.shouldHaveExitValue(1);
+        }
+    }
+
+    public static void runRegionTest(int regionSize) throws Exception {
+        final int regionSizeInBytes = regionSize * M;
+        final int wordSize = Platform.is32bit() ? 4 : 8;
+
+        runTest(regionSizeInBytes, (regionSizeInBytes / wordSize) / 2 - 1, true);
+        runTest(regionSizeInBytes, (regionSizeInBytes / wordSize) / 2, true);
+        runTest(regionSizeInBytes, (regionSizeInBytes / wordSize) / 2 + 1, false);
+    }
+
+    public static void main(String[] args) throws Exception {
+        runRegionTest(1);
+        runRegionTest(2);
+        runRegionTest(4);
+        runRegionTest(8);
+        runRegionTest(16);
+        runRegionTest(32);
+    }
+
+    static class GCTest {
+        public static void main(String [] args) {
+            System.out.println("Test completed.");
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/humongousObjects/Helpers.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package gc.g1.humongousObjects;
+
+import sun.hotspot.WhiteBox;
+
+public class Helpers {
+
+    // In case of 128 byte padding
+    private static final int MAX_PADDING_SIZE = 128;
+
+    /**
+     * Detects amount of extra bytes required to allocate a byte array.
+     * Allocating a byte[n] array takes more then just n bytes in the heap.
+     * Extra bytes are required to store object reference and the length.
+     * This amount depends on bitness and other factors.
+     *
+     * @return byte[] memory overhead
+     */
+    public static int detectByteArrayAllocationOverhead() {
+
+        WhiteBox whiteBox = WhiteBox.getWhiteBox();
+
+        int zeroLengthByteArraySize = (int) whiteBox.getObjectSize(new byte[0]);
+
+        // Since we do not know whether there is any padding in zeroLengthByteArraySize, we cannot just take the byte[0] size as the overhead
+        for (int i = 1; i < MAX_PADDING_SIZE + 1; ++i) {
+            int realAllocationSize = (int) whiteBox.getObjectSize(new byte[i]);
+            if (realAllocationSize != zeroLengthByteArraySize) {
+                // It means there was no padding on the previous step
+                return zeroLengthByteArraySize - (i - 1);
+            }
+        }
+        throw new Error("We cannot find byte[] memory overhead - should not reach here");
+    }
+}
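A short usage sketch for the helper above: combining the detected overhead with the region size yields the largest byte[] length that still stays non-humongous, which is how TestHumongousThreshold below uses it. The numeric values are hypothetical.

    // Illustrative sketch only; not part of this changeset.
    public class OverheadUsageDemo {
        public static void main(String[] args) {
            int regionSize = 1024 * 1024; // hypothetical -XX:G1HeapRegionSize=1M
            int overhead = 16;            // hypothetical result of detectByteArrayAllocationOverhead()

            // An object is humongous when it is larger than half a region,
            // so the largest non-humongous byte[] length is half a region minus the overhead.
            int maxNonHumongousLength = (regionSize / 2) - overhead;
            System.out.println("largest non-humongous byte[] length = " + maxNonHumongousLength);
        }
    }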
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/humongousObjects/TestHumongousThreshold.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package gc.g1.humongousObjects;
+
+import jdk.test.lib.Asserts;
+import sun.hotspot.WhiteBox;
+
+/**
+ * @test TestHumongousThreshold
+ * @summary Checks that objects larger than half a region are allocated as humongous
+ * @requires vm.gc=="G1" | vm.gc=="null"
+ * @library /testlibrary /../../test/lib
+ * @modules java.management
+ * @build sun.hotspot.WhiteBox
+ *        gc.g1.humongousObjects.Helpers
+ *        gc.g1.humongousObjects.TestHumongousThreshold
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=1M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=2M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=4M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=8M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=16M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:.
+ * -XX:G1HeapRegionSize=32M
+ * gc.g1.humongousObjects.TestHumongousThreshold
+ *
+ */
+
+public class TestHumongousThreshold {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final int REGION_SIZE = WHITE_BOX.g1RegionSize();
+    private static final int MAX_CONTINUOUS_SIZE_CHECK = 129;
+    private static final int NON_HUMONGOUS_DIVIDER = 10;
+
+    /**
+     * Allocates a byte[] of the specified size and checks that:
+     * 1. the byte[] is allocated as humongous or non-humongous, as specified by expectedHumongous;
+     * 2. the byte[] is allocated as humongous if its size is larger than half a region and as non-humongous otherwise.
+     * The WhiteBox API is used to obtain the actual size of the created byte[]; only objects larger than
+     * half a region are expected to be humongous.
+     *
+     * @param arraySize size of allocation
+     * @param expectedHumongous expected humongous/non-humongous allocation
+     * @return allocated byte array
+     */
+
+    private static byte[] allocateAndCheck(int arraySize, boolean expectedHumongous) {
+        byte[] storage = new byte[arraySize];
+        long objectSize = WHITE_BOX.getObjectSize(storage);
+        boolean shouldBeHumongous = objectSize > (REGION_SIZE / 2);
+
+        Asserts.assertEquals(expectedHumongous, shouldBeHumongous, "Expected this object to be "
+                + (expectedHumongous ? "humongous" : "non-humongous") + " but the object size check says otherwise "
+                + "- likely a test bug; Allocation size = " + arraySize + "; Object size = " + objectSize
+                + "; region size = " + REGION_SIZE);
+
+        Asserts.assertEquals(WHITE_BOX.g1IsHumongous(storage), shouldBeHumongous,
+                "Object should be allocated as " + (shouldBeHumongous ? "humongous"
+                        : "non-humongous") + " but it wasn't; Allocation size = " + arraySize + "; Object size = "
+                        + objectSize + "; region size = " + REGION_SIZE);
+        return storage;
+    }
+
+    public static void main(String[] args) {
+        int byteArrayMemoryOverhead = Helpers.detectByteArrayAllocationOverhead();
+
+        // Largest non-humongous byte[]
+        int maxByteArrayNonHumongousSize = (REGION_SIZE / 2) - byteArrayMemoryOverhead;
+
+        // Increment for non-humongous testing
+        int nonHumongousStep = maxByteArrayNonHumongousSize / NON_HUMONGOUS_DIVIDER;
+
+        // Maximum byte[] that takes one region
+        int maxByteArrayOneRegionSize = REGION_SIZE - byteArrayMemoryOverhead;
+
+        // Sizes in regions
+        // i.e. 1.0f means one region, 1.5f means one and a half regions, etc.
+        float[] humongousFactors = {0.8f, 1.0f, 1.2f, 1.5f, 1.7f, 2.0f, 2.5f};
+
+        // Some diagnostic output
+        System.out.format("%s started%n", TestHumongousThreshold.class.getName());
+        System.out.format("Actual G1 region size %d%n", REGION_SIZE);
+        System.out.format("byte[] memory overhead %d%n", byteArrayMemoryOverhead);
+
+        // Non-humongous allocations
+        System.out.format("Doing non-humongous allocations%n");
+
+        // Testing allocations of byte[] with lengths from 0 to MAX_CONTINUOUS_SIZE_CHECK
+        System.out.format("Testing allocations of byte[] with lengths from 0 to %d%n", MAX_CONTINUOUS_SIZE_CHECK);
+        for (int i = 0; i < MAX_CONTINUOUS_SIZE_CHECK; ++i) {
+            allocateAndCheck(i, false);
+        }
+
+        // Testing allocations of byte[] with lengths from 0 to nonHumongousStep * NON_HUMONGOUS_DIVIDER
+        System.out.format("Testing allocations of byte[] with lengths from 0 to %d with step %d%n",
+                nonHumongousStep * NON_HUMONGOUS_DIVIDER, nonHumongousStep);
+        for (int i = 0; i < NON_HUMONGOUS_DIVIDER; ++i) {
+            allocateAndCheck(i * nonHumongousStep, false);
+        }
+
+        // Testing allocations with byte[] of maximum non-humongous length
+        System.out.format("Testing allocations with byte[] of maximum non-humongous length %d%n",
+                maxByteArrayNonHumongousSize);
+        allocateAndCheck(maxByteArrayNonHumongousSize, false);
+
+        // Humongous allocations
+        System.out.format("Doing humongous allocations%n");
+        // Testing with minimum humongous object
+        System.out.format("Testing with byte[] of minimum humongous object %d%n", maxByteArrayNonHumongousSize + 1);
+        allocateAndCheck(maxByteArrayNonHumongousSize + 1, true);
+
+        // Testing allocations of byte[] with lengths from (maxByteArrayNonHumongousSize + 1) to
+        // (maxByteArrayNonHumongousSize + 1 + MAX_CONTINUOUS_SIZE_CHECK)
+        System.out.format("Testing allocations of byte[] with lengths from %d to %d%n",
+                maxByteArrayNonHumongousSize + 1, maxByteArrayNonHumongousSize + 1 + MAX_CONTINUOUS_SIZE_CHECK);
+        for (int i = 0; i < MAX_CONTINUOUS_SIZE_CHECK; ++i) {
+            allocateAndCheck(maxByteArrayNonHumongousSize + 1 + i, true);
+        }
+
+        // Checking that large objects (larger than half a region, up to several regions) are humongous
+        System.out.format("Checking that large objects (larger than half a region, up to several regions) are humongous%n");
+        for (float factor : humongousFactors) {
+            allocateAndCheck((int) (maxByteArrayOneRegionSize * factor), true);
+        }
+    }
+}
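
For one concrete configuration, the boundary values the test computes look as follows; the overhead value below is a placeholder, since the real one is detected by Helpers at run time:

    public class ThresholdArithmeticSketch {
        public static void main(String[] args) {
            int regionSize = 1024 * 1024;  // matches the -XX:G1HeapRegionSize=1M run above
            int assumedOverhead = 16;      // hypothetical; detected by Helpers in the real test
            int maxNonHumongousLength = (regionSize / 2) - assumedOverhead;
            System.out.println("largest non-humongous byte[] length: " + maxNonHumongousLength);  // 524272
            System.out.println("smallest humongous byte[] length:    " + (maxNonHumongousLength + 1));
        }
    }
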
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/logging/TestPrintReferences.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestPrintReferences
+ * @bug 8133818
+ * @summary Validate the reference processing logging
+ * @key gc
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management
+ */
+
+import jdk.test.lib.ProcessTools;
+import jdk.test.lib.OutputAnalyzer;
+
+public class TestPrintReferences {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb_enabled =
+      ProcessTools.createJavaProcessBuilder("-XX:+PrintGCDetails", "-XX:+PrintReferenceGC", "-Xmx10M", GCTest.class.getName());
+    OutputAnalyzer output = new OutputAnalyzer(pb_enabled.start());
+
+    output.shouldMatch(
+      "#[0-9]+: \\[SoftReference, [0-9]+ refs, [0-9]+\\.[0-9]+ secs\\]" +
+      "#[0-9]+: \\[WeakReference, [0-9]+ refs, [0-9]+\\.[0-9]+ secs\\]" +
+      "#[0-9]+: \\[FinalReference, [0-9]+ refs, [0-9]+\\.[0-9]+ secs\\]" +
+      "#[0-9]+: \\[PhantomReference, [0-9]+ refs, [0-9]+\\.[0-9]+ secs\\]" +
+      "#[0-9]+: \\[Cleaners, [0-9]+ refs, [0-9]+\\.[0-9]+ secs\\]" +
+      "#[0-9]+: \\[JNI Weak Reference, [0-9]+ refs, [0-9]+\\.[0-9]+ secs\\]");
+
+    output.shouldHaveExitValue(0);
+  }
+
+  static class GCTest {
+    public static void main(String [] args) {
+      System.gc();
+    }
+  }
+}
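
The expected log shape can be sanity-checked against the pattern above with plain java.util.regex; the sample text below is synthetic and only mirrors the pattern, it is not captured -XX:+PrintReferenceGC output:

    import java.util.regex.Pattern;

    public class PrintReferencesPatternSketch {
        public static void main(String[] args) {
            String[] kinds = {"SoftReference", "WeakReference", "FinalReference",
                              "PhantomReference", "Cleaners", "JNI Weak Reference"};
            StringBuilder sample = new StringBuilder();
            StringBuilder regex = new StringBuilder();
            for (int i = 0; i < kinds.length; i++) {
                sample.append(String.format("#%d: [%s, 10 refs, 0.0000010 secs]", i + 1, kinds[i]));
                regex.append("#[0-9]+: \\[").append(Pattern.quote(kinds[i]))
                     .append(", [0-9]+ refs, [0-9]+\\.[0-9]+ secs\\]");
            }
            // Prints "true": the synthetic sample satisfies the concatenated pattern
            System.out.println(Pattern.compile(regex.toString()).matcher(sample).find());
        }
    }
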
--- a/hotspot/test/runtime/SharedArchiveFile/BasicJarBuilder.java	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/test/runtime/SharedArchiveFile/BasicJarBuilder.java	Tue Sep 08 16:10:37 2015 +0200
@@ -35,14 +35,14 @@
 import java.util.ArrayList;
 import sun.tools.jar.Main;
 
+// Using BasicJarBuilder requires that all to-be-jarred classes be placed
+// in the current working directory, aka "."
 public class BasicJarBuilder {
     private static final String classDir = System.getProperty("test.classes");
 
-    public static void build(String jarName, String ...classNames)
-        throws Exception {
-
-        createSimpleJar(classDir, classDir + File.separator + jarName +
-            ".jar", classNames);
+    public static void build(String jarName, String ...classNames) throws Exception {
+        createSimpleJar(".", classDir + File.separator + jarName + ".jar",
+            classNames);
     }
 
     private static void createSimpleJar(String jarclassDir, String jarName,
@@ -71,7 +71,7 @@
         }
     }
 
-    // helpers
+    // Get full path to the test jar
     public static String getTestJar(String jar) {
         File dir = new File(System.getProperty("test.classes", "."));
         File jarFile = new File(dir, jar);
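
A hypothetical caller of the changed builder, assuming it runs under jtreg (which sets the test.classes property) and that a class named Hello has already been compiled into the current working directory:

    public class JarBuilderUsageSketch {
        public static void main(String[] args) throws Exception {
            BasicJarBuilder.build("hello", "Hello");               // picks Hello.class up from "."
            String jarPath = BasicJarBuilder.getTestJar("hello.jar");
            System.out.println("jar created at " + jarPath);       // ${test.classes}/hello.jar
        }
    }
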
--- a/hotspot/test/runtime/SharedArchiveFile/SharedStrings.java	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/test/runtime/SharedArchiveFile/SharedStrings.java	Tue Sep 08 16:10:37 2015 +0200
@@ -32,12 +32,10 @@
  * @library /testlibrary /../../test/lib
  * @modules java.base/sun.misc
  *          java.management
- * @ignore - 8133180
- * @build SharedStringsWb SharedStrings BasicJarBuilder
+ * @build SharedStringsWb SharedStrings BasicJarBuilder sun.hotspot.WhiteBox
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main SharedStrings
  */
-
 import jdk.test.lib.*;
 
 public class SharedStrings {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/SharedStringsRunAuto.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SharedStringsAuto
+ * @summary Test -Xshare:auto with shared strings.
+ * Feature support: G1GC only, compressed oops/kptrs, 64-bit os, not on windows
+ * @requires (sun.arch.data.model != "32") & (os.family != "windows")
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
+ * @requires (vm.gc=="G1" | vm.gc=="null")
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management
+ * @run main SharedStringsRunAuto
+ */
+
+import jdk.test.lib.*;
+import java.io.File;
+
+public class SharedStringsRunAuto {
+    public static void main(String[] args) throws Exception {
+        // Dump
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./SharedStringsRunAuto.jsa",
+            "-XX:+UseCompressedOops", "-XX:+UseG1GC",
+            "-XX:+PrintSharedSpaces",
+            "-Xshare:dump");
+
+        new OutputAnalyzer(pb.start())
+            .shouldContain("Loading classes to share")
+            .shouldContain("Shared string table stats")
+            .shouldHaveExitValue(0);
+
+        // Run with -Xshare:auto
+        pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./SharedStringsRunAuto.jsa",
+           "-XX:+UseCompressedOops", "-XX:+UseG1GC",
+           "-Xshare:auto",
+           "-version");
+
+        new OutputAnalyzer(pb.start())
+            .shouldMatch("(java|openjdk) version")
+            .shouldHaveExitValue(0);
+    }
+}
--- a/hotspot/test/runtime/SharedArchiveFile/SharedStringsWb.java	Tue Sep 01 12:57:41 2015 +0300
+++ b/hotspot/test/runtime/SharedArchiveFile/SharedStringsWb.java	Tue Sep 08 16:10:37 2015 +0200
@@ -34,8 +34,8 @@
             return;
         }
 
-        // The string "java" is known to be interened and added to CDS archive
-        String s = "java";
+        // The string below is known to be added to the CDS archive
+        String s = "<init>";
         String internedS = s.intern();
 
         if (wb.isShared(internedS)) {
--- a/hotspot/test/runtime/modules/ImageFile/ImageAttributeOffsetsTest.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Retrieves the array of offsets once with MemoryMapImage enabled once disabled.
- * @test ImageAttributeOffsetsTest
- * @summary Unit test for JVM_ImageAttributeOffsets() method
- * @library /testlibrary /../../test/lib
- * @build ImageAttributeOffsetsTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+MemoryMapImage ImageAttributeOffsetsTest
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-MemoryMapImage ImageAttributeOffsetsTest
- */
-
-import java.io.File;
-import java.nio.ByteOrder;
-import sun.hotspot.WhiteBox;
-import static jdk.test.lib.Asserts.*;
-
-public class ImageAttributeOffsetsTest {
-
-    public static final WhiteBox wb = WhiteBox.getWhiteBox();
-
-    public static void main(String... args) throws Exception {
-        String javaHome = System.getProperty("java.home");
-        String imageFile = javaHome + File.separator + "lib" + File.separator
-                + "modules" + File.separator + "bootmodules.jimage";
-
-        if (!(new File(imageFile)).exists()) {
-            System.out.printf("Test skipped.");
-            return;
-        }
-
-        boolean bigEndian = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;
-        long id = wb.imageOpenImage(imageFile, bigEndian);
-        boolean passed = true;
-        // Get offsets
-        int[] array = wb.imageAttributeOffsets(id);
-        assertNotNull(array, "Could not retrieve offsets of array");
-
-        wb.imageCloseImage(id);
-    }
-}
--- a/hotspot/test/runtime/modules/ImageFile/ImageCloseTest.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Test closing image opened multiple time. Test closing mutiple time an image.
- * @test ImageCloseTest
- * @summary Unit test for JVM_ImageClose() method
- * @library /testlibrary /../../test/lib
- * @build ImageCloseTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ImageCloseTest
- */
-
-import java.io.File;
-import java.nio.ByteOrder;
-import sun.hotspot.WhiteBox;
-
-public class ImageCloseTest {
-
-    public static final WhiteBox wb = WhiteBox.getWhiteBox();
-
-    public static void main(String... args) throws Exception {
-        String javaHome = System.getProperty("java.home");
-        String imageFile = javaHome + File.separator + "lib" + File.separator
-                + "modules" + File.separator + "bootmodules.jimage";
-
-        if (!(new File(imageFile)).exists()) {
-            System.out.printf("Test skipped.");
-            return;
-        }
-
-        boolean bigEndian = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;
-        long id = 0;
-
-        // too many opens
-        for (int i = 0; i < 100; i++) {
-            id = wb.imageOpenImage(imageFile, bigEndian);
-        }
-        wb.imageCloseImage(id);
-
-        // too many closes
-        id = wb.imageOpenImage(imageFile, bigEndian);
-        for (int i = 0; i < 100; i++) {
-            wb.imageCloseImage(id);
-        }
-    }
-}
--- a/hotspot/test/runtime/modules/ImageFile/ImageFileHeaderTest.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Test that opening image containing wrong headers fails.
- * @test ImageFileHeaderTest
- * @library /testlibrary /../../test/lib
- * @build ImageFileHeaderTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ImageFileHeaderTest
- */
-
-import java.nio.*;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import sun.hotspot.WhiteBox;
-import static jdk.test.lib.Asserts.*;
-
-public class ImageFileHeaderTest {
-
-    public static final int MAGIC = 0xCAFEDADA;
-    public static final short MAJOR = 0;
-    public static final short MINOR = 1;
-
-    public static final WhiteBox wb = WhiteBox.getWhiteBox();
-    public static ByteBuffer buf;
-
-    public static void main(String... args) throws Exception {
-
-        ByteOrder endian = getEndian();
-
-        // Try to read a non-existing file
-        assertFalse(wb.readImageFile("bogus"));
-
-        // Incomplete header, only include the correct magic
-        buf = ByteBuffer.allocate(100);
-        buf.order(endian);
-        buf.putInt(MAGIC);
-        assertFalse(testImageFile("invalidheader.jimage"));
-
-        // Build a complete header but reverse the endian
-        buf = ByteBuffer.allocate(100);
-        buf.order(endian == ByteOrder.LITTLE_ENDIAN ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN);
-        buf.putInt(MAGIC);
-        buf.putShort(MAJOR);
-        buf.putShort(MINOR);
-        assertFalse(testImageFile("wrongendian.jimage"));
-
-        // Use the wrong magic
-        buf = ByteBuffer.allocate(100);
-        buf.order(endian);
-        buf.putInt(0xBEEFCACE);
-        buf.putShort(MAJOR);
-        buf.putShort(MINOR);
-        assertFalse(testImageFile("wrongmagic.jimage"));
-
-        // Wrong major version (current + 1)
-        buf = ByteBuffer.allocate(100);
-        buf.order(endian);
-        buf.putInt(MAGIC);
-        buf.putShort((short)(MAJOR + 1));
-        buf.putShort((short)MINOR);
-        assertFalse(testImageFile("wrongmajorversion.jimage"));
-
-        // Wrong major version (negative)
-        buf = ByteBuffer.allocate(100);
-        buf.order(endian);
-        buf.putInt(MAGIC);
-        buf.putShort((short) -17);
-        buf.putShort((short)MINOR);
-        assertFalse(testImageFile("negativemajorversion.jimage"));
-
-        // Wrong minor version (current + 1)
-        buf = ByteBuffer.allocate(100);
-        buf.order(endian);
-        buf.putInt(MAGIC);
-        buf.putShort((short)MAJOR);
-        buf.putShort((short)(MINOR + 1));
-        assertFalse(testImageFile("wrongminorversion.jimage"));
-
-        // Wrong minor version (negative)
-        buf = ByteBuffer.allocate(100);
-        buf.order(endian);
-        buf.putInt(MAGIC);
-        buf.putShort((short)MAJOR);
-        buf.putShort((short) -17);
-        assertFalse(testImageFile("negativeminorversion.jimage"));
-    }
-
-    public static boolean testImageFile(String filename) throws Exception {
-        Files.write(Paths.get(filename), buf.array());
-        System.out.println("Calling ReadImageFile on " + filename);
-        return wb.readImageFile(filename);
-    }
-
-    public static ByteOrder getEndian() {
-        String endian = System.getProperty("sun.cpu.endian");
-        if (endian.equalsIgnoreCase("little")) {
-            return ByteOrder.LITTLE_ENDIAN;
-        } else if (endian.equalsIgnoreCase("big")) {
-            return ByteOrder.BIG_ENDIAN;
-        }
-        throw new RuntimeException("Unexpected sun.cpu.endian value: " + endian);
-    }
-}
--- a/hotspot/test/runtime/modules/ImageFile/ImageFindAttributesTest.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Find the attributes of existing and invalid classes.
- * @test ImageFindAttributesTest
- * @summary Unit test for JVM_ImageFindAttributes() method
- * @library /testlibrary /../../test/lib
- * @build ImageFindAttributesTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ImageFindAttributesTest
- */
-
-import java.io.File;
-import java.nio.ByteOrder;
-import sun.hotspot.WhiteBox;
-import static jdk.test.lib.Asserts.*;
-
-public class ImageFindAttributesTest {
-
-    public static final WhiteBox wb = WhiteBox.getWhiteBox();
-
-    public static void main(String... args) throws Exception {
-        String javaHome = System.getProperty("java.home");
-        String imageFile = javaHome + File.separator + "lib" + File.separator
-                + "modules" + File.separator + "bootmodules.jimage";
-
-        if (!(new File(imageFile)).exists()) {
-            System.out.printf("Test skipped.");
-            return;
-        }
-
-        boolean bigEndian = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;
-        long id = wb.imageOpenImage(imageFile, bigEndian);
-
-        // class resource
-        String className = "/java.base/java/lang/String.class";
-        long[] longArr = wb.imageFindAttributes(id, className.getBytes());
-
-        assertNotNull(longArr, "Could not retrieve attributes of class " + className);
-
-        // non-existent resource
-        String neClassName = "/java.base/java/lang/NonExistentClass.class";
-        longArr = wb.imageFindAttributes(id, neClassName.getBytes());
-
-        assertNull(longArr, "Failed. Returned not null for non-existent " + neClassName);
-
-        // garbage byte array
-        byte[] buf = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
-        longArr = wb.imageFindAttributes(id, buf);
-
-        assertNull(longArr, "Found attributes for garbage class");
-    }
-}
--- a/hotspot/test/runtime/modules/ImageFile/ImageGetAttributesTest.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Test getting all attributes,
- * @test ImageGetAttributesTest
- * @summary Unit test for JVM_ImageGetAttributes() method
- * @library /testlibrary /../../test/lib
- * @build LocationConstants ImageGetAttributesTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ImageGetAttributesTest
- */
-
-import java.io.File;
-import java.nio.ByteOrder;
-import sun.hotspot.WhiteBox;
-import static jdk.test.lib.Asserts.*;
-
-public class ImageGetAttributesTest implements LocationConstants {
-
-    public static final WhiteBox wb = WhiteBox.getWhiteBox();
-
-    public static void main(String... args) throws Exception {
-        String javaHome = System.getProperty("java.home");
-        String imageFile = javaHome + File.separator + "lib" + File.separator
-                + "modules" + File.separator + "bootmodules.jimage";
-
-        if (!(new File(imageFile)).exists()) {
-            System.out.printf("Test skipped.");
-            return;
-        }
-
-        testImageGetAttributes(imageFile);
-    }
-
-    private static void testImageGetAttributes(String imageFile) {
-
-        boolean bigEndian = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;
-        long id = wb.imageOpenImage(imageFile, bigEndian);
-        try {
-            long stringsSize = wb.imageGetStringsSize(id);
-            assertNE(stringsSize, 0, "strings size is 0");
-
-            int[] array = wb.imageAttributeOffsets(id);
-            assertNotNull(array, "Could not retrieve offsets of array");
-
-            // Get non-null attributes
-            boolean attFound = false;
-            int[] idx = {-1, -1, -1};
-            // first non-null attribute
-            for (int i = 0; i < array.length; i++) {
-                if (array[i] != 0) {
-                    attFound = true;
-                    idx[0] = i;
-                    break;
-                }
-            }
-
-            // middle non-null attribute
-            for (int i = array.length / 2; i < array.length; i++) {
-                if (array[i] != 0) {
-                    attFound = true;
-                    idx[1] = i;
-                    break;
-                }
-            }
-
-            // last non-null attribute
-            for (int i = array.length - 1; i >= 0; i--) {
-                if (array[i] != 0) {
-                    attFound = true;
-                    idx[2] = i;
-                    break;
-                }
-            }
-            assertTrue(attFound, "Failed. No non-null offset attributes");
-                // test cases above
-                for (int i = 0; i < 3; i++) {
-                    if (idx[i] != -1) {
-                        long[] attrs = wb.imageGetAttributes(id, (int) array[idx[i]]);
-                        long module = attrs[LOCATION_ATTRIBUTE_MODULE];
-                        long parent = attrs[LOCATION_ATTRIBUTE_PARENT];
-                        long base = attrs[LOCATION_ATTRIBUTE_BASE];
-                        long ext = attrs[LOCATION_ATTRIBUTE_EXTENSION];
-
-                        if ((module >= 0) && (module < stringsSize)
-                                && (parent >= 0) && (parent < stringsSize)
-                                && (base != 0)
-                                && (ext >= 0) && (ext < stringsSize)) {
-                        } else {
-                            System.out.printf("Failed. Read attribute offset %d (position %d) but wrong offsets\n",
-                                    array[idx[i]], idx[i]);
-                            System.out.printf("    offsets: module = %d parent = %d base = %d extention = %d\n",
-                                    module, parent, base, ext);
-                            throw new RuntimeException("Read attribute offset error");
-                        }
-                    } else {
-                        System.out.printf("Failed. Could not read attribute offset %d (position %d)\n",
-                                array[idx[i]], idx[i]);
-                        throw new RuntimeException("Read attribute offset error");
-                    }
-                }
-        } finally {
-            wb.imageCloseImage(id);
-        }
-    }
-}
--- a/hotspot/test/runtime/modules/ImageFile/ImageGetDataAddressTest.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Test accessing the data address of a jimage. This only makes sense when the
- * entire jimage is mapped into memory.
- * @test ImageGetDataAddressTest
- * @summary Unit test for JVM_ImageGetDataAddress() method
- * @library /testlibrary /../../test/lib
- * @build ImageGetDataAddressTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+MemoryMapImage ImageGetDataAddressTest +
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-MemoryMapImage ImageGetDataAddressTest -
- */
-
-import java.io.File;
-import java.nio.ByteOrder;
-import sun.hotspot.WhiteBox;
-import static jdk.test.lib.Asserts.*;
-
-public class ImageGetDataAddressTest {
-
-    public static final WhiteBox wb = WhiteBox.getWhiteBox();
-
-    public static void main(String... args) throws Exception {
-        String javaHome = System.getProperty("java.home");
-        String imageFile = javaHome + File.separator + "lib" + File.separator
-                + "modules" + File.separator + "bootmodules.jimage";
-
-        if (!(new File(imageFile)).exists()) {
-            System.out.printf("Test skipped.");
-            return;
-        }
-
-        boolean isMMap = args[0].equals("+");
-
-        boolean bigEndian = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;
-        long id = wb.imageOpenImage(imageFile, bigEndian);
-
-        // get data for valid id
-        long dataAddr = wb.imageGetDataAddress(id);
-        assertFalse((dataAddr == 0) && isMMap, "Failed. Data address is " + dataAddr + " for valid id\n");
-
-        // get data for invalid id == 0
-        dataAddr = wb.imageGetDataAddress(0);
-        assertTrue(dataAddr == 0, "Failed. Data address is " + dataAddr + " for zero id\n");
-
-        wb.imageCloseImage(id);
-    }
-}
--- a/hotspot/test/runtime/modules/ImageFile/ImageGetIndexAddressTest.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Test the address of the jimage index.
- * @test ImageGetIndexAddressTest
- * @summary Unit test for JVM_ImageGetIndexAddress() method
- * @library /testlibrary /../../test/lib
- * @build ImageGetIndexAddressTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ImageGetIndexAddressTest
- */
-
-import java.io.File;
-import java.nio.ByteOrder;
-import sun.hotspot.WhiteBox;
-import static jdk.test.lib.Asserts.*;
-
-public class ImageGetIndexAddressTest {
-
-    public static final WhiteBox wb = WhiteBox.getWhiteBox();
-
-    public static void main(String... args) throws Exception {
-        String javaHome = System.getProperty("java.home");
-        String imageFile = javaHome + File.separator + "lib" + File.separator
-                + "modules" + File.separator + "bootmodules.jimage";
-
-        if (!(new File(imageFile)).exists()) {
-            System.out.printf("Test skipped.");
-            return;
-        }
-
-        boolean bigEndian = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;
-        long id = wb.imageOpenImage(imageFile, bigEndian);
-
-        // get index for valid id
-        long indexAddr = wb.imageGetIndexAddress(id);
-        assertFalse(indexAddr == 0, "Failed. Index address is zero for valid id");
-
-        // get index for invalid id == 0
-        indexAddr = wb.imageGetIndexAddress(0);
-        assertTrue(indexAddr == 0, "Failed. Index address is" + indexAddr + " for zero id\n");
-
-        wb.imageCloseImage(id);
-    }
-}
--- a/hotspot/test/runtime/modules/ImageFile/ImageGetStringBytesTest.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Test that the string referenced by an attribute is retrieved.
- * @test ImageGetStringBytesTest
- * @summary Unit test for JVM_ImageGetStringBytes() method
- * @library /testlibrary /../../test/lib
- * @build LocationConstants ImageGetStringBytesTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ImageGetStringBytesTest
- */
-
-import java.io.File;
-import java.nio.ByteOrder;
-import sun.hotspot.WhiteBox;
-import static jdk.test.lib.Asserts.*;
-
-public class ImageGetStringBytesTest implements LocationConstants {
-
-    public static final WhiteBox wb = WhiteBox.getWhiteBox();
-
-    public static void main(String... args) throws Exception {
-        String javaHome = System.getProperty("java.home");
-        String imageFile = javaHome + File.separator + "lib" + File.separator
-                + "modules" + File.separator + "bootmodules.jimage";
-
-        if (!(new File(imageFile)).exists()) {
-            System.out.printf("Test skipped.");
-            return;
-        }
-
-        boolean bigEndian = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;
-        long id = wb.imageOpenImage(imageFile, bigEndian);
-
-        String className = "/java.base/java/lang/String.class";
-        long[] offsetArr = wb.imageFindAttributes(id, className.getBytes());
-
-        // Module
-        assertTrue(checkAttribute(id, offsetArr, LOCATION_ATTRIBUTE_MODULE, "Module"));
-
-        // Parent
-        assertTrue(checkAttribute(id, offsetArr, LOCATION_ATTRIBUTE_PARENT, "Parent"));
-
-        // Base
-        assertTrue(checkAttribute(id, offsetArr, LOCATION_ATTRIBUTE_BASE, "Base"));
-
-        // Extension
-        assertTrue(checkAttribute(id, offsetArr, LOCATION_ATTRIBUTE_EXTENSION, "Extension"));
-
-        wb.imageCloseImage(id);
-    }
-
-    private static boolean checkAttribute(long id, long[] offsetArr, int attrId, String attrName) {
-        long offset = offsetArr[attrId];
-        return wb.imageGetStringBytes(id, (int) offset) != null;
-    }
-}
--- a/hotspot/test/runtime/modules/ImageFile/ImageOpenTest.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Test image opening/closing
- * @test ImageOpenTest
- * @summary Unit test for JVM_ImageOpen() method
- * @library /testlibrary /../../test/lib
- * @build ImageOpenTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ImageOpenTest
- */
-
-import java.io.File;
-import java.nio.ByteOrder;
-import sun.hotspot.WhiteBox;
-import static jdk.test.lib.Asserts.*;
-
-public class ImageOpenTest {
-
-    public static final WhiteBox wb = WhiteBox.getWhiteBox();
-
-    public static void main(String... args) throws Exception {
-        String javaHome = System.getProperty("java.home");
-        String nonexistentImageFile = javaHome + "/lib/modules/nonexistent.jimage";
-        String bootmodulesImageFile = javaHome + File.separator + "lib" + File.separator
-                + "modules" + File.separator + "bootmodules.jimage";
-
-        if (!(new File(bootmodulesImageFile)).exists()) {
-            System.out.printf("Test skipped.");
-            return;
-        }
-
-        boolean bigEndian = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;
-
-        // open nonexistent image
-        long id = wb.imageOpenImage(nonexistentImageFile, bigEndian);
-        assertTrue(id == 0L, "Failed. Get id " + id + "instead of 0 on opening nonexistent file\n");
-        wb.imageCloseImage(id);
-
-        // open bootmodules image
-        id = wb.imageOpenImage(bootmodulesImageFile, bigEndian);
-        assertFalse(id == 0, "Failed. Get id 0 on opening bootmodules.jimage");
-        wb.imageCloseImage(id);
-
-        // non-native endian
-        id = wb.imageOpenImage(bootmodulesImageFile, !bigEndian);
-        assertFalse(id == 0, "Failed. Get id 0 on opening bootmodules.jimage with non-native endian");
-        wb.imageCloseImage(id);
-
-        //
-        // open several times
-        //
-        id = wb.imageOpenImage(bootmodulesImageFile, bigEndian);
-        long id1 = wb.imageOpenImage(bootmodulesImageFile, bigEndian);
-        long id2 = wb.imageOpenImage(bootmodulesImageFile, bigEndian);
-        assertTrue((id == id1) && (id == id2), "Failed. Open thee times with ids " + id + " " + id1 + " " + id1);
-
-        wb.imageCloseImage(id);
-        wb.imageCloseImage(id1);
-        wb.imageCloseImage(id2);
-    }
-}
--- a/hotspot/test/runtime/modules/ImageFile/ImageReadTest.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Test reading resource content.
- * @test ImageReadTest
- * @summary Unit test for JVM_ImageRead() method
- * @library /testlibrary /../../test/lib
- * @build LocationConstants ImageReadTest
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+MemoryMapImage ImageReadTest +
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-MemoryMapImage ImageReadTest -
- */
-
-import java.io.File;
-import java.nio.ByteBuffer;
-import sun.hotspot.WhiteBox;
-import static jdk.test.lib.Asserts.*;
-
-public class ImageReadTest implements LocationConstants {
-
-    public static final WhiteBox wb = WhiteBox.getWhiteBox();
-
-    public static void main(String... args) throws Exception {
-        String javaHome = System.getProperty("java.home");
-        String imageFile = javaHome + File.separator + "lib"
-                + File.separator + "modules" + File.separator
-                + "bootmodules.jimage";
-
-        if (!(new File(imageFile)).exists()) {
-            System.out.printf("Test skipped.");
-            return;
-        }
-
-        boolean isMMap = args[0].equals("+");
-
-        long id = wb.imageOpenImage(imageFile, isMMap);
-
-        final String mm = isMMap ? "-XX:+MemoryMapImage" : "-XX:-MemoryMapImage";
-        final int magic = 0xCAFEBABE;
-
-        String className = "/java.base/java/lang/String.class";
-        long[] offsetArr = wb.imageFindAttributes(id, className.getBytes());
-        long offset = offsetArr[LOCATION_ATTRIBUTE_OFFSET];
-        long size = offsetArr[LOCATION_ATTRIBUTE_UNCOMPRESSED];
-
-        // positive: read
-        ByteBuffer buf = ByteBuffer.allocateDirect((int) size);
-        assertTrue(wb.imageRead(id, offset, buf, size), "Failed. Read operation returned false, should be true");
-        int m = buf.getInt();
-        assertTrue(m == magic, "Failed. Read operation returned true but wrong magic = " + magic);
-
-        // positive: mmap
-        if (isMMap) {
-            long dataAddr = wb.imageGetDataAddress(id);
-            assertFalse(dataAddr == 0L, "Failed. Did not obtain data address on mmapped test");
-            int data = wb.imageGetIntAtAddress(dataAddr, (int) offset, true);
-            assertTrue(data == magic, "Failed. MMap operation returned true but wrong magic = " + data);
-        }
-
-        // negative: wrong offset
-        boolean success = wb.imageRead(id, -100, buf, size);
-        assertFalse(success, "Failed. Read operation (wrong offset): returned true");
-
-        // negative: too big offset
-        long filesize = new File(imageFile).length();
-        success = wb.imageRead(id, filesize + 1, buf, size);
-        assertFalse(success, "Failed. Read operation (offset > file size) returned true");
-
-        // negative: negative size
-        success = wb.imageRead(id, offset, buf, -100);
-        assertFalse(success, "Failed. Read operation (negative size) returned true");
-
-        wb.imageCloseImage(id);
-    }
-}
--- a/hotspot/test/runtime/modules/ImageFile/LocationConstants.java	Tue Sep 01 12:57:41 2015 +0300
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-public interface LocationConstants {
-    // keep this in sync with enum in ImageLocation C++ class in the
-    // hotspot's C++ header file imageFile.hpp
-    public static final int LOCATION_ATTRIBUTE_END = 0;          // End of attribute stream marker
-    public static final int LOCATION_ATTRIBUTE_MODULE = 1;       // String table offset of module name
-    public static final int LOCATION_ATTRIBUTE_PARENT = 2;       // String table offset of resource path parent
-    public static final int LOCATION_ATTRIBUTE_BASE = 3;         // String table offset of resource path base
-    public static final int LOCATION_ATTRIBUTE_EXTENSION = 4;    // String table offset of resource path extension
-    public static final int LOCATION_ATTRIBUTE_OFFSET = 5;       // Container byte offset of resource
-    public static final int LOCATION_ATTRIBUTE_COMPRESSED = 6;   // In image byte size of the compressed resource
-    public static final int LOCATION_ATTRIBUTE_UNCOMPRESSED = 7; // In memory byte size of the uncompressed resource
-    public static final int LOCATION_ATTRIBUTE_COUNT = 8;        // Number of attribute kinds
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/testlibrary/jdk/test/lib/AllocationHelper.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,116 @@
+/*
+* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+package jdk.test.lib;
+
+import java.util.LinkedList;
+import java.util.concurrent.Callable;
+
+/**
+ * Helper class which allocates memory.
+ *
+ * Typical usage:
+ * <pre>
+ * {@code
+ *           AllocationHelper allocator = new AllocationHelper(MAX_ITERATIONS, ARRAY_LENGTH, CHUNK_SIZE,
+ *                   () -> (verifier()));
+ *           // Allocates byte[CHUNK_SIZE] ARRAY_LENGTH times per iteration, i.e. roughly CHUNK_SIZE * ARRAY_LENGTH bytes plus the reference array.
+ *           // The verifier is then invoked, and the whole cycle is repeated MAX_ITERATIONS times.
+ *           allocator.allocateMemoryAndVerify();
+ * }
+ * </pre>
+ */
+public final class AllocationHelper {
+
+    private final int arrayLength;
+    private final int maxIterations;
+    private final int chunkSize;
+
+    // garbageStorage keeps a reference to the garbage so that the allocations cannot be optimized away.
+    private static Object garbageStorage;
+    private byte garbage[][];
+    private final Callable<?> verifierInstance;
+
+    /**
+     * Create an AllocationHelper with specified iteration count, array length, chunk size and verifier.
+     *
+     * @param maxIterations number of allocation iterations
+     * @param arrayLength number of byte[] chunks allocated per iteration
+     * @param chunkSize size in bytes of each allocated chunk
+     * @param verifier Callable instance which will be invoked after each allocation iteration; may be null
+     */
+    public AllocationHelper(int maxIterations, int arrayLength, int chunkSize, Callable<?> verifier) {
+        if ((arrayLength <= 0) || (maxIterations <= 0) || (chunkSize <= 0)) {
+            throw new IllegalArgumentException("maxIterations, arrayLength and chunkSize should be greater than 0.");
+        }
+        this.arrayLength = arrayLength;
+        this.maxIterations = maxIterations;
+        this.chunkSize = chunkSize;
+        verifierInstance = verifier;
+        garbage = new byte[this.arrayLength][];
+        garbageStorage = garbage;
+    }
+
+    private void allocateMemoryOneIteration() {
+        for (int j = 0; j < arrayLength; j++) {
+            garbage[j] = new byte[chunkSize];
+        }
+    }
+
+    /**
+     * Allocate memory and invoke the verifier on each iteration.
+     *
+     * @throws java.lang.Exception
+     */
+    public void allocateMemoryAndVerify() throws Exception {
+        for (int i = 0; i < maxIterations; i++) {
+            allocateMemoryOneIteration();
+            if (verifierInstance != null) {
+                verifierInstance.call();
+            }
+        }
+    }
+
+    /**
+     * The same as allocateMemoryAndVerify(), but swallows OutOfMemoryError.
+     *
+     * @throws Exception
+     */
+    public void allocateMemoryAndVerifyNoOOME() throws Exception {
+        try {
+            allocateMemoryAndVerify();
+        } catch (OutOfMemoryError e) {
+            // stop allocating on OOME
+        }
+    }
+
+    /**
+     * Release references to the allocated garbage so that it becomes eligible for GC.
+     */
+    public void release() {
+        if (garbage != null) {
+            garbage = null;
+            garbageStorage = null;
+        }
+    }
+}
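
A minimal sketch of how a GC test might drive AllocationHelper, with HeapRegionUsageTool (added below) supplying the verifier; the iteration counts and sizes are arbitrary illustrations:

    package jdk.test.lib;

    public class AllocationHelperUsageSketch {
        public static void main(String[] args) throws Exception {
            AllocationHelper allocator = new AllocationHelper(
                    10,    /* iterations */
                    1024,  /* arrays per iteration */
                    1024,  /* bytes per array */
                    () -> {
                        // An Eden pool is expected to exist for the common collectors
                        System.out.println("Eden used: " + HeapRegionUsageTool.getEdenUsage().getUsed());
                        return null;
                    });
            allocator.allocateMemoryAndVerifyNoOOME();
            allocator.release();
        }
    }
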
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/testlibrary/jdk/test/lib/HeapRegionUsageTool.java	Tue Sep 08 16:10:37 2015 +0200
@@ -0,0 +1,107 @@
+/*
+* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+package jdk.test.lib;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.lang.management.MemoryUsage;
+
+/**
+ * Utility class used by tests to get heap region usage.
+ */
+public final class HeapRegionUsageTool {
+
+    /**
+     * Get the MemoryUsage of the first MemoryPoolMXBean whose name matches the given regular expression.
+     *
+     * @param name regular expression to match the pool name against
+     * @return MemoryUsage of the first matching pool, or null if no pool matches
+     */
+    private static MemoryUsage getUsage(String name){
+        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
+            if (pool.getName().matches(name)) {
+                return pool.getUsage();
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Get MemoryUsage of Eden space.
+     *
+     * @return MemoryUsage
+     */
+    public static MemoryUsage getEdenUsage() {
+        return getUsage(".*Eden.*");
+    }
+
+    /**
+     * Get MemoryUsage of Survivor space.
+     *
+     * @return MemoryUsage
+     */
+    public static MemoryUsage getSurvivorUsage() {
+        return getUsage(".*Survivor.*");
+    }
+
+    /**
+     * Get MemoryUsage of the Old/Tenured space.
+     *
+     * @return MemoryUsage
+     */
+    public static MemoryUsage getOldUsage() {
+        return getUsage(".*(Old|Tenured).*");
+    }
+
+    /**
+     * Get heap usage.
+     *
+     * @return MemoryUsage
+     */
+    public static MemoryUsage getHeapUsage() {
+        return ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+    }
+
+    /**
+     * Helper function to align up.
+     *
+     * @param value value to align
+     * @param alignment required alignment; must be a power of two
+     * @return value rounded up to the nearest multiple of alignment
+     */
+    public static long alignUp(long value, long alignment) {
+        return (value + alignment - 1) & ~(alignment - 1);
+    }
+
+    /**
+     * Helper function to align down.
+     *
+     * @param value value to align
+     * @param alignment required alignment; must be a power of two
+     * @return value rounded down to the nearest multiple of alignment
+     */
+    public static long alignDown(long value, long alignment) {
+        return value & ~(alignment - 1);
+    }
+}
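
A tiny worked example of the alignment helpers (valid only when the alignment is a power of two):

    package jdk.test.lib;

    public class AlignmentSketch {
        public static void main(String[] args) {
            System.out.println(HeapRegionUsageTool.alignUp(13, 8));    // 16: (13 + 7) & ~7
            System.out.println(HeapRegionUsageTool.alignUp(16, 8));    // 16: already aligned
            System.out.println(HeapRegionUsageTool.alignDown(13, 8));  // 8:  13 & ~7
        }
    }
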