Merge
author amurillo
Fri, 07 Jun 2013 09:25:18 -0700 (2013-06-07)
changeset 17883 0403dabe9186
parent 17821 c351e1da3e02 (current diff)
parent 17882 07a153bc43ad (diff)
child 17884 847c7c6c4e4a
Merge
hotspot/.hgtags
--- a/hotspot/.hgtags	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/.hgtags	Fri Jun 07 09:25:18 2013 -0700
@@ -347,3 +347,4 @@
 38da9f4f67096745f851318d792d6468aa1f6cf8 hs25-b34
 092018493d3bbeb1c24278fd8c40ff3d76e1fed7 jdk8-b92
 573d86d412cd9d3df7912194c1a540be50e9544e jdk8-b93
+b786c04b7be15194febe88dc1f0c9443e737a84b hs25-b35
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java	Fri Jun 07 09:25:18 2013 -0700
@@ -96,9 +96,10 @@
 
   public boolean containsProtectionDomain(Oop protectionDomain) {
     InstanceKlass ik = (InstanceKlass) klass();
-    if (protectionDomain.equals(ik.getProtectionDomain())) {
-      return true; // Succeeds trivially
-    }
+    // Currently unimplemented and not used.
+    // if (protectionDomain.equals(ik.getJavaMirror().getProtectionDomain())) {
+    //   return true; // Succeeds trivially
+    // }
     for (ProtectionDomainEntry current = pdSet(); current != null;
                                        current = current.next()) {
       if (protectionDomain.equals(current.protectionDomain())) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Fri Jun 07 09:25:18 2013 -0700
@@ -75,8 +75,6 @@
     javaFieldsCount      = new CIntField(type.getCIntegerField("_java_fields_count"), 0);
     constants            = new MetadataField(type.getAddressField("_constants"), 0);
     classLoaderData      = type.getAddressField("_class_loader_data");
-    protectionDomain     = new OopField(type.getOopField("_protection_domain"), 0);
-    signers              = new OopField(type.getOopField("_signers"), 0);
     sourceFileName       = type.getAddressField("_source_file_name");
     sourceDebugExtension = type.getAddressField("_source_debug_extension");
     innerClasses         = type.getAddressField("_inner_classes");
@@ -136,8 +134,6 @@
   private static CIntField javaFieldsCount;
   private static MetadataField constants;
   private static AddressField  classLoaderData;
-  private static OopField  protectionDomain;
-  private static OopField  signers;
   private static AddressField  sourceFileName;
   private static AddressField  sourceDebugExtension;
   private static AddressField  innerClasses;
@@ -350,8 +346,6 @@
   public ConstantPool getConstants()        { return (ConstantPool) constants.getValue(this); }
   public ClassLoaderData getClassLoaderData() { return                ClassLoaderData.instantiateWrapperFor(classLoaderData.getValue(getAddress())); }
   public Oop       getClassLoader()         { return                getClassLoaderData().getClassLoader(); }
-  public Oop       getProtectionDomain()    { return                protectionDomain.getValue(this); }
-  public ObjArray  getSigners()             { return (ObjArray)     signers.getValue(this); }
   public Symbol    getSourceFileName()      { return getSymbol(sourceFileName); }
   public String    getSourceDebugExtension(){ return                CStringUtilities.getString(sourceDebugExtension.getValue(getAddress())); }
   public long      getNonstaticFieldSize()  { return                nonstaticFieldSize.getValue(this); }
@@ -541,8 +535,6 @@
     // visitor.doOop(methods, true);
     // visitor.doOop(localInterfaces, true);
     // visitor.doOop(transitiveInterfaces, true);
-      visitor.doOop(protectionDomain, true);
-      visitor.doOop(signers, true);
       visitor.doCInt(nonstaticFieldSize, true);
       visitor.doCInt(staticFieldSize, true);
       visitor.doCInt(staticOopFieldCount, true);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapGXLWriter.java	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapGXLWriter.java	Fri Jun 07 09:25:18 2013 -0700
@@ -204,13 +204,13 @@
                 Oop loader = ik.getClassLoader();
                 writeEdge(instance, loader, "loaded-by");
 
-                // write signers
-                Oop signers = ik.getSigners();
-                writeEdge(instance, signers, "signed-by");
+                // write signers NYI
+                // Oop signers = ik.getJavaMirror().getSigners();
+                writeEdge(instance, null, "signed-by");
 
-                // write protection domain
-                Oop protectionDomain = ik.getProtectionDomain();
-                writeEdge(instance, protectionDomain, "protection-domain");
+                // write protection domain NYI
+                // Oop protectionDomain = ik.getJavaMirror().getProtectionDomain();
+                writeEdge(instance, null, "protection-domain");
 
                 // write edges for static reference fields from this class
                 for (Iterator itr = refFields.iterator(); itr.hasNext();) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Fri Jun 07 09:25:18 2013 -0700
@@ -477,8 +477,8 @@
         if (k instanceof InstanceKlass) {
             InstanceKlass ik = (InstanceKlass) k;
             writeObjectID(ik.getClassLoader());
-            writeObjectID(ik.getSigners());
-            writeObjectID(ik.getProtectionDomain());
+            writeObjectID(null);  // ik.getJavaMirror().getSigners());
+            writeObjectID(null);  // ik.getJavaMirror().getProtectionDomain());
             // two reserved id fields
             writeObjectID(null);
             writeObjectID(null);
@@ -516,8 +516,8 @@
                 if (bottomKlass instanceof InstanceKlass) {
                     InstanceKlass ik = (InstanceKlass) bottomKlass;
                     writeObjectID(ik.getClassLoader());
-                    writeObjectID(ik.getSigners());
-                    writeObjectID(ik.getProtectionDomain());
+                    writeObjectID(null); // ik.getJavaMirror().getSigners());
+                    writeObjectID(null); // ik.getJavaMirror().getProtectionDomain());
                 } else {
                     writeObjectID(null);
                     writeObjectID(null);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaInstanceKlass.java	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaInstanceKlass.java	Fri Jun 07 09:25:18 2013 -0700
@@ -47,8 +47,6 @@
    private static final int FIELD_IS_SYNTHETIC       = 13;
    private static final int FIELD_IS_INTERFACE       = 14;
    private static final int FIELD_CLASS_LOADER       = 15;
-   private static final int FIELD_PROTECTION_DOMAIN  = 16;
-   private static final int FIELD_SIGNERS            = 17;
    private static final int FIELD_STATICS            = 18;
    private static final int FIELD_UNDEFINED          = -1;
 
@@ -100,10 +98,6 @@
          return Boolean.valueOf(ik.isInterface());
       case FIELD_CLASS_LOADER:
          return factory.newJSJavaObject(ik.getClassLoader());
-      case FIELD_PROTECTION_DOMAIN:
-         return factory.newJSJavaObject(ik.getProtectionDomain());
-      case FIELD_SIGNERS:
-         return factory.newJSJavaObject(ik.getSigners());
       case FIELD_STATICS:
          return getStatics();
       case FIELD_UNDEFINED:
@@ -246,8 +240,6 @@
       addField("isSynthetic", FIELD_IS_SYNTHETIC);
       addField("isInterface", FIELD_IS_INTERFACE);
       addField("classLoader", FIELD_CLASS_LOADER);
-      addField("protectionDomain", FIELD_PROTECTION_DOMAIN);
-      addField("signers", FIELD_SIGNERS);
       addField("statics", FIELD_STATICS);
    }
 
--- a/hotspot/make/bsd/makefiles/adlc.make	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/make/bsd/makefiles/adlc.make	Fri Jun 07 09:25:18 2013 -0700
@@ -69,7 +69,7 @@
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 # Compiler warnings are treated as errors
 ifneq ($(COMPILER_WARNINGS_FATAL),false)
-  CFLAGS_WARN = -Werror
+  CFLAGS_WARN = $(WARNINGS_ARE_ERRORS)
 endif
 CFLAGS += $(CFLAGS_WARN)
 
--- a/hotspot/make/bsd/makefiles/gcc.make	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/make/bsd/makefiles/gcc.make	Fri Jun 07 09:25:18 2013 -0700
@@ -71,6 +71,11 @@
       CC  = $(CC32)
     endif
 
+    ifeq ($(USE_CLANG), true)
+      CXX = clang++
+      CC  = clang
+    endif
+
     HOSTCXX = $(CXX)
     HOSTCC  = $(CC)
   endif
@@ -79,21 +84,79 @@
 endif
 
 
-# -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only
-# prints the numbers (e.g. "2.95", "3.2.1")
-CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
-CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
-
-# check for precompiled headers support
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
-# Allow the user to turn off precompiled headers from the command line.
-ifneq ($(USE_PRECOMPILED_HEADER),0)
-PRECOMPILED_HEADER_DIR=.
-PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
-PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
-endif
+ifeq ($(USE_CLANG), true)
+  CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1)
+  CC_VER_MINOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f2)
+else
+  # -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only
+  # prints the numbers (e.g. "2.95", "3.2.1")
+  CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
+  CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
 endif
 
+ifeq ($(USE_CLANG), true)
+  # clang has precompiled headers support by default, but the user can switch
+  # it off by using 'USE_PRECOMPILED_HEADER=0'.
+  ifdef LP64
+    ifeq ($(USE_PRECOMPILED_HEADER),)
+      USE_PRECOMPILED_HEADER=1
+    endif
+  else
+    # We don't support precompiled headers on 32-bit builds because some files are
+    # compiled with -fPIC while others are compiled without (see 'NONPIC_OBJ_FILES' in rules.make).
+    # Clang produces an error if the PCH file was compiled with different options than the actual compilation unit.
+    USE_PRECOMPILED_HEADER=0
+  endif
+  
+  ifeq ($(USE_PRECOMPILED_HEADER),1)
+  
+    ifndef LP64
+      $(error " Precompiled Headers only supported on 64-bit platforms!")
+    endif
+  
+    PRECOMPILED_HEADER_DIR=.
+    PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+    PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.pch
+  
+    PCH_FLAG = -include precompiled.hpp
+    PCH_FLAG/DEFAULT = $(PCH_FLAG)
+    PCH_FLAG/NO_PCH = -DNO_PCH
+    PCH_FLAG/BY_FILE = $(PCH_FLAG/$@)$(PCH_FLAG/DEFAULT$(PCH_FLAG/$@))
+  
+    VM_PCH_FLAG/LIBJVM = $(PCH_FLAG/BY_FILE)
+    VM_PCH_FLAG/AOUT =
+    VM_PCH_FLAG = $(VM_PCH_FLAG/$(LINK_INTO))
+  
+    # We only use precompiled headers for the JVM build
+    CFLAGS += $(VM_PCH_FLAG)
+  
+    # There are some files which don't like precompiled headers
+    # The following files are built with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build.
+    # But Clang doesn't support a precompiled header which was compiled with -O3
+    # to be used in a compilation unit which uses '-O0'. We could also prepare an
+    # extra '-O0' PCH file for the opt build and use it here, but it's probably
+    # not worth the effort as long as only two files need this special handling.
+    PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)
+    PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
+    PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
+  
+  endif
+else # ($(USE_CLANG), true)
+  # check for precompiled headers support
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
+    # Allow the user to turn off precompiled headers from the command line.
+    ifneq ($(USE_PRECOMPILED_HEADER),0)
+      PRECOMPILED_HEADER_DIR=.
+      PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+      PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
+    endif
+  endif
+endif
+
+# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
+ifeq ($(USE_PRECOMPILED_HEADER),0)
+  CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
+endif
 
 #------------------------------------------------------------------------
 # Compiler flags
@@ -115,17 +178,31 @@
 CFLAGS += $(VM_PICFLAG)
 CFLAGS += -fno-rtti
 CFLAGS += -fno-exceptions
-CFLAGS += -pthread
-CFLAGS += -fcheck-new
-# version 4 and above support fvisibility=hidden (matches jni_x86.h file)
-# except 4.1.2 gives pointless warnings that can't be disabled (afaik)
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
-CFLAGS += -fvisibility=hidden
+ifeq ($(USE_CLANG),)
+  CFLAGS += -pthread
+  CFLAGS += -fcheck-new
+  # version 4 and above support fvisibility=hidden (matches jni_x86.h file)
+  # except 4.1.2 gives pointless warnings that can't be disabled (afaik)
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+    CFLAGS += -fvisibility=hidden
+  endif
+else
+  CFLAGS += -fvisibility=hidden
+endif
+
+ifeq ($(USE_CLANG), true)
+  # Before Clang 3.1, we had to pass the stack alignment specification directly to llvm with the help of '-mllvm'
+  # Starting with version 3.1, Clang understands the '-mstack-alignment' (and rejects '-mllvm -stack-alignment')
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 1 \) \))" "0"
+    STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mstack-alignment=16
+  else
+    STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mllvm -stack-alignment=16
+  endif
 endif
 
 ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
 ARCHFLAG/i486    = -m32 -march=i586
-ARCHFLAG/amd64   = -m64
+ARCHFLAG/amd64   = -m64 $(STACK_ALIGNMENT_OPT)
 ARCHFLAG/ia64    =
 ARCHFLAG/sparc   = -m32 -mcpu=v9
 ARCHFLAG/sparcv9 = -m64 -mcpu=v9
@@ -163,14 +240,25 @@
   WARNINGS_ARE_ERRORS = -Werror
 endif
 
-# Except for a few acceptable ones
-# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
-# conversions which might affect the values. To avoid that, we need to turn
-# it off explicitly. 
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+ifeq ($(USE_CLANG), true)
+  # However we need to clean the code up before we can unrestrictedly enable this option with Clang
+  WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+  WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare
+# Not yet supported by clang in Xcode 4.6.2
+#  WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare
+  WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
+  WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
+endif
+
 WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
-else
-WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
+
+ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+  # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
+  # conversions which might affect the values. Only enable it in earlier versions.
+  WARNING_FLAGS += -Wunused-function
+  ifeq ($(USE_CLANG),)
+    WARNING_FLAGS += -Wconversion
+  endif
 endif
 
 CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
@@ -214,14 +302,24 @@
 
 OPT_CFLAGS/NOOPT=-O0
 
-# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation. 
-ifneq "$(shell expr \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) = 3 \) \))" "0"
-OPT_CFLAGS/mulnode.o += -O0
+# Work around some compiler bugs.
+ifeq ($(USE_CLANG), true)
+  ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1)
+    OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
+  endif
+else
+  # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
+  ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 3), 1)
+    OPT_CFLAGS/mulnode.o += $(OPT_CFLAGS/NOOPT)
+  endif
 endif
 
 # Flags for generating make dependency flags.
-ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+ifeq ($(USE_CLANG),)
+  ifneq ($(CC_VER_MAJOR), 2)
+    DEPFLAGS += -fpch-deps
+  endif
 endif
 
 # -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
@@ -249,13 +347,15 @@
 # statically link libstdc++.so, work with gcc but ignored by g++
 STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic
 
-# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
-ifneq ("${CC_VER_MAJOR}", "2")
-STATIC_LIBGCC += -static-libgcc
-endif
+ifeq ($(USE_CLANG),)
+  # statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
+  ifneq ("${CC_VER_MAJOR}", "2")
+    STATIC_LIBGCC += -static-libgcc
+  endif
 
-ifeq ($(BUILDARCH), ia64)
-LFLAGS += -Wl,-relax
+  ifeq ($(BUILDARCH), ia64)
+    LFLAGS += -Wl,-relax
+  endif
 endif
 
 # Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
@@ -296,25 +396,31 @@
 #------------------------------------------------------------------------
 # Debug flags
 
-# Use the stabs format for debugging information (this is the default
-# on gcc-2.91). It's good enough, has all the information about line
-# numbers and local variables, and libjvm.so is only about 16M.
-# Change this back to "-g" if you want the most expressive format.
-# (warning: that could easily inflate libjvm.so to 150M!)
-# Note: The Itanium gcc compiler crashes when using -gstabs.
-DEBUG_CFLAGS/ia64  = -g
-DEBUG_CFLAGS/amd64 = -g
-DEBUG_CFLAGS/arm   = -g
-DEBUG_CFLAGS/ppc   = -g
-DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
-ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
-DEBUG_CFLAGS += -gstabs
+ifeq ($(USE_CLANG), true)
+  # Restrict the debug information created by Clang to avoid
+  # too big object files and speed the build up a little bit
+  # (see http://llvm.org/bugs/show_bug.cgi?id=7554)
+  CFLAGS += -flimit-debug-info
 endif
 
-# DEBUG_BINARIES overrides everything, use full -g debug information
+# DEBUG_BINARIES uses full -g debug information for all configs
 ifeq ($(DEBUG_BINARIES), true)
-  DEBUG_CFLAGS = -g
-  CFLAGS += $(DEBUG_CFLAGS)
+  CFLAGS += -g
+else
+  # Use the stabs format for debugging information (this is the default
+  # on gcc-2.91). It's good enough, has all the information about line
+  # numbers and local variables, and libjvm.so is only about 16M.
+  # Change this back to "-g" if you want the most expressive format.
+  # (warning: that could easily inflate libjvm.so to 150M!)
+  # Note: The Itanium gcc compiler crashes when using -gstabs.
+  DEBUG_CFLAGS/ia64  = -g
+  DEBUG_CFLAGS/amd64 = -g
+  DEBUG_CFLAGS/arm   = -g
+  DEBUG_CFLAGS/ppc   = -g
+  DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
+  ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
+  DEBUG_CFLAGS += -gstabs
+  endif
 endif
 
 # If we are building HEADLESS, pass on to VM
--- a/hotspot/make/bsd/makefiles/vm.make	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/make/bsd/makefiles/vm.make	Fri Jun 07 09:25:18 2013 -0700
@@ -126,7 +126,11 @@
 LFLAGS += -Xlinker -z -Xlinker noexecstack
 endif
 
-LIBS += -lm -pthread
+LIBS += -lm
+
+ifeq ($(USE_CLANG),)
+  LIBS += -pthread
+endif
 
 # By default, link the *.o into the library, not the executable.
 LINK_INTO$(LINK_INTO) = LIBJVM
--- a/hotspot/make/excludeSrc.make	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/make/excludeSrc.make	Fri Jun 07 09:25:18 2013 -0700
@@ -25,7 +25,7 @@
       CXXFLAGS += -DINCLUDE_JVMTI=0
       CFLAGS += -DINCLUDE_JVMTI=0
 
-      Src_Files_EXCLUDE += jvmtiGetLoadedClasses.cpp forte.cpp jvmtiThreadState.cpp jvmtiExtensions.cpp \
+      Src_Files_EXCLUDE += jvmtiGetLoadedClasses.cpp jvmtiThreadState.cpp jvmtiExtensions.cpp \
 	jvmtiImpl.cpp jvmtiManageCapabilities.cpp jvmtiRawMonitor.cpp jvmtiUtil.cpp jvmtiTrace.cpp \
 	jvmtiCodeBlobEvents.cpp jvmtiEnv.cpp jvmtiRedefineClasses.cpp jvmtiEnvBase.cpp jvmtiEnvThreadState.cpp \
 	jvmtiTagMap.cpp jvmtiEventController.cpp evmCompat.cpp jvmtiEnter.xsl jvmtiExport.cpp \
@@ -87,7 +87,7 @@
 	g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
 	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
 	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
-	g1RemSet.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
+	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
 	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
 	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
 	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
--- a/hotspot/make/hotspot_version	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/make/hotspot_version	Fri Jun 07 09:25:18 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=34
+HS_BUILD_NUMBER=36
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/make/linux/makefiles/adlc.make	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/make/linux/makefiles/adlc.make	Fri Jun 07 09:25:18 2013 -0700
@@ -68,7 +68,7 @@
 
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 # Compiler warnings are treated as errors
-CFLAGS_WARN = -Werror
+CFLAGS_WARN = $(WARNINGS_ARE_ERRORS)
 CFLAGS += $(CFLAGS_WARN)
 
 OBJECTNAMES = \
--- a/hotspot/make/linux/makefiles/gcc.make	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/make/linux/makefiles/gcc.make	Fri Jun 07 09:25:18 2013 -0700
@@ -36,8 +36,14 @@
     HOSTCC  = gcc
     STRIP = $(ALT_COMPILER_PATH)/strip
   else
-    CXX = g++
-    CC  = gcc
+    ifeq ($(USE_CLANG), true)
+      CXX = clang++
+      CC  = clang
+    else
+      CXX = g++
+      CC  = gcc
+    endif
+
     HOSTCXX = $(CXX)
     HOSTCC  = $(CC)
     STRIP = strip
@@ -46,19 +52,79 @@
 endif
 
 
-# -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only
-# prints the numbers (e.g. "2.95", "3.2.1")
-CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
-CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
+ifeq ($(USE_CLANG), true)
+  CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1)
+  CC_VER_MINOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f2)
+else
+  # -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only
+  # prints the numbers (e.g. "2.95", "3.2.1")
+  CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
+  CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
+endif
+
+
+ifeq ($(USE_CLANG), true)
+  # Clang has precompiled headers support by default, but the user can switch
+  # it off by using 'USE_PRECOMPILED_HEADER=0'.
+  ifdef LP64
+    ifeq ($(USE_PRECOMPILED_HEADER),)
+      USE_PRECOMPILED_HEADER=1
+    endif
+  else
+  # We don't support precompiled headers on 32-bit builds because some files are
+  # compiled with -fPIC while others are compiled without (see 'NONPIC_OBJ_FILES' in rules.make).
+  # Clang produces an error if the PCH file was compiled with different options than the actual compilation unit.
+    USE_PRECOMPILED_HEADER=0
+  endif
+
+  ifeq ($(USE_PRECOMPILED_HEADER),1)
+
+    ifndef LP64
+      $(error " Precompiled Headers only supported on 64-bit platforms!")
+    endif
+
+    PRECOMPILED_HEADER_DIR=.
+    PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+    PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.pch
 
-# check for precompiled headers support
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
-# Allow the user to turn off precompiled headers from the command line.
-ifneq ($(USE_PRECOMPILED_HEADER),0)
-PRECOMPILED_HEADER_DIR=.
-PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
-PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
+    PCH_FLAG = -include precompiled.hpp
+    PCH_FLAG/DEFAULT = $(PCH_FLAG)
+    PCH_FLAG/NO_PCH = -DNO_PCH
+    PCH_FLAG/BY_FILE = $(PCH_FLAG/$@)$(PCH_FLAG/DEFAULT$(PCH_FLAG/$@))
+
+    VM_PCH_FLAG/LIBJVM = $(PCH_FLAG/BY_FILE)
+    VM_PCH_FLAG/AOUT =
+    VM_PCH_FLAG = $(VM_PCH_FLAG/$(LINK_INTO))
+
+    # We only use precompiled headers for the JVM build
+    CFLAGS += $(VM_PCH_FLAG)
+
+    # There are some files which don't like precompiled headers
+    # The following files are built with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build.
+    # But Clang doesn't support a precompiled header which was compiled with -O3
+    # to be used in a compilation unit which uses '-O0'. We could also prepare an
+    # extra '-O0' PCH file for the opt build and use it here, but it's probably
+    # not worth the effort as long as only two files need this special handling.
+    PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)
+    PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
+    PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
+
+  endif
+else # ($(USE_CLANG), true)
+  # check for precompiled headers support
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
+    # Allow the user to turn off precompiled headers from the command line.
+    ifneq ($(USE_PRECOMPILED_HEADER),0)
+      PRECOMPILED_HEADER_DIR=.
+      PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+      PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
+    endif
+  endif
 endif
+
+# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
+ifeq ($(USE_PRECOMPILED_HEADER),0)
+  CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
 endif
 
 
@@ -83,16 +149,30 @@
 CFLAGS += -fno-rtti
 CFLAGS += -fno-exceptions
 CFLAGS += -D_REENTRANT
-CFLAGS += -fcheck-new
-# version 4 and above support fvisibility=hidden (matches jni_x86.h file)
-# except 4.1.2 gives pointless warnings that can't be disabled (afaik)
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
-CFLAGS += -fvisibility=hidden
+ifeq ($(USE_CLANG),)
+  CFLAGS += -fcheck-new
+  # version 4 and above support fvisibility=hidden (matches jni_x86.h file)
+  # except 4.1.2 gives pointless warnings that can't be disabled (afaik)
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+    CFLAGS += -fvisibility=hidden
+  endif
+else
+  CFLAGS += -fvisibility=hidden
+endif
+
+ifeq ($(USE_CLANG), true)
+  # Before Clang 3.1, we had to pass the stack alignment specification directly to llvm with the help of '-mllvm'
+  # Starting with version 3.1, Clang understands the '-mstack-alignment' (and rejects '-mllvm -stack-alignment')
+  ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 1 \) \))" "0"
+    STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mstack-alignment=16
+  else
+    STACK_ALIGNMENT_OPT = -mno-omit-leaf-frame-pointer -mllvm -stack-alignment=16
+  endif
 endif
 
 ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
 ARCHFLAG/i486    = -m32 -march=i586
-ARCHFLAG/amd64   = -m64
+ARCHFLAG/amd64   = -m64 $(STACK_ALIGNMENT_OPT)
 ARCHFLAG/ia64    =
 ARCHFLAG/sparc   = -m32 -mcpu=v9
 ARCHFLAG/sparcv9 = -m64 -mcpu=v9
@@ -126,12 +206,22 @@
 # Compiler warnings are treated as errors
 WARNINGS_ARE_ERRORS = -Werror
 
+ifeq ($(USE_CLANG), true)
+  # However we need to clean the code up before we can unrestrictedly enable this option with Clang
+  WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+  WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare
+  WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
+  WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
+endif
+
 WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function
 
-# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
-# conversions which might affect the values. Only enable it in earlier versions.
-ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
-WARNING_FLAGS += -Wconversion
+ifeq ($(USE_CLANG),)
+  # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
+  # conversions which might affect the values. Only enable it in earlier versions.
+  ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+    WARNING_FLAGS += -Wconversion
+  endif
 endif
 
 CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
@@ -165,19 +255,24 @@
 
 OPT_CFLAGS/NOOPT=-O0
 
-# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation. 
-ifneq "$(shell expr \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) = 3 \) \))" "0"
-OPT_CFLAGS/mulnode.o += -O0
+# Work around some compiler bugs.
+ifeq ($(USE_CLANG), true)
+  ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1)
+    OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
+  endif
+else
+  # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
+  ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 3), 1)
+    OPT_CFLAGS/mulnode.o += $(OPT_CFLAGS/NOOPT)
+  endif
 endif
 
 # Flags for generating make dependency flags.
-ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
-endif
-
-# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
-ifeq ($(USE_PRECOMPILED_HEADER),0)
-CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
+DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+ifeq ($(USE_CLANG),)
+  ifneq ("${CC_VER_MAJOR}", "2")
+    DEPFLAGS += -fpch-deps
+  endif
 endif
 
 #------------------------------------------------------------------------
@@ -186,24 +281,33 @@
 # statically link libstdc++.so, work with gcc but ignored by g++
 STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic
 
-# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
-ifneq ("${CC_VER_MAJOR}", "2")
-STATIC_LIBGCC += -static-libgcc
-endif
+ifeq ($(USE_CLANG),)
+  # statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
+  ifneq ("${CC_VER_MAJOR}", "2")
+    STATIC_LIBGCC += -static-libgcc
+  endif
 
-ifeq ($(BUILDARCH), ia64)
-LFLAGS += -Wl,-relax
+  ifeq ($(BUILDARCH), ia64)
+    LFLAGS += -Wl,-relax
+  endif
 endif
 
 # Enable linker optimization
 LFLAGS += -Xlinker -O1
 
-# If this is a --hash-style=gnu system, use --hash-style=both
-#   The gnu .hash section won't work on some Linux systems like SuSE 10.
-_HAS_HASH_STYLE_GNU:=$(shell $(CC) -dumpspecs | grep -- '--hash-style=gnu')
-ifneq ($(_HAS_HASH_STYLE_GNU),)
+ifeq ($(USE_CLANG),)
+  # If this is a --hash-style=gnu system, use --hash-style=both
+  #   The gnu .hash section won't work on some Linux systems like SuSE 10.
+  _HAS_HASH_STYLE_GNU:=$(shell $(CC) -dumpspecs | grep -- '--hash-style=gnu')
+  ifneq ($(_HAS_HASH_STYLE_GNU),)
+    LDFLAGS_HASH_STYLE = -Wl,--hash-style=both
+  endif
+else
+  # Don't know how to find out the 'hash style' of a system as '-dumpspecs'
+  # doesn't work for Clang. So for now we'll always use --hash-style=both
   LDFLAGS_HASH_STYLE = -Wl,--hash-style=both
 endif
+
 LFLAGS += $(LDFLAGS_HASH_STYLE)
 
 # Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
@@ -221,6 +325,13 @@
 #------------------------------------------------------------------------
 # Debug flags
 
+ifeq ($(USE_CLANG), true)
+  # Restrict the debug information created by Clang to avoid
+  # too big object files and speed the build up a little bit
+  # (see http://llvm.org/bugs/show_bug.cgi?id=7554)
+  CFLAGS += -flimit-debug-info
+endif
+
 # DEBUG_BINARIES uses full -g debug information for all configs
 ifeq ($(DEBUG_BINARIES), true)
   CFLAGS += -g
@@ -237,7 +348,12 @@
   DEBUG_CFLAGS/ppc   = -g
   DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
   ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
-    DEBUG_CFLAGS += -gstabs
+      ifeq ($(USE_CLANG), true)
+        # Clang doesn't understand -gstabs
+        OPT_CFLAGS += -g
+      else
+        OPT_CFLAGS += -gstabs
+      endif
   endif
   
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
@@ -247,7 +363,12 @@
     FASTDEBUG_CFLAGS/ppc   = -g
     FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
     ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
-      FASTDEBUG_CFLAGS += -gstabs
+      ifeq ($(USE_CLANG), true)
+        # Clang doesn't understand -gstabs
+        OPT_CFLAGS += -g
+      else
+        OPT_CFLAGS += -gstabs
+      endif
     endif
   
     OPT_CFLAGS/ia64  = -g
@@ -256,7 +377,12 @@
     OPT_CFLAGS/ppc   = -g
     OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
     ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
-      OPT_CFLAGS += -gstabs
+      ifeq ($(USE_CLANG), true)
+        # Clang doesn't understand -gstabs
+        OPT_CFLAGS += -g
+      else
+        OPT_CFLAGS += -gstabs
+      endif
     endif
   endif
 endif
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1065,7 +1065,7 @@
   const int slop_factor = 2*wordSize;
 
   const int fixed_size = ((sizeof(BytecodeInterpreter) + slop_factor) >> LogBytesPerWord) + // what is the slop factor?
-                         //6815692//Method::extra_stack_words() +  // extra push slots for MH adapters
+                         Method::extra_stack_entries() + // extra stack for jsr 292
                          frame::memory_parameter_word_sp_offset +  // register save area + param window
                          (native ?  frame::interpreter_frame_extra_outgoing_argument_words : 0); // JNI, class
 
@@ -1221,9 +1221,7 @@
   // Full size expression stack
   __ ld_ptr(constMethod, O3);
   __ lduh(O3, in_bytes(ConstMethod::max_stack_offset()), O3);
-  guarantee(!EnableInvokeDynamic, "no support yet for java.lang.invoke.MethodHandle"); //6815692
-  //6815692//if (EnableInvokeDynamic)
-  //6815692//  __ inc(O3, Method::extra_stack_entries());
+  __ inc(O3, Method::extra_stack_entries());
   __ sll(O3, LogBytesPerWord, O3);
   __ sub(O2, O3, O3);
 //  __ sub(O3, wordSize, O3);                    // so prepush doesn't look out of bounds
@@ -2084,9 +2082,7 @@
 
   const int fixed_size = sizeof(BytecodeInterpreter)/wordSize +           // interpreter state object
                          frame::memory_parameter_word_sp_offset;   // register save area + param window
-  const int extra_stack = 0; //6815692//Method::extra_stack_entries();
   return (round_to(max_stack +
-                   extra_stack +
                    slop_factor +
                    fixed_size +
                    monitor_size +
@@ -2173,8 +2169,7 @@
   // Need +1 here because stack_base points to the word just above the first expr stack entry
   // and stack_limit is supposed to point to the word just below the last expr stack entry.
   // See generate_compute_interpreter_state.
-  int extra_stack = 0; //6815692//Method::extra_stack_entries();
-  to_fill->_stack_limit = stack_base - (method->max_stack() + 1 + extra_stack);
+  to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
   to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
 
   // sparc specific
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -521,7 +521,7 @@
   // Compute max expression stack+register save area
   ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size);
   lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);  // Load max stack.
-  add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
+  add(Gframe_size, frame::memory_parameter_word_sp_offset+Method::extra_stack_entries(), Gframe_size );
 
   //
   // now set up a stack frame with the size computed above
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -507,7 +507,7 @@
 
   const int extra_space =
     rounded_vm_local_words +                   // frame local scratch space
-    //6815692//Method::extra_stack_words() +       // extra push slots for MH adapters
+    Method::extra_stack_entries() +            // extra stack for jsr 292
     frame::memory_parameter_word_sp_offset +   // register save area
     (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
 
@@ -1558,7 +1558,6 @@
        round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
   const int max_stack_words = max_stack * Interpreter::stackElementWords;
   return (round_to((max_stack_words
-                   //6815692//+ Method::extra_stack_words()
                    + rounded_vm_local_words
                    + frame::memory_parameter_word_sp_offset), WordsPerLong)
                    // already rounded
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -539,12 +539,11 @@
 
     // compute full expression stack limit
 
-    const int extra_stack = 0; //6815692//Method::extra_stack_words();
     __ movptr(rdx, Address(rbx, Method::const_offset()));
     __ load_unsigned_short(rdx, Address(rdx, ConstMethod::max_stack_offset())); // get size of expression stack in words
     __ negptr(rdx);                                                       // so we can subtract in next step
     // Allocate expression stack
-    __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -extra_stack));
+    __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -Method::extra_stack_words()));
     __ movptr(STATE(_stack_limit), rsp);
   }
 
@@ -692,10 +691,9 @@
   // Always give one monitor to allow us to start interp if sync method.
   // Any additional monitors need a check when moving the expression stack
   const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
-  const int extra_stack = 0; //6815692//Method::extra_stack_entries();
   __ movptr(rax, Address(rbx, Method::const_offset()));
   __ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words
-  __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), extra_stack + one_monitor));
+  __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor+Method::extra_stack_words()));
   __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
 
 #ifdef ASSERT
@@ -2265,8 +2263,7 @@
   const int overhead_size = sizeof(BytecodeInterpreter)/wordSize +
     ( frame::sender_sp_offset - frame::link_offset) + 2;
 
-  const int extra_stack = 0; //6815692//Method::extra_stack_entries();
-  const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
+  const int method_stack = (method->max_locals() + method->max_stack()) *
                            Interpreter::stackElementWords;
   return overhead_size + method_stack + stub_code;
 }
@@ -2331,8 +2328,7 @@
   // Need +1 here because stack_base points to the word just above the first expr stack entry
   // and stack_limit is supposed to point to the word just below the last expr stack entry.
   // See generate_compute_interpreter_state.
-  int extra_stack = 0; //6815692//Method::extra_stack_entries();
-  to_fill->_stack_limit = stack_base - (method->max_stack() + extra_stack + 1);
+  to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
   to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
 
   to_fill->_self_link = to_fill;
@@ -2380,8 +2376,7 @@
                                                 monitor_size);
 
   // Now with full size expression stack
-  int extra_stack = 0; //6815692//Method::extra_stack_entries();
-  int full_frame_size = short_frame_size + (method->max_stack() + extra_stack) * BytesPerWord;
+  int full_frame_size = short_frame_size + method->max_stack() * BytesPerWord;
 
   // and now with only live portion of the expression stack
   short_frame_size = short_frame_size + tempcount * BytesPerWord;
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1565,8 +1565,7 @@
   // be sure to change this if you add/subtract anything to/from the overhead area
   const int overhead_size = -frame::interpreter_frame_initial_sp_offset;
 
-  const int extra_stack = Method::extra_stack_entries();
-  const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
+  const int method_stack = (method->max_locals() + method->max_stack()) *
                            Interpreter::stackElementWords;
   return overhead_size + method_stack + stub_code;
 }
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1574,8 +1574,7 @@
     -(frame::interpreter_frame_initial_sp_offset) + entry_size;
 
   const int stub_code = frame::entry_frame_after_call_words;
-  const int extra_stack = Method::extra_stack_entries();
-  const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
+  const int method_stack = (method->max_locals() + method->max_stack()) *
                            Interpreter::stackElementWords;
   return (overhead_size + method_stack + stub_code);
 }
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -626,8 +626,6 @@
 //////////////////////////////////////////////////////////////////////////////
 // create new thread
 
-static address highest_vm_reserved_address();
-
 // check if it's safe to start a new thread
 static bool _thread_safety_check(Thread* thread) {
   return true;
@@ -935,10 +933,10 @@
   return (1000 * 1000);
 }
 
-// XXX: For now, code this as if BSD does not support vtime.
-bool os::supports_vtime() { return false; }
+bool os::supports_vtime() { return true; }
 bool os::enable_vtime()   { return false; }
 bool os::vtime_enabled()  { return false; }
+
 double os::elapsedVTime() {
   // better than nothing, but not much
   return elapsedTime();
@@ -2112,10 +2110,6 @@
   return anon_munmap(addr, size);
 }
 
-static address highest_vm_reserved_address() {
-  return _highest_vm_reserved_address;
-}
-
 static bool bsd_mprotect(char* addr, size_t size, int prot) {
   // Bsd wants the mprotect address argument to be page aligned.
   char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());
@@ -2159,43 +2153,6 @@
   return false;
 }
 
-/*
-* Set the coredump_filter bits to include largepages in core dump (bit 6)
-*
-* From the coredump_filter documentation:
-*
-* - (bit 0) anonymous private memory
-* - (bit 1) anonymous shared memory
-* - (bit 2) file-backed private memory
-* - (bit 3) file-backed shared memory
-* - (bit 4) ELF header pages in file-backed private memory areas (it is
-*           effective only if the bit 2 is cleared)
-* - (bit 5) hugetlb private memory
-* - (bit 6) hugetlb shared memory
-*/
-static void set_coredump_filter(void) {
-  FILE *f;
-  long cdm;
-
-  if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
-    return;
-  }
-
-  if (fscanf(f, "%lx", &cdm) != 1) {
-    fclose(f);
-    return;
-  }
-
-  rewind(f);
-
-  if ((cdm & LARGEPAGES_BIT) == 0) {
-    cdm |= LARGEPAGES_BIT;
-    fprintf(f, "%#lx", cdm);
-  }
-
-  fclose(f);
-}
-
 // Large page support
 
 static size_t _large_page_size = 0;
@@ -3030,6 +2987,19 @@
     sigAct.sa_sigaction = signalHandler;
     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
   }
+#if __APPLE__
+  // Needed for main thread as XNU (Mac OS X kernel) will only deliver SIGSEGV
+  // (which starts as SIGBUS) on main thread with faulting address inside "stack+guard pages"
+  // if the signal handler declares it will handle it on alternate stack.
+  // Notice we only declare we will handle it on alt stack, but we are not
+  // actually going to use real alt stack - this is just a workaround.
+  // Please see ux_exception.c, method catch_mach_exception_raise for details
+  // link http://www.opensource.apple.com/source/xnu/xnu-2050.18.24/bsd/uxkern/ux_exception.c
+  if (sig == SIGSEGV) {
+    sigAct.sa_flags |= SA_ONSTACK;
+  }
+#endif
+
   // Save flags, which are set by ours
   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
   sigflags[sig] = sigAct.sa_flags;
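
Note: the SA_ONSTACK workaround added above can be exercised outside HotSpot. The following is a minimal, self-contained sketch, not HotSpot code (the handler body and function names are placeholders): it registers a SIGSEGV handler with SA_SIGINFO|SA_RESTART and, on OS X only, also sets SA_ONSTACK so XNU will deliver faults that land in the stack guard pages, without actually installing an alternate stack.

    #include <signal.h>
    #include <string.h>

    // Placeholder handler standing in for HotSpot's signalHandler().
    static void segv_handler(int sig, siginfo_t* info, void* ucontext) {
      (void)sig; (void)info; (void)ucontext;
      // real code would inspect info->si_addr and decide how to recover
    }

    static void install_segv_handler() {
      struct sigaction sigAct;
      memset(&sigAct, 0, sizeof(sigAct));
      sigemptyset(&sigAct.sa_mask);
      sigAct.sa_sigaction = segv_handler;
      sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
    #ifdef __APPLE__
      // Declare (but do not install) an alternate stack so the kernel delivers
      // SIGSEGV for faults inside the stack guard pages to this handler.
      sigAct.sa_flags |= SA_ONSTACK;
    #endif
      sigaction(SIGSEGV, &sigAct, NULL);
    }
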
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -101,6 +101,12 @@
 # include <inttypes.h>
 # include <sys/ioctl.h>
 
+// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
+// getrusage() is prepared to handle the associated failure.
+#ifndef RUSAGE_THREAD
+#define RUSAGE_THREAD   (1)               /* only the calling thread */
+#endif
+
 #define MAX_PATH    (2 * K)
 
 // for timer info max values which include all bits
@@ -1336,15 +1342,19 @@
   return (1000 * 1000);
 }
 
-// For now, we say that linux does not support vtime.  I have no idea
-// whether it can actually be made to (DLD, 9/13/05).
-
-bool os::supports_vtime() { return false; }
+bool os::supports_vtime() { return true; }
 bool os::enable_vtime()   { return false; }
 bool os::vtime_enabled()  { return false; }
+
 double os::elapsedVTime() {
-  // better than nothing, but not much
-  return elapsedTime();
+  struct rusage usage;
+  int retval = getrusage(RUSAGE_THREAD, &usage);
+  if (retval == 0) {
+    return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
+  } else {
+    // better than nothing, but not much
+    return elapsedTime();
+  }
 }
 
 jlong os::javaTimeMillis() {
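
Note: the per-thread CPU time query that the new os::elapsedVTime() relies on can be tried on its own. This is a minimal sketch, not HotSpot code; it assumes a Linux kernel new enough for RUSAGE_THREAD and returns a sentinel when getrusage() fails, mirroring the fallback to elapsedTime() above.

    #include <sys/resource.h>

    // RUSAGE_THREAD needs Linux >= 2.6.26; older glibc headers may not define it.
    #ifndef RUSAGE_THREAD
    #define RUSAGE_THREAD 1
    #endif

    // CPU seconds (user + system) consumed by the calling thread, or a
    // negative value if the kernel does not support RUSAGE_THREAD.
    static double thread_cpu_seconds() {
      struct rusage usage;
      if (getrusage(RUSAGE_THREAD, &usage) != 0) {
        return -1.0;  // caller should fall back to wall-clock time
      }
      return (double)(usage.ru_utime.tv_sec + usage.ru_stime.tv_sec)
           + (double)(usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
    }
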
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -813,15 +813,21 @@
   return result;
 }
 
-// For now, we say that Windows does not support vtime.  I have no idea
-// whether it can actually be made to (DLD, 9/13/05).
-
-bool os::supports_vtime() { return false; }
+bool os::supports_vtime() { return true; }
 bool os::enable_vtime() { return false; }
 bool os::vtime_enabled() { return false; }
+
 double os::elapsedVTime() {
-  // better than nothing, but not much
-  return elapsedTime();
+  FILETIME created;
+  FILETIME exited;
+  FILETIME kernel;
+  FILETIME user;
+  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
+    // the resolution of windows_to_java_time() should be sufficient (ms)
+    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
+  } else {
+    return elapsedTime();
+  }
 }
 
 jlong os::javaTimeMillis() {
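
Note: the Windows counterpart uses GetThreadTimes(), which reports kernel and user time as FILETIME values in 100-nanosecond units. A standalone sketch of the same idea (plain Win32 only, without HotSpot helpers such as windows_to_java_time()):

    #include <windows.h>

    // CPU seconds (kernel + user) for the current thread, or a negative
    // value if GetThreadTimes() fails and the caller should fall back to
    // wall-clock time.
    static double thread_cpu_seconds() {
      FILETIME created, exited, kernel, user;
      if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) == 0) {
        return -1.0;
      }
      ULARGE_INTEGER k, u;
      k.LowPart = kernel.dwLowDateTime;  k.HighPart = kernel.dwHighDateTime;
      u.LowPart = user.dwLowDateTime;    u.HighPart = user.dwHighDateTime;
      // FILETIME counts 100-nanosecond intervals.
      return (double)(k.QuadPart + u.QuadPart) / 1e7;
    }
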
@@ -944,6 +950,8 @@
   MINIDUMP_TYPE dumpType;
   static const char* cwd;
 
+// Default is to always create a dump for debug builds; on product builds, only dump on server versions of Windows.
+#ifndef ASSERT
   // If running on a client version of Windows and user has not explicitly enabled dumping
   if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
     VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
@@ -953,6 +961,12 @@
     VMError::report_coredump_status("Minidump has been disabled from the command line", false);
     return;
   }
+#else
+  if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
+    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
+    return;
+  }
+#endif
 
   dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
 
@@ -1004,7 +1018,21 @@
   // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
   if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
       _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
-    VMError::report_coredump_status("Call to MiniDumpWriteDump() failed", false);
+        DWORD error = GetLastError();
+        LPTSTR msgbuf = NULL;
+
+        if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+                      FORMAT_MESSAGE_FROM_SYSTEM |
+                      FORMAT_MESSAGE_IGNORE_INSERTS,
+                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {
+
+          jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
+          LocalFree(msgbuf);
+        } else {
+          // Call to FormatMessage failed, just include the result from GetLastError
+          jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
+        }
+        VMError::report_coredump_status(buffer, false);
   } else {
     VMError::report_coredump_status(buffer, true);
   }
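
Note: the improved minidump diagnostics above boil down to turning GetLastError() into readable text with FormatMessage(). A standalone sketch of that pattern (the function name and buffer handling here are illustrative, not the HotSpot ones):

    #include <windows.h>
    #include <stdio.h>

    // Formats "prefix (Error 0x...: system message)" into buf.
    static void report_last_error(const char* prefix, char* buf, size_t bufsize) {
      DWORD error = GetLastError();
      LPSTR msgbuf = NULL;
      if (FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                         FORMAT_MESSAGE_FROM_SYSTEM |
                         FORMAT_MESSAGE_IGNORE_INSERTS,
                         NULL, error, 0, (LPSTR)&msgbuf, 0, NULL) != 0) {
        snprintf(buf, bufsize, "%s (Error 0x%lx: %s)", prefix, error, msgbuf);
        LocalFree(msgbuf);
      } else {
        // FormatMessage itself failed; report only the raw error code.
        snprintf(buf, bufsize, "%s (Error 0x%lx)", prefix, error);
      }
    }
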
--- a/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.s	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.s	Fri Jun 07 09:25:18 2013 -0700
@@ -241,7 +241,7 @@
         jbe      2f                   # <= 32 dwords
         rep;     smovl
         jmp      4f
-	.=.+8
+	.space 8
 2:      subl     %esi,%edi
         .p2align 4,,15
 3:      movl     (%esi),%edx
@@ -378,7 +378,7 @@
         rep;     smovl
         jmp      4f 
         # copy aligned dwords
-        .=.+5
+        .space 5
 2:      subl     %esi,%edi 
         .p2align 4,,15
 3:      movl     (%esi),%edx
@@ -454,7 +454,7 @@
         popl     %edi
         popl     %esi
         ret
-        .=.+10
+        .space 10
 2:      subl     %esi,%edi
         jmp      4f
         .p2align 4,,15
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -93,6 +93,10 @@
   register void *esp;
   __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
   return (address) ((char*)esp + sizeof(long)*2);
+#elif defined(__clang__)
+  intptr_t* esp;
+  __asm__ __volatile__ ("mov %%"SPELL_REG_SP", %0":"=r"(esp):);
+  return (address) esp;
 #else
   register void *esp __asm__ (SPELL_REG_SP);
   return (address) esp;
@@ -175,6 +179,9 @@
 #ifdef SPARC_WORKS
   register intptr_t **ebp;
   __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
+#elif defined(__clang__)
+  intptr_t **ebp;
+  __asm__ __volatile__ ("mov %%"SPELL_REG_FP", %0":"=r"(ebp):);
 #else
   register intptr_t **ebp __asm__ (SPELL_REG_FP);
 #endif
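
Note: the __clang__ branches above replace GCC's register-asm locals with an explicit inline-asm read of the register, likely because Clang flagged the register-bound variable as used uninitialized under warnings-as-errors (the patch itself does not say). A minimal 64-bit sketch of the same idea, using rsp directly in place of HotSpot's SPELL_REG_SP macro, so it only builds for x86_64:

    #include <stdint.h>
    #include <stdio.h>

    // Read the current stack pointer with an explicit asm statement;
    // this form is accepted by both GCC and Clang.
    static intptr_t* current_sp() {
      intptr_t* sp;
      __asm__ __volatile__ ("mov %%rsp, %0" : "=r"(sp));
      return sp;
    }

    int main() {
      printf("approximate stack pointer: %p\n", (void*)current_sp());
      return 0;
    }
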
--- a/hotspot/src/share/vm/adlc/archDesc.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/adlc/archDesc.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -29,8 +29,8 @@
 static FILE *errfile = stderr;
 
 //--------------------------- utility functions -----------------------------
-inline char  toUpper(char lower) {
-  return (('a' <= lower && lower <= 'z') ? (lower + ('A'-'a')) : lower);
+inline char toUpper(char lower) {
+  return (('a' <= lower && lower <= 'z') ? ((char) (lower + ('A'-'a'))) : lower);
 }
 char *toUpper(const char *str) {
   char *upper  = new char[strlen(str)+1];
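
Note: the (char) cast added to toUpper() matters because `lower + ('A'-'a')` is computed as int (integer promotion), so without the explicit narrowing the conditional has type int and the return converts it back to char implicitly, which stricter warning settings reject; that is a plausible reading of this hunk rather than something the patch states. A standalone illustration of the same pattern, not adlc code:

    #include <stdio.h>

    // With the cast, both branches of ?: have type char, so no implicit
    // int -> char narrowing happens on return.
    static inline char to_upper_char(char lower) {
      return ('a' <= lower && lower <= 'z') ? (char)(lower + ('A' - 'a')) : lower;
    }

    int main() {
      printf("%c%c%c\n", to_upper_char('j'), to_upper_char('v'), to_upper_char('m'));
      return 0;
    }
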
--- a/hotspot/src/share/vm/adlc/dict2.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/adlc/dict2.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -64,18 +64,18 @@
   int i;
 
   // Precompute table of null character hashes
-  if( !initflag ) {             // Not initializated yet?
-    xsum[0] = (1<<shft[0])+1;   // Initialize
+  if (!initflag) {              // Not initializated yet?
+    xsum[0] = (short) ((1 << shft[0]) + 1);  // Initialize
     for( i = 1; i < MAXID; i++) {
-      xsum[i] = (1<<shft[i])+1+xsum[i-1];
+      xsum[i] = (short) ((1 << shft[i]) + 1 + xsum[i-1]);
     }
     initflag = 1;               // Never again
   }
 
   _size = 16;                   // Size is a power of 2
   _cnt = 0;                     // Dictionary is empty
-  _bin = (bucket*)_arena->Amalloc_4(sizeof(bucket)*_size);
-  memset(_bin,0,sizeof(bucket)*_size);
+  _bin = (bucket*)_arena->Amalloc_4(sizeof(bucket) * _size);
+  memset(_bin, 0, sizeof(bucket) * _size);
 }
 
 //------------------------------~Dict------------------------------------------
@@ -287,11 +287,11 @@
   register int sum = 0;
   register const char *s = (const char *)t;
 
-  while( ((c = s[k]) != '\0') && (k < MAXID-1) ) { // Get characters till nul
-    c = (c<<1)+1;               // Characters are always odd!
-    sum += c + (c<<shft[k++]);  // Universal hash function
+  while (((c = s[k]) != '\0') && (k < MAXID-1)) { // Get characters till nul
+    c = (char) ((c << 1) + 1);    // Characters are always odd!
+    sum += c + (c << shft[k++]);  // Universal hash function
   }
-  assert( k < (MAXID), "Exceeded maximum name length");
+  assert(k < (MAXID), "Exceeded maximum name length");
   return (int)((sum+xsum[k]) >> 1); // Hash key, un-modulo'd table size
 }
 
--- a/hotspot/src/share/vm/adlc/formssel.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -796,11 +796,11 @@
   return num_opnds;
 }
 
-const char *InstructForm::opnd_ident(int idx) {
+const char* InstructForm::opnd_ident(int idx) {
   return _components.at(idx)->_name;
 }
 
-const char *InstructForm::unique_opnd_ident(int idx) {
+const char* InstructForm::unique_opnd_ident(uint idx) {
   uint i;
   for (i = 1; i < num_opnds(); ++i) {
     if (unique_opnds_idx(i) == idx) {
@@ -1315,36 +1315,36 @@
 // Seach through operands to determine parameters unique positions.
 void InstructForm::set_unique_opnds() {
   uint* uniq_idx = NULL;
-  int  nopnds = num_opnds();
+  uint  nopnds = num_opnds();
   uint  num_uniq = nopnds;
-  int i;
+  uint i;
   _uniq_idx_length = 0;
-  if ( nopnds > 0 ) {
+  if (nopnds > 0) {
     // Allocate index array.  Worst case we're mapping from each
     // component back to an index and any DEF always goes at 0 so the
     // length of the array has to be the number of components + 1.
     _uniq_idx_length = _components.count() + 1;
-    uniq_idx = (uint*) malloc(sizeof(uint)*(_uniq_idx_length));
-    for( i = 0; i < _uniq_idx_length; i++ ) {
+    uniq_idx = (uint*) malloc(sizeof(uint) * _uniq_idx_length);
+    for (i = 0; i < _uniq_idx_length; i++) {
       uniq_idx[i] = i;
     }
   }
   // Do it only if there is a match rule and no expand rule.  With an
   // expand rule it is done by creating new mach node in Expand()
   // method.
-  if ( nopnds > 0 && _matrule != NULL && _exprule == NULL ) {
+  if (nopnds > 0 && _matrule != NULL && _exprule == NULL) {
     const char *name;
     uint count;
     bool has_dupl_use = false;
 
     _parameters.reset();
-    while( (name = _parameters.iter()) != NULL ) {
+    while ((name = _parameters.iter()) != NULL) {
       count = 0;
-      int position = 0;
-      int uniq_position = 0;
+      uint position = 0;
+      uint uniq_position = 0;
       _components.reset();
       Component *comp = NULL;
-      if( sets_result() ) {
+      if (sets_result()) {
         comp = _components.iter();
         position++;
       }
@@ -1352,11 +1352,11 @@
       for (; (comp = _components.iter()) != NULL; ++position) {
         // When the first component is not a DEF,
         // leave space for the result operand!
-        if ( position==0 && (! comp->isa(Component::DEF)) ) {
+        if (position==0 && (!comp->isa(Component::DEF))) {
           ++position;
         }
-        if( strcmp(name, comp->_name)==0 ) {
-          if( ++count > 1 ) {
+        if (strcmp(name, comp->_name) == 0) {
+          if (++count > 1) {
             assert(position < _uniq_idx_length, "out of bounds");
             uniq_idx[position] = uniq_position;
             has_dupl_use = true;
@@ -1364,22 +1364,25 @@
             uniq_position = position;
           }
         }
-        if( comp->isa(Component::DEF)
-            && comp->isa(Component::USE) ) {
+        if (comp->isa(Component::DEF) && comp->isa(Component::USE)) {
           ++position;
-          if( position != 1 )
+          if (position != 1)
             --position;   // only use two slots for the 1st USE_DEF
         }
       }
     }
-    if( has_dupl_use ) {
-      for( i = 1; i < nopnds; i++ )
-        if( i != uniq_idx[i] )
+    if (has_dupl_use) {
+      for (i = 1; i < nopnds; i++) {
+        if (i != uniq_idx[i]) {
           break;
-      int  j = i;
-      for( ; i < nopnds; i++ )
-        if( i == uniq_idx[i] )
+        }
+      }
+      uint j = i;
+      for (; i < nopnds; i++) {
+        if (i == uniq_idx[i]) {
           uniq_idx[i] = j++;
+        }
+      }
       num_uniq = j;
     }
   }
@@ -2216,21 +2219,27 @@
 
 
 bool OperandForm::is_bound_register() const {
-  RegClass *reg_class  = get_RegClass();
-  if (reg_class == NULL) return false;
-
-  const char * name = ideal_type(globalAD->globalNames());
-  if (name == NULL) return false;
-
-  int size = 0;
-  if (strcmp(name,"RegFlags")==0) size =  1;
-  if (strcmp(name,"RegI")==0) size =  1;
-  if (strcmp(name,"RegF")==0) size =  1;
-  if (strcmp(name,"RegD")==0) size =  2;
-  if (strcmp(name,"RegL")==0) size =  2;
-  if (strcmp(name,"RegN")==0) size =  1;
-  if (strcmp(name,"RegP")==0) size =  globalAD->get_preproc_def("_LP64") ? 2 : 1;
-  if (size == 0) return false;
+  RegClass* reg_class = get_RegClass();
+  if (reg_class == NULL) {
+    return false;
+  }
+
+  const char* name = ideal_type(globalAD->globalNames());
+  if (name == NULL) {
+    return false;
+  }
+
+  uint size = 0;
+  if (strcmp(name, "RegFlags") == 0) size = 1;
+  if (strcmp(name, "RegI") == 0) size = 1;
+  if (strcmp(name, "RegF") == 0) size = 1;
+  if (strcmp(name, "RegD") == 0) size = 2;
+  if (strcmp(name, "RegL") == 0) size = 2;
+  if (strcmp(name, "RegN") == 0) size = 1;
+  if (strcmp(name, "RegP") == 0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
+  if (size == 0) {
+    return false;
+  }
   return size == reg_class->size();
 }
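
Editor's note: the formssel.cpp hunks above mostly switch operand counts and loop indices from int to uint so they compare against the unsigned values used elsewhere (for example unique_opnds_idx()) without casts. A tiny standalone sketch of the signed/unsigned mix the change avoids; the functions below are invented stand-ins, not the adlc ones.

    #include <cstdio>

    typedef unsigned int uint;

    static uint num_opnds()                 { return 3; }   // stand-in for a uint-returning count
    static uint unique_opnds_idx(uint idx)  { return idx; } // stand-in identity mapping

    int main() {
      // With an int index, 'i < num_opnds()' and 'i != unique_opnds_idx(i)' mix
      // signed and unsigned operands (and typically trigger -Wsign-compare);
      // keeping everything uint, as the hunks above do, removes the casts.
      for (uint i = 1; i < num_opnds(); ++i) {
        if (i != unique_opnds_idx(i)) {
          break;
        }
        printf("operand %u is unique\n", i);
      }
      return 0;
    }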
 
--- a/hotspot/src/share/vm/adlc/formssel.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -106,7 +106,7 @@
   const char    *_ins_pipe;        // Instruction Scheduling description class
 
   uint          *_uniq_idx;        // Indexes of unique operands
-  int            _uniq_idx_length; // Length of _uniq_idx array
+  uint           _uniq_idx_length; // Length of _uniq_idx array
   uint           _num_uniq;        // Number  of unique operands
   ComponentList  _components;      // List of Components matches MachNode's
                                    // operand structure
@@ -272,14 +272,14 @@
   void                set_unique_opnds();
   uint                num_unique_opnds() { return _num_uniq; }
   uint                unique_opnds_idx(int idx) {
-                        if( _uniq_idx != NULL && idx > 0 ) {
-                          assert(idx < _uniq_idx_length, "out of bounds");
-                          return _uniq_idx[idx];
-                        } else {
-                          return idx;
-                        }
+    if (_uniq_idx != NULL && idx > 0) {
+      assert((uint)idx < _uniq_idx_length, "out of bounds");
+      return _uniq_idx[idx];
+    } else {
+      return idx;
+    }
   }
-  const char         *unique_opnd_ident(int idx);  // Name of operand at unique idx.
+  const char         *unique_opnd_ident(uint idx);  // Name of operand at unique idx.
 
   // Operands which are only KILLs aren't part of the input array and
   // require special handling in some cases.  Their position in this
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -463,8 +463,9 @@
   uint resources_used_exclusively = 0;
 
   for (pipeclass->_resUsage.reset();
-       (piperesource = (const PipeClassResourceForm *)pipeclass->_resUsage.iter()) != NULL; )
+       (piperesource = (const PipeClassResourceForm*)pipeclass->_resUsage.iter()) != NULL; ) {
     element_count++;
+  }
 
   // Pre-compute the string length
   int templen;
@@ -482,8 +483,8 @@
   for (i = rescount; i > 0; i /= 10)
     maskdigit++;
 
-  static const char * pipeline_use_cycle_mask = "Pipeline_Use_Cycle_Mask";
-  static const char * pipeline_use_element    = "Pipeline_Use_Element";
+  static const char* pipeline_use_cycle_mask = "Pipeline_Use_Cycle_Mask";
+  static const char* pipeline_use_element    = "Pipeline_Use_Element";
 
   templen = 1 +
     (int)(strlen(pipeline_use_cycle_mask) + (int)strlen(pipeline_use_element) +
@@ -496,11 +497,12 @@
   templen = 0;
 
   for (pipeclass->_resUsage.reset();
-       (piperesource = (const PipeClassResourceForm *)pipeclass->_resUsage.iter()) != NULL; ) {
+       (piperesource = (const PipeClassResourceForm*)pipeclass->_resUsage.iter()) != NULL; ) {
     int used_mask = pipeline->_resdict[piperesource->_resource]->is_resource()->mask();
 
-    if (!used_mask)
+    if (!used_mask) {
       fprintf(stderr, "*** used_mask is 0 ***\n");
+    }
 
     resources_used |= used_mask;
 
@@ -509,8 +511,9 @@
     for (lb =  0; (used_mask & (1 << lb)) == 0; lb++);
     for (ub = 31; (used_mask & (1 << ub)) == 0; ub--);
 
-    if (lb == ub)
+    if (lb == ub) {
       resources_used_exclusively |= used_mask;
+    }
 
     int formatlen =
       sprintf(&resource_mask[templen], "  %s(0x%0*x, %*d, %*d, %s %s(",
@@ -526,7 +529,7 @@
 
     int cycles = piperesource->_cycles;
     uint stage          = pipeline->_stages.index(piperesource->_stage);
-    if (NameList::Not_in_list == stage) {
+    if ((uint)NameList::Not_in_list == stage) {
       fprintf(stderr,
               "pipeline_res_mask_initializer: "
               "semantic error: "
@@ -534,8 +537,8 @@
               piperesource->_stage);
       exit(1);
     }
-    uint upper_limit    = stage+cycles-1;
-    uint lower_limit    = stage-1;
+    uint upper_limit    = stage + cycles - 1;
+    uint lower_limit    = stage - 1;
     uint upper_idx      = upper_limit >> 5;
     uint lower_idx      = lower_limit >> 5;
     uint upper_position = upper_limit & 0x1f;
@@ -543,7 +546,7 @@
 
     uint mask = (((uint)1) << upper_position) - 1;
 
-    while ( upper_idx > lower_idx ) {
+    while (upper_idx > lower_idx) {
       res_mask[upper_idx--] |= mask;
       mask = (uint)-1;
     }
@@ -565,8 +568,9 @@
   }
 
   resource_mask[templen] = 0;
-  if (last_comma)
+  if (last_comma) {
     last_comma[0] = ' ';
+  }
 
   // See if the same string is in the table
   int ndx = pipeline_res_mask.index(resource_mask);
@@ -580,7 +584,7 @@
       fprintf(fp_cpp, "static const Pipeline_Use_Element pipeline_res_mask_%03d[%d] = {\n%s};\n\n",
         ndx+1, element_count, resource_mask);
 
-    char * args = new char [9 + 2*masklen + maskdigit];
+    char* args = new char [9 + 2*masklen + maskdigit];
 
     sprintf(args, "0x%0*x, 0x%0*x, %*d",
       masklen, resources_used,
@@ -589,8 +593,9 @@
 
     pipeline_res_args.addName(args);
   }
-  else
+  else {
     delete [] resource_mask;
+  }
 
   delete [] res_mask;
 //delete [] res_masks;
@@ -1787,7 +1792,7 @@
       // Skip first unique operands.
       for( i = 1; i < cur_num_opnds; i++ ) {
         comp = node->_components.iter();
-        if( (int)i != node->unique_opnds_idx(i) ) {
+        if (i != node->unique_opnds_idx(i)) {
           break;
         }
         new_num_opnds++;
@@ -1795,7 +1800,7 @@
       // Replace not unique operands with next unique operands.
       for( ; i < cur_num_opnds; i++ ) {
         comp = node->_components.iter();
-        int j = node->unique_opnds_idx(i);
+        uint j = node->unique_opnds_idx(i);
         // unique_opnds_idx(i) is unique if unique_opnds_idx(j) is not unique.
         if( j != node->unique_opnds_idx(j) ) {
           fprintf(fp,"  set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -2232,6 +2232,7 @@
       // We still need to continue with the checks.
       if (src.is_constant()) {
         ciObject* src_con = src.get_jobject_constant();
+        guarantee(src_con != NULL, "no source constant");
 
         if (src_con->is_null_object()) {
           // The constant src object is null - We can skip
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -444,8 +444,8 @@
             break;
           case JVM_REF_invokeStatic:
           case JVM_REF_invokeSpecial:
-            check_property(
-               tag.is_method() || tag.is_interface_method(),
+            check_property(tag.is_method() ||
+                           ((_major_version >= JAVA_8_VERSION) && tag.is_interface_method()),
                "Invalid constant pool index %u in class file %s (not a method)",
                ref_index, CHECK_(nullHandle));
              break;
@@ -3152,7 +3152,6 @@
       }
     }
   }
-  int contended_count = nonstatic_contended_count;
 
 
   // Calculate the starting byte offsets
@@ -3177,35 +3176,52 @@
 
   next_nonstatic_field_offset = nonstatic_fields_start;
 
+  bool is_contended_class     = parsed_annotations->is_contended();
+
   // Class is contended, pad before all the fields
-  if (parsed_annotations->is_contended()) {
+  if (is_contended_class) {
     next_nonstatic_field_offset += ContendedPaddingWidth;
   }
 
-  // Compute the non-contended fields count
+  // Compute the non-contended fields count.
+  // The packing code below relies on these counts to determine if some field
+  // can be squeezed into the alignment gap. Contended fields are obviously
+  // exempt from that.
   unsigned int nonstatic_double_count = fac->count[NONSTATIC_DOUBLE] - fac_contended.count[NONSTATIC_DOUBLE];
   unsigned int nonstatic_word_count   = fac->count[NONSTATIC_WORD]   - fac_contended.count[NONSTATIC_WORD];
   unsigned int nonstatic_short_count  = fac->count[NONSTATIC_SHORT]  - fac_contended.count[NONSTATIC_SHORT];
   unsigned int nonstatic_byte_count   = fac->count[NONSTATIC_BYTE]   - fac_contended.count[NONSTATIC_BYTE];
   unsigned int nonstatic_oop_count    = fac->count[NONSTATIC_OOP]    - fac_contended.count[NONSTATIC_OOP];
 
+  // Total non-static fields count, including every contended field
+  unsigned int nonstatic_fields_count = fac->count[NONSTATIC_DOUBLE] + fac->count[NONSTATIC_WORD] +
+                                        fac->count[NONSTATIC_SHORT] + fac->count[NONSTATIC_BYTE] +
+                                        fac->count[NONSTATIC_OOP];
+
   bool super_has_nonstatic_fields =
           (_super_klass() != NULL && _super_klass->has_nonstatic_fields());
-  bool has_nonstatic_fields = super_has_nonstatic_fields ||
-          ((nonstatic_double_count + nonstatic_word_count +
-            nonstatic_short_count + nonstatic_byte_count +
-            nonstatic_oop_count) != 0);
+  bool has_nonstatic_fields = super_has_nonstatic_fields || (nonstatic_fields_count != 0);
 
 
   // Prepare list of oops for oop map generation.
+  //
+  // "offset" and "count" lists are describing the set of contiguous oop
+  // regions. offset[i] is the start of the i-th region, which then has
+  // count[i] oops following. Before we know how many regions are required,
+  // we pessimistically allocate the maps to fit all the oops into the
+  // distinct regions.
+  //
+  // TODO: We add +1 to always allocate non-zero resource arrays; we need
+  // to figure out if we still need to do this.
   int* nonstatic_oop_offsets;
   unsigned int* nonstatic_oop_counts;
   unsigned int nonstatic_oop_map_count = 0;
+  unsigned int max_nonstatic_oop_maps  = fac->count[NONSTATIC_OOP] + 1;
 
   nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
-            THREAD, int, nonstatic_oop_count + 1);
+            THREAD, int, max_nonstatic_oop_maps);
   nonstatic_oop_counts  = NEW_RESOURCE_ARRAY_IN_THREAD(
-            THREAD, unsigned int, nonstatic_oop_count + 1);
+            THREAD, unsigned int, max_nonstatic_oop_maps);
 
   first_nonstatic_oop_offset = 0; // will be set for first oop field
 
@@ -3392,9 +3408,11 @@
             int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
             heapOopSize ) {
           // Extend current oop map
+          assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check");
           nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
         } else {
           // Create new oop map
+          assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
           nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
           nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
           nonstatic_oop_map_count += 1;
@@ -3452,12 +3470,10 @@
   //
   // Additionally, this should not break alignment for the fields, so we round the alignment up
   // for each field.
-  if (contended_count > 0) {
+  if (nonstatic_contended_count > 0) {
 
     // if there is at least one contended field, we need to have pre-padding for them
-    if (nonstatic_contended_count > 0) {
-      next_nonstatic_padded_offset += ContendedPaddingWidth;
-    }
+    next_nonstatic_padded_offset += ContendedPaddingWidth;
 
     // collect all contended groups
     BitMap bm(_cp->size());
@@ -3518,6 +3534,7 @@
             next_nonstatic_padded_offset += heapOopSize;
 
             // Create new oop map
+            assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
             nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
             nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
             nonstatic_oop_map_count += 1;
@@ -3554,18 +3571,17 @@
     // handle static fields
   }
 
-  // Size of instances
-  int notaligned_offset = next_nonstatic_padded_offset;
-
   // Entire class is contended, pad in the back.
   // This helps to alleviate memory contention effects for subclass fields
   // and/or adjacent object.
-  if (parsed_annotations->is_contended()) {
-    notaligned_offset += ContendedPaddingWidth;
+  if (is_contended_class) {
+    next_nonstatic_padded_offset += ContendedPaddingWidth;
   }
 
-  int nonstatic_fields_end      = align_size_up(notaligned_offset, heapOopSize);
-  int instance_end              = align_size_up(notaligned_offset, wordSize);
+  int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset;
+
+  int nonstatic_fields_end      = align_size_up(notaligned_nonstatic_fields_end, heapOopSize);
+  int instance_end              = align_size_up(notaligned_nonstatic_fields_end, wordSize);
   int static_fields_end         = align_size_up(next_static_byte_offset, wordSize);
 
   int static_field_size         = (static_fields_end -
@@ -3579,6 +3595,14 @@
          (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize),
           wordSize) / wordSize), "consistent layout helper value");
 
+  // Invariant: nonstatic_field end/start should only change if there are
+  // nonstatic fields in the class, or if the class is contended. We compare
+  // against the non-aligned value, so that end alignment will not fail the
+  // assert without actually having the fields.
+  assert((notaligned_nonstatic_fields_end == nonstatic_fields_start) ||
+         is_contended_class ||
+         (nonstatic_fields_count > 0), "double-check nonstatic start/end");
+
   // Number of non-static oop map blocks allocated at end of klass.
   const unsigned int total_oop_map_count =
     compute_oop_map_count(_super_klass, nonstatic_oop_map_count,
@@ -4040,6 +4064,9 @@
       }
     }
 
+    // Allocate mirror and initialize static fields
+    java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle));
+
 
 #ifdef ASSERT
     if (ParseAllGenericSignatures) {
@@ -4055,17 +4082,6 @@
           this_klass(), &all_mirandas, CHECK_(nullHandle));
     }
 
-    // Allocate mirror and initialize static fields
-    java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle));
-
-    // Allocate a simple java object for locking during class initialization.
-    // This needs to be a java object because it can be held across a java call.
-    typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL);
-    this_klass->set_init_lock(r);
-
-    // TODO: Move these oops to the mirror
-    this_klass->set_protection_domain(protection_domain());
-
     // Update the loader_data graph.
     record_defined_class_dependencies(this_klass, CHECK_NULL);
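
Editor's note: the classFileParser.cpp hunks above size the oop-map arrays pessimistically from the raw NONSTATIC_OOP count and assert on every write into them; the new comment describes offset[i]/count[i] as contiguous oop regions. Below is a standalone sketch (not part of the patch) of that bookkeeping, with an invented heapOopSize and invented field offsets rather than a real class layout.

    #include <cassert>
    #include <cstdio>
    #include <vector>

    int main() {
      const int heapOopSize = 8;                       // invented value
      // Already-laid-out oop field offsets, in ascending order (also invented).
      const std::vector<int> oop_field_offsets = {16, 24, 32, 56, 64};

      // Pessimistic sizing: one potential map per oop, as with max_nonstatic_oop_maps.
      std::vector<int> offsets(oop_field_offsets.size());
      std::vector<unsigned> counts(oop_field_offsets.size());
      unsigned map_count = 0;

      for (int real_offset : oop_field_offsets) {
        if (map_count > 0 &&
            offsets[map_count - 1] + (int)counts[map_count - 1] * heapOopSize == real_offset) {
          counts[map_count - 1] += 1;                  // extend the current oop map
        } else {
          assert(map_count < offsets.size());          // range check, as in the parser
          offsets[map_count] = real_offset;            // start a new oop map
          counts[map_count]  = 1;
          map_count += 1;
        }
      }

      for (unsigned i = 0; i < map_count; i++) {       // prints two regions here
        printf("region %u: offset %d, %u oops\n", i, offsets[i], counts[i]);
      }
      return 0;
    }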
 
--- a/hotspot/src/share/vm/classfile/defaultMethods.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/classfile/defaultMethods.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1349,6 +1349,7 @@
 
   // Replace klass methods with new merged lists
   klass->set_methods(merged_methods);
+  klass->set_initial_method_idnum(new_size);
 
   ClassLoaderData* cld = klass->class_loader_data();
   MetadataFactory::free_array(cld, original_methods);
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -512,22 +512,22 @@
 
   // If the offset was read from the shared archive, it was fixed up already
   if (!k->is_shared()) {
-  if (k->oop_is_instance()) {
-    // During bootstrap, java.lang.Class wasn't loaded so static field
-    // offsets were computed without the size added it.  Go back and
-    // update all the static field offsets to included the size.
-      for (JavaFieldStream fs(InstanceKlass::cast(k())); !fs.done(); fs.next()) {
-      if (fs.access_flags().is_static()) {
-        int real_offset = fs.offset() + InstanceMirrorKlass::offset_of_static_fields();
-        fs.set_offset(real_offset);
+    if (k->oop_is_instance()) {
+      // During bootstrap, java.lang.Class wasn't loaded so static field
+      // offsets were computed without the size added it.  Go back and
+      // update all the static field offsets to included the size.
+        for (JavaFieldStream fs(InstanceKlass::cast(k())); !fs.done(); fs.next()) {
+        if (fs.access_flags().is_static()) {
+          int real_offset = fs.offset() + InstanceMirrorKlass::offset_of_static_fields();
+          fs.set_offset(real_offset);
+        }
       }
     }
   }
-  }
-  create_mirror(k, CHECK);
+  create_mirror(k, Handle(NULL), CHECK);
 }
 
-oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
+oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
   assert(k->java_mirror() == NULL, "should only assign mirror once");
   // Use this moment of initialization to cache modifier_flags also,
   // to support Class.getModifiers().  Instance classes recalculate
@@ -563,6 +563,16 @@
       set_array_klass(comp_mirror(), k());
     } else {
       assert(k->oop_is_instance(), "Must be");
+
+      // Allocate a simple java object for a lock.
+      // This needs to be a java object because during class initialization
+      // it can be held across a java call.
+      typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL);
+      set_init_lock(mirror(), r);
+
+      // Set protection domain also
+      set_protection_domain(mirror(), protection_domain());
+
       // Initialize static fields
       InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
     }
@@ -597,6 +607,34 @@
   java_class->int_field_put(_static_oop_field_count_offset, size);
 }
 
+oop java_lang_Class::protection_domain(oop java_class) {
+  assert(_protection_domain_offset != 0, "must be set");
+  return java_class->obj_field(_protection_domain_offset);
+}
+void java_lang_Class::set_protection_domain(oop java_class, oop pd) {
+  assert(_protection_domain_offset != 0, "must be set");
+  java_class->obj_field_put(_protection_domain_offset, pd);
+}
+
+oop java_lang_Class::init_lock(oop java_class) {
+  assert(_init_lock_offset != 0, "must be set");
+  return java_class->obj_field(_init_lock_offset);
+}
+void java_lang_Class::set_init_lock(oop java_class, oop init_lock) {
+  assert(_init_lock_offset != 0, "must be set");
+  java_class->obj_field_put(_init_lock_offset, init_lock);
+}
+
+objArrayOop java_lang_Class::signers(oop java_class) {
+  assert(_signers_offset != 0, "must be set");
+  return (objArrayOop)java_class->obj_field(_signers_offset);
+}
+void java_lang_Class::set_signers(oop java_class, objArrayOop signers) {
+  assert(_signers_offset != 0, "must be set");
+  java_class->obj_field_put(_signers_offset, (oop)signers);
+}
+
+
 oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
   // This should be improved by adding a field at the Java level or by
   // introducing a new VM klass (see comment in ClassFileParser)
@@ -2934,6 +2972,9 @@
 int java_lang_Class::_array_klass_offset;
 int java_lang_Class::_oop_size_offset;
 int java_lang_Class::_static_oop_field_count_offset;
+int java_lang_Class::_protection_domain_offset;
+int java_lang_Class::_init_lock_offset;
+int java_lang_Class::_signers_offset;
 GrowableArray<Klass*>* java_lang_Class::_fixup_mirror_list = NULL;
 int java_lang_Throwable::backtrace_offset;
 int java_lang_Throwable::detailMessage_offset;
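
Editor's note: the javaClasses.cpp hunk above moves protection_domain, init_lock and signers into the java.lang.Class mirror as injected oop fields, each read and written through a statically computed offset. A standalone sketch of that getter/setter pattern follows; Mirror, its field array and the offset value are invented for illustration, not HotSpot types.

    #include <cassert>
    #include <cstdio>

    typedef void* oop;                        // stand-in for a heap reference

    struct Mirror {                           // stand-in for a java.lang.Class instance
      oop fields[8];
      oop  obj_field(int offset) const        { return fields[offset]; }
      void obj_field_put(int offset, oop v)   { fields[offset] = v; }
    };

    static int _protection_domain_offset = 0; // 0 means "not computed yet"

    static oop protection_domain(const Mirror* java_class) {
      assert(_protection_domain_offset != 0 && "must be set");
      return java_class->obj_field(_protection_domain_offset);
    }

    static void set_protection_domain(Mirror* java_class, oop pd) {
      assert(_protection_domain_offset != 0 && "must be set");
      java_class->obj_field_put(_protection_domain_offset, pd);
    }

    int main() {
      _protection_domain_offset = 3;          // normally filled in by compute_offsets()
      Mirror m = {};
      int dummy_pd = 42;
      set_protection_domain(&m, &dummy_pd);
      printf("pd stored: %s\n", protection_domain(&m) == &dummy_pd ? "yes" : "no");
      return 0;
    }
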
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -208,7 +208,10 @@
   macro(java_lang_Class, klass,                  intptr_signature,  false) \
   macro(java_lang_Class, array_klass,            intptr_signature,  false) \
   macro(java_lang_Class, oop_size,               int_signature,     false) \
-  macro(java_lang_Class, static_oop_field_count, int_signature,     false)
+  macro(java_lang_Class, static_oop_field_count, int_signature,     false) \
+  macro(java_lang_Class, protection_domain,      object_signature,  false) \
+  macro(java_lang_Class, init_lock,              object_signature,  false) \
+  macro(java_lang_Class, signers,                object_signature,  false)
 
 class java_lang_Class : AllStatic {
   friend class VMStructs;
@@ -222,15 +225,20 @@
   static int _oop_size_offset;
   static int _static_oop_field_count_offset;
 
+  static int _protection_domain_offset;
+  static int _init_lock_offset;
+  static int _signers_offset;
+
   static bool offsets_computed;
   static int classRedefinedCount_offset;
   static GrowableArray<Klass*>* _fixup_mirror_list;
 
+  static void set_init_lock(oop java_class, oop init_lock);
  public:
   static void compute_offsets();
 
   // Instance creation
-  static oop  create_mirror(KlassHandle k, TRAPS);
+  static oop  create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
   static void fixup_mirror(KlassHandle k, TRAPS);
   static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
   // Conversion
@@ -262,6 +270,13 @@
   static int classRedefinedCount(oop the_class_mirror);
   static void set_classRedefinedCount(oop the_class_mirror, int value);
 
+  // Support for embedded per-class oops
+  static oop  protection_domain(oop java_class);
+  static void set_protection_domain(oop java_class, oop protection_domain);
+  static oop  init_lock(oop java_class);
+  static objArrayOop  signers(oop java_class);
+  static void set_signers(oop java_class, objArrayOop signers);
+
   static int oop_size(oop java_class);
   static void set_oop_size(oop java_class, int size);
   static int static_oop_field_count(oop java_class);
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -737,7 +737,7 @@
   return result;
 }
 
-void StringTable::unlink(BoolObjectClosure* is_alive) {
+void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
   // Readers of the table are unlocked, so we should only be removing
   // entries at a safepoint.
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
@@ -745,41 +745,31 @@
     HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
     HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
-      // Shared entries are normally at the end of the bucket and if we run into
-      // a shared entry, then there is nothing more to remove. However, if we
-      // have rehashed the table, then the shared entries are no longer at the
-      // end of the bucket.
-      if (entry->is_shared() && !use_alternate_hashcode()) {
-        break;
-      }
-      assert(entry->literal() != NULL, "just checking");
-      if (entry->is_shared() || is_alive->do_object_b(entry->literal())) {
+      assert(!entry->is_shared(), "CDS not used for the StringTable");
+
+      if (is_alive->do_object_b(entry->literal())) {
+        if (f != NULL) {
+          f->do_oop((oop*)entry->literal_addr());
+        }
         p = entry->next_addr();
       } else {
         *p = entry->next();
         the_table()->free_entry(entry);
       }
-      entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p);
+      entry = *p;
     }
   }
 }
 
 void StringTable::oops_do(OopClosure* f) {
   for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
     HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
+      assert(!entry->is_shared(), "CDS not used for the StringTable");
+
       f->do_oop((oop*)entry->literal_addr());
 
-      // Did the closure remove the literal from the table?
-      if (entry->literal() == NULL) {
-        assert(!entry->is_shared(), "immutable hashtable entry?");
-        *p = entry->next();
-        the_table()->free_entry(entry);
-      } else {
-        p = entry->next_addr();
-      }
-      entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p);
+      entry = entry->next();
     }
   }
 }
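
Editor's note: unlink_or_oops_do above folds the old unlink() and an optional OopClosure visit into one bucket walk: live entries are kept (and visited when a closure is supplied), dead entries are spliced out through a pointer-to-pointer and freed. A standalone sketch of that traversal, with simplified stand-ins for the hashtable entry, the liveness test, and the closure:

    #include <cstdio>
    #include <cstdlib>

    struct Entry {
      int literal;
      Entry* next;
    };

    static bool is_alive(const Entry* e) { return e->literal % 2 == 0; } // keep evens
    static void visit(Entry* e)          { printf("visiting %d\n", e->literal); }

    static void unlink_or_do(Entry** bucket, bool do_visit) {
      Entry** p = bucket;
      Entry* entry = *bucket;
      while (entry != NULL) {
        if (is_alive(entry)) {
          if (do_visit) visit(entry);   // the OopClosure step, when one is supplied
          p = &entry->next;             // remember the link we may later rewrite
        } else {
          *p = entry->next;             // splice the dead entry out of the chain
          free(entry);
        }
        entry = *p;                     // the kept entry's successor, or the spliced-in one
      }
    }

    int main() {
      Entry* head = NULL;
      for (int v = 5; v >= 1; v--) {    // build 1 -> 2 -> 3 -> 4 -> 5
        Entry* e = (Entry*)malloc(sizeof(Entry));
        e->literal = v;
        e->next = head;
        head = e;
      }
      unlink_or_do(&head, true);        // keeps 2 and 4, frees the odd entries
      for (Entry* e = head; e != NULL; e = e->next) printf("kept %d\n", e->literal);
      return 0;
    }
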
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -272,7 +272,10 @@
 
   // GC support
   //   Delete pointers to otherwise-unreachable objects.
-  static void unlink(BoolObjectClosure* cl);
+  static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f);
+  static void unlink(BoolObjectClosure* cl) {
+    unlink_or_oops_do(cl, NULL);
+  }
 
   // Invoke "f->do_oop" on the locations of all oops in the table.
   static void oops_do(OopClosure* f);
--- a/hotspot/src/share/vm/classfile/verifier.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -36,8 +36,10 @@
 class Verifier : AllStatic {
  public:
   enum {
+    STRICTER_ACCESS_CTRL_CHECK_VERSION  = 49,
     STACKMAP_ATTRIBUTE_MAJOR_VERSION    = 50,
-    INVOKEDYNAMIC_MAJOR_VERSION         = 51
+    INVOKEDYNAMIC_MAJOR_VERSION         = 51,
+    NO_RELAX_ACCESS_CTRL_CHECK_VERSION  = 52
   };
   typedef enum { ThrowException, NoException } Mode;
 
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -392,6 +392,9 @@
   template(array_klass_name,                          "array_klass")                              \
   template(oop_size_name,                             "oop_size")                                 \
   template(static_oop_field_count_name,               "static_oop_field_count")                   \
+  template(protection_domain_name,                    "protection_domain")                        \
+  template(init_lock_name,                            "init_lock")                                \
+  template(signers_name,                              "signers_name")                             \
   template(loader_data_name,                          "loader_data")                              \
   template(dependencies_name,                         "dependencies")                             \
                                                                                                   \
--- a/hotspot/src/share/vm/code/nmethod.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1976,11 +1976,10 @@
   if (!method()->is_native()) {
     SimpleScopeDesc ssd(this, fr.pc());
     Bytecode_invoke call(ssd.method(), ssd.bci());
-    // compiled invokedynamic call sites have an implicit receiver at
-    // resolution time, so make sure it gets GC'ed.
-    bool has_receiver = !call.is_invokestatic();
+    bool has_receiver = call.has_receiver();
+    bool has_appendix = call.has_appendix();
     Symbol* signature = call.signature();
-    fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
+    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
   }
 #endif // !SHARK
 }
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1642,42 +1642,37 @@
 // Set up state required by +LogCompilation.
 void CompileBroker::init_compiler_thread_log() {
     CompilerThread* thread = CompilerThread::current();
-    char  fileBuf[4*K];
+    char  file_name[4*K];
     FILE* fp = NULL;
-    char* file = NULL;
     intx thread_id = os::current_thread_id();
     for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
       const char* dir = (try_temp_dir ? os::get_temp_directory() : NULL);
       if (dir == NULL) {
-        jio_snprintf(fileBuf, sizeof(fileBuf), "hs_c" UINTX_FORMAT "_pid%u.log",
+        jio_snprintf(file_name, sizeof(file_name), "hs_c" UINTX_FORMAT "_pid%u.log",
                      thread_id, os::current_process_id());
       } else {
-        jio_snprintf(fileBuf, sizeof(fileBuf),
+        jio_snprintf(file_name, sizeof(file_name),
                      "%s%shs_c" UINTX_FORMAT "_pid%u.log", dir,
                      os::file_separator(), thread_id, os::current_process_id());
       }
-      fp = fopen(fileBuf, "at");
+
+      fp = fopen(file_name, "at");
       if (fp != NULL) {
-        file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1, mtCompiler);
-        strcpy(file, fileBuf);
-        break;
+        if (LogCompilation && Verbose) {
+          tty->print_cr("Opening compilation log %s", file_name);
+        }
+        CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file_name, fp, thread_id);
+        thread->init_log(log);
+
+        if (xtty != NULL) {
+          ttyLocker ttyl;
+          // Record any per thread log files
+          xtty->elem("thread_logfile thread='%d' filename='%s'", thread_id, file_name);
+        }
+        return;
       }
     }
-    if (fp == NULL) {
-      warning("Cannot open log file: %s", fileBuf);
-    } else {
-      if (LogCompilation && Verbose)
-        tty->print_cr("Opening compilation log %s", file);
-      CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file, fp, thread_id);
-      thread->init_log(log);
-
-      if (xtty != NULL) {
-        ttyLocker ttyl;
-
-        // Record any per thread log files
-        xtty->elem("thread_logfile thread='%d' filename='%s'", thread_id, file);
-      }
-    }
+    warning("Cannot open log file: %s", file_name);
 }
 
 // ------------------------------------------------------------------
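
Editor's note: the compileBroker.cpp rewrite above keeps the file name in a stack buffer, builds the CompileLog inside the loop, and returns as soon as a log file opens, trying the temp directory first and then no directory at all. Below is a standalone sketch of that open-and-return-early flow; the path, pid and buffer size are invented, and CompileLog construction is elided.

    #include <cstddef>
    #include <cstdio>

    static const char* get_temp_directory() { return "/tmp"; }   // invented

    static FILE* open_log(char* file_name, size_t len) {
      for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
        const char* dir = try_temp_dir ? get_temp_directory() : NULL;
        if (dir == NULL) {
          snprintf(file_name, len, "hs_c_pid%u.log", 1234u);     // fixed fake pid
        } else {
          snprintf(file_name, len, "%s/hs_c_pid%u.log", dir, 1234u);
        }
        FILE* fp = fopen(file_name, "at");
        if (fp != NULL) {
          return fp;        // success: the caller copies file_name before it goes away
        }
      }
      return NULL;          // both attempts failed
    }

    int main() {
      char file_name[4096];
      FILE* fp = open_log(file_name, sizeof(file_name));
      if (fp == NULL) {
        fprintf(stderr, "Cannot open log file: %s\n", file_name);
        return 1;
      }
      printf("Opened compilation log %s\n", file_name);
      fclose(fp);
      return 0;
    }
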
--- a/hotspot/src/share/vm/compiler/compileLog.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/compiler/compileLog.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -34,17 +34,18 @@
 
 // ------------------------------------------------------------------
 // CompileLog::CompileLog
-CompileLog::CompileLog(const char* file, FILE* fp, intx thread_id)
+CompileLog::CompileLog(const char* file_name, FILE* fp, intx thread_id)
   : _context(_context_buffer, sizeof(_context_buffer))
 {
-  initialize(new(ResourceObj::C_HEAP, mtCompiler) fileStream(fp));
-  _file = file;
+  initialize(new(ResourceObj::C_HEAP, mtCompiler) fileStream(fp, true));
   _file_end = 0;
   _thread_id = thread_id;
 
   _identities_limit = 0;
   _identities_capacity = 400;
   _identities = NEW_C_HEAP_ARRAY(char, _identities_capacity, mtCompiler);
+  _file = NEW_C_HEAP_ARRAY(char, strlen(file_name)+1, mtCompiler);
+   strcpy((char*)_file, file_name);
 
   // link into the global list
   { MutexLocker locker(CompileTaskAlloc_lock);
@@ -57,6 +58,7 @@
   delete _out;
   _out = NULL;
   FREE_C_HEAP_ARRAY(char, _identities, mtCompiler);
+  FREE_C_HEAP_ARRAY(char, _file, mtCompiler);
 }
 
 
@@ -188,7 +190,8 @@
   if (called_exit)  return;
   called_exit = true;
 
-  for (CompileLog* log = _first; log != NULL; log = log->_next) {
+  CompileLog* log = _first;
+  while (log != NULL) {
     log->flush();
     const char* partial_file = log->file();
     int partial_fd = open(partial_file, O_RDONLY);
@@ -267,7 +270,11 @@
       close(partial_fd);
       unlink(partial_file);
     }
+    CompileLog* next_log = log->_next;
+    delete log;
+    log = next_log;
   }
+  _first = NULL;
 }
 
 // ------------------------------------------------------------------
--- a/hotspot/src/share/vm/compiler/compileLog.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/compiler/compileLog.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -57,7 +57,7 @@
   void va_tag(bool push, const char* format, va_list ap);
 
  public:
-  CompileLog(const char* file, FILE* fp, intx thread_id);
+  CompileLog(const char* file_name, FILE* fp, intx thread_id);
   ~CompileLog();
 
   intx          thread_id()                      { return _thread_id; }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -969,8 +969,8 @@
 }
 
 
-void CMSAdaptiveSizePolicy::compute_young_generation_free_space(size_t cur_eden,
-                                          size_t max_eden_size)
+void CMSAdaptiveSizePolicy::compute_eden_space_size(size_t cur_eden,
+                                                    size_t max_eden_size)
 {
   size_t desired_eden_size = cur_eden;
   size_t eden_limit = max_eden_size;
@@ -978,7 +978,7 @@
   // Printout input
   if (PrintGC && PrintAdaptiveSizePolicy) {
     gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_young_generation_free_space: "
+      "CMSAdaptiveSizePolicy::compute_eden_space_size: "
       "cur_eden " SIZE_FORMAT,
       cur_eden);
   }
@@ -1024,7 +1024,7 @@
 
   if (PrintGC && PrintAdaptiveSizePolicy) {
     gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_young_generation_free_space limits:"
+      "CMSAdaptiveSizePolicy::compute_eden_space_size limits:"
       " desired_eden_size: " SIZE_FORMAT
       " old_eden_size: " SIZE_FORMAT,
       desired_eden_size, cur_eden);
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -436,8 +436,8 @@
 
   size_t generation_alignment() { return _generation_alignment; }
 
-  virtual void compute_young_generation_free_space(size_t cur_eden,
-                                                   size_t max_eden_size);
+  virtual void compute_eden_space_size(size_t cur_eden,
+                                       size_t max_eden_size);
   // Calculates new survivor space size;  returns a new tenuring threshold
   // value. Stores new survivor size in _survivor_size.
   virtual uint compute_survivor_space_size_and_threshold(
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -114,6 +114,14 @@
   }
 }
 
+void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
+  if (_threads != NULL) {
+    for (int i = 0; i < worker_thread_num(); i++) {
+      tc->do_thread(_threads[i]);
+    }
+  }
+}
+
 int ConcurrentG1Refine::thread_num() {
   int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
                                                 : ParallelGCThreads;
@@ -126,3 +134,7 @@
     st->cr();
   }
 }
+
+ConcurrentG1RefineThread * ConcurrentG1Refine::sampling_thread() const {
+  return _threads[worker_thread_num()];
+}
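
Editor's note: the two new accessors above rely on the layout of the _threads array: worker refinement threads occupy slots [0, worker_thread_num()) and the remembered-set sampling thread sits in the slot immediately after them. A standalone sketch of that layout; the types and names below are simplified stand-ins, not the real ConcurrentG1Refine classes.

    #include <cstdio>

    struct RefineThread { const char* name; };

    struct RefineSketch {
      RefineThread** _threads;
      int            _n_worker_threads;

      int worker_thread_num() const { return _n_worker_threads; }

      void worker_threads_do(void (*tc)(RefineThread*)) {
        if (_threads != NULL) {
          for (int i = 0; i < worker_thread_num(); i++) {
            tc(_threads[i]);                       // workers only
          }
        }
      }

      RefineThread* sampling_thread() const {
        return _threads[worker_thread_num()];      // the one extra slot at the end
      }
    };

    static void print_thread(RefineThread* t) { printf("worker: %s\n", t->name); }

    int main() {
      RefineThread w0 = {"refine#0"}, w1 = {"refine#1"}, sampler = {"sampler"};
      RefineThread* threads[] = {&w0, &w1, &sampler};
      RefineSketch cg1r = {threads, 2};
      cg1r.worker_threads_do(print_thread);
      printf("sampling: %s\n", cg1r.sampling_thread()->name);
      return 0;
    }
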
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -35,6 +35,7 @@
 class G1CollectedHeap;
 class G1HotCardCache;
 class G1RemSet;
+class DirtyCardQueue;
 
 class ConcurrentG1Refine: public CHeapObj<mtGC> {
   ConcurrentG1RefineThread** _threads;
@@ -78,9 +79,15 @@
 
   void reinitialize_threads();
 
-  // Iterate over the conc refine threads
+  // Iterate over all concurrent refinement threads
   void threads_do(ThreadClosure *tc);
 
+  // Iterate over all worker refinement threads
+  void worker_threads_do(ThreadClosure * tc);
+
+  // The RS sampling thread
+  ConcurrentG1RefineThread * sampling_thread() const;
+
   static int thread_num();
 
   void print_worker_threads_on(outputStream* st) const;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1417,8 +1417,6 @@
 
       MemoryService::track_memory_usage();
 
-      verify_after_gc();
-
       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
       ref_processor_stw()->verify_no_references_recorded();
 
@@ -1521,6 +1519,8 @@
       _hrs.verify_optional();
       verify_region_sets_optional();
 
+      verify_after_gc();
+
       // Start a new incremental collection set for the next pause
       assert(g1_policy()->collection_set() == NULL, "must be");
       g1_policy()->start_incremental_cset_building();
@@ -3539,6 +3539,14 @@
 }
 
 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
+
+  if (G1SummarizeRSetStats &&
+      (G1SummarizeRSetStatsPeriod > 0) &&
+      // we are at the end of the GC. Total collections has already been increased.
+      ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
+    g1_rem_set()->print_periodic_summary_info();
+  }
+
   // FIXME: what is this about?
   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
   // is set.
@@ -4093,12 +4101,6 @@
     g1mm()->update_sizes();
   }
 
-  if (G1SummarizeRSetStats &&
-      (G1SummarizeRSetStatsPeriod > 0) &&
-      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
-    g1_rem_set()->print_summary_info();
-  }
-
   // It should now be safe to tell the concurrent mark thread to start
   // without its logging output interfering with the logging output
   // that came from the pause.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -593,11 +593,6 @@
   // may not be a humongous - it must fit into a single heap region.
   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
 
-  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
-                                    HeapRegion*    alloc_region,
-                                    bool           par,
-                                    size_t         word_size);
-
   // Ensure that no further allocations can happen in "r", bearing in mind
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
@@ -1733,6 +1728,95 @@
     ParGCAllocBuffer::retire(end_of_gc, retain);
     _retired = true;
   }
+
+  bool is_retired() {
+    return _retired;
+  }
+};
+
+class G1ParGCAllocBufferContainer {
+protected:
+  static int const _priority_max = 2;
+  G1ParGCAllocBuffer* _priority_buffer[_priority_max];
+
+public:
+  G1ParGCAllocBufferContainer(size_t gclab_word_size) {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
+    }
+  }
+
+  ~G1ParGCAllocBufferContainer() {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
+      delete _priority_buffer[pr];
+    }
+  }
+
+  HeapWord* allocate(size_t word_sz) {
+    HeapWord* obj;
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      obj = _priority_buffer[pr]->allocate(word_sz);
+      if (obj != NULL) return obj;
+    }
+    return obj;
+  }
+
+  bool contains(void* addr) {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      if (_priority_buffer[pr]->contains(addr)) return true;
+    }
+    return false;
+  }
+
+  void undo_allocation(HeapWord* obj, size_t word_sz) {
+    bool finish_undo;
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      if (_priority_buffer[pr]->contains(obj)) {
+        _priority_buffer[pr]->undo_allocation(obj, word_sz);
+        finish_undo = true;
+      }
+    }
+    if (!finish_undo) ShouldNotReachHere();
+  }
+
+  size_t words_remaining() {
+    size_t result = 0;
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      result += _priority_buffer[pr]->words_remaining();
+    }
+    return result;
+  }
+
+  size_t words_remaining_in_retired_buffer() {
+    G1ParGCAllocBuffer* retired = _priority_buffer[0];
+    return retired->words_remaining();
+  }
+
+  void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
+    }
+  }
+
+  void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
+    G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
+    retired_and_set->retire(end_of_gc, retain);
+    retired_and_set->set_buf(buf);
+    retired_and_set->set_word_size(word_sz);
+    adjust_priority_order();
+  }
+
+private:
+  void adjust_priority_order() {
+    G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
+
+    int last = _priority_max - 1;
+    for (int pr = 0; pr < last; ++pr) {
+      _priority_buffer[pr] = _priority_buffer[pr + 1];
+    }
+    _priority_buffer[last] = retired_and_set;
+  }
 };
 
 class G1ParScanThreadState : public StackObj {
@@ -1743,9 +1827,9 @@
   CardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocBuffer  _surviving_alloc_buffer;
-  G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  G1ParGCAllocBufferContainer  _surviving_alloc_buffer;
+  G1ParGCAllocBufferContainer  _tenured_alloc_buffer;
+  G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
   ageTable            _age_table;
 
   size_t           _alloc_buffer_waste;
@@ -1809,7 +1893,7 @@
   RefToScanQueue*   refs()            { return _refs;             }
   ageTable*         age_table()       { return &_age_table;       }
 
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
+  G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
     return _alloc_buffers[purpose];
   }
 
@@ -1839,15 +1923,13 @@
     HeapWord* obj = NULL;
     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+      G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
 
       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
       if (buf == NULL) return NULL; // Let caller handle allocation failure.
-      // Otherwise.
-      alloc_buf->set_word_size(gclab_word_size);
-      alloc_buf->set_buf(buf);
+
+      add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer());
+      alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size);
 
       obj = alloc_buf->allocate(word_sz);
       assert(obj != NULL, "buffer was definitely big enough...");
@@ -1959,7 +2041,6 @@
     }
   }
 
-public:
   void trim_queue();
 };
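
Editor's note: G1ParGCAllocBufferContainer above keeps its buffers in priority order: allocate() tries slot 0 first, and update() retires the slot 0 buffer, hands it the freshly allocated space, and rotates it to the back, so the older, partially used buffer is drained before the fresh one. A standalone sketch of that rotation, with Buf as a much-reduced stand-in for G1ParGCAllocBuffer:

    #include <cstdio>

    struct Buf {
      int id;
      int words_remaining;
    };

    static const int PRIORITY_MAX = 2;

    struct Container {
      Buf* _priority_buffer[PRIORITY_MAX];

      void update(Buf* refilled) {
        Buf* retired_and_set = _priority_buffer[0];
        printf("retiring buffer %d with %d words left\n",
               retired_and_set->id, retired_and_set->words_remaining);
        *retired_and_set = *refilled;               // reuse slot 0 for the new buffer
        // adjust_priority_order(): shift the others up, put the fresh buffer last
        for (int pr = 0; pr < PRIORITY_MAX - 1; ++pr) {
          _priority_buffer[pr] = _priority_buffer[pr + 1];
        }
        _priority_buffer[PRIORITY_MAX - 1] = retired_and_set;
      }
    };

    int main() {
      Buf a = {1, 5}, b = {2, 40};
      Container c = {{&a, &b}};
      Buf fresh = {3, 128};
      c.update(&fresh);                             // retires buffer 1, keeps buffer 2 first
      printf("order now: %d then %d\n",
             c._priority_buffer[0]->id, c._priority_buffer[1]->id);
      return 0;
    }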
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -34,6 +34,7 @@
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/intHisto.hpp"
@@ -73,7 +74,8 @@
     _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
     _cg1r(g1->concurrent_g1_refine()),
     _cset_rs_update_cl(NULL),
-    _cards_scanned(NULL), _total_cards_scanned(0)
+    _cards_scanned(NULL), _total_cards_scanned(0),
+    _prev_period_summary()
 {
   _seq_task = new SubTasksDone(NumSeqTasks);
   guarantee(n_workers() > 0, "There should be some workers");
@@ -81,6 +83,7 @@
   for (uint i = 0; i < n_workers(); i++) {
     _cset_rs_update_cl[i] = NULL;
   }
+  _prev_period_summary.initialize(this, n_workers());
 }
 
 G1RemSet::~G1RemSet() {
@@ -697,47 +700,29 @@
   return has_refs_into_cset;
 }
 
-class HRRSStatsIter: public HeapRegionClosure {
-  size_t _occupied;
-  size_t _total_mem_sz;
-  size_t _max_mem_sz;
-  HeapRegion* _max_mem_sz_region;
-public:
-  HRRSStatsIter() :
-    _occupied(0),
-    _total_mem_sz(0),
-    _max_mem_sz(0),
-    _max_mem_sz_region(NULL)
-  {}
+void G1RemSet::print_periodic_summary_info() {
+  G1RemSetSummary current;
+  current.initialize(this, n_workers());
 
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous()) return false;
-    size_t mem_sz = r->rem_set()->mem_size();
-    if (mem_sz > _max_mem_sz) {
-      _max_mem_sz = mem_sz;
-      _max_mem_sz_region = r;
-    }
-    _total_mem_sz += mem_sz;
-    size_t occ = r->rem_set()->occupied();
-    _occupied += occ;
-    return false;
-  }
-  size_t total_mem_sz() { return _total_mem_sz; }
-  size_t max_mem_sz() { return _max_mem_sz; }
-  size_t occupied() { return _occupied; }
-  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
-};
+  _prev_period_summary.subtract_from(&current);
+  print_summary_info(&_prev_period_summary);
 
-class PrintRSThreadVTimeClosure : public ThreadClosure {
-public:
-  virtual void do_thread(Thread *t) {
-    ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
-    gclog_or_tty->print("    %5.2f", crt->vtime_accum());
-  }
-};
+  _prev_period_summary.set(&current);
+}
 
 void G1RemSet::print_summary_info() {
-  G1CollectedHeap* g1 = G1CollectedHeap::heap();
+  G1RemSetSummary current;
+  current.initialize(this, n_workers());
+
+  print_summary_info(&current, " Cumulative RS summary");
+}
+
+void G1RemSet::print_summary_info(G1RemSetSummary * summary, const char * header) {
+  assert(summary != NULL, "just checking");
+
+  if (header != NULL) {
+    gclog_or_tty->print_cr("%s", header);
+  }
 
 #if CARD_REPEAT_HISTO
   gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
@@ -745,52 +730,13 @@
   card_repeat_count.print_on(gclog_or_tty);
 #endif
 
-  gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
-                         _conc_refine_cards);
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  jint tot_processed_buffers =
-    dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
-  gclog_or_tty->print_cr("  Of %d completed buffers:", tot_processed_buffers);
-  gclog_or_tty->print_cr("     %8d (%5.1f%%) by conc RS threads.",
-                dcqs.processed_buffers_rs_thread(),
-                100.0*(float)dcqs.processed_buffers_rs_thread()/
-                (float)tot_processed_buffers);
-  gclog_or_tty->print_cr("     %8d (%5.1f%%) by mutator threads.",
-                dcqs.processed_buffers_mut(),
-                100.0*(float)dcqs.processed_buffers_mut()/
-                (float)tot_processed_buffers);
-  gclog_or_tty->print_cr("  Conc RS threads times(s)");
-  PrintRSThreadVTimeClosure p;
-  gclog_or_tty->print("     ");
-  g1->concurrent_g1_refine()->threads_do(&p);
-  gclog_or_tty->print_cr("");
-
-  HRRSStatsIter blk;
-  g1->heap_region_iterate(&blk);
-  gclog_or_tty->print_cr("  Total heap region rem set sizes = "SIZE_FORMAT"K."
-                         "  Max = "SIZE_FORMAT"K.",
-                         blk.total_mem_sz()/K, blk.max_mem_sz()/K);
-  gclog_or_tty->print_cr("  Static structures = "SIZE_FORMAT"K,"
-                         " free_lists = "SIZE_FORMAT"K.",
-                         HeapRegionRemSet::static_mem_size() / K,
-                         HeapRegionRemSet::fl_mem_size() / K);
-  gclog_or_tty->print_cr("    "SIZE_FORMAT" occupied cards represented.",
-                         blk.occupied());
-  HeapRegion* max_mem_sz_region = blk.max_mem_sz_region();
-  HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set();
-  gclog_or_tty->print_cr("    Max size region = "HR_FORMAT", "
-                         "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
-                         HR_FORMAT_PARAMS(max_mem_sz_region),
-                         (rem_set->mem_size() + K - 1)/K,
-                         (rem_set->occupied() + K - 1)/K);
-  gclog_or_tty->print_cr("    Did %d coarsenings.",
-                         HeapRegionRemSet::n_coarsenings());
+  summary->print_on(gclog_or_tty);
 }
 
 void G1RemSet::prepare_for_verify() {
   if (G1HRRSFlushLogBuffersOnVerify &&
       (VerifyBeforeGC || VerifyAfterGC)
-      &&  !_g1->full_collection()) {
+      &&  (!_g1->full_collection() || G1VerifyRSetsDuringFullGC)) {
     cleanupHRRS();
     _g1->set_refine_cte_cl_concurrency(false);
     if (SafepointSynchronize::is_at_safepoint()) {
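
Editor's note: print_periodic_summary_info above snapshots the cumulative counters into a fresh G1RemSetSummary, turns _prev_period_summary into this period's delta with subtract_from(), prints it, then stores the new snapshot with set() as the baseline for the next period. A standalone sketch of that delta bookkeeping, with Summary reduced to a single invented counter:

    #include <cstdio>

    struct Summary {
      long refined_cards;

      void subtract_from(const Summary* other) {   // this := other - this
        refined_cards = other->refined_cards - refined_cards;
      }
      void set(const Summary* other) {             // this := other
        refined_cards = other->refined_cards;
      }
    };

    static long cumulative_refined_cards = 0;      // stand-in for conc_refine_cards()
    static Summary prev_period_summary = {0};

    static void print_periodic_summary_info() {
      Summary current = {cumulative_refined_cards};
      prev_period_summary.subtract_from(&current); // now holds this period's delta
      printf("refined %ld cards since the last summary\n",
             prev_period_summary.refined_cards);
      prev_period_summary.set(&current);           // baseline for the next period
    }

    int main() {
      cumulative_refined_cards = 100;
      print_periodic_summary_info();               // 100 since start
      cumulative_refined_cards = 250;
      print_periodic_summary_info();               // 150 since the previous call
      return 0;
    }
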
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
 
+#include "gc_implementation/g1/g1RemSetSummary.hpp"
+
 // A G1RemSet provides ways of iterating over pointers into a selected
 // collection set.
 
@@ -37,9 +39,11 @@
 // so that they can be used to update the individual region remsets.
 
 class G1RemSet: public CHeapObj<mtGC> {
+private:
+  G1RemSetSummary _prev_period_summary;
 protected:
   G1CollectedHeap* _g1;
-  unsigned _conc_refine_cards;
+  size_t _conc_refine_cards;
   uint n_workers();
 
 protected:
@@ -66,6 +70,8 @@
   // references into the collection set.
   OopsInHeapRegionClosure** _cset_rs_update_cl;
 
+  // Print the given summary info
+  virtual void print_summary_info(G1RemSetSummary * summary, const char * header = NULL);
 public:
   // This is called to reset dual hash tables after the gc pause
   // is finished and the initial hash table is no longer being
@@ -123,11 +129,18 @@
                            int worker_i,
                            bool check_for_refs_into_cset);
 
-  // Print any relevant summary info.
+  // Print accumulated summary info from the start of the VM.
   virtual void print_summary_info();
 
+  // Print accumulated summary info from the last time called.
+  virtual void print_periodic_summary_info();
+
   // Prepare remembered set for verification.
   virtual void prepare_for_verify();
+
+  size_t conc_refine_cards() const {
+    return _conc_refine_cards;
+  }
 };
 
 class CountNonCleanMemRegionClosure: public MemRegionClosure {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/concurrentG1Refine.hpp"
+#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
+#include "gc_implementation/g1/g1RemSetSummary.hpp"
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
+#include "runtime/thread.inline.hpp"
+
+class GetRSThreadVTimeClosure : public ThreadClosure {
+private:
+  G1RemSetSummary* _summary;
+  uint _counter;
+
+public:
+  GetRSThreadVTimeClosure(G1RemSetSummary * summary) : ThreadClosure(), _summary(summary), _counter(0) {
+    assert(_summary != NULL, "just checking");
+  }
+
+  virtual void do_thread(Thread* t) {
+    ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
+    _summary->set_rs_thread_vtime(_counter, crt->vtime_accum());
+    _counter++;
+  }
+};
+
+void G1RemSetSummary::update() {
+  _num_refined_cards = remset()->conc_refine_cards();
+  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  _num_processed_buf_mutator = dcqs.processed_buffers_mut();
+  _num_processed_buf_rs_threads = dcqs.processed_buffers_rs_thread();
+
+  _num_coarsenings = HeapRegionRemSet::n_coarsenings();
+
+  ConcurrentG1Refine * cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
+  if (_rs_threads_vtimes != NULL) {
+    GetRSThreadVTimeClosure p(this);
+    cg1r->worker_threads_do(&p);
+  }
+  set_sampling_thread_vtime(cg1r->sampling_thread()->vtime_accum());
+}
+
+void G1RemSetSummary::set_rs_thread_vtime(uint thread, double value) {
+  assert(_rs_threads_vtimes != NULL, "just checking");
+  assert(thread < _num_vtimes, "just checking");
+  _rs_threads_vtimes[thread] = value;
+}
+
+double G1RemSetSummary::rs_thread_vtime(uint thread) const {
+  assert(_rs_threads_vtimes != NULL, "just checking");
+  assert(thread < _num_vtimes, "just checking");
+  return _rs_threads_vtimes[thread];
+}
+
+void G1RemSetSummary::initialize(G1RemSet* remset, uint num_workers) {
+  assert(_rs_threads_vtimes == NULL, "just checking");
+  assert(remset != NULL, "just checking");
+
+  _remset = remset;
+  _num_vtimes = num_workers;
+  _rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC);
+  memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
+
+  update();
+}
+
+void G1RemSetSummary::set(G1RemSetSummary* other) {
+  assert(other != NULL, "just checking");
+  assert(remset() == other->remset(), "just checking");
+  assert(_num_vtimes == other->_num_vtimes, "just checking");
+
+  _num_refined_cards = other->num_concurrent_refined_cards();
+
+  _num_processed_buf_mutator = other->num_processed_buf_mutator();
+  _num_processed_buf_rs_threads = other->num_processed_buf_rs_threads();
+
+  _num_coarsenings = other->_num_coarsenings;
+
+  memcpy(_rs_threads_vtimes, other->_rs_threads_vtimes, sizeof(double) * _num_vtimes);
+
+  set_sampling_thread_vtime(other->sampling_thread_vtime());
+}
+
+void G1RemSetSummary::subtract_from(G1RemSetSummary* other) {
+  assert(other != NULL, "just checking");
+  assert(remset() == other->remset(), "just checking");
+  assert(_num_vtimes == other->_num_vtimes, "just checking");
+
+  _num_refined_cards = other->num_concurrent_refined_cards() - _num_refined_cards;
+
+  _num_processed_buf_mutator = other->num_processed_buf_mutator() - _num_processed_buf_mutator;
+  _num_processed_buf_rs_threads = other->num_processed_buf_rs_threads() - _num_processed_buf_rs_threads;
+
+  _num_coarsenings = other->num_coarsenings() - _num_coarsenings;
+
+  for (uint i = 0; i < _num_vtimes; i++) {
+    set_rs_thread_vtime(i, other->rs_thread_vtime(i) - rs_thread_vtime(i));
+  }
+
+  _sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime;
+}
+
+class HRRSStatsIter: public HeapRegionClosure {
+  size_t _occupied;
+  size_t _total_mem_sz;
+  size_t _max_mem_sz;
+  HeapRegion* _max_mem_sz_region;
+public:
+  HRRSStatsIter() :
+    _occupied(0),
+    _total_mem_sz(0),
+    _max_mem_sz(0),
+    _max_mem_sz_region(NULL)
+  {}
+
+  bool doHeapRegion(HeapRegion* r) {
+    size_t mem_sz = r->rem_set()->mem_size();
+    if (mem_sz > _max_mem_sz) {
+      _max_mem_sz = mem_sz;
+      _max_mem_sz_region = r;
+    }
+    _total_mem_sz += mem_sz;
+    size_t occ = r->rem_set()->occupied();
+    _occupied += occ;
+    return false;
+  }
+  size_t total_mem_sz() { return _total_mem_sz; }
+  size_t max_mem_sz() { return _max_mem_sz; }
+  size_t occupied() { return _occupied; }
+  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
+};
+
+double calc_percentage(size_t numerator, size_t denominator) {
+  if (denominator != 0) {
+    return (double)numerator / denominator * 100.0;
+  } else {
+    return 0.0;
+  }
+}
+
+void G1RemSetSummary::print_on(outputStream* out) {
+  out->print_cr("\n Concurrent RS processed "SIZE_FORMAT" cards",
+                num_concurrent_refined_cards());
+  out->print_cr("  Of %d completed buffers:", num_processed_buf_total());
+  out->print_cr("     %8d (%5.1f%%) by concurrent RS threads.",
+                num_processed_buf_rs_threads(),
+                calc_percentage(num_processed_buf_rs_threads(), num_processed_buf_total()));
+  out->print_cr("     %8d (%5.1f%%) by mutator threads.",
+                num_processed_buf_mutator(),
+                calc_percentage(num_processed_buf_mutator(), num_processed_buf_total()));
+  out->print_cr("  Concurrent RS threads times (s)");
+  out->print("     ");
+  for (uint i = 0; i < _num_vtimes; i++) {
+    out->print("    %5.2f", rs_thread_vtime(i));
+  }
+  out->cr();
+  out->print_cr("  Concurrent sampling threads times (s)");
+  out->print_cr("         %5.2f", sampling_thread_vtime());
+
+  HRRSStatsIter blk;
+  G1CollectedHeap::heap()->heap_region_iterate(&blk);
+  out->print_cr("  Total heap region rem set sizes = "SIZE_FORMAT"K."
+                "  Max = "SIZE_FORMAT"K.",
+                blk.total_mem_sz()/K, blk.max_mem_sz()/K);
+  out->print_cr("  Static structures = "SIZE_FORMAT"K,"
+                " free_lists = "SIZE_FORMAT"K.",
+                HeapRegionRemSet::static_mem_size() / K,
+                HeapRegionRemSet::fl_mem_size() / K);
+  out->print_cr("    "SIZE_FORMAT" occupied cards represented.",
+                blk.occupied());
+  HeapRegion* max_mem_sz_region = blk.max_mem_sz_region();
+  HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set();
+  out->print_cr("    Max size region = "HR_FORMAT", "
+                "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
+                HR_FORMAT_PARAMS(max_mem_sz_region),
+                (rem_set->mem_size() + K - 1)/K,
+                (rem_set->occupied() + K - 1)/K);
+
+  out->print_cr("    Did %d coarsenings.", num_coarsenings());
+}
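
For orientation, set() and subtract_from() above form a snapshot-and-delta
protocol: subtract_from(other) overwrites this summary with (other - this).
A minimal usage sketch, assuming a hypothetical caller (the free function and
its name are illustrative only, not code from this changeset):

  // Illustrative sketch only -- not part of this changeset.
  void print_periodic_remset_summary(G1RemSet* rs, G1RemSetSummary* prev,
                                     uint num_workers, outputStream* out) {
    G1RemSetSummary current;
    current.initialize(rs, num_workers);  // snapshot the counters now
    prev->subtract_from(&current);        // prev now holds (current - prev)
    prev->print_on(out);                  // report only this period's activity
    prev->set(&current);                  // keep the snapshot for the next period
  }

Here prev must have been initialize()d earlier with the same remset and worker
count, since set() and subtract_from() assert exactly that.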
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP
+
+#include "utilities/ostream.hpp"
+
+class G1RemSet;
+
+// A G1RemSetSummary manages statistical information about the G1RemSet
+
+class G1RemSetSummary VALUE_OBJ_CLASS_SPEC {
+private:
+  friend class GetRSThreadVTimeClosure;
+
+  G1RemSet* _remset;
+
+  G1RemSet* remset() const {
+    return _remset;
+  }
+
+  size_t _num_refined_cards;
+  size_t _num_processed_buf_mutator;
+  size_t _num_processed_buf_rs_threads;
+
+  size_t _num_coarsenings;
+
+  double* _rs_threads_vtimes;
+  size_t _num_vtimes;
+
+  double _sampling_thread_vtime;
+
+  void set_rs_thread_vtime(uint thread, double value);
+  void set_sampling_thread_vtime(double value) {
+    _sampling_thread_vtime = value;
+  }
+
+  void free_and_null() {
+    if (_rs_threads_vtimes) {
+      FREE_C_HEAP_ARRAY(double, _rs_threads_vtimes, mtGC);
+      _rs_threads_vtimes = NULL;
+      _num_vtimes = 0;
+    }
+  }
+
+  // update this summary with current data from the remset, DCQS and refinement threads
+  void update();
+
+public:
+  G1RemSetSummary() : _remset(NULL), _num_refined_cards(0),
+    _num_processed_buf_mutator(0), _num_processed_buf_rs_threads(0), _num_coarsenings(0),
+    _rs_threads_vtimes(NULL), _num_vtimes(0), _sampling_thread_vtime(0.0f) {
+  }
+
+  ~G1RemSetSummary() {
+    free_and_null();
+  }
+
+  // set the counters in this summary to the values in the other summary
+  void set(G1RemSetSummary* other);
+  // subtract this summary's counters from the other's, i.e. set this summary to (other - this)
+  void subtract_from(G1RemSetSummary* other);
+
+  // initialize and get the first sampling
+  void initialize(G1RemSet* remset, uint num_workers);
+
+  void print_on(outputStream* out);
+
+  double rs_thread_vtime(uint thread) const;
+
+  double sampling_thread_vtime() const {
+    return _sampling_thread_vtime;
+  }
+
+  size_t num_concurrent_refined_cards() const {
+    return _num_refined_cards;
+  }
+
+  size_t num_processed_buf_mutator() const {
+    return _num_processed_buf_mutator;
+  }
+
+  size_t num_processed_buf_rs_threads() const {
+    return _num_processed_buf_rs_threads;
+  }
+
+  size_t num_processed_buf_total() const {
+    return num_processed_buf_mutator() + num_processed_buf_rs_threads();
+  }
+
+  size_t num_coarsenings() const {
+    return _num_coarsenings;
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -329,7 +329,11 @@
                                                                             \
   develop(bool, G1EvacuationFailureALotDuringMixedGC, true,                 \
           "Force use of evacuation failure handling during mixed "          \
-          "evacuation pauses")
+          "evacuation pauses")                                              \
+                                                                            \
+  diagnostic(bool, G1VerifyRSetsDuringFullGC, false,                        \
+             "If true, perform verification of each heap region's "         \
+             "remembered set when verifying the heap during a full GC.")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -139,7 +139,7 @@
         _n_failures++;
       }
 
-      if (!_g1h->full_collection()) {
+      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
         HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
         HeapRegion* to   = _g1h->heap_region_containing(obj);
         if (from != NULL && to != NULL &&
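The new diagnostic flag ties into the verification closure above: the
remembered set consistency check, which was previously skipped entirely during
a full collection, now also runs when G1VerifyRSetsDuringFullGC is set. Since
it is a diagnostic flag it has to be unlocked first, so a typical invocation
would combine -XX:+UnlockDiagnosticVMOptions and -XX:+G1VerifyRSetsDuringFullGC
with one of the existing heap verification options such as -XX:+VerifyAfterGC;
the choice of accompanying verification flags is a usage suggestion, not
something this changeset prescribes.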
--- a/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -585,8 +585,7 @@
   size_policy->avg_young_live()->sample(used());
   size_policy->avg_eden_live()->sample(eden()->used());
 
-  size_policy->compute_young_generation_free_space(eden()->capacity(),
-                                                   max_gen_size());
+  size_policy->compute_eden_space_size(eden()->capacity(), max_gen_size());
 
   resize(size_policy->calculated_eden_size_in_bytes(),
          size_policy->calculated_survivor_size_in_bytes());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -116,7 +116,7 @@
 
   // The alignment used for eden and survivors within the young gen
   // and for boundary between young gen and old gen.
-  size_t intra_heap_alignment() const { return 64 * K; }
+  size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }
 
   size_t capacity() const;
   size_t used() const;
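A quick unit check on the fix above: the 64 * K constant is now scaled by
HeapWordSize, so the returned alignment is 64 K heap words rather than 64 KB
of raw bytes -- 512 KB on a 64-bit VM (HeapWordSize == 8) and 256 KB on
32-bit. The concrete word size is an assumption about the platform, not
something stated in the diff itself.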
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -120,6 +120,9 @@
 
     case system_dictionary:
       SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
+      break;
+
+    case class_loader_data:
       ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true);
       break;
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -98,7 +98,8 @@
     management            = 6,
     jvmti                 = 7,
     system_dictionary     = 8,
-    code_cache            = 9
+    class_loader_data     = 9,
+    code_cache            = 10
   };
  private:
   RootType _root_type;
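The enum change above works together with the pcTasks.cpp hunk just before it:
class loader data roots, which used to be walked inside the system_dictionary
case, get their own root type so that a separate
MarkFromRootsTask(MarkFromRootsTask::class_loader_data) can be enqueued and
claimed by a different GC worker; the matching enqueue appears later in this
changeset in psParallelCompact.cpp. Note that code_cache shifts from 9 to 10,
so anything relying on the old numeric values of this enum would need the
same renumbering.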
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -194,7 +194,7 @@
 
 // If this is not a full GC, only test and modify the young generation.
 
-void PSAdaptiveSizePolicy::compute_generation_free_space(
+void PSAdaptiveSizePolicy::compute_generations_free_space(
                                            size_t young_live,
                                            size_t eden_live,
                                            size_t old_live,
@@ -729,7 +729,7 @@
 
   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print_cr(
-      "PSAdaptiveSizePolicy::compute_old_gen_free_space "
+      "PSAdaptiveSizePolicy::adjust_promo_for_pause_time "
       "adjusting gen sizes for major pause (avg %f goal %f). "
       "desired_promo_size " SIZE_FORMAT " promo delta " SIZE_FORMAT,
       _avg_major_pause->average(), gc_pause_goal_sec(),
@@ -786,7 +786,7 @@
 
   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print_cr(
-      "PSAdaptiveSizePolicy::compute_eden_space_size "
+      "PSAdaptiveSizePolicy::adjust_eden_for_pause_time "
       "adjusting gen sizes for major pause (avg %f goal %f). "
       "desired_eden_size " SIZE_FORMAT " eden delta " SIZE_FORMAT,
       _avg_major_pause->average(), gc_pause_goal_sec(),
@@ -1001,7 +1001,7 @@
 
   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print_cr(
-      "AdaptiveSizePolicy::compute_generation_free_space "
+      "AdaptiveSizePolicy::adjust_promo_for_footprint "
       "adjusting tenured gen for footprint. "
       "starting promo size " SIZE_FORMAT
       " reduced promo size " SIZE_FORMAT,
@@ -1025,7 +1025,7 @@
 
   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print_cr(
-      "AdaptiveSizePolicy::compute_generation_free_space "
+      "AdaptiveSizePolicy::adjust_eden_for_footprint "
       "adjusting eden for footprint. "
       " starting eden size " SIZE_FORMAT
       " reduced eden size " SIZE_FORMAT
@@ -1280,7 +1280,7 @@
 
   if (PrintAdaptiveSizePolicy) {
     gclog_or_tty->print(
-                  "AdaptiveSizePolicy::compute_survivor_space_size_and_thresh:"
+                  "AdaptiveSizePolicy::update_averages:"
                   "  survived: "  SIZE_FORMAT
                   "  promoted: "  SIZE_FORMAT
                   "  overflow: %s",
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -344,13 +344,13 @@
   // Takes current used space in all generations as input, as well
   // as an indication if a full gc has just been performed, for use
   // in deciding if an OOM error should be thrown.
-  void compute_generation_free_space(size_t young_live,
-                                     size_t eden_live,
-                                     size_t old_live,
-                                     size_t cur_eden,  // current eden in bytes
-                                     size_t max_old_gen_size,
-                                     size_t max_eden_size,
-                                     bool   is_full_gc);
+  void compute_generations_free_space(size_t young_live,
+                                      size_t eden_live,
+                                      size_t old_live,
+                                      size_t cur_eden,  // current eden in bytes
+                                      size_t max_old_gen_size,
+                                      size_t max_eden_size,
+                                      bool   is_full_gc);
 
   void compute_eden_space_size(size_t young_live,
                                size_t eden_live,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -119,7 +119,7 @@
       ps_size_policy()->change_old_gen_for_min_pauses());
   }
 
-  // compute_generation_free_space() statistics
+  // compute_generations_free_space() statistics
 
   inline void update_avg_major_pause() {
     _avg_major_pause->set_value(
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -290,13 +290,13 @@
         // Used for diagnostics
         size_policy->clear_generation_free_space_flags();
 
-        size_policy->compute_generation_free_space(young_live,
-                                                   eden_live,
-                                                   old_live,
-                                                   cur_eden,
-                                                   max_old_gen_size,
-                                                   max_eden_size,
-                                                   true /* full gc*/);
+        size_policy->compute_generations_free_space(young_live,
+                                                    eden_live,
+                                                    old_live,
+                                                    cur_eden,
+                                                    max_old_gen_size,
+                                                    max_eden_size,
+                                                    true /* full gc*/);
 
         size_policy->check_gc_overhead_limit(young_live,
                                              eden_live,
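Taken together with the asParNewGeneration.cpp and psAdaptiveSizePolicy.hpp
hunks earlier in this changeset, the rename separates the sizing entry points
by caller: minor collections call compute_eden_space_size(), while full
collections (psMarkSweep here and psParallelCompact below) call the renamed
compute_generations_free_space(). The log-message edits in
psAdaptiveSizePolicy.cpp simply bring the printed names back in line with the
methods that actually emit them.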
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -59,13 +59,25 @@
 #include <math.h>
 
 // All sizes are in HeapWords.
-const size_t ParallelCompactData::Log2RegionSize  = 9; // 512 words
+const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
 const size_t ParallelCompactData::RegionSizeBytes =
   RegionSize << LogHeapWordSize;
 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
-const size_t ParallelCompactData::RegionAddrMask  = ~RegionAddrOffsetMask;
+const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
+
+const size_t ParallelCompactData::Log2BlockSize   = 7; // 128 words
+const size_t ParallelCompactData::BlockSize       = (size_t)1 << Log2BlockSize;
+const size_t ParallelCompactData::BlockSizeBytes  =
+  BlockSize << LogHeapWordSize;
+const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
+const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
+const size_t ParallelCompactData::BlockAddrMask       = ~BlockAddrOffsetMask;
+
+const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
+const size_t ParallelCompactData::Log2BlocksPerRegion =
+  Log2RegionSize - Log2BlockSize;
 
 const ParallelCompactData::RegionData::region_sz_t
 ParallelCompactData::RegionData::dc_shift = 27;
@@ -359,6 +371,10 @@
   _reserved_byte_size = 0;
   _region_data = 0;
   _region_count = 0;
+
+  _block_vspace = 0;
+  _block_data = 0;
+  _block_count = 0;
 }
 
 bool ParallelCompactData::initialize(MemRegion covered_region)
@@ -372,8 +388,7 @@
   assert((region_size & RegionSizeOffsetMask) == 0,
          "region size not a multiple of RegionSize");
 
-  bool result = initialize_region_data(region_size);
-
+  bool result = initialize_region_data(region_size) && initialize_block_data();
   return result;
 }
 
@@ -418,17 +433,36 @@
   return false;
 }
 
+bool ParallelCompactData::initialize_block_data()
+{
+  assert(_region_count != 0, "region data must be initialized first");
+  const size_t count = _region_count << Log2BlocksPerRegion;
+  _block_vspace = create_vspace(count, sizeof(BlockData));
+  if (_block_vspace != 0) {
+    _block_data = (BlockData*)_block_vspace->reserved_low_addr();
+    _block_count = count;
+    return true;
+  }
+  return false;
+}
+
 void ParallelCompactData::clear()
 {
   memset(_region_data, 0, _region_vspace->committed_size());
+  memset(_block_data, 0, _block_vspace->committed_size());
 }
 
 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
   assert(beg_region <= _region_count, "beg_region out of range");
   assert(end_region <= _region_count, "end_region out of range");
+  assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");
 
   const size_t region_cnt = end_region - beg_region;
   memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
+
+  const size_t beg_block = beg_region * BlocksPerRegion;
+  const size_t block_cnt = region_cnt * BlocksPerRegion;
+  memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
 }
 
 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
@@ -707,49 +741,48 @@
 
 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
   assert(addr != NULL, "Should detect NULL oop earlier");
-  assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
-#ifdef ASSERT
-  if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
-    gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
-  }
-#endif
-  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
+  assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
+  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
 
   // Region covering the object.
-  size_t region_index = addr_to_region_idx(addr);
-  const RegionData* const region_ptr = region(region_index);
-  HeapWord* const region_addr = region_align_down(addr);
-
-  assert(addr < region_addr + RegionSize, "Region does not cover object");
-  assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
-
+  RegionData* const region_ptr = addr_to_region_ptr(addr);
   HeapWord* result = region_ptr->destination();
 
-  // If all the data in the region is live, then the new location of the object
-  // can be calculated from the destination of the region plus the offset of the
-  // object in the region.
+  // If the entire Region is live, the new location is region->destination + the
+  // offset of the object within the Region.
+
+  // Run some performance tests to determine if this special case pays off.  It
+  // is worth it for pointers into the dense prefix.  If the optimization to
+  // avoid pointer updates in regions that only point to the dense prefix is
+  // ever implemented, this should be revisited.
   if (region_ptr->data_size() == RegionSize) {
-    result += pointer_delta(addr, region_addr);
-    DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
+    result += region_offset(addr);
     return result;
   }
 
-  // The new location of the object is
-  //    region destination +
-  //    size of the partial object extending onto the region +
-  //    sizes of the live objects in the Region that are to the left of addr
-  const size_t partial_obj_size = region_ptr->partial_obj_size();
-  HeapWord* const search_start = region_addr + partial_obj_size;
+  // Otherwise, the new location is region->destination + block offset + the
+  // number of live words in the Block that are (a) to the left of addr and (b)
+  // due to objects that start in the Block.
+
+  // Fill in the block table if necessary.  This is unsynchronized, so multiple
+  // threads may fill the block table for a region (harmless, since it is
+  // idempotent).
+  if (!region_ptr->blocks_filled()) {
+    PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
+    region_ptr->set_blocks_filled();
+  }
+
+  HeapWord* const search_start = block_align_down(addr);
+  const size_t block_offset = addr_to_block_ptr(addr)->offset();
 
   const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
-  size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
-
-  result += partial_obj_size + live_to_left;
-  DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
+  const size_t live = bitmap->live_words_in_range(search_start, oop(addr));
+  result += block_offset + live;
+  DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
   return result;
 }
 
-#ifdef  ASSERT
+#ifdef ASSERT
 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
 {
   const size_t* const beg = (const size_t*)vspace->committed_low_addr();
@@ -762,16 +795,10 @@
 void ParallelCompactData::verify_clear()
 {
   verify_clear(_region_vspace);
+  verify_clear(_block_vspace);
 }
 #endif  // #ifdef ASSERT
 
-#ifdef NOT_PRODUCT
-ParallelCompactData::RegionData* debug_region(size_t region_index) {
-  ParallelCompactData& sd = PSParallelCompact::summary_data();
-  return sd.region(region_index);
-}
-#endif
-
 elapsedTimer        PSParallelCompact::_accumulated_time;
 unsigned int        PSParallelCompact::_total_invocations = 0;
 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
@@ -1961,11 +1988,6 @@
                                       maximum_heap_compaction);
 }
 
-bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
-  size_t addr_region_index = addr_to_region_idx(addr);
-  return region_index == addr_region_index;
-}
-
 // This method contains no policy. You should probably
 // be calling invoke() instead.
 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
@@ -2101,13 +2123,13 @@
         // Used for diagnostics
         size_policy->clear_generation_free_space_flags();
 
-        size_policy->compute_generation_free_space(young_live,
-                                                   eden_live,
-                                                   old_live,
-                                                   cur_eden,
-                                                   max_old_gen_size,
-                                                   max_eden_size,
-                                                   true /* full gc*/);
+        size_policy->compute_generations_free_space(young_live,
+                                                    eden_live,
+                                                    old_live,
+                                                    cur_eden,
+                                                    max_old_gen_size,
+                                                    max_eden_size,
+                                                    true /* full gc*/);
 
         size_policy->check_gc_overhead_limit(young_live,
                                              eden_live,
@@ -2338,6 +2360,7 @@
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
+    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
 
@@ -2626,6 +2649,41 @@
   }
 }
 
+#ifdef ASSERT
+// Write a histogram of the number of times the block table was filled for a
+// region.
+void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
+{
+  if (!TraceParallelOldGCCompactionPhase) return;
+
+  typedef ParallelCompactData::RegionData rd_t;
+  ParallelCompactData& sd = summary_data();
+
+  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
+    MutableSpace* const spc = _space_info[id].space();
+    if (spc->bottom() != spc->top()) {
+      const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
+      HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
+      const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
+
+      size_t histo[5] = { 0, 0, 0, 0, 0 };
+      const size_t histo_len = sizeof(histo) / sizeof(size_t);
+      const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
+
+      for (const rd_t* cur = beg; cur < end; ++cur) {
+        ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
+      }
+      out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
+      for (size_t i = 0; i < histo_len; ++i) {
+        out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
+                   histo[i], 100.0 * histo[i] / region_cnt);
+      }
+      out->cr();
+    }
+  }
+}
+#endif // #ifdef ASSERT
+
 void PSParallelCompact::compact() {
   // trace("5");
   TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
@@ -2665,6 +2723,8 @@
       update_deferred_objects(cm, SpaceId(id));
     }
   }
+
+  DEBUG_ONLY(write_block_fill_histogram(gclog_or_tty));
 }
 
 #ifdef  ASSERT
@@ -3129,6 +3189,57 @@
   } while (true);
 }
 
+void PSParallelCompact::fill_blocks(size_t region_idx)
+{
+  // Fill in the block table elements for the specified region.  Each block
+  // table element holds the number of live words in the region that are to the
+  // left of the first object that starts in the block.  Thus only blocks in
+  // which an object starts need to be filled.
+  //
+  // The algorithm scans the section of the bitmap that corresponds to the
+  // region, keeping a running total of the live words.  When an object start is
+  // found, if it's the first to start in the block that contains it, the
+  // current total is written to the block table element.
+  const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
+  const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
+  const size_t RegionSize = ParallelCompactData::RegionSize;
+
+  ParallelCompactData& sd = summary_data();
+  const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
+  if (partial_obj_size >= RegionSize) {
+    return; // No objects start in this region.
+  }
+
+  // Ensure the first loop iteration decides that the block has changed.
+  size_t cur_block = sd.block_count();
+
+  const ParMarkBitMap* const bitmap = mark_bitmap();
+
+  const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
+  assert((size_t)1 << Log2BitsPerBlock ==
+         bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");
+
+  size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
+  const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
+  size_t live_bits = bitmap->words_to_bits(partial_obj_size);
+  beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
+  while (beg_bit < range_end) {
+    const size_t new_block = beg_bit >> Log2BitsPerBlock;
+    if (new_block != cur_block) {
+      cur_block = new_block;
+      sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
+    }
+
+    const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
+    if (end_bit < range_end - 1) {
+      live_bits += end_bit - beg_bit + 1;
+      beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
+    } else {
+      return;
+    }
+  }
+}
+
 void
 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
   const MutableSpace* sp = space(space_id);
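To restate the new forwarding-address computation in one place (a condensed
sketch of calc_new_pointer() above, for reference only -- this is not
additional code in the changeset): a fully live region needs only the region
destination plus the in-region offset; otherwise the block table supplies the
live words to the left of the object's block and the mark bitmap is scanned
only within one block.

  // Sketch; mirrors ParallelCompactData::calc_new_pointer() above.
  HeapWord* forwardee_sketch(ParallelCompactData& sd,
                             const ParMarkBitMap* bitmap, HeapWord* addr) {
    ParallelCompactData::RegionData* region = sd.addr_to_region_ptr(addr);
    HeapWord* dest = region->destination();
    if (region->data_size() == ParallelCompactData::RegionSize) {
      return dest + sd.region_offset(addr);            // fully live region
    }
    // Live words in the region to the left of this block; filled lazily by
    // PSParallelCompact::fill_blocks().
    size_t before_block = sd.addr_to_block_ptr(addr)->offset();
    // Bitmap scan limited to the object's 128-word block.
    size_t in_block = bitmap->live_words_in_range(sd.block_align_down(addr),
                                                  oop(addr));
    return dest + before_block + in_block;
  }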
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -220,6 +220,17 @@
   // Mask for the bits in a pointer to get the address of the start of a region.
   static const size_t RegionAddrMask;
 
+  static const size_t Log2BlockSize;
+  static const size_t BlockSize;
+  static const size_t BlockSizeBytes;
+
+  static const size_t BlockSizeOffsetMask;
+  static const size_t BlockAddrOffsetMask;
+  static const size_t BlockAddrMask;
+
+  static const size_t BlocksPerRegion;
+  static const size_t Log2BlocksPerRegion;
+
   class RegionData
   {
   public:
@@ -272,6 +283,12 @@
     inline uint destination_count() const;
     inline uint destination_count_raw() const;
 
+    // Whether the block table for this region has been filled.
+    inline bool blocks_filled() const;
+
+    // Number of times the block table was filled.
+    DEBUG_ONLY(inline size_t blocks_filled_count() const;)
+
     // The location of the java heap data that corresponds to this region.
     inline HeapWord* data_location() const;
 
@@ -296,6 +313,7 @@
     void set_partial_obj_size(size_t words)    {
       _partial_obj_size = (region_sz_t) words;
     }
+    inline void set_blocks_filled();
 
     inline void set_destination_count(uint count);
     inline void set_live_obj_size(size_t words);
@@ -328,7 +346,11 @@
     HeapWord*            _partial_obj_addr;
     region_sz_t          _partial_obj_size;
     region_sz_t volatile _dc_and_los;
+    bool                 _blocks_filled;
+
 #ifdef ASSERT
+    size_t               _blocks_filled_count;   // Number of block table fills.
+
     // These enable optimizations that are only partially implemented.  Use
     // debug builds to prevent the code fragments from breaking.
     HeapWord*            _data_location;
@@ -337,11 +359,26 @@
 
 #ifdef ASSERT
    public:
-    uint            _pushed;   // 0 until region is pushed onto a worker's stack
+    uint                 _pushed;   // 0 until region is pushed onto a stack
    private:
 #endif
   };
 
+  // "Blocks" allow shorter sections of the bitmap to be searched.  Each Block
+  // holds an offset, which is the amount of live data in the Region to the left
+  // of the first live object that starts in the Block.
+  class BlockData
+  {
+  public:
+    typedef unsigned short int blk_ofs_t;
+
+    blk_ofs_t offset() const    { return _offset; }
+    void set_offset(size_t val) { _offset = (blk_ofs_t)val; }
+
+  private:
+    blk_ofs_t _offset;
+  };
+
 public:
   ParallelCompactData();
   bool initialize(MemRegion covered_region);
@@ -353,8 +390,9 @@
   inline RegionData* region(size_t region_idx) const;
   inline size_t     region(const RegionData* const region_ptr) const;
 
-  // Returns true if the given address is contained within the region
-  bool region_contains(size_t region_index, HeapWord* addr);
+  size_t block_count() const { return _block_count; }
+  inline BlockData* block(size_t block_idx) const;
+  inline size_t     block(const BlockData* block_ptr) const;
 
   void add_obj(HeapWord* addr, size_t len);
   void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
@@ -394,11 +432,24 @@
   inline HeapWord*  region_align_up(HeapWord* addr) const;
   inline bool       is_region_aligned(HeapWord* addr) const;
 
+  // Analogous to region_offset() for blocks.
+  size_t     block_offset(const HeapWord* addr) const;
+  size_t     addr_to_block_idx(const HeapWord* addr) const;
+  size_t     addr_to_block_idx(const oop obj) const {
+    return addr_to_block_idx((HeapWord*) obj);
+  }
+  inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
+  inline HeapWord*  block_to_addr(size_t block) const;
+  inline size_t     region_to_block_idx(size_t region) const;
+
+  inline HeapWord*  block_align_down(HeapWord* addr) const;
+  inline HeapWord*  block_align_up(HeapWord* addr) const;
+  inline bool       is_block_aligned(HeapWord* addr) const;
+
   // Return the address one past the end of the partial object.
   HeapWord* partial_obj_end(size_t region_idx) const;
 
-  // Return the new location of the object p after the
-  // the compaction.
+  // Return the location of the object after compaction.
   HeapWord* calc_new_pointer(HeapWord* addr);
 
   HeapWord* calc_new_pointer(oop p) {
@@ -411,6 +462,7 @@
 #endif  // #ifdef ASSERT
 
 private:
+  bool initialize_block_data();
   bool initialize_region_data(size_t region_size);
   PSVirtualSpace* create_vspace(size_t count, size_t element_size);
 
@@ -424,6 +476,10 @@
   size_t          _reserved_byte_size;
   RegionData*     _region_data;
   size_t          _region_count;
+
+  PSVirtualSpace* _block_vspace;
+  BlockData*      _block_data;
+  size_t          _block_count;
 };
 
 inline uint
@@ -438,6 +494,28 @@
   return destination_count_raw() >> dc_shift;
 }
 
+inline bool
+ParallelCompactData::RegionData::blocks_filled() const
+{
+  return _blocks_filled;
+}
+
+#ifdef ASSERT
+inline size_t
+ParallelCompactData::RegionData::blocks_filled_count() const
+{
+  return _blocks_filled_count;
+}
+#endif // #ifdef ASSERT
+
+inline void
+ParallelCompactData::RegionData::set_blocks_filled()
+{
+  _blocks_filled = true;
+  // Debug builds count the number of times the table was filled.
+  DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
+}
+
 inline void
 ParallelCompactData::RegionData::set_destination_count(uint count)
 {
@@ -532,6 +610,12 @@
   return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
 }
 
+inline ParallelCompactData::BlockData*
+ParallelCompactData::block(size_t n) const {
+  assert(n < block_count(), "bad arg");
+  return _block_data + n;
+}
+
 inline size_t
 ParallelCompactData::region_offset(const HeapWord* addr) const
 {
@@ -598,6 +682,63 @@
   return region_offset(addr) == 0;
 }
 
+inline size_t
+ParallelCompactData::block_offset(const HeapWord* addr) const
+{
+  assert(addr >= _region_start, "bad addr");
+  assert(addr <= _region_end, "bad addr");
+  return (size_t(addr) & BlockAddrOffsetMask) >> LogHeapWordSize;
+}
+
+inline size_t
+ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
+{
+  assert(addr >= _region_start, "bad addr");
+  assert(addr <= _region_end, "bad addr");
+  return pointer_delta(addr, _region_start) >> Log2BlockSize;
+}
+
+inline ParallelCompactData::BlockData*
+ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
+{
+  return block(addr_to_block_idx(addr));
+}
+
+inline HeapWord*
+ParallelCompactData::block_to_addr(size_t block) const
+{
+  assert(block < _block_count, "block out of range");
+  return _region_start + (block << Log2BlockSize);
+}
+
+inline size_t
+ParallelCompactData::region_to_block_idx(size_t region) const
+{
+  return region << Log2BlocksPerRegion;
+}
+
+inline HeapWord*
+ParallelCompactData::block_align_down(HeapWord* addr) const
+{
+  assert(addr >= _region_start, "bad addr");
+  assert(addr < _region_end + RegionSize, "bad addr");
+  return (HeapWord*)(size_t(addr) & BlockAddrMask);
+}
+
+inline HeapWord*
+ParallelCompactData::block_align_up(HeapWord* addr) const
+{
+  assert(addr >= _region_start, "bad addr");
+  assert(addr <= _region_end, "bad addr");
+  return block_align_down(addr + BlockSizeOffsetMask);
+}
+
+inline bool
+ParallelCompactData::is_block_aligned(HeapWord* addr) const
+{
+  return block_offset(addr) == 0;
+}
+
 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
 // do_addr() method.
 //
@@ -775,6 +916,7 @@
   // Convenient access to type names.
   typedef ParMarkBitMap::idx_t idx_t;
   typedef ParallelCompactData::RegionData RegionData;
+  typedef ParallelCompactData::BlockData BlockData;
 
   typedef enum {
     old_space_id, eden_space_id,
@@ -962,6 +1104,8 @@
   // Adjust addresses in roots.  Does not adjust addresses in heap.
   static void adjust_roots();
 
+  DEBUG_ONLY(static void write_block_fill_histogram(outputStream* const out);)
+
   // Move objects to new locations.
   static void compact_perm(ParCompactionManager* cm);
   static void compact();
@@ -1128,6 +1272,9 @@
     fill_region(cm, region);
   }
 
+  // Fill in the block table for the specified region.
+  static void fill_blocks(size_t region_idx);
+
   // Update the deferred objects in the space.
   static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
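Some arithmetic implied by the new constants (assuming the usual 8-byte
HeapWordSize of a 64-bit VM, which the diff does not itself state):
Log2RegionSize = 16 gives regions of 64 K words (512 KB), up from the previous
512 words, and Log2BlockSize = 7 gives blocks of 128 words (1 KB), so
BlocksPerRegion = 2^(16 - 7) = 512. The 16-bit blk_ofs_t in BlockData is
sufficient because a block offset counts live words to the left within a
single region, which is at most RegionSize - 1 = 65535 and therefore fits
exactly in an unsigned short.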
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -42,7 +42,7 @@
     if (o->is_forwarded()) {
       o = o->forwardee();
       // Card mark
-      if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
+      if (PSScavenge::is_obj_in_young(o)) {
         PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
       }
       oopDesc::encode_store_heap_oop_not_null(p, o);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -61,6 +61,7 @@
 bool                       PSScavenge::_survivor_overflow = false;
 uint                       PSScavenge::_tenuring_threshold = 0;
 HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
+uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
 elapsedTimer               PSScavenge::_accumulated_time;
 Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
 Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
@@ -71,7 +72,7 @@
 class PSIsAliveClosure: public BoolObjectClosure {
 public:
   bool do_object_b(oop p) {
-    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
+    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
   }
 };
 
@@ -408,6 +409,7 @@
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
+      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
 
@@ -449,11 +451,9 @@
       reference_processor()->enqueue_discovered_references(NULL);
     }
 
-      // Unlink any dead interned Strings
-      StringTable::unlink(&_is_alive_closure);
-      // Process the remaining live ones
-      PSScavengeRootsClosure root_closure(promotion_manager);
-      StringTable::oops_do(&root_closure);
+    // Unlink any dead interned Strings and process the remaining live ones.
+    PSScavengeRootsClosure root_closure(promotion_manager);
+    StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
 
     // Finally, flush the promotion_manager's labs, and deallocate its stacks.
     PSPromotionManager::post_scavenge();
@@ -816,7 +816,7 @@
   // Set boundary between young_gen and old_gen
   assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
          "old above young");
-  _young_generation_boundary = young_gen->eden_space()->bottom();
+  set_young_generation_boundary(young_gen->eden_space()->bottom());
 
   // Initialize ref handling object for scavenging.
   MemRegion mr = young_gen->reserved();
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -62,19 +62,22 @@
 
  protected:
   // Flags/counters
-  static ReferenceProcessor* _ref_processor;        // Reference processor for scavenging.
-  static PSIsAliveClosure    _is_alive_closure;     // Closure used for reference processing
-  static CardTableExtension* _card_table;           // We cache the card table for fast access.
-  static bool                _survivor_overflow;    // Overflow this collection
-  static uint                _tenuring_threshold;   // tenuring threshold for next scavenge
-  static elapsedTimer        _accumulated_time;     // total time spent on scavenge
-  static HeapWord*           _young_generation_boundary; // The lowest address possible for the young_gen.
-                                                         // This is used to decide if an oop should be scavenged,
-                                                         // cards should be marked, etc.
+  static ReferenceProcessor*  _ref_processor;        // Reference processor for scavenging.
+  static PSIsAliveClosure     _is_alive_closure;     // Closure used for reference processing
+  static CardTableExtension*  _card_table;           // We cache the card table for fast access.
+  static bool                 _survivor_overflow;    // Overflow this collection
+  static uint                 _tenuring_threshold;   // tenuring threshold for next scavenge
+  static elapsedTimer         _accumulated_time;     // total time spent on scavenge
+  // The lowest address possible for the young_gen.
+  // This is used to decide if an oop should be scavenged,
+  // cards should be marked, etc.
+  static HeapWord*            _young_generation_boundary;
+  // Used to optimize compressed oops young gen boundary checking.
+  static uintptr_t            _young_generation_boundary_compressed;
   static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion
   static Stack<oop, mtGC>     _preserved_oop_stack;  // List of oops that need their mark restored.
-  static CollectorCounters*      _counters;          // collector performance counters
-  static bool                    _promotion_failed;
+  static CollectorCounters*   _counters;             // collector performance counters
+  static bool                 _promotion_failed;
 
   static void clean_up_failed_promotion();
 
@@ -112,6 +115,9 @@
   // boundary moves, _young_generation_boundary must be reset
   static void set_young_generation_boundary(HeapWord* v) {
     _young_generation_boundary = v;
+    if (UseCompressedOops) {
+      _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
+    }
   }
 
   // Called by parallelScavengeHeap to init the tenuring threshold
@@ -140,11 +146,19 @@
   static void copy_and_push_safe_barrier_from_klass(PSPromotionManager* pm, oop* p);
 
   // Is an object in the young generation
-  // This assumes that the HeapWord argument is in the heap,
+  // This assumes that 'o' is in the heap,
   // so it only checks one side of the complete predicate.
+
+  inline static bool is_obj_in_young(oop o) {
+    return (HeapWord*)o >= _young_generation_boundary;
+  }
+
+  inline static bool is_obj_in_young(narrowOop o) {
+    return (uintptr_t)o >= _young_generation_boundary_compressed;
+  }
+
   inline static bool is_obj_in_young(HeapWord* o) {
-    const bool result = (o >= _young_generation_boundary);
-    return result;
+    return o >= _young_generation_boundary;
   }
 };
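A short justification for the narrowOop overload above, assuming the standard
compressed-oop encoding narrow = (address - heap_base) >> shift: the encoding
is monotonically non-decreasing in the address and loses no information for
object-aligned addresses, and the boundary stored by
set_young_generation_boundary() is the (aligned) eden bottom, so
(uintptr_t)o >= _young_generation_boundary_compressed holds exactly when the
decoded address is at or above _young_generation_boundary. The fast path
therefore avoids a decode per test. This is also why should_scavenge() in
psScavenge.inline.hpp can drop its explicit null check below: a null heap oop
encodes to 0 (or compares as NULL), which always falls below the young
generation boundary.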
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -39,9 +39,7 @@
 
 template <class T> inline bool PSScavenge::should_scavenge(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(heap_oop)) return false;
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-  return PSScavenge::is_obj_in_young((HeapWord*)obj);
+  return PSScavenge::is_obj_in_young(heap_oop);
 }
 
 template <class T>
@@ -94,7 +92,7 @@
   // or from metadata.
   if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
       Universe::heap()->is_in_reserved(p)) {
-    if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
+    if (PSScavenge::is_obj_in_young(new_obj)) {
       card_table()->inline_write_ref_field_gc(p, new_obj);
     }
   }
@@ -147,7 +145,7 @@
       }
       oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 
-      if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
+      if (PSScavenge::is_obj_in_young(new_obj)) {
         do_klass_barrier();
       }
     }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -79,14 +79,15 @@
       break;
 
     case system_dictionary:
-      {
       SystemDictionary::oops_do(&roots_closure);
+      break;
 
-        // Move this to another root_type?
-        PSScavengeKlassClosure klass_closure(pm);
-        ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false);
-      }
-      break;
+    case class_loader_data:
+    {
+      PSScavengeKlassClosure klass_closure(pm);
+      ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false);
+    }
+    break;
 
     case management:
       Management::oops_do(&roots_closure);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -59,9 +59,10 @@
     object_synchronizer   = 4,
     flat_profiler         = 5,
     system_dictionary     = 6,
-    management            = 7,
-    jvmti                 = 8,
-    code_cache            = 9
+    class_loader_data     = 7,
+    management            = 8,
+    jvmti                 = 9,
+    code_cache            = 10
   };
  private:
   RootType _root_type;
--- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -467,7 +467,7 @@
       (free_in_old_gen < (size_t) mem_free_old_limit &&
        free_in_eden < (size_t) mem_free_eden_limit))) {
     gclog_or_tty->print_cr(
-          "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
+          "PSAdaptiveSizePolicy::check_gc_overhead_limit:"
           " promo_limit: " SIZE_FORMAT
           " max_eden_size: " SIZE_FORMAT
           " total_free_limit: " SIZE_FORMAT
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -158,7 +158,7 @@
   // Fills in the unallocated portion of the buffer with a garbage object.
   // If "end_of_gc" is TRUE, is after the last use in the GC.  IF "retain"
   // is true, attempt to re-use the unused portion in the next GC.
-  void retire(bool end_of_gc, bool retain);
+  virtual void retire(bool end_of_gc, bool retain);
 
   void print() PRODUCT_RETURN;
 };
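Making retire() virtual lets a collector-specific buffer hook the retirement
path. A minimal sketch of such an override, with a hypothetical subclass name
and assuming the existing single-size ParGCAllocBuffer constructor:

  // Hypothetical subclass, for illustration only.
  class StatsParGCAllocBuffer : public ParGCAllocBuffer {
   public:
    StatsParGCAllocBuffer(size_t word_sz) : ParGCAllocBuffer(word_sz) {}
    virtual void retire(bool end_of_gc, bool retain) {
      // collector-specific bookkeeping would go here
      ParGCAllocBuffer::retire(end_of_gc, retain);  // keep default behaviour
    }
  };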
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -468,7 +468,25 @@
 
 #ifdef ASSERT
   if (istate->_msg != initialize) {
-    assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
+    // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
+    // because in that case, EnableInvokeDynamic is true by default but will be later switched off
+    // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
+    // for the old JSR292 implementation.
+    // This leads to a situation where 'istate->_stack_limit' always accounts for
+    // methodOopDesc::extra_stack_entries() because it is computed in
+    // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
+    // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
+    // account for extra_stack_entries() anymore because at the time when it is called
+    // EnableInvokeDynamic was already set to false.
+    // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
+    // switched off because of the wrong classes.
+    if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
+      assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
+    } else {
+      const int extra_stack_entries = Method::extra_stack_entries_for_indy;
+      assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
+                                                                                               + 1), "bad stack limit");
+    }
 #ifndef SHARK
     IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
 #endif // !SHARK
--- a/hotspot/src/share/vm/memory/allocation.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -60,10 +60,11 @@
 void  _ValueObj::operator delete [](void* p)    { ShouldNotCallThis(); }
 
 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
-                                size_t word_size, bool read_only, TRAPS) {
+                                 size_t word_size, bool read_only,
+                                 MetaspaceObj::Type type, TRAPS) {
   // Klass has its own operator new
   return Metaspace::allocate(loader_data, word_size, read_only,
-                             Metaspace::NonClassType, CHECK_NULL);
+                             type, CHECK_NULL);
 }
 
 bool MetaspaceObj::is_shared() const {
--- a/hotspot/src/share/vm/memory/allocation.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -268,8 +268,55 @@
   bool is_shared() const;
   void print_address_on(outputStream* st) const;  // nonvirtual address printing
 
+#define METASPACE_OBJ_TYPES_DO(f) \
+  f(Unknown) \
+  f(Class) \
+  f(Symbol) \
+  f(TypeArrayU1) \
+  f(TypeArrayU2) \
+  f(TypeArrayU4) \
+  f(TypeArrayU8) \
+  f(TypeArrayOther) \
+  f(Method) \
+  f(ConstMethod) \
+  f(MethodData) \
+  f(ConstantPool) \
+  f(ConstantPoolCache) \
+  f(Annotation) \
+  f(MethodCounters)
+
+#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
+#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
+  enum Type {
+    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
+    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
+    _number_of_types
+  };
+
+  static const char * type_name(Type type) {
+    switch(type) {
+    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
+    default:
+      ShouldNotReachHere();
+      return NULL;
+    }
+  }
+
+  static MetaspaceObj::Type array_type(size_t elem_size) {
+    switch (elem_size) {
+    case 1: return TypeArrayU1Type;
+    case 2: return TypeArrayU2Type;
+    case 4: return TypeArrayU4Type;
+    case 8: return TypeArrayU8Type;
+    default:
+      return TypeArrayOtherType;
+    }
+  }
+
   void* operator new(size_t size, ClassLoaderData* loader_data,
-                     size_t word_size, bool read_only, Thread* thread);
+                     size_t word_size, bool read_only,
+                     Type type, Thread* thread);
                      // can't use TRAPS from this header file.
   void operator delete(void* p) { ShouldNotCallThis(); }
 };
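METASPACE_OBJ_TYPES_DO above is an X-macro: each f(name) entry is expanded
once to declare the enum constant and once to generate the name lookup, and
allocation sites then pass the resulting MetaspaceObj::Type through the
updated operator new. A small standalone illustration of the technique (not
HotSpot code; names and output are for demonstration only):

  #include <cstdio>
  #define TYPES_DO(f) f(Unknown) f(Class) f(Symbol)
  #define DECLARE(name)   name##Type,
  #define NAME_CASE(name) case name##Type: return #name;
  enum Type { TYPES_DO(DECLARE) _number_of_types };
  static const char* type_name(Type t) {
    switch (t) { TYPES_DO(NAME_CASE) default: return "?"; }
  }
  int main() { std::printf("%s\n", type_name(ClassType)); }  // prints "Class"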
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -713,6 +713,23 @@
 #ifdef ASSERT
   void verify_allocated_blocks_words();
 #endif
+
+  size_t get_raw_word_size(size_t word_size) {
+    // If only the dictionary is going to be used (i.e., no
+    // indexed free list), then there is a minimum size requirement.
+    // MinChunkSize is a placeholder for the real minimum size JJJ
+    size_t byte_size = word_size * BytesPerWord;
+
+    size_t byte_size_with_overhead = byte_size + Metablock::overhead();
+
+    size_t raw_bytes_size = MAX2(byte_size_with_overhead,
+                                 Metablock::min_block_byte_size());
+    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
+    size_t raw_word_size = raw_bytes_size / BytesPerWord;
+    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
+
+    return raw_word_size;
+  }
 };
 
 uint const SpaceManager::_small_chunk_limit = 4;
@@ -2320,19 +2337,7 @@
 MetaWord* SpaceManager::allocate(size_t word_size) {
   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
 
-  // If only the dictionary is going to be used (i.e., no
-  // indexed free list), then there is a minimum size requirement.
-  // MinChunkSize is a placeholder for the real minimum size JJJ
-  size_t byte_size = word_size * BytesPerWord;
-
-  size_t byte_size_with_overhead = byte_size + Metablock::overhead();
-
-  size_t raw_bytes_size = MAX2(byte_size_with_overhead,
-                               Metablock::min_block_byte_size());
-  raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
-  size_t raw_word_size = raw_bytes_size / BytesPerWord;
-  assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
-
+  size_t raw_word_size = get_raw_word_size(word_size);
   BlockFreelist* fl =  block_freelists();
   MetaWord* p = NULL;
   // Allocation from the dictionary is expensive in the sense that
@@ -2896,6 +2901,9 @@
   if (class_chunk != NULL) {
     class_vsm()->add_chunk(class_chunk, true);
   }
+
+  _alloc_record_head = NULL;
+  _alloc_record_tail = NULL;
 }
 
 size_t Metaspace::align_word_size_up(size_t word_size) {
@@ -3000,12 +3008,14 @@
 }
 
 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
-                              bool read_only, MetadataType mdtype, TRAPS) {
+                              bool read_only, MetaspaceObj::Type type, TRAPS) {
   if (HAS_PENDING_EXCEPTION) {
     assert(false, "Should not allocate with exception pending");
     return NULL;  // caller does a CHECK_NULL too
   }
 
+  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
+
   // SSS: Should we align the allocations and make sure the sizes are aligned.
   MetaWord* result = NULL;
 
@@ -3015,13 +3025,13 @@
   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
   // to revisit this for application class data sharing.
   if (DumpSharedSpaces) {
-    if (read_only) {
-      result = loader_data->ro_metaspace()->allocate(word_size, NonClassType);
-    } else {
-      result = loader_data->rw_metaspace()->allocate(word_size, NonClassType);
-    }
+    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
+    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
+    result = space->allocate(word_size, NonClassType);
     if (result == NULL) {
       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
+    } else {
+      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
     }
     return Metablock::initialize(result, word_size);
   }
@@ -3056,6 +3066,38 @@
   return Metablock::initialize(result, word_size);
 }
 
+void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
+  assert(DumpSharedSpaces, "sanity");
+
+  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
+  if (_alloc_record_head == NULL) {
+    _alloc_record_head = _alloc_record_tail = rec;
+  } else {
+    _alloc_record_tail->_next = rec;
+    _alloc_record_tail = rec;
+  }
+}
+
+void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
+  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
+
+  address last_addr = (address)bottom();
+
+  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
+    address ptr = rec->_ptr;
+    if (last_addr < ptr) {
+      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
+    }
+    closure->doit(ptr, rec->_type, rec->_byte_size);
+    last_addr = ptr + rec->_byte_size;
+  }
+
+  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
+  if (last_addr < top) {
+    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
+  }
+}
+
 void Metaspace::purge() {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
--- a/hotspot/src/share/vm/memory/metaspace.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -127,6 +127,23 @@
   static VirtualSpaceList* space_list()       { return _space_list; }
   static VirtualSpaceList* class_space_list() { return _class_space_list; }
 
+  // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
+  // maintain a single list for now.
+  void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
+
+  class AllocRecord : public CHeapObj<mtClass> {
+  public:
+    AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
+      : _next(NULL), _ptr(ptr), _type(type), _byte_size(byte_size) {}
+    AllocRecord *_next;
+    address _ptr;
+    MetaspaceObj::Type _type;
+    int _byte_size;
+  };
+
+  AllocRecord * _alloc_record_head;
+  AllocRecord * _alloc_record_tail;
+
  public:
 
   Metaspace(Mutex* lock, MetaspaceType type);
@@ -148,8 +165,8 @@
   size_t used_bytes_slow(MetadataType mdtype) const;
   size_t capacity_bytes_slow(MetadataType mdtype) const;
 
-  static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
-                            bool read_only, MetadataType mdtype, TRAPS);
+  static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size,
+                             bool read_only, MetaspaceObj::Type type, TRAPS);
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
 
   MetaWord* expand_and_allocate(size_t size,
@@ -166,6 +183,13 @@
   void print_on(outputStream* st) const;
   // Debugging support
   void verify();
+
+  class AllocRecordClosure :  public StackObj {
+  public:
+    virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) = 0;
+  };
+
+  void iterate(AllocRecordClosure *closure);
 };
 
 class MetaspaceAux : AllStatic {
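
The AllocRecord list and AllocRecordClosure introduced here form a small visitor API over the allocations recorded during dumping. A minimal consumer might look like the following sketch; TotalBytesClosure and its per-type byte accounting are assumptions for illustration, not code from this changeset:

// Illustrative sketch only: sums the bytes reported for each MetaspaceObj::Type,
// including the UnknownType gaps that Metaspace::iterate() reports between records.
class TotalBytesClosure : public Metaspace::AllocRecordClosure {
 public:
  int _bytes[MetaspaceObj::_number_of_types];
  TotalBytesClosure() { memset(_bytes, 0, sizeof(_bytes)); }
  virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
    _bytes[type] += byte_size;
  }
};
// Usage (valid only when DumpSharedSpaces is set, as iterate() asserts):
//   TotalBytesClosure cl;
//   loader_data->ro_metaspace()->iterate(&cl);
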
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -243,6 +243,147 @@
   bool reading() const { return false; }
 };
 
+// This is for dumping detailed statistics for the allocations
+// in the shared spaces.
+class DumpAllocClosure : public Metaspace::AllocRecordClosure {
+public:
+
+  // Here's poor man's enum inheritance
+#define SHAREDSPACE_OBJ_TYPES_DO(f) \
+  METASPACE_OBJ_TYPES_DO(f) \
+  f(SymbolHashentry) \
+  f(SymbolBuckets) \
+  f(Other)
+
+#define SHAREDSPACE_OBJ_TYPE_DECLARE(name) name ## Type,
+#define SHAREDSPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
+  enum Type {
+    // All MetaspaceObj::Type values, plus the shared-space-only types declared above
+    SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_DECLARE)
+    _number_of_types
+  };
+
+  static const char * type_name(Type type) {
+    switch(type) {
+    SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_NAME_CASE)
+    default:
+      ShouldNotReachHere();
+      return NULL;
+    }
+  }
+
+public:
+  enum {
+    RO = 0,
+    RW = 1
+  };
+
+  int _counts[2][_number_of_types];
+  int _bytes [2][_number_of_types];
+  int _which;
+
+  DumpAllocClosure() {
+    memset(_counts, 0, sizeof(_counts));
+    memset(_bytes,  0, sizeof(_bytes));
+  };
+
+  void iterate_metaspace(Metaspace* space, int which) {
+    assert(which == RO || which == RW, "sanity");
+    _which = which;
+    space->iterate(this);
+  }
+
+  virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
+    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+    _counts[_which][type] ++;
+    _bytes [_which][type] += byte_size;
+  }
+
+  void dump_stats(int ro_all, int rw_all, int md_all, int mc_all);
+};
+
+void DumpAllocClosure::dump_stats(int ro_all, int rw_all, int md_all, int mc_all) {
+  rw_all += (md_all + mc_all); // md and mc are all mapped Read/Write
+  int other_bytes = md_all + mc_all;
+
+  // Calculate size of data that was not allocated by Metaspace::allocate()
+  int symbol_count = _counts[RO][MetaspaceObj::SymbolType];
+  int symhash_bytes = symbol_count * sizeof (HashtableEntry<Symbol*, mtSymbol>);
+  int symbuck_count = SymbolTable::the_table()->table_size();
+  int symbuck_bytes = symbuck_count * sizeof(HashtableBucket<mtSymbol>);
+
+  _counts[RW][SymbolHashentryType] = symbol_count;
+  _bytes [RW][SymbolHashentryType] = symhash_bytes;
+  other_bytes -= symhash_bytes;
+
+  _counts[RW][SymbolBucketsType] = symbuck_count;
+  _bytes [RW][SymbolBucketsType] = symbuck_bytes;
+  other_bytes -= symbuck_bytes;
+
+  // TODO: count things like dictionary, vtable, etc
+  _bytes[RW][OtherType] =  other_bytes;
+
+  // prevent divide-by-zero
+  if (ro_all < 1) {
+    ro_all = 1;
+  }
+  if (rw_all < 1) {
+    rw_all = 1;
+  }
+
+  int all_ro_count = 0;
+  int all_ro_bytes = 0;
+  int all_rw_count = 0;
+  int all_rw_bytes = 0;
+
+  const char *fmt = "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f";
+  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
+  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";
+
+  tty->print_cr("Detailed metadata info (rw includes md and mc):");
+  tty->print_cr(hdr);
+  tty->print_cr(sep);
+  for (int type = 0; type < int(_number_of_types); type ++) {
+    const char *name = type_name((Type)type);
+    int ro_count = _counts[RO][type];
+    int ro_bytes = _bytes [RO][type];
+    int rw_count = _counts[RW][type];
+    int rw_bytes = _bytes [RW][type];
+    int count = ro_count + rw_count;
+    int bytes = ro_bytes + rw_bytes;
+
+    double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
+    double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
+    double perc    = 100.0 * double(bytes)    / double(ro_all + rw_all);
+
+    tty->print_cr(fmt, name,
+                  ro_count, ro_bytes, ro_perc,
+                  rw_count, rw_bytes, rw_perc,
+                  count, bytes, perc);
+
+    all_ro_count += ro_count;
+    all_ro_bytes += ro_bytes;
+    all_rw_count += rw_count;
+    all_rw_bytes += rw_bytes;
+  }
+
+  int all_count = all_ro_count + all_rw_count;
+  int all_bytes = all_ro_bytes + all_rw_bytes;
+
+  double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all);
+  double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all);
+  double all_perc    = 100.0 * double(all_bytes)    / double(ro_all + rw_all);
+
+  tty->print_cr(sep);
+  tty->print_cr(fmt, "Total",
+                all_ro_count, all_ro_bytes, all_ro_perc,
+                all_rw_count, all_rw_bytes, all_rw_perc,
+                all_count, all_bytes, all_perc);
+
+  assert(all_ro_bytes == ro_all, "everything should have been counted");
+  assert(all_rw_bytes == rw_all, "everything should have been counted");
+}
 
 // Populate the shared space.
 
@@ -454,6 +595,14 @@
   mapinfo->close();
 
   memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*));
+
+  if (PrintSharedSpaces) {
+    DumpAllocClosure dac;
+    dac.iterate_metaspace(_loader_data->ro_metaspace(), DumpAllocClosure::RO);
+    dac.iterate_metaspace(_loader_data->rw_metaspace(), DumpAllocClosure::RW);
+
+    dac.dump_stats(int(ro_bytes), int(rw_bytes), int(md_bytes), int(mc_bytes));
+  }
 }
 
 static void link_shared_classes(Klass* obj, TRAPS) {
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -45,6 +45,7 @@
   SH_PS_FlatProfiler_oops_do,
   SH_PS_Management_oops_do,
   SH_PS_SystemDictionary_oops_do,
+  SH_PS_ClassLoaderDataGraph_oops_do,
   SH_PS_jvmti_oops_do,
   SH_PS_StringTable_oops_do,
   SH_PS_CodeCache_oops_do,
@@ -173,15 +174,21 @@
   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
     if (so & SO_AllClasses) {
       SystemDictionary::oops_do(roots);
-      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
     } else if (so & SO_SystemClasses) {
       SystemDictionary::always_strong_oops_do(roots);
-      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
     } else {
       fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
     }
   }
 
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
+    if (so & SO_AllClasses) {
+      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
+    } else if (so & SO_SystemClasses) {
+      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
+    }
+  }
+
   if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
     if (so & SO_Strings) {
       StringTable::oops_do(roots);
--- a/hotspot/src/share/vm/memory/universe.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -228,11 +228,8 @@
 
 void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
   if (size < alignment || size % alignment != 0) {
-    ResourceMark rm;
-    stringStream st;
-    st.print("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment);
-    char* error = st.as_string();
-    vm_exit_during_initialization(error);
+    vm_exit_during_initialization(
+      err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
   }
 }
 
@@ -916,7 +913,7 @@
   }
 
   if (!total_rs.is_reserved()) {
-    vm_exit_during_initialization(err_msg("Could not reserve enough space for object heap %d bytes", total_reserved));
+    vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
     return total_rs;
   }
 
--- a/hotspot/src/share/vm/oops/annotations.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/annotations.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -33,7 +33,7 @@
 
 // Allocate annotations in metadata area
 Annotations* Annotations::allocate(ClassLoaderData* loader_data, TRAPS) {
-  return new (loader_data, size(), true, THREAD) Annotations();
+  return new (loader_data, size(), true, MetaspaceObj::AnnotationType, THREAD) Annotations();
 }
 
 // helper
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -94,7 +94,7 @@
   ResourceMark rm(THREAD);
   k->initialize_supers(super_klass(), CHECK);
   k->vtable()->initialize_vtable(false, CHECK);
-  java_lang_Class::create_mirror(k, CHECK);
+  java_lang_Class::create_mirror(k, Handle(NULL), CHECK);
 }
 
 GrowableArray<Klass*>* ArrayKlass::compute_secondary_supers(int num_extra_slots) {
--- a/hotspot/src/share/vm/oops/constMethod.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/constMethod.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -40,7 +40,7 @@
                                    MethodType method_type,
                                    TRAPS) {
   int size = ConstMethod::size(byte_code_size, sizes);
-  return new (loader_data, size, true, THREAD) ConstMethod(
+  return new (loader_data, size, true, MetaspaceObj::ConstMethodType, THREAD) ConstMethod(
       byte_code_size, sizes, method_type, size);
 }
 
--- a/hotspot/src/share/vm/oops/constantPool.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -55,7 +55,7 @@
   // the resolved_references array, which is recreated at startup time.
   // But that could be moved to InstanceKlass (although a pain to access from
   // assembly code).  Maybe it could be moved to the cpCache which is RW.
-  return new (loader_data, size, false, THREAD) ConstantPool(tags);
+  return new (loader_data, size, false, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
 }
 
 ConstantPool::ConstantPool(Array<u1>* tags) {
@@ -1063,9 +1063,10 @@
     int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
     int i1 = invoke_dynamic_bootstrap_specifier_index(index1);
     int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2);
-    bool match = compare_entry_to(k1, cp2, k2, CHECK_false) &&
-                 compare_operand_to(i1, cp2, i2, CHECK_false);
-    return match;
+    // separate statements and variables because CHECK_false is used
+    bool match_entry = compare_entry_to(k1, cp2, k2, CHECK_false);
+    bool match_operand = compare_operand_to(i1, cp2, i2, CHECK_false);
+    return (match_entry && match_operand);
   } break;
 
   case JVM_CONSTANT_String:
--- a/hotspot/src/share/vm/oops/cpCache.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/cpCache.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -542,7 +542,8 @@
                                      const intStack& invokedynamic_map, TRAPS) {
   int size = ConstantPoolCache::size(length);
 
-  return new (loader_data, size, false, THREAD) ConstantPoolCache(length, index_map, invokedynamic_map);
+  return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
+    ConstantPoolCache(length, index_map, invokedynamic_map);
 }
 
 void ConstantPoolCache::initialize(const intArray& inverse_index_map,
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -268,8 +268,6 @@
   set_fields(NULL, 0);
   set_constants(NULL);
   set_class_loader_data(NULL);
-  set_protection_domain(NULL);
-  set_signers(NULL);
   set_source_file_name(NULL);
   set_source_debug_extension(NULL, 0);
   set_array_name(NULL);
@@ -279,7 +277,6 @@
   set_is_marked_dependent(false);
   set_init_state(InstanceKlass::allocated);
   set_init_thread(NULL);
-  set_init_lock(NULL);
   set_reference_type(rt);
   set_oop_map_cache(NULL);
   set_jni_ids(NULL);
@@ -408,12 +405,6 @@
   }
   set_inner_classes(NULL);
 
-  // Null out Java heap objects, although these won't be walked to keep
-  // alive once this InstanceKlass is deallocated.
-  set_protection_domain(NULL);
-  set_signers(NULL);
-  set_init_lock(NULL);
-
   // We should deallocate the Annotations instance
   MetadataFactory::free_metadata(loader_data, annotations());
   set_annotations(NULL);
@@ -451,6 +442,24 @@
   }
 }
 
+// JVMTI spec thinks there are signers and protection domain in the
+// instanceKlass.  These accessors pretend these fields are there.
+// The hprof specification also thinks these fields are in InstanceKlass.
+oop InstanceKlass::protection_domain() const {
+  // return the protection_domain from the mirror
+  return java_lang_Class::protection_domain(java_mirror());
+}
+
+// Removing these requires an incompatible change and a CCC request.
+objArrayOop InstanceKlass::signers() const {
+  // return the signers from the mirror
+  return java_lang_Class::signers(java_mirror());
+}
+
+volatile oop InstanceKlass::init_lock() const {
+  // return the init lock from the mirror
+  return java_lang_Class::init_lock(java_mirror());
+}
 
 void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
   EXCEPTION_MARK;
@@ -1883,16 +1892,6 @@
 
 // Garbage collection
 
-void InstanceKlass::oops_do(OopClosure* cl) {
-  Klass::oops_do(cl);
-
-  cl->do_oop(adr_protection_domain());
-  cl->do_oop(adr_signers());
-  cl->do_oop(adr_init_lock());
-
-  // Don't walk the arrays since they are walked from the ClassLoaderData objects.
-}
-
 #ifdef ASSERT
 template <class T> void assert_is_in(T *p) {
   T heap_oop = oopDesc::load_heap_oop(p);
@@ -2241,9 +2240,6 @@
     m->remove_unshareable_info();
   }
 
-  // Need to reinstate when reading back the class.
-  set_init_lock(NULL);
-
   // do array classes also.
   array_klasses_do(remove_unshareable_in_class);
 }
@@ -2275,13 +2271,6 @@
     ik->itable()->initialize_itable(false, CHECK);
   }
 
-  // Allocate a simple java object for a lock.
-  // This needs to be a java object because during class initialization
-  // it can be held across a java call.
-  typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
-  Handle h(THREAD, (oop)r);
-  ik->set_init_lock(h());
-
   // restore constant pool resolved references
   ik->constants()->restore_unshareable_info(CHECK);
 
@@ -2331,10 +2320,15 @@
     FreeHeap(jmeths);
   }
 
-  MemberNameTable* mnt = member_names();
-  if (mnt != NULL) {
-    delete mnt;
-    set_member_names(NULL);
+  // Deallocate MemberNameTable
+  {
+    Mutex* lock_or_null = SafepointSynchronize::is_at_safepoint() ? NULL : MemberNameTable_lock;
+    MutexLockerEx ml(lock_or_null, Mutex::_no_safepoint_check_flag);
+    MemberNameTable* mnt = member_names();
+    if (mnt != NULL) {
+      delete mnt;
+      set_member_names(NULL);
+    }
   }
 
   int* indices = methods_cached_itable_indices_acquire();
@@ -2765,15 +2759,28 @@
   return NULL;
 }
 
-void InstanceKlass::add_member_name(Handle mem_name) {
+void InstanceKlass::add_member_name(int index, Handle mem_name) {
   jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
   MutexLocker ml(MemberNameTable_lock);
+  assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
   DEBUG_ONLY(No_Safepoint_Verifier nsv);
 
   if (_member_names == NULL) {
-    _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable();
+    _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count());
   }
-  _member_names->add_member_name(mem_name_wref);
+  _member_names->add_member_name(index, mem_name_wref);
+}
+
+oop InstanceKlass::get_member_name(int index) {
+  MutexLocker ml(MemberNameTable_lock);
+  assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
+  DEBUG_ONLY(No_Safepoint_Verifier nsv);
+
+  if (_member_names == NULL) {
+    return NULL;
+  }
+  oop mem_name = _member_names->get_member_name(index);
+  return mem_name;
 }
 
 // -----------------------------------------------------------------------------------------------------
@@ -2836,10 +2843,7 @@
     class_loader_data()->print_value_on(st);
     st->cr();
   }
-  st->print(BULLET"protection domain: "); ((InstanceKlass*)this)->protection_domain()->print_value_on(st); st->cr();
   st->print(BULLET"host class:        "); host_klass()->print_value_on_maybe_null(st); st->cr();
-  st->print(BULLET"signers:           "); signers()->print_value_on(st);               st->cr();
-  st->print(BULLET"init_lock:         "); ((oop)_init_lock)->print_value_on(st);       st->cr();
   if (source_file_name() != NULL) {
     st->print(BULLET"source file:       ");
     source_file_name()->print_value_on(st);
@@ -3040,7 +3044,6 @@
   n += (sz->_method_ordering_bytes       = sz->count_array(method_ordering()));
   n += (sz->_local_interfaces_bytes      = sz->count_array(local_interfaces()));
   n += (sz->_transitive_interfaces_bytes = sz->count_array(transitive_interfaces()));
-  n += (sz->_signers_bytes               = sz->count_array(signers()));
   n += (sz->_fields_bytes                = sz->count_array(fields()));
   n += (sz->_inner_classes_bytes         = sz->count_array(inner_classes()));
   sz->_ro_bytes += n;
@@ -3206,17 +3209,11 @@
     guarantee(constants()->is_metadata(), "should be in metaspace");
     guarantee(constants()->is_constantPool(), "should be constant pool");
   }
-  if (protection_domain() != NULL) {
-    guarantee(protection_domain()->is_oop(), "should be oop");
-  }
   const Klass* host = host_klass();
   if (host != NULL) {
     guarantee(host->is_metadata(), "should be in metaspace");
     guarantee(host->is_klass(), "should be klass");
   }
-  if (signers() != NULL) {
-    guarantee(signers()->is_objArray(), "should be obj array");
-  }
 }
 
 void InstanceKlass::oop_verify_on(oop obj, outputStream* st) {
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -58,8 +58,6 @@
 //    [fields                     ]
 //    [constants                  ]
 //    [class loader               ]
-//    [protection domain          ]
-//    [signers                    ]
 //    [source file name           ]
 //    [inner classes              ]
 //    [static field size          ]
@@ -180,16 +178,6 @@
   static volatile int _total_instanceKlass_count;
 
  protected:
-  // Protection domain.
-  oop             _protection_domain;
-  // Class signers.
-  objArrayOop     _signers;
-  // Lock for (1) initialization; (2) access to the ConstantPool of this class.
-  // Must be one per class and it has to be a VM internal object so java code
-  // cannot lock it (like the mirror).
-  // It has to be an object not a Mutex because it's held through java calls.
-  volatile oop    _init_lock;
-
   // Annotations for this class
   Annotations*    _annotations;
   // Array classes holding elements of this class.
@@ -527,8 +515,10 @@
   void set_constants(ConstantPool* c)    { _constants = c; }
 
   // protection domain
-  oop protection_domain()                  { return _protection_domain; }
-  void set_protection_domain(oop pd)       { klass_oop_store(&_protection_domain, pd); }
+  oop protection_domain() const;
+
+  // signers
+  objArrayOop signers() const;
 
   // host class
   Klass* host_klass() const              {
@@ -575,10 +565,6 @@
     }
   }
 
-  // signers
-  objArrayOop signers() const              { return _signers; }
-  void set_signers(objArrayOop s)          { klass_oop_store((oop*)&_signers, s); }
-
   // source file name
   Symbol* source_file_name() const         { return _source_file_name; }
   void set_source_file_name(Symbol* n);
@@ -912,8 +898,6 @@
   Method* method_at_itable(Klass* holder, int index, TRAPS);
 
   // Garbage collection
-  virtual void oops_do(OopClosure* cl);
-
   void oop_follow_contents(oop obj);
   int  oop_adjust_pointers(oop obj);
 
@@ -999,14 +983,12 @@
 
   // Lock during initialization
 public:
-  volatile oop init_lock() const     {return _init_lock; }
+  // Lock for (1) initialization; (2) access to the ConstantPool of this class.
+  // Must be one per class and it has to be a VM internal object so java code
+  // cannot lock it (like the mirror).
+  // It has to be an object not a Mutex because it's held through java calls.
+  volatile oop init_lock() const;
 private:
-  void set_init_lock(oop value) { klass_oop_store(&_init_lock, value); }
-
-  // Offsets for memory management
-  oop* adr_protection_domain() const { return (oop*)&this->_protection_domain;}
-  oop* adr_signers() const           { return (oop*)&this->_signers;}
-  oop* adr_init_lock() const         { return (oop*)&this->_init_lock;}
 
   // Static methods that are used to implement member methods where an exposed this pointer
   // is needed due to possible GCs
@@ -1040,7 +1022,8 @@
   // JSR-292 support
   MemberNameTable* member_names() { return _member_names; }
   void set_member_names(MemberNameTable* member_names) { _member_names = member_names; }
-  void add_member_name(Handle member_name);
+  void add_member_name(int index, Handle member_name);
+  oop  get_member_name(int index);
 
 public:
   // JVMTI support
--- a/hotspot/src/share/vm/oops/klass.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/klass.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -140,7 +140,7 @@
 
 void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) {
   return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
-                             Metaspace::ClassType, CHECK_NULL);
+                             MetaspaceObj::ClassType, CHECK_NULL);
 }
 
 Klass::Klass() {
@@ -511,8 +511,9 @@
   // (same order as class file parsing)
   loader_data->add_class(this);
 
-  // Recreate the class mirror
-  java_lang_Class::create_mirror(this, CHECK);
+  // Recreate the class mirror.  The protection_domain is always null for
+  // boot loader, for now.
+  java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
 }
 
 Klass* Klass::array_klass_or_null(int rank) {
--- a/hotspot/src/share/vm/oops/klass.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/klass.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -445,7 +445,7 @@
   Klass* array_klass_or_null(int rank);
   Klass* array_klass_or_null();
 
-  virtual oop protection_domain()       { return NULL; }
+  virtual oop protection_domain() const = 0;
 
   oop class_loader() const;
 
--- a/hotspot/src/share/vm/oops/method.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -74,7 +74,7 @@
 
   int size = Method::size(access_flags.is_native());
 
-  return new (loader_data, size, false, THREAD) Method(cm, access_flags, size);
+  return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags, size);
 }
 
 Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) {
--- a/hotspot/src/share/vm/oops/method.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -671,13 +671,15 @@
                                                    Symbol* signature, //anything at all
                                                    TRAPS);
   static Klass* check_non_bcp_klass(Klass* klass);
-  // these operate only on invoke methods:
+
+  // How many extra stack entries for invokedynamic when it's enabled
+  static const int extra_stack_entries_for_jsr292 = 1;
+
+  // this operates only on invoke methods:
   // presize interpreter frames for extra interpreter stack entries, if needed
-  // method handles want to be able to push a few extra values (e.g., a bound receiver), and
-  // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist,
-  // all without checking for a stack overflow
-  static int extra_stack_entries() { return EnableInvokeDynamic ? 2 : 0; }
-  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize()
+  // Account for the extra appendix argument for invokehandle/invokedynamic
+  static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; }
+  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize
 
   // RedefineClasses() support:
   bool is_old() const                               { return access_flags().is_old(); }
--- a/hotspot/src/share/vm/oops/methodCounters.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/methodCounters.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -26,7 +26,7 @@
 #include "runtime/thread.inline.hpp"
 
 MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) {
-  return new(loader_data, size(), false, THREAD) MethodCounters();
+  return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters();
 }
 
 void MethodCounters::clear_counters() {
--- a/hotspot/src/share/vm/oops/methodData.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -388,7 +388,8 @@
 MethodData* MethodData::allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS) {
   int size = MethodData::compute_allocation_size_in_words(method);
 
-  return new (loader_data, size, false, THREAD) MethodData(method(), size, CHECK_NULL);
+  return new (loader_data, size, false, MetaspaceObj::MethodDataType, THREAD)
+    MethodData(method(), size, CHECK_NULL);
 }
 
 int MethodData::bytecode_cell_count(Bytecodes::Code code) {
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -75,7 +75,7 @@
   void  copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS);
 
   // Compute protection domain
-  oop protection_domain() { return bottom_klass()->protection_domain(); }
+  oop protection_domain() const { return bottom_klass()->protection_domain(); }
 
  private:
   // Either oop or narrowOop depending on UseCompressedOops.
--- a/hotspot/src/share/vm/oops/symbol.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/symbol.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -55,7 +55,7 @@
   address res;
   int alloc_size = size(len)*HeapWordSize;
   res = (address) Metaspace::allocate(loader_data, size(len), true,
-                                      Metaspace::NonClassType, CHECK_NULL);
+                                      MetaspaceObj::SymbolType, CHECK_NULL);
   return res;
 }
 
--- a/hotspot/src/share/vm/oops/typeArrayKlass.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -67,6 +67,8 @@
   typeArrayOop allocate(int length, TRAPS) { return allocate_common(length, true, THREAD); }
   oop multi_allocate(int rank, jint* sizes, TRAPS);
 
+  oop protection_domain() const { return NULL; }
+
   // Copying
   void  copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS);
 
--- a/hotspot/src/share/vm/opto/escape.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/opto/escape.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -2202,7 +2202,7 @@
     int opcode = uncast_base->Opcode();
     assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
            opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
-           (uncast_base->is_Mem() && uncast_base->bottom_type() == TypeRawPtr::NOTNULL) ||
+           (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
            (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
   }
   return base;
--- a/hotspot/src/share/vm/opto/matcher.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1282,16 +1282,6 @@
     mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
   }
 
-  if (is_method_handle_invoke) {
-    // Kill some extra stack space in case method handles want to do
-    // a little in-place argument insertion.
-    // FIXME: Is this still necessary?
-    int regs_per_word  = NOT_LP64(1) LP64_ONLY(2); // %%% make a global const!
-    out_arg_limit_per_call += Method::extra_stack_entries() * regs_per_word;
-    // Do not update mcall->_argsize because (a) the extra space is not
-    // pushed as arguments and (b) _argsize is dead (not used anywhere).
-  }
-
   // Compute the max stack slot killed by any call.  These will not be
   // available for debug info, and will be used to adjust FIRST_STACK_mask
   // after all call sites have been visited.
--- a/hotspot/src/share/vm/opto/reg_split.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/opto/reg_split.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -51,6 +51,15 @@
 
 static const char out_of_nodes[] = "out of nodes during split";
 
+static bool contains_no_live_range_input(const Node* def) {
+  for (uint i = 1; i < def->req(); ++i) {
+    if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) {
+      return false;
+    }
+  }
+  return true;
+}
+
 //------------------------------get_spillcopy_wide-----------------------------
 // Get a SpillCopy node with wide-enough masks.  Use the 'wide-mask', the
 // wide ideal-register spill-mask if possible.  If the 'wide-mask' does
@@ -1312,7 +1321,7 @@
       Node *def = Reaches[pidx][slidx];
       assert( def, "must have reaching def" );
       // If input up/down sense and reg-pressure DISagree
-      if( def->rematerialize() ) {
+      if (def->rematerialize() && contains_no_live_range_input(def)) {
         // Place the rematerialized node above any MSCs created during
         // phi node splitting.  end_idx points at the insertion point
         // so look at the node before it.
--- a/hotspot/src/share/vm/prims/forte.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/prims/forte.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -35,6 +35,19 @@
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
 
+// call frame copied from old .h file and renamed
+typedef struct {
+    jint lineno;                      // line number in the source file
+    jmethodID method_id;              // method executed in this frame
+} ASGCT_CallFrame;
+
+// call trace copied from old .h file and renamed
+typedef struct {
+    JNIEnv *env_id;                   // Env where trace was recorded
+    jint num_frames;                  // number of frames in this trace
+    ASGCT_CallFrame *frames;          // frames
+} ASGCT_CallTrace;
+
 // These names match the names reported by the forte quality kit
 enum {
   ticks_no_Java_frame         =  0,
@@ -50,6 +63,8 @@
   ticks_safepoint             = -10
 };
 
+#if INCLUDE_JVMTI
+
 //-------------------------------------------------------
 
 // Native interfaces for use by Forte tools.
@@ -360,20 +375,6 @@
 
 }
 
-
-// call frame copied from old .h file and renamed
-typedef struct {
-    jint lineno;                      // line number in the source file
-    jmethodID method_id;              // method executed in this frame
-} ASGCT_CallFrame;
-
-// call trace copied from old .h file and renamed
-typedef struct {
-    JNIEnv *env_id;                   // Env where trace was recorded
-    jint num_frames;                  // number of frames in this trace
-    ASGCT_CallFrame *frames;          // frames
-} ASGCT_CallTrace;
-
 static void forte_fill_call_trace_given_top(JavaThread* thd,
                                             ASGCT_CallTrace* trace,
                                             int depth,
@@ -634,3 +635,12 @@
     pointer_delta(end, start, sizeof(jbyte)), 0, NULL);
 #endif // !_WINDOWS && !IA64
 }
+
+#else // INCLUDE_JVMTI
+extern "C" {
+  JNIEXPORT
+  void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext) {
+    trace->num_frames = ticks_no_class_load; // -1
+  }
+}
+#endif // INCLUDE_JVMTI
--- a/hotspot/src/share/vm/prims/jvm.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1072,11 +1072,7 @@
     return NULL;
   }
 
-  Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
-  objArrayOop signers = NULL;
-  if (k->oop_is_instance()) {
-    signers = InstanceKlass::cast(k)->signers();
-  }
+  objArrayOop signers = java_lang_Class::signers(JNIHandles::resolve_non_null(cls));
 
   // If there are no signers set in the class, or if the class
   // is an array, return NULL.
@@ -1102,7 +1098,7 @@
     // be called with an array.  Only the bootstrap loader creates arrays.
     Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
     if (k->oop_is_instance()) {
-      InstanceKlass::cast(k)->set_signers(objArrayOop(JNIHandles::resolve(signers)));
+      java_lang_Class::set_signers(k->java_mirror(), objArrayOop(JNIHandles::resolve(signers)));
     }
   }
 JVM_END
@@ -1119,8 +1115,8 @@
     return NULL;
   }
 
-  Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
-  return (jobject) JNIHandles::make_local(env, k->protection_domain());
+  oop pd = java_lang_Class::protection_domain(JNIHandles::resolve(cls));
+  return (jobject) JNIHandles::make_local(env, pd);
 JVM_END
 
 
@@ -1139,7 +1135,7 @@
     if (k->oop_is_instance()) {
       oop pd = JNIHandles::resolve(protection_domain);
       assert(pd == NULL || pd->is_oop(), "just checking");
-      InstanceKlass::cast(k)->set_protection_domain(pd);
+      java_lang_Class::set_protection_domain(k->java_mirror(), pd);
     }
   }
 JVM_END
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1349,12 +1349,11 @@
         CHECK_0);
     }
 
-    finalize_operands_merge(*merge_cp_p, THREAD);
-
     RC_TRACE_WITH_THREAD(0x00020000, THREAD,
       ("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d",
       *merge_cp_length_p, scratch_i, _index_map_count));
   }
+  finalize_operands_merge(*merge_cp_p, THREAD);
 
   return true;
 } // end merge_constant_pools()
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -232,7 +232,8 @@
   // This is done eagerly, since it is readily available without
   // constructing any new objects.
   // TO DO: maybe intern mname_oop
-  m->method_holder()->add_member_name(mname);
+  m->method_holder()->add_member_name(m->method_idnum(), mname);
+
   return mname();
 }
 
@@ -301,7 +302,6 @@
   // Although the fieldDescriptor::_index would also identify the field,
   // we do not use it, because it is harder to decode.
   // TO DO: maybe intern mname_oop
-  InstanceKlass::cast(field_holder())->add_member_name(mname);
   return mname();
 }
 
@@ -943,7 +943,8 @@
 // MemberNameTable
 //
 
-MemberNameTable::MemberNameTable() : GrowableArray<jweak>(10, true) {
+MemberNameTable::MemberNameTable(int methods_cnt)
+                  : GrowableArray<jweak>(methods_cnt, true) {
   assert_locked_or_safepoint(MemberNameTable_lock);
 }
 
@@ -957,29 +958,18 @@
   }
 }
 
-// Return entry index if found, return -1 otherwise.
-int MemberNameTable::find_member_name(oop mem_name) {
+void MemberNameTable::add_member_name(int index, jweak mem_name_wref) {
   assert_locked_or_safepoint(MemberNameTable_lock);
-  int len = this->length();
-
-  for (int idx = 0; idx < len; idx++) {
-    jweak ref = this->at(idx);
-    oop entry = JNIHandles::resolve(ref);
-    if (entry == mem_name) {
-      return idx;
-    }
-  }
-  return -1;
+  this->at_put_grow(index, mem_name_wref);
 }
 
-void MemberNameTable::add_member_name(jweak mem_name_wref) {
+// Return a member name oop or NULL.
+oop MemberNameTable::get_member_name(int index) {
   assert_locked_or_safepoint(MemberNameTable_lock);
-  oop mem_name = JNIHandles::resolve(mem_name_wref);
 
-  // Each member name may appear just once: add only if not found
-  if (find_member_name(mem_name) == -1) {
-    this->append(mem_name_wref);
-  }
+  jweak ref = this->at(index);
+  oop mem_name = JNIHandles::resolve(ref);
+  return mem_name;
 }
 
 #if INCLUDE_JVMTI
--- a/hotspot/src/share/vm/prims/methodHandles.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -219,7 +219,6 @@
   }
 };
 
-
 //------------------------------------------------------------------------------
 // MethodHandlesAdapterGenerator
 //
@@ -233,13 +232,13 @@
 //------------------------------------------------------------------------------
 // MemberNameTable
 //
+
 class MemberNameTable : public GrowableArray<jweak> {
  public:
-  MemberNameTable();
+  MemberNameTable(int methods_cnt);
   ~MemberNameTable();
-  void add_member_name(jweak mem_name_ref);
- private:
-  int find_member_name(oop mem_name);
+  void add_member_name(int index, jweak mem_name_ref);
+  oop  get_member_name(int index);
 
 #if INCLUDE_JVMTI
  public:
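
MemberNameTable is now indexed by the method's idnum instead of being searched linearly, so storing and fetching a MemberName reduces to a pair of indexed calls. A hedged sketch of that call pattern, assuming a Method* m and a Handle mname are in scope (local names are hypothetical):

// Illustrative call pattern for the indexed MemberNameTable:
InstanceKlass* holder = m->method_holder();
holder->add_member_name(m->method_idnum(), mname);         // store under the method's idnum
oop cached = holder->get_member_name(m->method_idnum());   // later lookup uses the same index
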
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -115,8 +115,6 @@
 
 inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
   jlong byte_offset = field_offset_to_byte_offset(field_offset);
-  // Don't allow unsafe to be used to read or write the header word of oops
-  assert(p == NULL || field_offset >= oopDesc::header_size(), "offset must be outside of header");
 #ifdef ASSERT
   if (p != NULL) {
     assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -37,6 +37,7 @@
 #include "runtime/os.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
+#include "utilities/exceptions.hpp"
 
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/concurrentMark.hpp"
@@ -330,8 +331,18 @@
 WB_END
 
 
-WB_ENTRY(jlong, WB_ReserveMemory(JNIEnv* env, jobject o, jlong size))
-  return (jlong)os::reserve_memory(size, NULL, 0);
+WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o))
+  // static+volatile in order to force the read to happen
+  // (not be eliminated by the compiler)
+  static char c;
+  static volatile char* p;
+
+  p = os::reserve_memory(os::vm_allocation_granularity(), NULL, 0);
+  if (p == NULL) {
+    THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Failed to reserve memory");
+  }
+
+  c = *p;
 WB_END
 
 //Some convenience methods to deal with objects from java
@@ -437,7 +448,7 @@
   {CC"isInStringTable",   CC"(Ljava/lang/String;)Z",  (void*)&WB_IsInStringTable  },
   {CC"fullGC",   CC"()V",                             (void*)&WB_FullGC },
 
-  {CC"reserveMemory", CC"(J)J", (void*)&WB_ReserveMemory },
+  {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
 };
 
 #undef CC
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -2217,6 +2217,13 @@
     status = false;
   }
 
+  if (ReservedCodeCacheSize < InitialCodeCacheSize) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Invalid ReservedCodeCacheSize: %dK. Should be greater than InitialCodeCacheSize=%dK\n",
+                ReservedCodeCacheSize/K, InitialCodeCacheSize/K);
+    status = false;
+  }
+
   return status;
 }
 
@@ -2619,13 +2626,10 @@
     } else if (match_option(option, "-Xmaxjitcodesize", &tail) ||
                match_option(option, "-XX:ReservedCodeCacheSize=", &tail)) {
       julong long_ReservedCodeCacheSize = 0;
-      ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize,
-                                            (size_t)InitialCodeCacheSize);
+      ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize, 1);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),
-                    "Invalid maximum code cache size: %s. Should be greater than InitialCodeCacheSize=%dK\n",
-                    option->optionString, InitialCodeCacheSize/K);
-        describe_range_error(errcode);
+                    "Invalid maximum code cache size: %s.\n", option->optionString);
         return JNI_EINVAL;
       }
       FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize);
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -635,18 +635,22 @@
       // at an uncommon trap for an invoke (where the compiler
       // generates debug info before the invoke has executed)
       Bytecodes::Code cur_code = str.next();
-      if (cur_code == Bytecodes::_invokevirtual ||
-          cur_code == Bytecodes::_invokespecial ||
-          cur_code == Bytecodes::_invokestatic  ||
-          cur_code == Bytecodes::_invokeinterface) {
+      if (cur_code == Bytecodes::_invokevirtual   ||
+          cur_code == Bytecodes::_invokespecial   ||
+          cur_code == Bytecodes::_invokestatic    ||
+          cur_code == Bytecodes::_invokeinterface ||
+          cur_code == Bytecodes::_invokedynamic) {
         Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
         Symbol* signature = invoke.signature();
         ArgumentSizeComputer asc(signature);
         cur_invoke_parameter_size = asc.size();
-        if (cur_code != Bytecodes::_invokestatic) {
+        if (invoke.has_receiver()) {
           // Add in receiver
           ++cur_invoke_parameter_size;
         }
+        if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
+          callee_size_of_parameters++;
+        }
       }
       if (str.bci() < max_bci) {
         Bytecodes::Code bc = str.next();
@@ -661,6 +665,7 @@
             case Bytecodes::_invokespecial:
             case Bytecodes::_invokestatic:
             case Bytecodes::_invokeinterface:
+            case Bytecodes::_invokedynamic:
             case Bytecodes::_athrow:
               break;
             default: {
--- a/hotspot/src/share/vm/runtime/frame.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1008,6 +1008,7 @@
   OopClosure*     _f;
   int             _offset;        // the current offset, incremented with each argument
   bool            _has_receiver;  // true if the callee has a receiver
+  bool            _has_appendix;  // true if the call has an appendix
   frame           _fr;
   RegisterMap*    _reg_map;
   int             _arg_size;
@@ -1027,19 +1028,20 @@
   }
 
  public:
-  CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, OopClosure* f, frame fr,  const RegisterMap* reg_map)
+  CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr,  const RegisterMap* reg_map)
     : SignatureInfo(signature) {
 
     // initialize CompiledArgumentOopFinder
     _f         = f;
     _offset    = 0;
     _has_receiver = has_receiver;
+    _has_appendix = has_appendix;
     _fr        = fr;
     _reg_map   = (RegisterMap*)reg_map;
-    _arg_size  = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
+    _arg_size  = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);
 
     int arg_size;
-    _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, &arg_size);
+    _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
     assert(arg_size == _arg_size, "wrong arg size");
   }
 
@@ -1049,12 +1051,16 @@
       _offset++;
     }
     iterate_parameters();
+    if (_has_appendix) {
+      handle_oop_offset();
+      _offset++;
+    }
   }
 };
 
-void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f) {
+void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) {
   ResourceMark rm;
-  CompiledArgumentOopFinder finder(signature, has_receiver, f, *this, reg_map);
+  CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
   finder.oops_do();
 }
 
--- a/hotspot/src/share/vm/runtime/frame.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/runtime/frame.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -411,7 +411,7 @@
   oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const;
 
   // Oops-do's
-  void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f);
+  void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f);
   void oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
 
  private:
--- a/hotspot/src/share/vm/runtime/reflection.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/runtime/reflection.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,6 @@
 #include "runtime/signature.hpp"
 #include "runtime/vframe.hpp"
 
-#define JAVA_1_5_VERSION                  49
-
 static void trace_class_resolution(Klass* to_class) {
   ResourceMark rm;
   int line_number = -1;
@@ -375,7 +373,7 @@
     }
   }
   klass = klass->array_klass(dim, CHECK_NULL);
-  oop obj = ArrayKlass::cast(klass)->multi_allocate(len, dimensions, THREAD);
+  oop obj = ArrayKlass::cast(klass)->multi_allocate(len, dimensions, CHECK_NULL);
   assert(obj->is_array(), "just checking");
   return arrayOop(obj);
 }
@@ -507,9 +505,11 @@
       under_host_klass(accessee_ik, accessor))
     return true;
 
-  if (RelaxAccessControlCheck ||
-      (accessor_ik->major_version() < JAVA_1_5_VERSION &&
-       accessee_ik->major_version() < JAVA_1_5_VERSION)) {
+  if ((RelaxAccessControlCheck &&
+        accessor_ik->major_version() < Verifier::NO_RELAX_ACCESS_CTRL_CHECK_VERSION &&
+        accessee_ik->major_version() < Verifier::NO_RELAX_ACCESS_CTRL_CHECK_VERSION) ||
+      (accessor_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION &&
+       accessee_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION)) {
     return classloader_only &&
       Verifier::relax_verify_for(accessor_ik->class_loader()) &&
       accessor_ik->protection_domain() == accessee_ik->protection_domain() &&
@@ -817,6 +817,10 @@
     typeArrayOop an_oop = Annotations::make_java_array(method->parameter_annotations(), CHECK_NULL);
     java_lang_reflect_Constructor::set_parameter_annotations(ch(), an_oop);
   }
+  if (java_lang_reflect_Constructor::has_type_annotations_field()) {
+    typeArrayOop an_oop = Annotations::make_java_array(method->type_annotations(), CHECK_NULL);
+    java_lang_reflect_Constructor::set_type_annotations(ch(), an_oop);
+  }
   return ch();
 }
 
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -2726,7 +2726,7 @@
   return regs.first();
 }
 
-VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, int* arg_size) {
+VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
   // This method is returning a data structure allocating as a
   // ResourceObject, so do not put any ResourceMarks in here.
   char *s = sig->as_C_string();
@@ -2770,6 +2770,11 @@
     default : ShouldNotReachHere();
     }
   }
+
+  if (has_appendix) {
+    sig_bt[cnt++] = T_OBJECT;
+  }
+
   assert( cnt < 256, "grow table size" );
 
   int comp_args_on_stack;
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -410,7 +410,7 @@
 
   // Convert a sig into a calling convention register layout
   // and find interesting things about it.
-  static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, int *arg_size);
+  static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int *arg_size);
   static VMReg     name_for_receiver();
 
   // "Top of Stack" slots that may be unused by the calling convention but must
--- a/hotspot/src/share/vm/runtime/thread.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -638,9 +638,6 @@
   jint _hashStateZ ;
   void * _schedctl ;
 
-  intptr_t _ScratchA, _ScratchB ;              // Scratch locations for fast-path sync code
-  static ByteSize ScratchA_offset()            { return byte_offset_of(Thread, _ScratchA ); }
-  static ByteSize ScratchB_offset()            { return byte_offset_of(Thread, _ScratchB ); }
 
   volatile jint rng [4] ;                      // RNG for spin loop
 
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -292,10 +292,8 @@
   nonstatic_field(InstanceKlass,               _transitive_interfaces,                        Array<Klass*>*)                        \
   nonstatic_field(InstanceKlass,               _fields,                                       Array<u2>*)                            \
   nonstatic_field(InstanceKlass,               _java_fields_count,                            u2)                                    \
-  nonstatic_field(InstanceKlass,               _constants,                                    ConstantPool*)                  \
+  nonstatic_field(InstanceKlass,               _constants,                                    ConstantPool*)                         \
   nonstatic_field(InstanceKlass,               _class_loader_data,                            ClassLoaderData*)                      \
-  nonstatic_field(InstanceKlass,               _protection_domain,                            oop)                                   \
-  nonstatic_field(InstanceKlass,               _signers,                                      objArrayOop)                           \
   nonstatic_field(InstanceKlass,               _source_file_name,                             Symbol*)                               \
   nonstatic_field(InstanceKlass,               _source_debug_extension,                       char*)                                 \
   nonstatic_field(InstanceKlass,               _inner_classes,                               Array<jushort>*)                       \
--- a/hotspot/src/share/vm/services/memTracker.cpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/services/memTracker.cpp	Fri Jun 07 09:25:18 2013 -0700
@@ -34,6 +34,7 @@
 #include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/decoder.hpp"
+#include "utilities/defaultStream.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 bool NMT_track_callsite = false;
@@ -77,7 +78,15 @@
   if (strcmp(option_line, "=summary") == 0) {
     _tracking_level = NMT_summary;
   } else if (strcmp(option_line, "=detail") == 0) {
-    _tracking_level = NMT_detail;
+    // detail relies on a stack-walking ability that may not
+    // be available depending on platform and/or compiler flags
+    if (PLATFORM_NMT_DETAIL_SUPPORTED) {
+      _tracking_level = NMT_detail;
+    } else {
+      jio_fprintf(defaultStream::error_stream(),
+        "NMT detail is not supported on this platform.  Using NMT summary instead.");
+      _tracking_level = NMT_summary;
+    }
   } else if (strcmp(option_line, "=off") != 0) {
     vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
   }
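
A hypothetical sanity check for the fallback above (not part of this changeset), sketched with the com.oracle.java.testlibrary helpers already used by other tests in this patch; the commented-out assertion only applies on a platform where PLATFORM_NMT_DETAIL_SUPPORTED is false:

import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;

public class NMTDetailFallbackSketch {
    public static void main(String[] args) throws Exception {
        // Request detail tracking; the VM should still start, either with detail
        // tracking or, on unsupported platforms, downgraded to summary tracking.
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-XX:NativeMemoryTracking=detail", "-version");
        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        output.shouldHaveExitValue(0);
        // On a platform without NMT detail support, expect the warning:
        // output.shouldContain("NMT detail is not supported on this platform");
    }
}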
--- a/hotspot/src/share/vm/utilities/array.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/utilities/array.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -320,7 +320,7 @@
   void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) {
     size_t word_size = Array::size(length);
     return (void*) Metaspace::allocate(loader_data, word_size, read_only,
-                        Metaspace::NonClassType, CHECK_NULL);
+                                       MetaspaceObj::array_type(sizeof(T)), CHECK_NULL);
   }
 
   static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Fri Jun 07 09:25:18 2013 -0700
@@ -380,6 +380,14 @@
 # include "globalDefinitions_ppc.hpp"
 #endif
 
+/*
+ * If a platform does not support NMT_detail,
+ * the platform-specific globalDefinitions header (included above)
+ * can define PLATFORM_NMT_DETAIL_SUPPORTED as false.
+ */
+#ifndef PLATFORM_NMT_DETAIL_SUPPORTED
+#define PLATFORM_NMT_DETAIL_SUPPORTED true
+#endif
 
 // The byte alignment to be used by Arena::Amalloc.  See bugid 4169348.
 // Note: this value must be a power of 2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/8011771/Test8011771.java	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8011771
+ * @summary Array bound check elimination's in block motion doesn't always reset its data structures from one step to the other.
+ * @run main/othervm -XX:-BackgroundCompilation Test8011771
+ *
+ */
+
+public class Test8011771 {
+
+    static void m(int[] a, int[] b, int j) {
+        // Array bound check elimination inserts a predicate before
+        // the loop. We'll have the predicate fail, so the method is
+        // recompiled without optimistic optimizations
+        for (int i = 0; i < 10; i++) {
+            a[i] = i;
+        }
+
+        // The test itself
+        a[j] = 0;
+        a[j+5] = 0;
+        b[j+4] = 0; // this range check shouldn't be eliminated
+    }
+
+    public static void main(String[] args) {
+        int[] arr1 = new int[10], arr2 = new int[10];
+        // force compilation:
+        for (int i = 0; i < 5000; i++) {
+            m(arr1, arr2, 0);
+        }
+
+        try {
+            m(new int[1], null, 0); // force predicate failure
+        } catch(ArrayIndexOutOfBoundsException e) {}
+
+        // force compilation again (no optimistic opts):
+        for (int i = 0; i < 5000; i++) {
+            m(arr1, arr2, 0);
+        }
+
+        // Check that the range check on the second array wasn't optimized out
+        boolean success = false;
+        try {
+            m(arr1, new int[1], 0);
+        } catch(ArrayIndexOutOfBoundsException e) {
+            success = true;
+        }
+        if (success) {
+            System.out.println("TEST PASSED");
+        } else {
+            throw new RuntimeException("TEST FAILED: erroneous bound check elimination");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/8013496/Test8013496.sh	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,55 @@
+#!/bin/sh
+# 
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+# 
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+# 
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+# 
+#
+# @test
+# @bug 8013496
+# @summary Test checks that the order in which ReservedCodeCacheSize and
+#          InitialCodeCacheSize are passed to the VM is irrelevant.
+# @run shell Test8013496.sh
+#
+#
+## some tests require path to find test source dir
+if [ "${TESTSRC}" = "" ]
+then
+  TESTSRC=${PWD}
+  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
+fi
+echo "TESTSRC=${TESTSRC}"
+## Adding common setup Variables for running shell tests.
+. ${TESTSRC}/../../test_env.sh
+set -x
+
+${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:ReservedCodeCacheSize=2m -XX:InitialCodeCacheSize=500K -version > 1.out 2>&1
+${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:InitialCodeCacheSize=500K -XX:ReservedCodeCacheSize=2m -version > 2.out 2>&1
+
+diff 1.out 2.out
+
+result=$?
+if [ $result -eq 0 ] ; then  
+  echo "Test Passed"
+  exit 0
+else
+  echo "Test Failed"
+  exit 1
+fi
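
For comparison only, the same order-independence check could be sketched with the Java test library used elsewhere in this patch (this is not part of the changeset; it compares stdout only, whereas the shell test folds stderr in via 2>&1):

import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;

public class CodeCacheFlagOrderSketch {
    private static String run(String opt1, String opt2) throws Exception {
        // Start a child VM with the two flags and -version, and capture its output.
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(opt1, opt2, "-version");
        OutputAnalyzer out = new OutputAnalyzer(pb.start());
        out.shouldHaveExitValue(0);
        return out.getStdout();
    }

    public static void main(String[] args) throws Exception {
        String a = run("-XX:ReservedCodeCacheSize=2m", "-XX:InitialCodeCacheSize=500K");
        String b = run("-XX:InitialCodeCacheSize=500K", "-XX:ReservedCodeCacheSize=2m");
        if (!a.equals(b)) {
            throw new RuntimeException("VM output depends on code cache flag order");
        }
    }
}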
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/8015436/Test8015436.java	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8015436
+ * @summary the IK _initial_method_idnum value must be adjusted if overpass methods are added
+ * @run main Test8015436
+ *
+ */
+
+/*
+ * The test checks that a MemberName for defaultMethod() is cached in the
+ * class MemberNameTable without crashing a fastdebug VM.
+ * The original issue was that the InstanceKlass _initial_method_idnum was
+ * not adjusted properly when overpass methods were added to the class.
+ * Expected behavior: the test neither crashes nor throws any exceptions,
+ * and all invocations of defaultMethod() complete successfully.
+ */
+
+import java.lang.invoke.*;
+
+interface InterfaceWithDefaultMethod {
+    public void someMethod();
+
+    default public void defaultMethod(String str) {
+        System.out.println("defaultMethod() " + str);
+    }
+}
+
+public class Test8015436 implements InterfaceWithDefaultMethod {
+    @Override
+    public void someMethod() {
+        System.out.println("someMethod() invoked");
+    }
+
+    public static void main(String[] args) throws Throwable {
+        Test8015436 testObj = new Test8015436();
+        testObj.someMethod();
+        testObj.defaultMethod("invoked directly");
+
+        MethodHandles.Lookup lookup = MethodHandles.lookup();
+        MethodType   mt = MethodType.methodType(void.class, String.class);
+        MethodHandle mh = lookup.findVirtual(Test8015436.class, "defaultMethod", mt);
+        mh.invokeExact(testObj, "invoked via a MethodHandle");
+    }
+}
+
+/*
+ * A successful execution gives the output:
+ *   someMethod() invoked
+ *   defaultMethod() invoked directly
+ *   defaultMethod() invoked via a MethodHandle
+ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestSummarizeRSetStats.java	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestSummarizeRSetStats.java
+ * @bug 8013895
+ * @library /testlibrary
+ * @build TestSummarizeRSetStats
+ * @summary Verify output of -XX:+G1SummarizeRSetStats
+ * @run main TestSummarizeRSetStats
+ *
+ * Test the output of G1SummarizeRSetStats in conjunction with G1SummarizeRSetStatsPeriod.
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.lang.Thread;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+class RunSystemGCs {
+    // 4M size, both are directly allocated into the old gen
+    static Object[] largeObject1 = new Object[1024 * 1024];
+    static Object[] largeObject2 = new Object[1024 * 1024];
+
+    static int[] temp;
+
+    public static void main(String[] args) {
+        // create some cross-references between these objects
+        for (int i = 0; i < largeObject1.length; i++) {
+            largeObject1[i] = largeObject2;
+        }
+
+        for (int i = 0; i < largeObject2.length; i++) {
+            largeObject2[i] = largeObject1;
+        }
+
+        int numGCs = Integer.parseInt(args[0]);
+
+        if (numGCs > 0) {
+            // try to force a minor collection: the young gen is 4M, the
+            // amount of data allocated below is roughly that (4*1024*1024 +
+            // some header data)
+            for (int i = 0; i < 1024 ; i++) {
+                temp = new int[1024];
+            }
+        }
+
+        for (int i = 0; i < numGCs - 1; i++) {
+            System.gc();
+        }
+    }
+}
+
+public class TestSummarizeRSetStats {
+
+    public static String runTest(String[] additionalArgs, int numGCs) throws Exception {
+        ArrayList<String> finalargs = new ArrayList<String>();
+        String[] defaultArgs = new String[] {
+            "-XX:+UseG1GC",
+            "-Xmn4m",
+            "-Xmx20m",
+            "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking
+            "-XX:+PrintGC",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:G1HeapRegionSize=1M",
+        };
+
+        finalargs.addAll(Arrays.asList(defaultArgs));
+
+        if (additionalArgs != null) {
+            finalargs.addAll(Arrays.asList(additionalArgs));
+        }
+
+        finalargs.add(RunSystemGCs.class.getName());
+        finalargs.add(String.valueOf(numGCs));
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            finalargs.toArray(new String[0]));
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        output.shouldHaveExitValue(0);
+
+        String result = output.getStdout();
+        return result;
+    }
+
+    private static void expectStatistics(String result, int expectedCumulative, int expectedPeriodic) throws Exception {
+        int actualTotal = result.split("Concurrent RS processed").length - 1;
+        int actualCumulative = result.split("Cumulative RS summary").length - 1;
+
+        if (expectedCumulative != actualCumulative) {
+            throw new Exception("Incorrect amount of RSet summaries at the end. Expected " + expectedCumulative + ", got " + actualCumulative);
+        }
+
+        if (expectedPeriodic != (actualTotal - actualCumulative)) {
+            throw new Exception("Incorrect amount of per-period RSet summaries at the end. Expected " + expectedPeriodic + ", got " + (actualTotal - actualCumulative));
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        String result;
+
+        // no RSet statistics output
+        result = runTest(null, 0);
+        expectStatistics(result, 0, 0);
+
+        // no RSet statistics output
+        result = runTest(null, 2);
+        expectStatistics(result, 0, 0);
+
+        // no RSet statistics output
+        result = runTest(new String[] { "-XX:G1SummarizeRSetStatsPeriod=1" }, 3);
+        expectStatistics(result, 0, 0);
+
+        // single RSet statistics output at the end
+        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 0);
+        expectStatistics(result, 1, 0);
+
+        // single RSet statistics output at the end
+        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 2);
+        expectStatistics(result, 1, 0);
+
+        // single RSet statistics output
+        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 0);
+        expectStatistics(result, 1, 0);
+
+        // two times RSet statistics output
+        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1);
+        expectStatistics(result, 1, 1);
+
+        // four times RSet statistics output
+        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 3);
+        expectStatistics(result, 1, 3);
+
+        // three times RSet statistics output
+        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=2" }, 3);
+        expectStatistics(result, 1, 2);
+
+        // two times RSet statistics output (the period exceeds the number of GCs)
+        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=100" }, 3);
+        expectStatistics(result, 1, 1);
+    }
+}
+
--- a/hotspot/test/runtime/8007320/ConstMethodTest.java	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/test/runtime/8007320/ConstMethodTest.java	Fri Jun 07 09:25:18 2013 -0700
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 8007320
+ * @bug 8007320 8014709
  * @summary Test all optional fields in ConstMethod
  * @compile -g -parameters ConstMethodTest.java
  * @run main ConstMethodTest
@@ -74,6 +74,11 @@
 
 @MyAnnotation(name="someName", value = "Hello World")
 public class ConstMethodTest {
+    public @TypeAnno("constructor") ConstMethodTest() { }
+
+    public ConstMethodTest(int i) {
+        // needs a second unannotated constructor
+    }
 
     private static void check(boolean b) {
         if (!b)
@@ -139,10 +144,26 @@
         }
     }
 
+    private static void testConstructor() throws Exception {
+        for (Constructor c : ConstMethodTest.class.getDeclaredConstructors()) {
+            Annotation[] aa = c.getAnnotatedReturnType().getAnnotations();
+            if (c.getParameterTypes().length == 1) { // should be un-annotated
+                check(aa.length == 0);
+            } else if (c.getParameterTypes().length == 0) { // should be annotated
+                check(aa.length == 1);
+                check(((TypeAnno)aa[0]).value().equals("constructor"));
+            } else {
+                // should not happen
+                check(false);
+            }
+        }
+    }
+
     public static void main(java.lang.String[] unused) throws Throwable {
         // pass 5 so kitchenSinkFunc is instantiated with an int
         kitchenSinkFunc("parameter", "param2", 5);
         test1();
+        testConstructor();
     }
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/Metaspace/FragmentMetaspace.java	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @library /runtime/testlibrary
+ * @build GeneratedClassLoader
+ * @run main/othervm/timeout=200 FragmentMetaspace
+ */
+
+import java.io.IOException;
+
+/**
+ * Test that tries to fragment the native memory used by class loaders.
+ * This test creates class loaders that load classes of increasing size in every
+ * iteration. By increasing the size of the class metadata needed in each iteration,
+ * we stress the subsystem that allocates native memory for metadata.
+ */
+public class FragmentMetaspace {
+
+    public static void main(String... args) {
+        runGrowing(Long.valueOf(System.getProperty("time", "80000")));
+        // try to clean up and unload classes to decrease
+        // class verification time in debug vm
+        System.gc();
+    }
+
+    private static void runGrowing(long time) {
+        long startTime = System.currentTimeMillis();
+        for (int i = 0; System.currentTimeMillis() < startTime + time; ++i) {
+            try {
+                GeneratedClassLoader gcl = new GeneratedClassLoader();
+
+                Class<?> c = gcl.getGeneratedClasses(i, 100)[0];
+                c.newInstance();
+                c = null;
+
+                gcl = null;
+            } catch (IOException|InstantiationException|IllegalAccessException ex) {
+                throw new RuntimeException(ex);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/Metaspace/FragmentMetaspaceSimple.java	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @library /runtime/testlibrary
+ * @library classes
+ * @build test.Empty ClassUnloadCommon
+ * @run main/othervm/timeout=200 FragmentMetaspaceSimple
+ */
+
+import java.util.ArrayList;
+
+/**
+ * Test that tries to fragment the native memory used by class loaders.
+ * Keeps every other class loader alive in order to fragment the memory space
+ * used to store classes and metadata. Since the memory is typically allocated in
+ * chunks per class loader, this causes a lot of fragmentation if not handled
+ * properly, because every other chunk will be unused.
+ */
+public class FragmentMetaspaceSimple {
+    public static void main(String... args) {
+        runSimple(Long.valueOf(System.getProperty("time", "80000")));
+        System.gc();
+    }
+
+    private static void runSimple(long time) {
+        long startTime = System.currentTimeMillis();
+        ArrayList<ClassLoader> cls = new ArrayList<>();
+        for (int i = 0; System.currentTimeMillis() < startTime + time; ++i) {
+            ClassLoader ldr = ClassUnloadCommon.newClassLoader();
+            if (i % 1000 == 0) {
+                cls.clear();
+            }
+            // only keep every other class loader alive
+            if (i % 2 == 1) {
+                cls.add(ldr);
+            }
+            Class<?> c = null;
+            try {
+                c = ldr.loadClass("test.Empty");
+            } catch (ClassNotFoundException ex) {
+                throw new RuntimeException(ex);
+            }
+            c = null;
+        }
+        cls = null;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/Metaspace/classes/test/Empty.java	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package test;
+
+public class Empty {
+    public String toString() { return "nothing"; }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/contended/HasNonStatic.java	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.lang.Class;
+import java.lang.String;
+import java.lang.System;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CyclicBarrier;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import sun.misc.Unsafe;
+import sun.misc.Contended;
+
+/*
+ * @test
+ * @bug     8015270
+ * @summary \@Contended: fix multiple issues in the layout code
+ *
+ * @run main/othervm -XX:-RestrictContended HasNonStatic
+ */
+public class HasNonStatic {
+
+    public static void main(String[] args) throws Exception {
+        R1 r1 = new R1();
+        R2 r2 = new R2();
+        R3 r3 = new R3();
+        R4 r4 = new R4();
+    }
+
+    public static class R1 {
+        @Contended
+        Object o;
+    }
+
+    @Contended
+    public static class R2 {
+        Object o;
+    }
+
+    @Contended
+    public static class R3 {
+    }
+
+    public static class R4 extends R3 {
+    }
+
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/contended/OopMaps.java	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.lang.Class;
+import java.lang.String;
+import java.lang.System;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CyclicBarrier;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import sun.misc.Unsafe;
+import sun.misc.Contended;
+
+/*
+ * @test
+ * @bug     8015270
+ * @bug     8015493
+ * @summary \@Contended: fix multiple issues in the layout code
+ *
+ * @run main/othervm -XX:-RestrictContended -XX:ContendedPaddingWidth=128 -Xmx128m OopMaps
+ */
+public class OopMaps {
+
+    public static final int COUNT = 10000;
+
+    public static void main(String[] args) throws Exception {
+        Object o01 = new Object();
+        Object o02 = new Object();
+        Object o03 = new Object();
+        Object o04 = new Object();
+        Object o05 = new Object();
+        Object o06 = new Object();
+        Object o07 = new Object();
+        Object o08 = new Object();
+        Object o09 = new Object();
+        Object o10 = new Object();
+        Object o11 = new Object();
+        Object o12 = new Object();
+        Object o13 = new Object();
+        Object o14 = new Object();
+
+        R1[] rs = new R1[COUNT];
+
+        for (int i = 0; i < COUNT; i++) {
+           R1 r1 = new R1();
+           r1.o01 = o01;
+           r1.o02 = o02;
+           r1.o03 = o03;
+           r1.o04 = o04;
+           r1.o05 = o05;
+           r1.o06 = o06;
+           r1.o07 = o07;
+           r1.o08 = o08;
+           r1.o09 = o09;
+           r1.o10 = o10;
+           r1.o11 = o11;
+           r1.o12 = o12;
+           r1.o13 = o13;
+           r1.o14 = o14;
+           r1.i1 = 1;
+           r1.i2 = 2;
+           r1.i3 = 3;
+           r1.i4 = 4;
+           rs[i] = r1;
+        }
+
+        System.gc();
+
+        for (int i = 0; i < COUNT; i++) {
+           R1 r1 = rs[i];
+           if (r1.o01 != o01) throw new Error("Test Error: o01");
+           if (r1.o02 != o02) throw new Error("Test Error: o02");
+           if (r1.o03 != o03) throw new Error("Test Error: o03");
+           if (r1.o04 != o04) throw new Error("Test Error: o04");
+           if (r1.o05 != o05) throw new Error("Test Error: o05");
+           if (r1.o06 != o06) throw new Error("Test Error: o06");
+           if (r1.o07 != o07) throw new Error("Test Error: o07");
+           if (r1.o08 != o08) throw new Error("Test Error: o08");
+           if (r1.o09 != o09) throw new Error("Test Error: o09");
+           if (r1.o10 != o10) throw new Error("Test Error: o10");
+           if (r1.o11 != o11) throw new Error("Test Error: o11");
+           if (r1.o12 != o12) throw new Error("Test Error: o12");
+           if (r1.o13 != o13) throw new Error("Test Error: o13");
+           if (r1.o14 != o14) throw new Error("Test Error: o14");
+           if (r1.i1 != 1)    throw new Error("Test Error: i1");
+           if (r1.i2 != 2)    throw new Error("Test Error: i2");
+           if (r1.i3 != 3)    throw new Error("Test Error: i3");
+           if (r1.i4 != 4)    throw new Error("Test Error: i4");
+        }
+    }
+
+    public static class R0 {
+        int i1;
+        int i2;
+
+        Object o01;
+        Object o02;
+
+        @Contended
+        Object o03;
+
+        @Contended
+        Object o04;
+
+        @Contended
+        Object o05;
+
+        @Contended
+        Object o06;
+
+        @Contended
+        Object o07;
+   }
+
+   public static class R1 extends R0 {
+        int i3;
+        int i4;
+
+        Object o08;
+        Object o09;
+
+        @Contended
+        Object o10;
+
+        @Contended
+        Object o11;
+
+        @Contended
+        Object o12;
+
+        @Contended
+        Object o13;
+
+        @Contended
+        Object o14;
+   }
+
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/memory/MultiAllocateNullCheck.java	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test MultiAllocateNullCheck
+ * @bug 6726963
+ * @summary multi_allocate() call does not CHECK_NULL and causes crash in fastdebug bits
+ * @run main/othervm -Xmx32m MultiAllocateNullCheck
+ */
+
+import java.lang.reflect.Array;
+
+public class MultiAllocateNullCheck {
+    public static void main(String[] args) throws Exception {
+        Object x = null;
+        try
+        {
+            x = Array.newInstance(String.class, new int[]
+                {Integer.MAX_VALUE, Integer.MAX_VALUE});
+            System.out.println("Array was created");
+        } catch (OutOfMemoryError e) {
+            System.out.println("Out of memory occured, which is OK in this case");
+        }
+    }
+}
--- a/hotspot/test/runtime/memory/ReserveMemory.java	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/test/runtime/memory/ReserveMemory.java	Fri Jun 07 09:25:18 2013 -0700
@@ -34,29 +34,20 @@
 
 import com.oracle.java.testlibrary.*;
 
-import java.lang.reflect.Field;
 import sun.hotspot.WhiteBox;
-import sun.misc.Unsafe;
 
 public class ReserveMemory {
-  private static Unsafe getUnsafe() throws Exception {
-    Field f = Unsafe.class.getDeclaredField("theUnsafe");
-    f.setAccessible(true);
-    return (Unsafe)f.get(null);
-  }
-
   private static boolean isWindows() {
     return System.getProperty("os.name").toLowerCase().startsWith("win");
   }
 
+  private static boolean isOsx() {
+    return System.getProperty("os.name").toLowerCase().startsWith("mac");
+  }
+
   public static void main(String args[]) throws Exception {
     if (args.length > 0) {
-      long address = WhiteBox.getWhiteBox().reserveMemory(4096);
-
-      System.out.println("Reserved memory at address: 0x" + Long.toHexString(address));
-      System.out.println("Will now read from the address, expecting a crash!");
-
-      int x = getUnsafe().getInt(address);
+      WhiteBox.getWhiteBox().readReservedMemory();
 
       throw new Exception("Read of reserved/uncommitted memory unexpectedly succeeded, expected crash!");
     }
@@ -71,6 +62,8 @@
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
     if (isWindows()) {
       output.shouldContain("EXCEPTION_ACCESS_VIOLATION");
+    } else if (isOsx()) {
+      output.shouldContain("SIGBUS");
     } else {
       output.shouldContain("SIGSEGV");
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/testlibrary/GeneratedClassLoader.java	Fri Jun 07 09:25:18 2013 -0700
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import javax.tools.JavaCompiler;
+import javax.tools.ToolProvider;
+
+/**
+ * A class loader that generates new classes.
+ * The generated classes are made by first emitting Java sources with nested
+ * static classes; these are then compiled and the class files are read back.
+ * Some effort is made to make the generated classes unique and of
+ * non-trivial size.
+ */
+public class GeneratedClassLoader extends ClassLoader {
+    /**
+     * Holds a pair of class bytecodes and class name (for use with defineClass).
+     */
+    private static class GeneratedClass {
+        public byte[] bytes;
+        public String name;
+        public GeneratedClass(byte[] bytes, String name) {
+            this.bytes = bytes; this.name = name;
+        }
+    }
+
+    /**
+     * Used to uniquely name every class generated.
+     */
+    private static int count = 0;
+    /**
+     * Used to enable/disable keeping the class files and java sources for
+     * the generated classes.
+     */
+    private static boolean deleteFiles = Boolean.parseBoolean(
+        System.getProperty("GeneratedClassLoader.deleteFiles", "true"));
+
+    private static String bigstr =
+        "Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
+        + "In facilisis scelerisque vehicula. Donec congue nisi a "
+        + "leo posuere placerat lobortis felis ultrices. Pellentesque "
+        + "habitant morbi tristique senectus et netus et malesuada "
+        + "fames ac turpis egestas. Nam tristique velit at felis "
+        + "iaculis at tempor sem vestibulum. Sed adipiscing lectus "
+        + "non mi molestie sagittis. Morbi eu purus urna. Nam tempor "
+        + "tristique massa eget semper. Mauris cursus, nulla et ornare "
+        + "vehicula, leo dolor scelerisque metus, sit amet rutrum erat "
+        + "sapien quis dui. Nullam eleifend risus et velit accumsan sed "
+        + "suscipit felis pulvinar. Nullam faucibus suscipit gravida. "
+        + "Pellentesque habitant morbi tristique senectus et netus et "
+        + "malesuada fames ac turpis egestas. Nullam ut massa augue, "
+        + "nec viverra mauris.";
+
+    private static int getNextCount() {
+        return count++;
+    }
+
+    ////// end statics
+
+    private JavaCompiler javac;
+    private String nameBase;
+
+    public GeneratedClassLoader() {
+        javac = ToolProvider.getSystemJavaCompiler();
+        nameBase = "TestSimpleClass";
+    }
+
+    private long getBigValue(int which) {
+        // > 65536 is too large to encode in the bytecode
+        // so this will force us to emit a constant pool entry for this int
+        return (long)which + 65537;
+    }
+
+    private String getBigString(int which) {
+        return bigstr + which;
+    }
+
+    private String getClassName(int count) {
+        return nameBase + count;
+    }
+
+    private String generateSource(int count, int sizeFactor, int numClasses) {
+        StringBuilder sb = new StringBuilder();
+        sb.append("public class ").append(getClassName(count)).append("{\n");
+        for (int j = 0; j < numClasses; ++j) {
+            sb.append("public static class ")
+              .append("Class")
+              .append(j)
+              .append("{\n");
+            for (int i = 0; i < sizeFactor; ++i) {
+                int value = i;
+                sb.append("private long field")
+                  .append(i).append(" = ")
+                  .append(getBigValue(value++))
+                  .append(";\n");
+                sb.append("public long method")
+                  .append(i)
+                  .append("() {\n");
+                sb.append("return ")
+                  .append(getBigValue(value++))
+                  .append(";");
+                sb.append("}\n");
+                sb.append("private String str").append(i)
+                  .append(" = \"")
+                  .append(getBigString(i))
+                  .append("\";");
+            }
+            sb.append("\n}");
+        }
+        sb.append("\n}");
+        return sb.toString();
+    }
+
+    private GeneratedClass[] getGeneratedClass(int sizeFactor, int numClasses) throws IOException {
+        int uniqueCount = getNextCount();
+        String src = generateSource(uniqueCount, sizeFactor, numClasses);
+        String className = getClassName(uniqueCount);
+        File file = new File(className + ".java");
+        try (PrintWriter pw = new PrintWriter(new FileWriter(file))) {
+            pw.append(src);
+            pw.flush();
+        }
+        int exitcode = javac.run(null, null, null, file.getCanonicalPath());
+        if (exitcode != 0) {
+            throw new RuntimeException("javac failure when compiling: " +
+                    file.getCanonicalPath());
+        } else {
+            if (deleteFiles) {
+                file.delete();
+            }
+        }
+        GeneratedClass[] gc = new GeneratedClass[numClasses];
+        for (int i = 0; i < numClasses; ++i) {
+            String name = className + "$" + "Class" + i;
+            File classFile = new File(name + ".class");
+            byte[] bytes;
+            try (DataInputStream dis = new DataInputStream(new FileInputStream(classFile))) {
+                bytes = new byte[dis.available()];
+                dis.readFully(bytes);
+            }
+            if (deleteFiles) {
+                classFile.delete();
+            }
+            gc[i] = new GeneratedClass(bytes, name);
+        }
+        if (deleteFiles) {
+            new File(className + ".class").delete();
+        }
+        return gc;
+    }
+
+    /**
+     * Generate a single class, compile it and load it.
+     * @param sizeFactor Fuzzy measure of how large the class should be.
+     * @return the Class instance.
+     * @throws IOException
+     */
+    public Class<?> generateClass(int sizeFactor) throws IOException {
+        return getGeneratedClasses(sizeFactor, 1)[0];
+    }
+
+    /**
+     * Generate several classes, compile and load them.
+     * @param sizeFactor Fuzzy measure of how large each class should be.
+     * @param numClasses The number of classes to create
+     * @return an array of the Class instances.
+     * @throws IOException
+     */
+    public Class<?>[] getGeneratedClasses(int sizeFactor, int numClasses) throws IOException {
+        GeneratedClass[] gc = getGeneratedClass(sizeFactor, numClasses);
+        Class<?>[] classes = new Class[numClasses];
+        for (int i = 0; i < numClasses; ++i) {
+            classes[i] = defineClass(gc[i].name, gc[i].bytes, 0 , gc[i].bytes.length);
+        }
+        return classes;
+    }
+}
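
A brief usage sketch of this loader, assuming it is on the test classpath as in the FragmentMetaspace test above (illustrative only, not part of the changeset):

public class GeneratedClassLoaderUsageSketch {
    public static void main(String[] args) throws Exception {
        GeneratedClassLoader gcl = new GeneratedClassLoader();
        // Emit, compile and load three nested classes in one go; the size factor
        // roughly controls how many fields, methods and strings each class gets.
        Class<?>[] classes = gcl.getGeneratedClasses(10, 3);
        for (Class<?> c : classes) {
            // Each generated nested class has a public default constructor.
            System.out.println(c.getName() + " -> " + c.newInstance());
        }
    }
}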
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Jun 06 09:54:16 2013 -0700
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Jun 07 09:25:18 2013 -0700
@@ -115,7 +115,7 @@
   public native boolean isInStringTable(String str);
 
   // Memory
-  public native long reserveMemory(long size);
+  public native void readReservedMemory();
 
   // force Full GC
   public native void fullGC();