Merge
author amurillo
Fri, 18 Oct 2013 21:30:42 -0700
changeset 20736 9e1f4b025644
parent 20656 3e33a0db9817 (current diff)
parent 20735 7a50168793a7 (diff)
child 20737 203859308ae3
Merge
hotspot/src/share/vm/memory/metablock.cpp
hotspot/src/share/vm/memory/metablock.hpp
hotspot/test/compiler/8013496/Test8013496.sh
hotspot/test/gc/7168848/HumongousAlloc.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Fri Oct 18 21:30:42 2013 -0700
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for G1HeapRegionTable. It's essentially an index -> HeapRegion map.
+
+public class G1HeapRegionTable extends VMObject {
+    // HeapRegion** _base;
+    static private AddressField baseField;
+    // uint _length;
+    static private CIntegerField lengthField;
+    // HeapRegion** _biased_base
+    static private AddressField biasedBaseField;
+    // size_t _bias
+    static private CIntegerField biasField;
+    // uint _shift_by
+    static private CIntegerField shiftByField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("G1HeapRegionTable");
+
+        baseField = type.getAddressField("_base");
+        lengthField = type.getCIntegerField("_length");
+        biasedBaseField = type.getAddressField("_biased_base");
+        biasField = type.getCIntegerField("_bias");
+        shiftByField = type.getCIntegerField("_shift_by");
+    }
+
+    private HeapRegion at(long index) {
+        Address arrayAddr = baseField.getValue(addr);
+        // Offset of &_base[index]
+        long offset = index * VM.getVM().getAddressSize();
+        Address regionAddr = arrayAddr.getAddressAt(offset);
+        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
+                                                      regionAddr);
+    }
+
+    public long length() {
+        return lengthField.getValue(addr);
+    }
+
+    public long bias() {
+        return biasField.getValue(addr);
+    }
+
+    public long shiftBy() {
+        return shiftByField.getValue(addr);
+    }
+
+    private class HeapRegionIterator implements Iterator<HeapRegion> {
+        private long index;
+        private long length;
+
+        @Override
+        public boolean hasNext() { return index < length; }
+
+        @Override
+        public HeapRegion next() { return at(index++);    }
+
+        @Override
+        public void remove()     { /* not supported */    }
+
+        HeapRegionIterator(Address addr) {
+            index = 0;
+            length = length();
+        }
+    }
+
+    public Iterator<HeapRegion> heapRegionIterator() {
+        return new HeapRegionIterator(addr);
+    }
+
+    public G1HeapRegionTable(Address addr) {
+        super(addr);
+    }
+}
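A minimal sketch of how this mirror might be driven from SA code; the `tableAddr` value is an assumption (in practice it would come from the enclosing HeapRegionSeq), but `length()` and `heapRegionIterator()` are exactly the methods defined above:

    // Hypothetical usage: walk every HeapRegion tracked by the table.
    G1HeapRegionTable table = new G1HeapRegionTable(tableAddr);
    System.out.println("regions: " + table.length());
    Iterator<HeapRegion> iter = table.heapRegionIterator();
    while (iter.hasNext()) {
        HeapRegion region = iter.next();
        // inspect 'region' here
    }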
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Fri Oct 18 21:30:42 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,13 +37,11 @@
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;
 
-// Mirror class for HeapRegionSeq. It's essentially an index -> HeapRegion map.
+// Mirror class for HeapRegionSeq. It essentially encapsulates the G1HeapRegionTable.
 
 public class HeapRegionSeq extends VMObject {
-    // HeapRegion** _regions;
-    static private AddressField regionsField;
-    // uint _length;
-    static private CIntegerField lengthField;
+    // G1HeapRegionTable _regions
+    static private long regionsFieldOffset;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -56,44 +54,21 @@
     static private synchronized void initialize(TypeDataBase db) {
         Type type = db.lookupType("HeapRegionSeq");
 
-        regionsField = type.getAddressField("_regions");
-        lengthField = type.getCIntegerField("_length");
+        regionsFieldOffset = type.getField("_regions").getOffset();
     }
 
-    private HeapRegion at(long index) {
-        Address arrayAddr = regionsField.getValue(addr);
-        // Offset of &_region[index]
-        long offset = index * VM.getVM().getAddressSize();
-        Address regionAddr = arrayAddr.getAddressAt(offset);
-        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
-                                                      regionAddr);
+    private G1HeapRegionTable regions() {
+        Address regionsAddr = addr.addOffsetTo(regionsFieldOffset);
+        return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class,
+                                                             regionsAddr);
     }
 
     public long length() {
-        return lengthField.getValue(addr);
-    }
-
-    private class HeapRegionIterator implements Iterator<HeapRegion> {
-        private long index;
-        private long length;
-
-        @Override
-        public boolean hasNext() { return index < length; }
-
-        @Override
-        public HeapRegion next() { return at(index++);    }
-
-        @Override
-        public void remove()     { /* not supported */    }
-
-        HeapRegionIterator(Address addr) {
-            index = 0;
-            length = length();
-        }
+        return regions().length();
     }
 
     public Iterator<HeapRegion> heapRegionIterator() {
-        return new HeapRegionIterator(addr);
+        return regions().heapRegionIterator();
     }
 
     public HeapRegionSeq(Address addr) {
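The heart of this change: `_regions` is no longer a `HeapRegion**` the mirror can read as a pointer, but an embedded `G1HeapRegionTable` value, so the wrapper is constructed directly at `addr + offset` and everything else delegates to it. A side-by-side sketch of the two SA access patterns (variable names illustrative):

    // pointer field (old): read the pointer stored in the object
    Address array = regionsField.getValue(addr);
    // embedded value field (new): the data lives inside the object itself
    Address embedded = addr.addOffsetTo(regionsFieldOffset);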
--- a/hotspot/make/Makefile	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/Makefile	Fri Oct 18 21:30:42 2013 -0700
@@ -334,6 +334,11 @@
 	$(install-file)
 $(EXPORT_SERVER_DIR)/64/%.diz:    		$(C2_BUILD_DIR)/%.diz
 	$(install-file)
+# MacOS X
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(C2_BUILD_DIR)/%.dSYM
+	$(install-dir)
+$(EXPORT_SERVER_DIR)/%.dSYM:       		$(C2_BUILD_DIR)/%.dSYM
+	$(install-dir)
 endif
 
 # Client (C1)
@@ -379,6 +384,11 @@
 	$(install-file)
 $(EXPORT_CLIENT_DIR)/64/%.diz:    		$(C1_BUILD_DIR)/%.diz
 	$(install-file)
+# MacOS X
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(C1_BUILD_DIR)/%.dSYM
+	$(install-dir)
+$(EXPORT_CLIENT_DIR)/%.dSYM:       		$(C1_BUILD_DIR)/%.dSYM
+	$(install-dir)
 endif
 
 # Minimal1
@@ -424,6 +434,7 @@
 	$(install-file)
 $(EXPORT_MINIMAL_DIR)/64/%.diz:			$(MINIMAL1_BUILD_DIR)/%.diz
 	$(install-file)
+# MacOS X does not support Minimal1 config
 endif
 
 # Zero
@@ -446,6 +457,11 @@
 	$(install-file)
 $(EXPORT_SERVER_DIR)/%.diz:			$(ZERO_BUILD_DIR)/%.diz
 	$(install-file)
+# MacOS X
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(ZERO_BUILD_DIR)/%.dSYM
+	$(install-dir)
+$(EXPORT_SERVER_DIR)/%.dSYM:			$(ZERO_BUILD_DIR)/%.dSYM
+	$(install-dir)
 endif
 
 # Shark
@@ -468,6 +484,11 @@
 	$(install-file)
 $(EXPORT_SERVER_DIR)/%.diz:			$(SHARK_BUILD_DIR)/%.diz
 	$(install-file)
+# MacOS X
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(SHARK_BUILD_DIR)/%.dSYM
+	$(install-dir)
+$(EXPORT_SERVER_DIR)/%.dSYM:			$(SHARK_BUILD_DIR)/%.dSYM
+	$(install-dir)
 endif
 
 $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/%
--- a/hotspot/make/bsd/Makefile	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/bsd/Makefile	Fri Oct 18 21:30:42 2013 -0700
@@ -204,6 +204,7 @@
 BUILDTREE_MAKE    = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
 BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) LIBRARY_SUFFIX=$(LIBRARY_SUFFIX)
 BUILDTREE_VARS   += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
+BUILDTREE_VARS   += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY) ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) ZIPEXE=$(ZIPEXE)
 
 BUILDTREE         = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
 
@@ -337,9 +338,11 @@
 
 # Doc target.  This is the same for all build options.
 #     Hence create a docs directory beside ...$(ARCH)_[...]
+# We specify 'BUILD_FLAVOR=product' so that the proper
+# ENABLE_FULL_DEBUG_SYMBOLS value is used.
 docs: checks
 	$(QUIETLY) mkdir -p $(SUBDIR_DOCS)
-	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) jvmtidocs
+	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs
 
 # Synonyms for win32-like targets.
 compiler2:  debug product
--- a/hotspot/make/bsd/makefiles/buildtree.make	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/bsd/makefiles/buildtree.make	Fri Oct 18 21:30:42 2013 -0700
@@ -261,6 +261,16 @@
 	echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
 	[ -n "$(CFLAGS_BROWSE)" ] && \
 	    echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \
+	[ -n "$(ENABLE_FULL_DEBUG_SYMBOLS)" ] && \
+	    echo && echo "ENABLE_FULL_DEBUG_SYMBOLS = $(ENABLE_FULL_DEBUG_SYMBOLS)"; \
+	[ -n "$(OBJCOPY)" ] && \
+	    echo && echo "OBJCOPY = $(OBJCOPY)"; \
+	[ -n "$(STRIP_POLICY)" ] && \
+	    echo && echo "STRIP_POLICY = $(STRIP_POLICY)"; \
+	[ -n "$(ZIP_DEBUGINFO_FILES)" ] && \
+	    echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
+	[ -n "$(ZIPEXE)" ] && \
+	    echo && echo "ZIPEXE = $(ZIPEXE)"; \
 	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
 	    echo && \
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
--- a/hotspot/make/bsd/makefiles/defs.make	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/bsd/makefiles/defs.make	Fri Oct 18 21:30:42 2013 -0700
@@ -136,10 +136,127 @@
   endif
 endif
 
+OS_VENDOR:=$(shell uname -s)
+
+# determine whether HotSpot is being built with JDK6 or an earlier version
+JDK6_OR_EARLIER=0
+ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
+  # if the longer variable names (newer build style) are set, then check those
+  ifeq "$(shell expr \( $(JDK_MAJOR_VERSION) = 1 \& $(JDK_MINOR_VERSION) \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+else
+  # the longer variables aren't set so check the shorter variable names
+  ifeq "$(shell expr \( '$(JDK_MAJOR_VER)' = 1 \& '$(JDK_MINOR_VER)' \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+endif
+
+ifeq ($(JDK6_OR_EARLIER),0)
+  # Full Debug Symbols is supported on JDK7 or newer.
+  # The Full Debug Symbols (FDS) default for BUILD_FLAVOR == product
+  # builds is enabled with debug info files ZIP'ed to save space. For
+  # BUILD_FLAVOR != product builds, FDS is always enabled; after all, a
+  # debug build without debug info isn't very useful.
+  # The ZIP_DEBUGINFO_FILES option only has meaning when FDS is enabled.
+  #
+  # If you invoke a build with FULL_DEBUG_SYMBOLS=0, then FDS will be
+  # disabled for a BUILD_FLAVOR == product build.
+  #
+  # Note: Use of a different variable name for the FDS override option
+  # versus the FDS enabled check is intentional (FULL_DEBUG_SYMBOLS
+  # versus ENABLE_FULL_DEBUG_SYMBOLS). For auto build systems that pass
+  # in options via environment variables, use of distinct variables
+  # prevents strange behaviours. For example, in a BUILD_FLAVOR !=
+  # product build, the FULL_DEBUG_SYMBOLS environment variable will be
+  # 0, but the ENABLE_FULL_DEBUG_SYMBOLS make variable will be 1. If
+  # the same variable name is used, then different values can be picked
+  # up by different parts of the build. Just to be clear, we only need
+  # two variable names because the incoming option value can be
+  # overridden in some situations, e.g., a BUILD_FLAVOR != product
+  # build.
+
+  # Due to the multiple sub-make processes that occur, this logic gets
+  # executed multiple times. We reduce the noise by at least checking that
+  # BUILD_FLAVOR has been set.
+  ifneq ($(BUILD_FLAVOR),)
+    ifeq ($(BUILD_FLAVOR), product)
+      FULL_DEBUG_SYMBOLS ?= 1
+      ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
+    else
+      # debug variants always get Full Debug Symbols (if available)
+      ENABLE_FULL_DEBUG_SYMBOLS = 1
+    endif
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+    # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
+
+    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+      ifeq ($(OS_VENDOR), Darwin)
+          # MacOS X doesn't use OBJCOPY or STRIP_POLICY
+          OBJCOPY=
+          STRIP_POLICY=
+          ZIP_DEBUGINFO_FILES ?= 1
+      else
+        # Default OBJCOPY comes from GNU Binutils on BSD
+        ifeq ($(CROSS_COMPILE_ARCH),)
+          DEF_OBJCOPY=/usr/bin/objcopy
+        else
+          # Assume objcopy is part of the cross-compilation toolset
+          ifneq ($(ALT_COMPILER_PATH),)
+            DEF_OBJCOPY=$(ALT_COMPILER_PATH)/objcopy
+          endif
+        endif
+        OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
+        ifneq ($(ALT_OBJCOPY),)
+          _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
+          OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
+        endif
+
+        ifeq ($(OBJCOPY),)
+          _JUNK_ := $(shell \
+            echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo" \
+              "files. You may need to set ALT_OBJCOPY.")
+          ENABLE_FULL_DEBUG_SYMBOLS=0
+          _JUNK_ := $(shell \
+            echo >&2 "INFO:" \
+              "ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+        else
+          _JUNK_ := $(shell \
+            echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo" \
+              "files.")
+
+          # Library stripping policies for .debuginfo configs:
+          #   all_strip - strips everything from the library
+          #   min_strip - strips most stuff from the library; leaves
+          #               minimum symbols
+          #   no_strip  - does not strip the library at all
+          #
+          # Oracle security policy requires "all_strip". A waiver was
+          # granted on 2011.09.01 that permits using "min_strip" in the
+          # Java JDK and Java JRE.
+          #
+          # Currently, STRIP_POLICY is only used when Full Debug Symbols
+          # is enabled.
+          #
+          STRIP_POLICY ?= min_strip
+
+          _JUNK_ := $(shell \
+            echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
+
+          ZIP_DEBUGINFO_FILES ?= 1
+        endif
+
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
+      endif
+    endif # ENABLE_FULL_DEBUG_SYMBOLS=1
+  endif # BUILD_FLAVOR
+endif # JDK6_OR_EARLIER
+
 JDK_INCLUDE_SUBDIR=bsd
 
 # Library suffix
-OS_VENDOR:=$(shell uname -s)
 ifeq ($(OS_VENDOR),Darwin)
   LIBRARY_SUFFIX=dylib
 else
@@ -150,6 +267,19 @@
 
 # client and server subdirectories have symbolic links to ../libjsig.so
 EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
+
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+      EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
+  else
+    ifeq ($(OS_VENDOR), Darwin)
+        EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
+    else
+        EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+    endif
+  endif
+endif
+
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
 EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
 EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
@@ -157,34 +287,76 @@
 ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
+
+  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.diz
+    else
+      ifeq ($(OS_VENDOR), Darwin)
+          EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX).dSYM
+      else
+          EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
+      endif
+    endif
+  endif
 endif
 
 ifeq ($(JVM_VARIANT_CLIENT),true)
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
+
+  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.diz
+    else
+      ifeq ($(OS_VENDOR), Darwin)
+          EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX).dSYM
+      else
+          EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
+      endif
+    endif
+  endif
 endif
 
 ifeq ($(JVM_VARIANT_MINIMAL1),true)
   EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.$(LIBRARY_SUFFIX)
-
-  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-    ifeq ($(ZIP_DEBUGINFO_FILES),1)
-	EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.diz
-    else
-	EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo
-    endif
-  endif
 endif
 
 # Serviceability Binaries
 # No SA Support for PPC, IA64, ARM or zero
 ADD_SA_BINARIES/x86   = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
                         $(EXPORT_LIB_DIR)/sa-jdi.jar
+
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+      ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
+  else
+    ifeq ($(OS_VENDOR), Darwin)
+        ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
+    else
+        ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+    endif
+  endif
+endif
+
 ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
                         $(EXPORT_LIB_DIR)/sa-jdi.jar
 ADD_SA_BINARIES/universal = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
                             $(EXPORT_LIB_DIR)/sa-jdi.jar
+
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+      ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
+  else
+    ifeq ($(OS_VENDOR), Darwin)
+        ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
+    else
+        ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+    endif
+  endif
+endif
+
 ADD_SA_BINARIES/ppc   =
 ADD_SA_BINARIES/ia64  =
 ADD_SA_BINARIES/arm   =
@@ -225,6 +397,19 @@
     # Files to simply copy in place
     UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
     UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
+    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+      ifeq ($(ZIP_DEBUGINFO_FILES),1)
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.diz
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.diz
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.diz
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.diz
+      else
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX).dSYM
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX).dSYM
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
+      endif
+    endif
 
   endif
 endif
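The block above boils down to three user-visible knobs: FULL_DEBUG_SYMBOLS (the override), STRIP_POLICY, and ZIP_DEBUGINFO_FILES. Hypothetical invocations against this Makefile (whether an outer build wrapper forwards these variables is an assumption):

    # default product build: FDS on, debug info zipped into .diz files
    make product

    # keep unzipped .debuginfo/.dSYM files and skip stripping
    make product ZIP_DEBUGINFO_FILES=0 STRIP_POLICY=no_strip

    # opt out of Full Debug Symbols for a product build
    make product FULL_DEBUG_SYMBOLS=0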
--- a/hotspot/make/bsd/makefiles/dtrace.make	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/bsd/makefiles/dtrace.make	Fri Oct 18 21:30:42 2013 -0700
@@ -39,9 +39,15 @@
 JVM_DB = libjvm_db
 LIBJVM_DB = libjvm_db.dylib
 
+LIBJVM_DB_DEBUGINFO   = libjvm_db.dylib.dSYM
+LIBJVM_DB_DIZ         = libjvm_db.diz
+
 JVM_DTRACE = jvm_dtrace
 LIBJVM_DTRACE = libjvm_dtrace.dylib
 
+LIBJVM_DTRACE_DEBUGINFO   = libjvm_dtrace.dylib.dSYM
+LIBJVM_DTRACE_DIZ         = libjvm_dtrace.diz
+
 JVMOFFS = JvmOffsets
 JVMOFFS.o = $(JVMOFFS).o
 GENOFFS = generate$(JVMOFFS)
@@ -76,21 +82,87 @@
 # Making 64/libjvm_db.so: 64-bit version of libjvm_db.so which handles 32-bit libjvm.so
 ifneq ("${ISA}","${BUILDARCH}")
 
-XLIBJVM_DB = 64/$(LIBJVM_DB)
-XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
+XLIBJVM_DIR = 64
+XLIBJVM_DB = $(XLIBJVM_DIR)/$(LIBJVM_DB)
+XLIBJVM_DTRACE = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE)
 XARCH = $(subst sparcv9,v9,$(shell echo $(ISA)))
 
+XLIBJVM_DB_DEBUGINFO       = $(XLIBJVM_DIR)/$(LIBJVM_DB_DEBUGINFO)
+XLIBJVM_DB_DIZ             = $(XLIBJVM_DIR)/$(LIBJVM_DB_DIZ)
+XLIBJVM_DTRACE_DEBUGINFO   = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DEBUGINFO)
+XLIBJVM_DTRACE_DIZ         = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DIZ)
+
 $(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
 	@echo Making $@
-	$(QUIETLY) mkdir -p 64/ ; \
+	$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
 	$(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c #-lc
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the archived name:
+	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -r -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) )
+	$(RM) -r $(XLIBJVM_DB_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DB_DEBUGINFO)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the link name:
+        $(QUIETLY) ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the archived name:
+	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) )
+	$(RM) $(XLIBJVM_DB_DEBUGINFO)
+    endif
+  endif
+endif
 
 $(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
-	$(QUIETLY) mkdir -p 64/ ; \
+	$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
 	$(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the archived name:
+	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -r -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) )
+	$(RM) -r $(XLIBJVM_DTRACE_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the link name:
+	( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the archived name:
+	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) )
+	$(RM) $(XLIBJVM_DTRACE_DEBUGINFO)
+    endif
+  endif
+endif
 
 endif # ifneq ("${ISA}","${BUILDARCH}")
 
@@ -134,11 +206,59 @@
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -Wall # -lc
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -r -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO)
+	$(RM) -r $(LIBJVM_DB_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO)
+	$(RM) $(LIBJVM_DB_DEBUGINFO)
+    endif
+  endif
+endif
 
 $(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I.  \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -r -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) 
+	$(RM) -r $(LIBJVM_DTRACE_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) 
+	$(RM) $(LIBJVM_DTRACE_DEBUGINFO)
+    endif
+  endif
+endif
 
 #$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
 #             $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
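The "do this part in the $(XLIBJVM_DIR) subdir" comments exist because both zip and `--add-gnu-debuglink` record the path string they are given verbatim: zip stores it as the archive entry name, and objcopy embeds it in the `.gnu_debuglink` section the debugger later uses to locate the debug file. A quick illustration (file names as defined above, run from the build directory):

    $ ( cd 64 && zip -q -y libjvm_db.diz libjvm_db.debuginfo )   # entry: libjvm_db.debuginfo
    $ zip -q -y 64/libjvm_db.diz 64/libjvm_db.debuginfo          # entry: 64/libjvm_db.debuginfo

The first form is what the makefile does; the second would bake the `64/` prefix into the archive (and, in the debuglink case, into the library), breaking lookup once the files are installed side by side.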
--- a/hotspot/make/bsd/makefiles/gcc.make	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/bsd/makefiles/gcc.make	Fri Oct 18 21:30:42 2013 -0700
@@ -83,6 +83,11 @@
   AS   = $(CC) -c 
 endif
 
+ifeq ($(OS_VENDOR), Darwin)
+  ifeq ($(DSYMUTIL),)
+    DSYMUTIL=dsymutil
+  endif
+endif
 
 ifeq ($(USE_CLANG), true)
   CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1)
@@ -434,6 +439,36 @@
   ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
   DEBUG_CFLAGS += -gstabs
   endif
+  
+  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+    FASTDEBUG_CFLAGS/ia64  = -g
+    FASTDEBUG_CFLAGS/amd64 = -g
+    FASTDEBUG_CFLAGS/arm   = -g
+    FASTDEBUG_CFLAGS/ppc   = -g
+    FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH))
+    ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
+      ifeq ($(USE_CLANG), true)
+        # Clang doesn't understand -gstabs
+        FASTDEBUG_CFLAGS += -g
+      else
+        FASTDEBUG_CFLAGS += -gstabs
+      endif
+    endif
+  
+    OPT_CFLAGS/ia64  = -g
+    OPT_CFLAGS/amd64 = -g
+    OPT_CFLAGS/arm   = -g
+    OPT_CFLAGS/ppc   = -g
+    OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
+    ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
+      ifeq ($(USE_CLANG), true)
+        # Clang doesn't understand -gstabs
+        OPT_CFLAGS += -g
+      else
+        OPT_CFLAGS += -gstabs
+      endif
+    endif
+  endif
 endif
 
 # If we are building HEADLESS, pass on to VM
--- a/hotspot/make/bsd/makefiles/jsig.make	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/bsd/makefiles/jsig.make	Fri Oct 18 21:30:42 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -29,13 +29,21 @@
 
 ifeq ($(OS_VENDOR), Darwin)
   LIBJSIG   = lib$(JSIG).dylib
+
+  LIBJSIG_DEBUGINFO   = lib$(JSIG).dylib.dSYM
+  LIBJSIG_DIZ         = lib$(JSIG).diz
 else
   LIBJSIG   = lib$(JSIG).so
+
+  LIBJSIG_DEBUGINFO   = lib$(JSIG).debuginfo
+  LIBJSIG_DIZ         = lib$(JSIG).diz
 endif
 
 JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
 
-DEST_JSIG  = $(JDK_LIBDIR)/$(LIBJSIG)
+DEST_JSIG           = $(JDK_LIBDIR)/$(LIBJSIG)
+DEST_JSIG_DEBUGINFO = $(JDK_LIBDIR)/$(LIBJSIG_DEBUGINFO)
+DEST_JSIG_DIZ       = $(JDK_LIBDIR)/$(LIBJSIG_DIZ)
 
 LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig
 
@@ -55,9 +63,42 @@
 	@echo Making signal interposition lib...
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $<
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -r -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
+	$(RM) -r $(LIBJSIG_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
+	$(RM) $(LIBJSIG_DEBUGINFO)
+    endif
+  endif
+endif
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
+ifeq ($(OS_VENDOR), Darwin)
+	$(QUIETLY) test -d $(LIBJSIG_DEBUGINFO) && \
+	    cp -f -r $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
+else
+	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
+	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
+endif
+	$(QUIETLY) test -f $(LIBJSIG_DIZ) && \
+	    cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ)
 	$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
 
 .PHONY: install_jsig
--- a/hotspot/make/bsd/makefiles/product.make	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/bsd/makefiles/product.make	Fri Oct 18 21:30:42 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -43,15 +43,17 @@
 SYSDEFS += -DPRODUCT
 VERSION = optimized
 
-# use -g to strip library as -x will discard its symbol table; -x is fine for
-# executables.
-ifdef CROSS_COMPILE_ARCH
-  STRIP = $(ALT_COMPILER_PATH)/strip
-else
-  STRIP = strip
+ifneq ($(OS_VENDOR), Darwin)
+  # use -g to strip library as -x will discard its symbol table; -x is fine for
+  # executables.
+  ifdef CROSS_COMPILE_ARCH
+    STRIP = $(ALT_COMPILER_PATH)/strip
+  else
+    STRIP = strip
+  endif
+  STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
+  STRIP_AOUT   = $(STRIP) -x $@ || exit 1;
+
+  # Don't strip in VM build; JDK build will strip libraries later
+  # LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
 endif
-STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
-STRIP_AOUT   = $(STRIP) -x $@ || exit 1;
-
-# Don't strip in VM build; JDK build will strip libraries later
-# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
--- a/hotspot/make/bsd/makefiles/saproc.make	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/bsd/makefiles/saproc.make	Fri Oct 18 21:30:42 2013 -0700
@@ -28,9 +28,15 @@
 SAPROC   = saproc
 
 ifeq ($(OS_VENDOR), Darwin)
-  LIBSAPROC   = lib$(SAPROC).dylib
+  LIBSAPROC           = lib$(SAPROC).dylib
+
+  LIBSAPROC_DEBUGINFO = lib$(SAPROC).dylib.dSYM
+  LIBSAPROC_DIZ       = lib$(SAPROC).diz
 else
-  LIBSAPROC   = lib$(SAPROC).so
+  LIBSAPROC           = lib$(SAPROC).so
+
+  LIBSAPROC_DEBUGINFO = lib$(SAPROC).debuginfo
+  LIBSAPROC_DIZ       = lib$(SAPROC).diz
 endif
 
 AGENT_DIR = $(GAMMADIR)/agent
@@ -70,7 +76,9 @@
 
 SAMAPFILE = $(SASRCDIR)/mapfile
 
-DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC)
+DEST_SAPROC           = $(JDK_LIBDIR)/$(LIBSAPROC)
+DEST_SAPROC_DEBUGINFO = $(JDK_LIBDIR)/$(LIBSAPROC_DEBUGINFO)
+DEST_SAPROC_DIZ       = $(JDK_LIBDIR)/$(LIBSAPROC_DIZ)
 
 # DEBUG_BINARIES overrides everything, use full -g debug information
 ifeq ($(DEBUG_BINARIES), true)
@@ -117,11 +125,42 @@
 	           $(SA_DEBUG_CFLAGS)                                   \
 	           -o $@                                                \
 	           $(SALIBS)
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -r -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
+	$(RM) -r $(LIBSAPROC_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
+	$(RM) $(LIBSAPROC_DEBUGINFO)
+    endif
+  endif
+endif
 
 install_saproc: $(BUILDLIBSAPROC)
-	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
-	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
-	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
-	fi
+	@echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"
+ifeq ($(OS_VENDOR), Darwin)
+	$(QUIETLY) test -d $(LIBSAPROC_DEBUGINFO) && \
+	    cp -f -r $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
+else
+	$(QUIETLY) test -f $(LIBSAPROC_DEBUGINFO) && \
+	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
+endif
+	$(QUIETLY) test -f $(LIBSAPROC_DIZ) && \
+	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ)
+	$(QUIETLY) cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done"
 
 .PHONY: install_saproc
--- a/hotspot/make/bsd/makefiles/universal.gmk	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/bsd/makefiles/universal.gmk	Fri Oct 18 21:30:42 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 # macosx universal builds
@@ -35,15 +35,15 @@
 all_product_universal:
 #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_PRODUCT_TARGETS)
 	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_PRODUCT_TARGETS)
-	$(QUIETLY) $(MAKE) EXPORT_SUBDIR= universalize
+	$(QUIETLY) $(MAKE) BUILD_FLAVOR=product EXPORT_SUBDIR= universalize
 all_fastdebug_universal:
 #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_FASTDEBUG_TARGETS)
 	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_FASTDEBUG_TARGETS)
-	$(QUIETLY) $(MAKE) EXPORT_SUBDIR=/fastdebug universalize
+	$(QUIETLY) $(MAKE) BUILD_FLAVOR=fastdebug EXPORT_SUBDIR=/fastdebug universalize
 all_debug_universal:
 #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_DEBUG_TARGETS)
 	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_DEBUG_TARGETS)
-	$(QUIETLY) $(MAKE) EXPORT_SUBDIR=/debug universalize
+	$(QUIETLY) $(MAKE) BUILD_FLAVOR=debug EXPORT_SUBDIR=/debug universalize
 
 
 # Consolidate architecture builds into a single Universal binary
@@ -57,18 +57,18 @@
 	if [ -n "$${BUILT_LIPO_FILES}" ]; then \
 	  $(MKDIR) -p $(shell dirname $@); \
 	  lipo -create -output $@ $${BUILT_LIPO_FILES}; \
-	fi	
+	fi
 
 
 # Copy built non-universal binaries in place
+# - copies directories, including empty dirs
+# - copies files, symlinks, and other non-directory files
 $(UNIVERSAL_COPY_LIST):
-	BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
+	BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`"; \
 	if [ -n "$${BUILT_COPY_FILES}" ]; then \
 	  for i in $${BUILT_COPY_FILES}; do \
-	    if [ -f $${i} ]; then \
-	      $(MKDIR) -p $(shell dirname $@); \
-	      $(CP) $${i} $@; \
-	    fi; \
+	    $(MKDIR) -p $(shell dirname $@); \
+	    $(CP) -R $${i} $@; \
 	  done; \
 	fi
 
--- a/hotspot/make/bsd/makefiles/vm.make	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/bsd/makefiles/vm.make	Fri Oct 18 21:30:42 2013 -0700
@@ -60,10 +60,16 @@
 # The order is important for the precompiled headers to work.
 INCLUDES += $(PRECOMPILED_HEADER_DIR:%=-I%) $(Src_Dirs_I:%=-I%)
 
-ifeq (${VERSION}, debug)
+# SYMFLAG is used by {jsig,saproc}.make
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  # always build with debug info when we can create .dSYM/.debuginfo files
   SYMFLAG = -g
 else
-  SYMFLAG =
+  ifeq (${VERSION}, debug)
+    SYMFLAG = -g
+  else
+    SYMFLAG =
+  endif
 endif
 
 # HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
@@ -147,8 +153,14 @@
   ifeq (${VERSION}, $(filter ${VERSION}, debug fastdebug))
     CFLAGS += -DALLOW_OPERATOR_NEW_USAGE
   endif
+
+  LIBJVM_DEBUGINFO   = lib$(JVM).dylib.dSYM
+  LIBJVM_DIZ         = lib$(JVM).diz
 else
   LIBJVM   = lib$(JVM).so
+
+  LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
+  LIBJVM_DIZ         = lib$(JVM).diz
 endif
 
 SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
@@ -322,10 +334,47 @@
 	    rm -f $@.1; ln -s $@ $@.1;                                  \
 	}
 
-DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -r -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
+	$(RM) -r $(LIBJVM_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
+	$(RM) $(LIBJVM_DEBUGINFO)
+    endif
+  endif
+endif
+
+DEST_SUBDIR        = $(JDK_LIBDIR)/$(VM_SUBDIR)
+DEST_JVM           = $(DEST_SUBDIR)/$(LIBJVM)
+DEST_JVM_DEBUGINFO = $(DEST_SUBDIR)/$(LIBJVM_DEBUGINFO)
+DEST_JVM_DIZ       = $(DEST_SUBDIR)/$(LIBJVM_DIZ)
 
 install_jvm: $(LIBJVM)
 	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
+ifeq ($(OS_VENDOR), Darwin)
+	$(QUIETLY) test -d $(LIBJVM_DEBUGINFO) && \
+	    cp -f -r $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
+else
+	$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
+	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
+endif
+	$(QUIETLY) test -f $(LIBJVM_DIZ) && \
+	    cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ)
 	$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
 
 #----------------------------------------------------------------------
@@ -340,11 +389,8 @@
 #----------------------------------------------------------------------
 
 ifeq ($(OS_VENDOR), Darwin)
-$(LIBJVM).dSYM: $(LIBJVM)
-	dsymutil $(LIBJVM)
-
 # no libjvm_db for macosx
-build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck
 	echo "Doing vm.make build:"
 else
 build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
--- a/hotspot/make/defs.make	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/defs.make	Fri Oct 18 21:30:42 2013 -0700
@@ -77,6 +77,16 @@
 @$(RM) $@
 $(CP) $< $@
 endef
+
+# MacOS X strongly discourages 'cp -r' and provides 'cp -R' instead.
+# We may need a MacOS X specific definition of install-dir
+# at some point in the future.
+define install-dir
+@$(MKDIR) -p $(@D)
+@$(RM) -r $@
+$(CP) -r $< $@
+endef
+
 define prep-target
 @$(MKDIR) -p $(@D)
 @$(RM) $@
--- a/hotspot/make/hotspot_version	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/make/hotspot_version	Fri Oct 18 21:30:42 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=54
+HS_BUILD_NUMBER=55
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -3100,6 +3100,10 @@
   }
 }
 
+void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
+  fatal("Type profiling not implemented on this platform");
+}
+
 void LIR_Assembler::align_backward_branch_target() {
   __ align(OptoLoopAlignment);
 }
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1076,6 +1076,25 @@
 
   __ verify_not_null_oop(Oexception);
 
+#ifdef ASSERT
+  // check that fields in JavaThread for exception oop and issuing pc are
+  // empty before writing to them
+  Label oop_empty;
+  Register scratch = I7;  // We can use I7 here because it's overwritten later anyway.
+  __ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch);
+  __ br_null(scratch, false, Assembler::pt, oop_empty);
+  __ delayed()->nop();
+  __ stop("exception oop already set");
+  __ bind(oop_empty);
+
+  Label pc_empty;
+  __ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch);
+  __ br_null(scratch, false, Assembler::pt, pc_empty);
+  __ delayed()->nop();
+  __ stop("exception pc already set");
+  __ bind(pc_empty);
+#endif
+
   // save the exception and issuing pc in the thread
   __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
   __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -76,6 +76,8 @@
 // GC Ergo Flags
 define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
 
+define_pd_global(uintx, TypeProfileLevel, 0);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
                                                                             \
   product(intx, UseVIS, 99,                                                 \
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -3581,6 +3581,7 @@
   // the pending exception will be picked up the interpreter.
   __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
+  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
   __ bind(noException);
 
   // deallocate the deoptimization frame taking care to preserve the return values
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -3632,6 +3632,161 @@
   }
 }
 
+void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
+  Register obj = op->obj()->as_register();
+  Register tmp = op->tmp()->as_pointer_register();
+  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
+  ciKlass* exact_klass = op->exact_klass();
+  intptr_t current_klass = op->current_klass();
+  bool not_null = op->not_null();
+  bool no_conflict = op->no_conflict();
+
+  Label update, next, none;
+
+  bool do_null = !not_null;
+  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
+  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
+
+  assert(do_null || do_update, "why are we here?");
+  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
+
+  __ verify_oop(obj);
+
+  if (tmp != obj) {
+    __ mov(tmp, obj);
+  }
+  if (do_null) {
+    __ testptr(tmp, tmp);
+    __ jccb(Assembler::notZero, update);
+    if (!TypeEntries::was_null_seen(current_klass)) {
+      __ orptr(mdo_addr, TypeEntries::null_seen);
+    }
+    if (do_update) {
+#ifndef ASSERT
+      __ jmpb(next);
+    }
+#else
+      __ jmp(next);
+    }
+  } else {
+    __ testptr(tmp, tmp);
+    __ jccb(Assembler::notZero, update);
+    __ stop("unexpect null obj");
+#endif
+  }
+
+  __ bind(update);
+
+  if (do_update) {
+#ifdef ASSERT
+    if (exact_klass != NULL) {
+      Label ok;
+      __ load_klass(tmp, tmp);
+      __ push(tmp);
+      __ mov_metadata(tmp, exact_klass->constant_encoding());
+      __ cmpptr(tmp, Address(rsp, 0));
+      __ jccb(Assembler::equal, ok);
+      __ stop("exact klass and actual klass differ");
+      __ bind(ok);
+      __ pop(tmp);
+    }
+#endif
+    if (!no_conflict) {
+      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
+        if (exact_klass != NULL) {
+          __ mov_metadata(tmp, exact_klass->constant_encoding());
+        } else {
+          __ load_klass(tmp, tmp);
+        }
+
+        __ xorptr(tmp, mdo_addr);
+        __ testptr(tmp, TypeEntries::type_klass_mask);
+        // klass seen before, nothing to do. The unknown bit may have been
+        // set already but no need to check.
+        __ jccb(Assembler::zero, next);
+
+        __ testptr(tmp, TypeEntries::type_unknown);
+        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
+
+        if (TypeEntries::is_type_none(current_klass)) {
+          __ cmpptr(mdo_addr, 0);
+          __ jccb(Assembler::equal, none);
+          __ cmpptr(mdo_addr, TypeEntries::null_seen);
+          __ jccb(Assembler::equal, none);
+          // There is a chance that the checks above (re-reading profiling
+          // data from memory) fail if another thread has just set the
+          // profiling to this obj's klass
+          __ xorptr(tmp, mdo_addr);
+          __ testptr(tmp, TypeEntries::type_klass_mask);
+          __ jccb(Assembler::zero, next);
+        }
+      } else {
+        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
+
+        __ movptr(tmp, mdo_addr);
+        __ testptr(tmp, TypeEntries::type_unknown);
+        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
+      }
+
+      // different than before. Cannot keep accurate profile.
+      __ orptr(mdo_addr, TypeEntries::type_unknown);
+
+      if (TypeEntries::is_type_none(current_klass)) {
+        __ jmpb(next);
+
+        __ bind(none);
+        // first time here. Set profile type.
+        __ movptr(mdo_addr, tmp);
+      }
+    } else {
+      // There's a single possible klass at this profile point
+      assert(exact_klass != NULL, "should be");
+      if (TypeEntries::is_type_none(current_klass)) {
+        __ mov_metadata(tmp, exact_klass->constant_encoding());
+        __ xorptr(tmp, mdo_addr);
+        __ testptr(tmp, TypeEntries::type_klass_mask);
+#ifdef ASSERT
+        __ jcc(Assembler::zero, next);
+
+        {
+          Label ok;
+          __ push(tmp);
+          __ cmpptr(mdo_addr, 0);
+          __ jcc(Assembler::equal, ok);
+          __ cmpptr(mdo_addr, TypeEntries::null_seen);
+          __ jcc(Assembler::equal, ok);
+          // may have been set by another thread
+          __ mov_metadata(tmp, exact_klass->constant_encoding());
+          __ xorptr(tmp, mdo_addr);
+          __ testptr(tmp, TypeEntries::type_mask);
+          __ jcc(Assembler::zero, ok);
+
+          __ stop("unexpected profiling mismatch");
+          __ bind(ok);
+          __ pop(tmp);
+        }
+#else
+        __ jccb(Assembler::zero, next);
+#endif
+        // first time here. Set profile type.
+        __ movptr(mdo_addr, tmp);
+      } else {
+        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
+
+        __ movptr(tmp, mdo_addr);
+        __ testptr(tmp, TypeEntries::type_unknown);
+        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
+
+        __ orptr(mdo_addr, TypeEntries::type_unknown);
+      }
+    }
+
+    __ bind(next);
+  }
+}
+
 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
   Unimplemented();
 }
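The recurring `xorptr`/`testptr` pairs in emit_profile_type implement a compare that tolerates flag bits: the profile cell packs a klass pointer together with `null_seen`/`type_unknown` flags in its low bits, and since klass pointers are aligned (low bits zero), XORing the observed klass against the cell cancels the pointer bits when they match while leaving the flag bits visible. A schematic C restatement (not the emitted code, just its logic):

    intptr_t tmp = observed_klass ^ profile_cell;
    if ((tmp & TypeEntries::type_klass_mask) == 0) {
      // same klass recorded before; the unknown bit may already be set,
      // but there is no need to check it
    } else if ((tmp & TypeEntries::type_unknown) != 0) {
      // cell already saturated to "unknown"; nothing more to record
    } else {
      profile_cell |= TypeEntries::type_unknown;  // conflict: give up on an exact type
    }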
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -79,6 +79,8 @@
 // GC Ergo Flags
 define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
 
+define_pd_global(uintx, TypeProfileLevel, 11);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
                                                                             \
   develop(bool, IEEEPrecision, true,                                        \
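Reading note (an interpretation, not stated in this hunk): TypeProfileLevel appears to be decimal-digit encoded, one digit per kind of type profiling, so the x86 default of 11 presumably enables profiling of both arguments and return values at call sites, while the SPARC default of 0 (earlier in this changeset) disables it entirely, consistent with SPARC's emit_profile_type stub that calls fatal(). The default can be overridden like any product flag:

    java -XX:TypeProfileLevel=0 ...    # disable the new type profiling on x86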
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1046,6 +1046,158 @@
   }
 }
 
+void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
+  Label update, next, none;
+
+  verify_oop(obj);
+
+  testptr(obj, obj);
+  jccb(Assembler::notZero, update);
+  orptr(mdo_addr, TypeEntries::null_seen);
+  jmpb(next);
+
+  bind(update);
+  load_klass(obj, obj);
+
+  xorptr(obj, mdo_addr);
+  testptr(obj, TypeEntries::type_klass_mask);
+  jccb(Assembler::zero, next); // klass seen before, nothing to
+                               // do. The unknown bit may have been
+                               // set already but no need to check.
+
+  testptr(obj, TypeEntries::type_unknown);
+  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
+
+  cmpptr(mdo_addr, 0);
+  jccb(Assembler::equal, none);
+  cmpptr(mdo_addr, TypeEntries::null_seen);
+  jccb(Assembler::equal, none);
+  // There is a chance that the checks above (re-reading profiling
+  // data from memory) fail if another thread has just set the
+  // profiling to this obj's klass
+  xorptr(obj, mdo_addr);
+  testptr(obj, TypeEntries::type_klass_mask);
+  jccb(Assembler::zero, next);
+
+  // different than before. Cannot keep accurate profile.
+  orptr(mdo_addr, TypeEntries::type_unknown);
+  jmpb(next);
+
+  bind(none);
+  // first time here. Set profile type.
+  movptr(mdo_addr, obj);
+
+  bind(next);
+}
+
+void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
+  if (!ProfileInterpreter) {
+    return;
+  }
+
+  if (MethodData::profile_arguments() || MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(mdp, profile_continue);
+
+    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
+
+    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
+    jcc(Assembler::notEqual, profile_continue);
+
+    if (MethodData::profile_arguments()) {
+      Label done;
+      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+      addptr(mdp, off_to_args);
+
+      for (int i = 0; i < TypeProfileArgsLimit; i++) {
+        if (i > 0 || MethodData::profile_return()) {
+          // If return value type is profiled we may have no argument to profile
+          movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
+          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
+          jcc(Assembler::less, done);
+        }
+        movptr(tmp, Address(callee, Method::const_offset()));
+        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
+        // stack offset o (zero based) from the start of the argument
+        // list, for n arguments translates into offset n - o - 1 from
+        // the end of the argument list
+        subl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
+        subl(tmp, 1);
+        Address arg_addr = argument_address(tmp);
+        movptr(tmp, arg_addr);
+
+        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
+        profile_obj_type(tmp, mdo_arg_addr);
+
+        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+        addptr(mdp, to_add);
+        off_to_args += to_add;
+      }
+
+      if (MethodData::profile_return()) {
+        movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+      }
+
+      bind(done);
+
+      if (MethodData::profile_return()) {
+        // We're right after the type profile for the last
+        // argument. tmp is the number of cells left in the
+        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
+        // if there's a return to profile.
+        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+        shll(tmp, exact_log2(DataLayout::cell_size));
+        addptr(mdp, tmp);
+      }
+      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
+    } else {
+      assert(MethodData::profile_return(), "either profile call args or call ret");
+      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
+    }
+
+    // mdp points right after the end of the
+    // CallTypeData/VirtualCallTypeData, right after the cells for the
+    // return value type if there's one
+
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
+  assert_different_registers(mdp, ret, tmp, rsi);
+  if (ProfileInterpreter && MethodData::profile_return()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(mdp, profile_continue);
+
+    if (MethodData::profile_return_jsr292_only()) {
+      // If we don't profile all invoke bytecodes we must make sure
+      // it's a bytecode we indeed profile. We can't go back to the
+      // beginning of the ProfileData we intend to update to check its
+      // type because we're right after it and we don't know its
+      // length
+      Label do_profile;
+      cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
+      jcc(Assembler::equal, do_profile);
+      cmpb(Address(rsi, 0), Bytecodes::_invokehandle);
+      jcc(Assembler::equal, do_profile);
+      get_method(tmp);
+      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
+      jcc(Assembler::notEqual, profile_continue);
+
+      bind(do_profile);
+    }
+
+    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
+    mov(tmp, ret);
+    profile_obj_type(tmp, mdo_ret_addr);
+
+    bind(profile_continue);
+  }
+}
 
 void InterpreterMacroAssembler::profile_call(Register mdp) {
   if (ProfileInterpreter) {
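
The slot arithmetic in the comment above (offset n - o - 1 from the end of the
argument list) is easy to get backwards. A minimal standalone sketch, outside
the VM, with a hypothetical helper name:

    #include <cassert>

    // For a call with n parameter slots, the profiled stack slot o
    // (zero-based from the start of the argument list) maps to offset
    // n - o - 1 from the end of the list, which is the index that
    // argument_address() expects.
    static int offset_from_end(int n_param_slots, int stack_slot_o) {
      return n_param_slots - stack_slot_o - 1;
    }

    int main() {
      assert(offset_from_end(3, 0) == 2);  // first argument sits deepest
      assert(offset_from_end(3, 2) == 0);  // last argument is on top
      return 0;
    }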
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -215,6 +215,9 @@
 
   void profile_taken_branch(Register mdp, Register bumped_count);
   void profile_not_taken_branch(Register mdp);
+  void profile_obj_type(Register obj, const Address& mdo_addr);
+  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
+  void profile_return_type(Register mdp, Register ret, Register tmp);
   void profile_call(Register mdp);
   void profile_final_call(Register mdp);
   void profile_virtual_call(Register receiver, Register mdp, Register scratch2,
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1067,6 +1067,159 @@
   }
 }
 
+void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
+  Label update, next, none;
+
+  verify_oop(obj);
+
+  testptr(obj, obj);
+  jccb(Assembler::notZero, update);
+  orptr(mdo_addr, TypeEntries::null_seen);
+  jmpb(next);
+
+  bind(update);
+  load_klass(obj, obj);
+
+  xorptr(obj, mdo_addr);
+  testptr(obj, TypeEntries::type_klass_mask);
+  jccb(Assembler::zero, next); // klass seen before, nothing to
+                               // do. The unknown bit may have been
+                               // set already but no need to check.
+
+  testptr(obj, TypeEntries::type_unknown);
+  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
+
+  // There is a chance that by the time we do these checks (re-reading
+  // profiling data from memory) another thread has set the profiling
+  // to this obj's klass and we would erroneously mark the profiling
+  // as unknown.
+  cmpptr(mdo_addr, 0);
+  jccb(Assembler::equal, none);
+  cmpptr(mdo_addr, TypeEntries::null_seen);
+  jccb(Assembler::equal, none);
+  // There is a chance that the checks above (re-reading profiling
+  // data from memory) fail if another thread has just set the
+  // profiling to this obj's klass
+  xorptr(obj, mdo_addr);
+  testptr(obj, TypeEntries::type_klass_mask);
+  jccb(Assembler::zero, next);
+
+  // Different from before: we cannot keep an accurate profile.
+  orptr(mdo_addr, TypeEntries::type_unknown);
+  jmpb(next);
+
+  bind(none);
+  // first time here. Set profile type.
+  movptr(mdo_addr, obj);
+
+  bind(next);
+}
+
+void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
+  if (!ProfileInterpreter) {
+    return;
+  }
+
+  if (MethodData::profile_arguments() || MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(mdp, profile_continue);
+
+    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
+
+    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
+    jcc(Assembler::notEqual, profile_continue);
+
+    if (MethodData::profile_arguments()) {
+      Label done;
+      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+      addptr(mdp, off_to_args);
+
+      for (int i = 0; i < TypeProfileArgsLimit; i++) {
+        if (i > 0 || MethodData::profile_return()) {
+          // If return value type is profiled we may have no argument to profile
+          movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
+          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
+          jcc(Assembler::less, done);
+        }
+        movptr(tmp, Address(callee, Method::const_offset()));
+        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
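+        // stack slot o (zero-based) from the start of the argument
+        // list maps to offset n - o - 1 from the end of the list,
+        // where n is the number of parameter slots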
+        subq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
+        subl(tmp, 1);
+        Address arg_addr = argument_address(tmp);
+        movptr(tmp, arg_addr);
+
+        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
+        profile_obj_type(tmp, mdo_arg_addr);
+
+        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+        addptr(mdp, to_add);
+        off_to_args += to_add;
+      }
+
+      if (MethodData::profile_return()) {
+        movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+      }
+
+      bind(done);
+
+      if (MethodData::profile_return()) {
+      // We're right after the type profile for the last
+      // argument. tmp holds the number of cells left in the
+      // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
+      // if there's a return to profile.
+        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+        shll(tmp, exact_log2(DataLayout::cell_size));
+        addptr(mdp, tmp);
+      }
+      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
+    } else {
+      assert(MethodData::profile_return(), "either profile call args or call ret");
+      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
+    }
+
+    // mdp points right after the end of the
+    // CallTypeData/VirtualCallTypeData, right after the cells for the
+    // return value type if there's one
+
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
+  assert_different_registers(mdp, ret, tmp, r13);
+  if (ProfileInterpreter && MethodData::profile_return()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(mdp, profile_continue);
+
+    if (MethodData::profile_return_jsr292_only()) {
+      // If we don't profile all invoke bytecodes we must make sure
+      // it's a bytecode we indeed profile. We can't go back to the
+      // beginning of the ProfileData we intend to update to check its
+      // type because we're right after it and we don't know its
+      // length.
+      Label do_profile;
+      cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
+      jcc(Assembler::equal, do_profile);
+      cmpb(Address(r13, 0), Bytecodes::_invokehandle);
+      jcc(Assembler::equal, do_profile);
+      get_method(tmp);
+      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
+      jcc(Assembler::notEqual, profile_continue);
+
+      bind(do_profile);
+    }
+
+    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
+    mov(tmp, ret);
+    profile_obj_type(tmp, mdo_ret_addr);
+
+    bind(profile_continue);
+  }
+}
 
 void InterpreterMacroAssembler::profile_call(Register mdp) {
   if (ProfileInterpreter) {
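
The cell encoding that profile_obj_type maintains is easier to follow in plain
code. A hedged sketch of the update logic above; the bit values are
illustrative stand-ins for the TypeEntries constants, and the racy interleaved
re-reads in the assembly are ignored here:

    #include <cstdint>

    typedef std::intptr_t Cell;
    static const Cell null_seen    = 1;         // a null was observed
    static const Cell type_unknown = 2;         // conflicting types observed
    static const Cell klass_mask   = ~(Cell)3;  // upper bits: the one klass seen

    static void update_cell(Cell klass, bool obj_is_null, Cell& cell) {
      if (obj_is_null) { cell |= null_seen; return; }
      if (((cell ^ klass) & klass_mask) == 0) return;  // same klass as before
      if (cell & type_unknown) return;                 // already polluted
      if ((cell & ~null_seen) == 0) {                  // first klass: record it
        cell = klass | (cell & null_seen);
      } else {
        cell |= type_unknown;                          // conflict: give up
      }
    }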
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -224,6 +224,9 @@
 
   void profile_taken_branch(Register mdp, Register bumped_count);
   void profile_not_taken_branch(Register mdp);
+  void profile_obj_type(Register obj, const Address& mdo_addr);
+  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
+  void profile_return_type(Register mdp, Register ret, Register tmp);
   void profile_call(Register mdp);
   void profile_final_call(Register mdp);
   void profile_virtual_call(Register receiver, Register mdp,
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -773,6 +773,7 @@
   void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
   void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
   void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
+  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
 
   void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
   void testptr(Register src1, Register src2);
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -194,6 +194,12 @@
   __ restore_bcp();
   __ restore_locals();
 
+  if (incoming_state == atos) {
+    Register mdp = rbx;
+    Register tmp = rcx;
+    __ profile_return_type(mdp, rax, tmp);
+  }
+
   Label L_got_cache, L_giant_index;
   if (EnableInvokeDynamic) {
     __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -177,6 +177,12 @@
   __ restore_bcp();
   __ restore_locals();
 
+  if (state == atos) {
+    Register mdp = rbx;
+    Register tmp = rcx;
+    __ profile_return_type(mdp, rax, tmp);
+  }
+
   Label L_got_cache, L_giant_index;
   if (EnableInvokeDynamic) {
     __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -2970,6 +2970,7 @@
 
   // profile this call
   __ profile_final_call(rax);
+  __ profile_arguments_type(rax, method, rsi, true);
 
   __ jump_from_interpreted(method, rax);
 
@@ -2984,6 +2985,7 @@
 
   // get target Method* & entry point
   __ lookup_virtual_method(rax, index, method);
+  __ profile_arguments_type(rdx, method, rsi, true);
   __ jump_from_interpreted(method, rdx);
 }
 
@@ -3013,6 +3015,7 @@
   __ null_check(rcx);
   // do the call
   __ profile_call(rax);
+  __ profile_arguments_type(rax, rbx, rsi, false);
   __ jump_from_interpreted(rbx, rax);
 }
 
@@ -3023,6 +3026,7 @@
   prepare_invoke(byte_no, rbx);  // get f1 Method*
   // do the call
   __ profile_call(rax);
+  __ profile_arguments_type(rax, rbx, rsi, false);
   __ jump_from_interpreted(rbx, rax);
 }
 
@@ -3082,6 +3086,8 @@
   __ testptr(rbx, rbx);
   __ jcc(Assembler::zero, no_such_method);
 
+  __ profile_arguments_type(rdx, rbx, rsi, true);
+
   // do the call
   // rcx: receiver
   // rbx,: Method*
@@ -3138,6 +3144,7 @@
 
   // FIXME: profile the LambdaForm also
   __ profile_final_call(rax);
+  __ profile_arguments_type(rdx, rbx_method, rsi, true);
 
   __ jump_from_interpreted(rbx_method, rdx);
 }
@@ -3171,6 +3178,7 @@
   // %%% should make a type profile for any invokedynamic that takes a ref argument
   // profile this call
   __ profile_call(rsi);
+  __ profile_arguments_type(rdx, rbx, rsi, false);
 
   __ verify_oop(rax_callsite);
 
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -3026,6 +3026,7 @@
 
   // profile this call
   __ profile_final_call(rax);
+  __ profile_arguments_type(rax, method, r13, true);
 
   __ jump_from_interpreted(method, rax);
 
@@ -3040,6 +3041,7 @@
 
   // get target Method* & entry point
   __ lookup_virtual_method(rax, index, method);
+  __ profile_arguments_type(rdx, method, r13, true);
   __ jump_from_interpreted(method, rdx);
 }
 
@@ -3069,6 +3071,7 @@
   __ null_check(rcx);
   // do the call
   __ profile_call(rax);
+  __ profile_arguments_type(rax, rbx, r13, false);
   __ jump_from_interpreted(rbx, rax);
 }
 
@@ -3079,6 +3082,7 @@
   prepare_invoke(byte_no, rbx);  // get f1 Method*
   // do the call
   __ profile_call(rax);
+  __ profile_arguments_type(rax, rbx, r13, false);
   __ jump_from_interpreted(rbx, rax);
 }
 
@@ -3136,6 +3140,8 @@
   __ testptr(rbx, rbx);
   __ jcc(Assembler::zero, no_such_method);
 
+  __ profile_arguments_type(rdx, rbx, r13, true);
+
   // do the call
   // rcx: receiver
   // rbx,: Method*
@@ -3193,6 +3199,7 @@
 
   // FIXME: profile the LambdaForm also
   __ profile_final_call(rax);
+  __ profile_arguments_type(rdx, rbx_method, r13, true);
 
   __ jump_from_interpreted(rbx_method, rdx);
 }
@@ -3226,6 +3233,7 @@
   // %%% should make a type profile for any invokedynamic that takes a ref argument
   // profile this call
   __ profile_call(r13);
+  __ profile_arguments_type(rdx, rbx_method, r13, false);
 
   __ verify_oop(rax_callsite);
 
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -159,9 +159,21 @@
   return Bsd::available_memory();
 }
 
+// "available" here means free memory
 julong os::Bsd::available_memory() {
-  // XXXBSD: this is just a stopgap implementation
-  return physical_memory() >> 2;
+  uint64_t available = physical_memory() >> 2;
+#ifdef __APPLE__
+  mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
+  vm_statistics64_data_t vmstat;
+  kern_return_t kerr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
+                                         (host_info64_t)&vmstat, &count);
+  assert(kerr == KERN_SUCCESS,
+         "host_statistics64 failed - check mach_host_self() and count");
+  if (kerr == KERN_SUCCESS) {
+    available = vmstat.free_count * os::vm_page_size();
+  }
+#endif
+  return available;
 }
 
 julong os::physical_memory() {
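
The Mach query above can be exercised standalone. A small sketch, macOS only,
with error handling reduced to a bail-out and the page size queried from the
kernel instead of os::vm_page_size():

    #include <mach/mach.h>
    #include <cstdio>

    int main() {
      mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
      vm_statistics64_data_t vmstat;
      kern_return_t kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
                                           (host_info64_t)&vmstat, &count);
      if (kr != KERN_SUCCESS) return 1;
      vm_size_t page_size = 0;
      host_page_size(mach_host_self(), &page_size);
      // free memory = number of free pages times the page size
      printf("free: %llu bytes\n",
             (unsigned long long)vmstat.free_count * page_size);
      return 0;
    }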
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -935,6 +935,7 @@
 void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
 void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
 void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
+void Canonicalizer::do_ProfileReturnType(ProfileReturnType* x) {}
 void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
 void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
 void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -104,6 +104,7 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   virtual void do_ProfileCall    (ProfileCall*     x);
+  virtual void do_ProfileReturnType (ProfileReturnType*  x);
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
   virtual void do_MemBar         (MemBar*          x);
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -601,6 +601,17 @@
   }
 }
 
+ciKlass* Compilation::cha_exact_type(ciType* type) {
+  if (type != NULL && type->is_loaded() && type->is_instance_klass()) {
+    ciInstanceKlass* ik = type->as_instance_klass();
+    assert(ik->exact_klass() == NULL, "no cha for final klass");
+    if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
+      dependency_recorder()->assert_leaf_type(ik);
+      return ik;
+    }
+  }
+  return NULL;
+}
 
 void Compilation::print_timers() {
   // tty->print_cr("    Native methods         : %6.3f s, Average : %2.3f", CompileBroker::_t_native_compilation.seconds(), CompileBroker::_t_native_compilation.seconds() / CompileBroker::_total_native_compile_count);
--- a/hotspot/src/share/vm/c1/c1_Compilation.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -246,6 +246,8 @@
       (RangeCheckElimination || UseLoopInvariantCodeMotion) &&
       method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
   }
+
+  ciKlass* cha_exact_type(ciType* type);
 };
 
 
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -42,26 +42,16 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/sharedRuntime.hpp"
 
-volatile int Compiler::_runtimes = uninitialized;
-
-Compiler::Compiler() {
-}
-
 
-Compiler::~Compiler() {
-  Unimplemented();
-}
+Compiler::Compiler() {}
 
-
-void Compiler::initialize_all() {
+void Compiler::init_c1_runtime() {
   BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   Arena* arena = new (mtCompiler) Arena();
   Runtime1::initialize(buffer_blob);
   FrameMap::initialize();
   // initialize data structures
   ValueType::initialize(arena);
-  // Instruction::initialize();
-  // BlockBegin::initialize();
   GraphBuilder::initialize();
   // note: to use more than one instance of LinearScan at a time this function call has to
   //       be moved somewhere outside of this constructor:
@@ -70,32 +60,33 @@
 
 
 void Compiler::initialize() {
-  if (_runtimes != initialized) {
-    initialize_runtimes( initialize_all, &_runtimes);
+  // Buffer blob must be allocated per C1 compiler thread at startup
+  BufferBlob* buffer_blob = init_buffer_blob();
+
+  if (should_perform_init()) {
+    if (buffer_blob == NULL) {
+      // When we come here we are in state 'initializing'; the entire
+      // C1 compilation can be shut down.
+      set_state(failed);
+    } else {
+      init_c1_runtime();
+      set_state(initialized);
+    }
   }
-  mark_initialized();
 }
 
-
-BufferBlob* Compiler::get_buffer_blob(ciEnv* env) {
+BufferBlob* Compiler::init_buffer_blob() {
   // Allocate buffer blob once at startup since allocation for each
   // compilation seems to be too expensive (at least on Intel win32).
-  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
-  if (buffer_blob != NULL) {
-    return buffer_blob;
-  }
+  assert(CompilerThread::current()->get_buffer_blob() == NULL, "Should initialize only once");
 
   // setup CodeBuffer.  Preallocate a BufferBlob of size
   // NMethodSizeLimit plus some extra space for constants.
   int code_buffer_size = Compilation::desired_max_code_buffer_size() +
     Compilation::desired_max_constant_size();
 
-  buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
-                                   code_buffer_size);
-  if (buffer_blob == NULL) {
-    CompileBroker::handle_full_code_cache();
-    env->record_failure("CodeCache is full");
-  } else {
+  BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size);
+  if (buffer_blob != NULL) {
     CompilerThread::current()->set_buffer_blob(buffer_blob);
   }
 
@@ -104,15 +95,8 @@
 
 
 void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
-  BufferBlob* buffer_blob = Compiler::get_buffer_blob(env);
-  if (buffer_blob == NULL) {
-    return;
-  }
-
-  if (!is_initialized()) {
-    initialize();
-  }
-
+  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
+  assert(buffer_blob != NULL, "Must exist");
   // invoke compilation
   {
     // We are nested here because we need for the destructor
--- a/hotspot/src/share/vm/c1/c1_Compiler.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compiler.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -30,11 +30,9 @@
 // There is one instance of the Compiler per CompilerThread.
 
 class Compiler: public AbstractCompiler {
-
  private:
-
- // Tracks whether runtime has been initialized
- static volatile int _runtimes;
+  static void init_c1_runtime();
+  BufferBlob* init_buffer_blob();
 
  public:
   // Creation
@@ -46,19 +44,12 @@
 
   virtual bool is_c1()                           { return true; };
 
-  BufferBlob* get_buffer_blob(ciEnv* env);
-
   // Missing feature tests
   virtual bool supports_native()                 { return true; }
   virtual bool supports_osr   ()                 { return true; }
 
-  // Customization
-  virtual bool needs_adapters         ()         { return false; }
-  virtual bool needs_stubs            ()         { return false; }
-
   // Initialization
   virtual void initialize();
-  static  void initialize_all();
 
   // Compilation entry point for methods
   virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci);
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1466,9 +1466,22 @@
     // State at end of inlined method is the state of the caller
     // without the method parameters on stack, including the
     // return value, if any, of the inlined method on operand stack.
+    int invoke_bci = state()->caller_state()->bci();
     set_state(state()->caller_state()->copy_for_parsing());
     if (x != NULL) {
       state()->push(x->type(), x);
+      if (profile_calls() && MethodData::profile_return() && x->type()->is_object_kind()) {
+        ciMethod* caller = state()->scope()->method();
+        ciMethodData* md = caller->method_data_or_null();
+        ciProfileData* data = md->bci_to_data(invoke_bci);
+        if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
+          bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
+          // May not be true in case of an inlined call through a method handle intrinsic.
+          if (has_return) {
+            profile_return_type(x, method(), caller, invoke_bci);
+          }
+        }
+      }
     }
     Goto* goto_callee = new Goto(continuation(), false);
 
@@ -1658,6 +1671,42 @@
   return compilation()->dependency_recorder();
 }
 
+// How many arguments do we want to profile?
+Values* GraphBuilder::args_list_for_profiling(int& start, bool may_have_receiver) {
+  int n = 0;
+  assert(start == 0, "should be initialized");
+  if (MethodData::profile_arguments()) {
+    ciProfileData* data = method()->method_data()->bci_to_data(bci());
+    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
+      n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
+      bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
+      start = has_receiver ? 1 : 0;
+    }
+  }
+  if (n > 0) {
+    return new Values(n);
+  }
+  return NULL;
+}
+
+// Collect arguments that we want to profile in a list
+Values* GraphBuilder::collect_args_for_profiling(Values* args, bool may_have_receiver) {
+  int start = 0;
+  Values* obj_args = args_list_for_profiling(start, may_have_receiver);
+  if (obj_args == NULL) {
+    return NULL;
+  }
+  int s = obj_args->size();
+  for (int i = start, j = 0; j < s; i++) {
+    if (args->at(i)->type()->is_object_kind()) {
+      obj_args->push(args->at(i));
+      j++;
+    }
+  }
+  assert(s == obj_args->length(), "missed on arg?");
+  return obj_args;
+}
+
 
 void GraphBuilder::invoke(Bytecodes::Code code) {
   bool will_link;
@@ -1957,7 +2006,7 @@
       } else if (exact_target != NULL) {
         target_klass = exact_target->holder();
       }
-      profile_call(target, recv, target_klass);
+      profile_call(target, recv, target_klass, collect_args_for_profiling(args, false), false);
     }
   }
 
@@ -1972,6 +2021,9 @@
       push(result_type, result);
     }
   }
+  if (profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
+    profile_return_type(result, target);
+  }
 }
 
 
@@ -3509,7 +3561,7 @@
           recv = args->at(0);
           null_check(recv);
         }
-        profile_call(callee, recv, NULL);
+        profile_call(callee, recv, NULL, collect_args_for_profiling(args, true), true);
       }
     }
   }
@@ -3520,6 +3572,10 @@
   Value value = append_split(result);
   if (result_type != voidType) push(result_type, value);
 
+  if (callee != method() && profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
+    profile_return_type(result, callee);
+  }
+
   // done
   return true;
 }
@@ -3763,7 +3819,28 @@
     compilation()->set_would_profile(true);
 
     if (profile_calls()) {
-      profile_call(callee, recv, holder_known ? callee->holder() : NULL);
+      int start = 0;
+      Values* obj_args = args_list_for_profiling(start, has_receiver);
+      if (obj_args != NULL) {
+        int s = obj_args->size();
+        // if called through method handle invoke, some arguments may have been popped
+        for (int i = args_base+start, j = 0; j < obj_args->size() && i < state()->stack_size(); ) {
+          Value v = state()->stack_at_inc(i);
+          if (v->type()->is_object_kind()) {
+            obj_args->push(v);
+            j++;
+          }
+        }
+#ifdef ASSERT
+        {
+          bool ignored_will_link;
+          ciSignature* declared_signature = NULL;
+          ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
+          assert(s == obj_args->length() || real_target->is_method_handle_intrinsic(), "missed on arg?");
+        }
+#endif
+      }
+      profile_call(callee, recv, holder_known ? callee->holder() : NULL, obj_args, true);
     }
   }
 
@@ -4251,8 +4328,23 @@
 }
 #endif // PRODUCT
 
-void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder) {
-  append(new ProfileCall(method(), bci(), callee, recv, known_holder));
+void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
+  append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
+}
+
+void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
+  assert((m == NULL) == (invoke_bci < 0), "invalid method and invalid bci together");
+  if (m == NULL) {
+    m = method();
+  }
+  if (invoke_bci < 0) {
+    invoke_bci = bci();
+  }
+  ciMethodData* md = m->method_data_or_null();
+  ciProfileData* data = md->bci_to_data(invoke_bci);
+  if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
+    append(new ProfileReturnType(m , invoke_bci, callee, ret));
+  }
 }
 
 void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
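
The filtering in collect_args_for_profiling above boils down to: skip the
receiver slot when there is one, then keep reference-typed values until the
profile's capacity is reached. A hedged sketch with hypothetical stand-in
types:

    #include <cstddef>
    #include <vector>

    struct Value { bool is_object; };

    static std::vector<Value*> collect(const std::vector<Value*>& args,
                                       bool has_receiver, size_t capacity) {
      std::vector<Value*> out;
      for (size_t i = has_receiver ? 1 : 0;
           i < args.size() && out.size() < capacity; i++) {
        if (args[i]->is_object) out.push_back(args[i]);  // only references are profiled
      }
      return out;
    }

    int main() {
      Value recv{true}, a{true}, b{false}, c{true};
      std::vector<Value*> args = {&recv, &a, &b, &c};
      // receiver skipped, b is primitive: a and c are collected
      return collect(args, true, 2).size() == 2 ? 0 : 1;
    }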
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -374,7 +374,8 @@
 
   void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true);
 
-  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder);
+  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined);
+  void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = NULL, int bci = -1);
   void profile_invocation(ciMethod* inlinee, ValueStack* state);
 
   // Shortcuts to profiling control.
@@ -386,6 +387,9 @@
   bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
   bool profile_checkcasts()    { return _compilation->profile_checkcasts();    }
 
+  Values* args_list_for_profiling(int& start, bool may_have_receiver);
+  Values* collect_args_for_profiling(Values* args, bool may_have_receiver);
+
  public:
   NOT_PRODUCT(void print_stats();)
 
--- a/hotspot/src/share/vm/c1/c1_Instruction.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Instruction.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -104,6 +104,14 @@
   }
 }
 
+ciType* Instruction::exact_type() const {
+  ciType* t =  declared_type();
+  if (t != NULL && t->is_klass()) {
+    return t->as_klass()->exact_klass();
+  }
+  return NULL;
+}
+
 
 #ifndef PRODUCT
 void Instruction::check_state(ValueStack* state) {
@@ -135,9 +143,7 @@
 
 // perform constant and interval tests on index value
 bool AccessIndexed::compute_needs_range_check() {
-
   if (length()) {
-
     Constant* clength = length()->as_Constant();
     Constant* cindex = index()->as_Constant();
     if (clength && cindex) {
@@ -157,34 +163,8 @@
 }
 
 
-ciType* Local::exact_type() const {
-  ciType* type = declared_type();
-
-  // for primitive arrays, the declared type is the exact type
-  if (type->is_type_array_klass()) {
-    return type;
-  } else if (type->is_instance_klass()) {
-    ciInstanceKlass* ik = (ciInstanceKlass*)type;
-    if (ik->is_loaded() && ik->is_final() && !ik->is_interface()) {
-      return type;
-    }
-  } else if (type->is_obj_array_klass()) {
-    ciObjArrayKlass* oak = (ciObjArrayKlass*)type;
-    ciType* base = oak->base_element_type();
-    if (base->is_instance_klass()) {
-      ciInstanceKlass* ik = base->as_instance_klass();
-      if (ik->is_loaded() && ik->is_final()) {
-        return type;
-      }
-    } else if (base->is_primitive_type()) {
-      return type;
-    }
-  }
-  return NULL;
-}
-
 ciType* Constant::exact_type() const {
-  if (type()->is_object()) {
+  if (type()->is_object() && type()->as_ObjectType()->is_loaded()) {
     return type()->as_ObjectType()->exact_type();
   }
   return NULL;
@@ -192,19 +172,18 @@
 
 ciType* LoadIndexed::exact_type() const {
   ciType* array_type = array()->exact_type();
-  if (array_type == NULL) {
-    return NULL;
-  }
-  assert(array_type->is_array_klass(), "what else?");
-  ciArrayKlass* ak = (ciArrayKlass*)array_type;
+  if (array_type != NULL) {
+    assert(array_type->is_array_klass(), "what else?");
+    ciArrayKlass* ak = (ciArrayKlass*)array_type;
 
-  if (ak->element_type()->is_instance_klass()) {
-    ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
-    if (ik->is_loaded() && ik->is_final()) {
-      return ik;
+    if (ak->element_type()->is_instance_klass()) {
+      ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
+      if (ik->is_loaded() && ik->is_final()) {
+        return ik;
+      }
     }
   }
-  return NULL;
+  return Instruction::exact_type();
 }
 
 
@@ -224,22 +203,6 @@
 }
 
 
-ciType* LoadField::exact_type() const {
-  ciType* type = declared_type();
-  // for primitive arrays, the declared type is the exact type
-  if (type->is_type_array_klass()) {
-    return type;
-  }
-  if (type->is_instance_klass()) {
-    ciInstanceKlass* ik = (ciInstanceKlass*)type;
-    if (ik->is_loaded() && ik->is_final()) {
-      return type;
-    }
-  }
-  return NULL;
-}
-
-
 ciType* NewTypeArray::exact_type() const {
   return ciTypeArrayKlass::make(elt_type());
 }
@@ -264,16 +227,6 @@
   return klass();
 }
 
-ciType* CheckCast::exact_type() const {
-  if (klass()->is_instance_klass()) {
-    ciInstanceKlass* ik = (ciInstanceKlass*)klass();
-    if (ik->is_loaded() && ik->is_final()) {
-      return ik;
-    }
-  }
-  return NULL;
-}
-
 // Implementation of ArithmeticOp
 
 bool ArithmeticOp::is_commutative() const {
--- a/hotspot/src/share/vm/c1/c1_Instruction.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -107,6 +107,7 @@
 class         UnsafePrefetchRead;
 class         UnsafePrefetchWrite;
 class   ProfileCall;
+class   ProfileReturnType;
 class   ProfileInvoke;
 class   RuntimeCall;
 class   MemBar;
@@ -211,6 +212,7 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) = 0;
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
   virtual void do_ProfileCall    (ProfileCall*     x) = 0;
+  virtual void do_ProfileReturnType (ProfileReturnType*  x) = 0;
   virtual void do_ProfileInvoke  (ProfileInvoke*   x) = 0;
   virtual void do_RuntimeCall    (RuntimeCall*     x) = 0;
   virtual void do_MemBar         (MemBar*          x) = 0;
@@ -322,6 +324,36 @@
     _type = type;
   }
 
+  // Helper class to keep track of which arguments need a null check
+  class ArgsNonNullState {
+  private:
+    int _nonnull_state; // mask identifying which args are nonnull
+  public:
+    ArgsNonNullState()
+      : _nonnull_state(AllBits) {}
+
+    // Does argument number i need a null check?
+    bool arg_needs_null_check(int i) const {
+      // No data is kept for arguments beyond the first 32, so
+      // conservatively assume that they need a null check.
+      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
+        return is_set_nth_bit(_nonnull_state, i);
+      }
+      return true;
+    }
+
+    // Set whether argument number i needs a null check or not
+    void set_arg_needs_null_check(int i, bool check) {
+      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
+        if (check) {
+          _nonnull_state |= nth_bit(i);
+        } else {
+          _nonnull_state &= ~(nth_bit(i));
+        }
+      }
+    }
+  };
+
  public:
   void* operator new(size_t size) throw() {
     Compilation* c = Compilation::current();
@@ -566,7 +598,7 @@
   virtual void other_values_do(ValueVisitor* f)   { /* usually no other - override on demand */ }
           void       values_do(ValueVisitor* f)   { input_values_do(f); state_values_do(f); other_values_do(f); }
 
-  virtual ciType* exact_type() const             { return NULL; }
+  virtual ciType* exact_type() const;
   virtual ciType* declared_type() const          { return NULL; }
 
   // hashing
@@ -689,7 +721,6 @@
   int java_index() const                         { return _java_index; }
 
   virtual ciType* declared_type() const          { return _declared_type; }
-  virtual ciType* exact_type() const;
 
   // generic
   virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
@@ -806,7 +837,6 @@
   {}
 
   ciType* declared_type() const;
-  ciType* exact_type() const;
 
   // generic
   HASHING2(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset())  // cannot be eliminated if needs patching or if volatile
@@ -1299,6 +1329,7 @@
 
   virtual bool needs_exception_state() const     { return false; }
 
+  ciType* exact_type() const                     { return NULL; }
   ciType* declared_type() const;
 
   // generic
@@ -1422,7 +1453,6 @@
   }
 
   ciType* declared_type() const;
-  ciType* exact_type() const;
 };
 
 
@@ -1490,7 +1520,7 @@
   vmIntrinsics::ID _id;
   Values*          _args;
   Value            _recv;
-  int              _nonnull_state; // mask identifying which args are nonnull
+  ArgsNonNullState _nonnull_state;
 
  public:
   // preserves_state can be set to true for Intrinsics
@@ -1511,7 +1541,6 @@
   , _id(id)
   , _args(args)
   , _recv(NULL)
-  , _nonnull_state(AllBits)
   {
     assert(args != NULL, "args must exist");
     ASSERT_VALUES
@@ -1537,21 +1566,12 @@
   Value receiver() const                         { assert(has_receiver(), "must have receiver"); return _recv; }
   bool preserves_state() const                   { return check_flag(PreservesStateFlag); }
 
-  bool arg_needs_null_check(int i) {
-    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
-      return is_set_nth_bit(_nonnull_state, i);
-    }
-    return true;
+  bool arg_needs_null_check(int i) const {
+    return _nonnull_state.arg_needs_null_check(i);
   }
 
   void set_arg_needs_null_check(int i, bool check) {
-    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
-      if (check) {
-        _nonnull_state |= nth_bit(i);
-      } else {
-        _nonnull_state &= ~(nth_bit(i));
-      }
-    }
+    _nonnull_state.set_arg_needs_null_check(i, check);
   }
 
   // generic
@@ -2450,34 +2470,87 @@
 
 LEAF(ProfileCall, Instruction)
  private:
-  ciMethod* _method;
-  int       _bci_of_invoke;
-  ciMethod* _callee;         // the method that is called at the given bci
-  Value     _recv;
-  ciKlass*  _known_holder;
+  ciMethod*        _method;
+  int              _bci_of_invoke;
+  ciMethod*        _callee;         // the method that is called at the given bci
+  Value            _recv;
+  ciKlass*         _known_holder;
+  Values*          _obj_args;       // arguments for type profiling
+  ArgsNonNullState _nonnull_state;  // Do we know whether some arguments are never null?
+  bool             _inlined;        // Are we profiling a call that is inlined?
 
  public:
-  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder)
+  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined)
     : Instruction(voidType)
     , _method(method)
     , _bci_of_invoke(bci)
     , _callee(callee)
     , _recv(recv)
     , _known_holder(known_holder)
+    , _obj_args(obj_args)
+    , _inlined(inlined)
   {
     // The ProfileCall has side-effects and must occur precisely where located
     pin();
   }
 
-  ciMethod* method()      { return _method; }
-  int bci_of_invoke()     { return _bci_of_invoke; }
-  ciMethod* callee()      { return _callee; }
-  Value recv()            { return _recv; }
-  ciKlass* known_holder() { return _known_holder; }
-
-  virtual void input_values_do(ValueVisitor* f)   { if (_recv != NULL) f->visit(&_recv); }
+  ciMethod* method()             const { return _method; }
+  int bci_of_invoke()            const { return _bci_of_invoke; }
+  ciMethod* callee()             const { return _callee; }
+  Value recv()                   const { return _recv; }
+  ciKlass* known_holder()        const { return _known_holder; }
+  int nb_profiled_args()         const { return _obj_args == NULL ? 0 : _obj_args->length(); }
+  Value profiled_arg_at(int i)   const { return _obj_args->at(i); }
+  bool arg_needs_null_check(int i) const {
+    return _nonnull_state.arg_needs_null_check(i);
+  }
+  bool inlined()                 const { return _inlined; }
+
+  void set_arg_needs_null_check(int i, bool check) {
+    _nonnull_state.set_arg_needs_null_check(i, check);
+  }
+
+  virtual void input_values_do(ValueVisitor* f)   {
+    if (_recv != NULL) {
+      f->visit(&_recv);
+    }
+    for (int i = 0; i < nb_profiled_args(); i++) {
+      f->visit(_obj_args->adr_at(i));
+    }
+  }
 };
 
+LEAF(ProfileReturnType, Instruction)
+ private:
+  ciMethod*        _method;
+  ciMethod*        _callee;
+  int              _bci_of_invoke;
+  Value            _ret;
+
+ public:
+  ProfileReturnType(ciMethod* method, int bci, ciMethod* callee, Value ret)
+    : Instruction(voidType)
+    , _method(method)
+    , _callee(callee)
+    , _bci_of_invoke(bci)
+    , _ret(ret)
+  {
+    set_needs_null_check(true);
+    // The ProfileReturnType has side-effects and must occur precisely where located
+    pin();
+  }
+
+  ciMethod* method()             const { return _method; }
+  ciMethod* callee()             const { return _callee; }
+  int bci_of_invoke()            const { return _bci_of_invoke; }
+  Value ret()                    const { return _ret; }
+
+  virtual void input_values_do(ValueVisitor* f)   {
+    if (_ret != NULL) {
+      f->visit(&_ret);
+    }
+  }
+};
 
 // Call some C runtime function that doesn't safepoint,
 // optionally passing the current thread as the first argument.
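
ArgsNonNullState above packs the per-argument "needs a null check" flags into a
single integer and answers conservatively for any index it cannot represent. A
self-contained restatement, assuming a fixed 32-bit mask (which matches
sizeof(int) * BitsPerByte on common targets):

    #include <cassert>
    #include <cstdint>

    struct NonNullMask {
      uint32_t bits = ~0u;  // start as "every argument needs a check"
      bool needs_check(int i) const {
        if (i < 0 || i >= 32) return true;  // untracked: stay conservative
        return (bits >> i) & 1;
      }
      void set_needs_check(int i, bool check) {
        if (i < 0 || i >= 32) return;
        if (check) bits |=  (1u << i);
        else       bits &= ~(1u << i);
      }
    };

    int main() {
      NonNullMask m;
      m.set_needs_check(2, false);  // a null check was proven unnecessary
      assert(!m.needs_check(2));
      assert(m.needs_check(0));     // default: check needed
      assert(m.needs_check(40));    // beyond the mask: check needed
      return 0;
    }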
--- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -892,10 +892,24 @@
   if (x->known_holder() != NULL) {
     output()->print(", ");
     print_klass(x->known_holder());
+    output()->print(" ");
+  }
+  for (int i = 0; i < x->nb_profiled_args(); i++) {
+    if (i > 0) output()->print(", ");
+    print_value(x->profiled_arg_at(i));
+    if (x->arg_needs_null_check(i)) {
+      output()->print(" [NC]");
+    }
   }
   output()->put(')');
 }
 
+void InstructionPrinter::do_ProfileReturnType(ProfileReturnType* x) {
+  output()->print("profile ret type ");
+  print_value(x->ret());
+  output()->print(" %s.%s", x->method()->holder()->name()->as_utf8(), x->method()->name()->as_utf8());
+  output()->put(')');
+}
+
 void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
   output()->print("profile_invoke ");
   output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());
--- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -132,6 +132,7 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   virtual void do_ProfileCall    (ProfileCall*     x);
+  virtual void do_ProfileReturnType (ProfileReturnType*  x);
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
   virtual void do_MemBar         (MemBar*          x);
--- a/hotspot/src/share/vm/c1/c1_LIR.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIR.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1001,6 +1001,17 @@
       assert(opProfileCall->_tmp1->is_valid(), "used");  do_temp(opProfileCall->_tmp1);
       break;
     }
+
+// LIR_OpProfileType:
+    case lir_profile_type: {
+      assert(op->as_OpProfileType() != NULL, "must be");
+      LIR_OpProfileType* opProfileType = (LIR_OpProfileType*)op;
+
+      do_input(opProfileType->_mdp); do_temp(opProfileType->_mdp);
+      do_input(opProfileType->_obj);
+      do_temp(opProfileType->_tmp);
+      break;
+    }
   default:
     ShouldNotReachHere();
   }
@@ -1151,6 +1162,10 @@
   masm->emit_profile_call(this);
 }
 
+void LIR_OpProfileType::emit_code(LIR_Assembler* masm) {
+  masm->emit_profile_type(this);
+}
+
 // LIR_List
 LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
   : _operations(8)
@@ -1803,6 +1818,8 @@
      case lir_cas_int:               s = "cas_int";      break;
      // LIR_OpProfileCall
      case lir_profile_call:          s = "profile_call";  break;
+     // LIR_OpProfileType
+     case lir_profile_type:          s = "profile_type";  break;
      // LIR_OpAssert
 #ifdef ASSERT
      case lir_assert:                s = "assert";        break;
@@ -2086,6 +2103,15 @@
   tmp1()->print(out);          out->print(" ");
 }
 
+// LIR_OpProfileType
+void LIR_OpProfileType::print_instr(outputStream* out) const {
+  out->print("exact = "); exact_klass()->print_name_on(out);
+  out->print("current = "); ciTypeEntries::print_ciklass(out, current_klass());
+  mdp()->print(out);          out->print(" ");
+  obj()->print(out);          out->print(" ");
+  tmp()->print(out);          out->print(" ");
+}
+
 #endif // PRODUCT
 
 // Implementation of LIR_InsertionBuffer
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -882,6 +882,7 @@
 class    LIR_OpTypeCheck;
 class    LIR_OpCompareAndSwap;
 class    LIR_OpProfileCall;
+class    LIR_OpProfileType;
 #ifdef ASSERT
 class    LIR_OpAssert;
 #endif
@@ -1005,6 +1006,7 @@
   , end_opCompareAndSwap
   , begin_opMDOProfile
     , lir_profile_call
+    , lir_profile_type
   , end_opMDOProfile
   , begin_opAssert
     , lir_assert
@@ -1145,6 +1147,7 @@
   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
   virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
+  virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
 #ifdef ASSERT
   virtual LIR_OpAssert* as_OpAssert() { return NULL; }
 #endif
@@ -1925,8 +1928,8 @@
 
  public:
   // Destroys recv
-  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
-    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
+  LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
+    : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
     , _profiled_method(profiled_method)
     , _profiled_bci(profiled_bci)
     , _profiled_callee(profiled_callee)
@@ -1948,6 +1951,45 @@
   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
 };
 
+// LIR_OpProfileType
+class LIR_OpProfileType : public LIR_Op {
+ friend class LIR_OpVisitState;
+
+ private:
+  LIR_Opr      _mdp;
+  LIR_Opr      _obj;
+  LIR_Opr      _tmp;
+  ciKlass*     _exact_klass;   // non-NULL if we know the klass statically (no need to load it from _obj)
+  intptr_t     _current_klass; // what the profiling currently reports
+  bool         _not_null;      // true if we know statically that _obj cannot be null
+  bool         _no_conflict;   // true if we're profiling parameters, _exact_klass is not NULL and we know
+                               // _exact_klass is the only possible type for this parameter in any context.
+
+ public:
+  // Destroys mdp and tmp
+  LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
+    : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
+    , _mdp(mdp)
+    , _obj(obj)
+    , _exact_klass(exact_klass)
+    , _current_klass(current_klass)
+    , _tmp(tmp)
+    , _not_null(not_null)
+    , _no_conflict(no_conflict) { }
+
+  LIR_Opr      mdp()              const             { return _mdp;              }
+  LIR_Opr      obj()              const             { return _obj;              }
+  LIR_Opr      tmp()              const             { return _tmp;              }
+  ciKlass*     exact_klass()      const             { return _exact_klass;      }
+  intptr_t     current_klass()    const             { return _current_klass;    }
+  bool         not_null()         const             { return _not_null;         }
+  bool         no_conflict()      const             { return _no_conflict;      }
+
+  virtual void emit_code(LIR_Assembler* masm);
+  virtual LIR_OpProfileType* as_OpProfileType() { return this; }
+  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+};
+
 class LIR_InsertionBuffer;
 
 //--------------------------------LIR_List---------------------------------------------------
@@ -2247,7 +2289,10 @@
                   ciMethod* profiled_method, int profiled_bci);
   // MethodData* profiling
   void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
-    append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass));
+    append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
+  }
+  void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
+    append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
   }
 
   void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -208,6 +208,7 @@
   void emit_call(LIR_OpJavaCall* op);
   void emit_rtcall(LIR_OpRTCall* op);
   void emit_profile_call(LIR_OpProfileCall* op);
+  void emit_profile_type(LIR_OpProfileType* op);
   void emit_delay(LIR_OpDelay* op);
 
   void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -2571,6 +2571,78 @@
 }
 
 
+ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k) {
+  ciKlass* result = NULL;
+  bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
+  bool do_update = !TypeEntries::is_type_unknown(profiled_k);
+  // known not to be null or null bit already set and already set to
+  // unknown: nothing we can do to improve profiling
+  if (!do_null && !do_update) {
+    return result;
+  }
+
+  ciKlass* exact_klass = NULL;
+  Compilation* comp = Compilation::current();
+  if (do_update) {
+    // try to find exact type, using CHA if possible, so that loading
+    // the klass from the object can be avoided
+    ciType* type = arg->exact_type();
+    if (type == NULL) {
+      type = arg->declared_type();
+      type = comp->cha_exact_type(type);
+    }
+    assert(type == NULL || type->is_klass(), "type should be class");
+    exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
+
+    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
+  }
+
+  if (!do_null && !do_update) {
+    return result;
+  }
+
+  ciKlass* exact_signature_k = NULL;
+  if (do_update) {
+    // Is the type from the signature exact (the only one possible)?
+    exact_signature_k = signature_k->exact_klass();
+    if (exact_signature_k == NULL) {
+      exact_signature_k = comp->cha_exact_type(signature_k);
+    } else {
+      result = exact_signature_k;
+      do_update = false;
+      // Known statically. No need to emit any code: prevent
+      // LIR_Assembler::emit_profile_type() from emitting useless code
+      profiled_k = ciTypeEntries::with_status(result, profiled_k);
+    }
+    if (exact_signature_k != NULL && exact_klass != exact_signature_k) {
+      assert(exact_klass == NULL, "arg and signature disagree?");
+      // sometimes the type of the signature is better than the best type
+      // the compiler has
+      exact_klass = exact_signature_k;
+      do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
+    }
+  }
+
+  if (!do_null && !do_update) {
+    return result;
+  }
+
+  if (mdp == LIR_OprFact::illegalOpr) {
+    mdp = new_register(T_METADATA);
+    __ metadata2reg(md->constant_encoding(), mdp);
+    if (md_base_offset != 0) {
+      LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
+      mdp = new_pointer_register();
+      __ leal(LIR_OprFact::address(base_type_address), mdp);
+    }
+  }
+  LIRItem value(arg, this);
+  value.load_item();
+  __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
+                  value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
+  return result;
+}
+
 void LIRGenerator::do_Base(Base* x) {
   __ std_entry(LIR_OprFact::illegalOpr);
   // Emit moves from physical registers / stack slots to virtual registers
@@ -3004,12 +3076,52 @@
   }
 }
 
+void LIRGenerator::profile_arguments(ProfileCall* x) {
+  if (MethodData::profile_arguments()) {
+    int bci = x->bci_of_invoke();
+    ciMethodData* md = x->method()->method_data_or_null();
+    ciProfileData* data = md->bci_to_data(bci);
+    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
+      ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
+      int base_offset = md->byte_offset_of_slot(data, extra);
+      LIR_Opr mdp = LIR_OprFact::illegalOpr;
+      ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
+
+      Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
+      int start = 0;
+      int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
+      if (x->nb_profiled_args() < stop) {
+        // if called through method handle invoke, some arguments may have been popped
+        stop = x->nb_profiled_args();
+      }
+      ciSignature* sig = x->callee()->signature();
+      // method handle call to virtual method
+      bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
+      ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
+      for (int i = 0; i < stop; i++) {
+        int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
+        ciKlass* exact = profile_arg_type(md, base_offset, off,
+                                          args->type(i), x->profiled_arg_at(i+start), mdp,
+                                          !x->arg_needs_null_check(i+start), sig_stream.next_klass());
+        if (exact != NULL) {
+          md->set_argument_type(bci, i, exact);
+        }
+      }
+    }
+  }
+}
+
 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
   // Need recv in a temporary register so it interferes with the other temporaries
   LIR_Opr recv = LIR_OprFact::illegalOpr;
   LIR_Opr mdo = new_register(T_OBJECT);
   // tmp is used to hold the counters on SPARC
   LIR_Opr tmp = new_pointer_register();
+
+  if (x->nb_profiled_args() > 0) {
+    profile_arguments(x);
+  }
+
   if (x->recv() != NULL) {
     LIRItem value(x->recv(), this);
     value.load_item();
@@ -3019,6 +3131,21 @@
   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
 }
 
+void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
+  int bci = x->bci_of_invoke();
+  ciMethodData* md = x->method()->method_data_or_null();
+  ciProfileData* data = md->bci_to_data(bci);
+  assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
+  ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
+  LIR_Opr mdp = LIR_OprFact::illegalOpr;
+  ciKlass* exact = profile_arg_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
+                                    ret->type(), x->ret(), mdp,
+                                    !x->needs_null_check(), x->callee()->signature()->return_type()->as_klass());
+  if (exact != NULL) {
+    md->set_return_type(bci, exact);
+  }
+}
+
 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
   // We can safely ignore accessors here, since c2 will inline them anyway,
   // accessors are also always mature.
@@ -3053,7 +3180,11 @@
   int offset = -1;
   LIR_Opr counter_holder;
   if (level == CompLevel_limited_profile) {
-    address counters_adr = method->ensure_method_counters();
+    MethodCounters* counters_adr = method->ensure_method_counters();
+    if (counters_adr == NULL) {
+      bailout("method counters allocation failed");
+      return;
+    }
     counter_holder = new_pointer_register();
     __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
     offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
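
profile_arg_type above re-derives two booleans after each refinement step and
returns early once both are false, which is why the same test appears three
times. A hedged model of the two predicates; the bit values are illustrative,
as in the sketch after the interp_masm_x86_64.cpp hunk:

    #include <cstdint>

    typedef std::intptr_t Cell;
    static const Cell null_seen = 1, type_unknown = 2;

    // The null bit is only worth setting if the value may be null and
    // the bit is not already set.
    static bool do_null(Cell profiled_k, bool statically_not_null) {
      return !statically_not_null && !(profiled_k & null_seen);
    }

    // The klass cell is only worth updating while it has not been
    // marked as holding conflicting (unknown) types.
    static bool do_update(Cell profiled_k) {
      return !(profiled_k & type_unknown);
    }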
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -434,6 +434,8 @@
   void do_ThreadIDIntrinsic(Intrinsic* x);
   void do_ClassIDIntrinsic(Intrinsic* x);
 #endif
+  ciKlass* profile_arg_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k);
+  void profile_arguments(ProfileCall* x);
 
  public:
   Compilation*  compilation() const              { return _compilation; }
@@ -534,6 +536,7 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   virtual void do_ProfileCall    (ProfileCall*     x);
+  virtual void do_ProfileReturnType (ProfileReturnType* x);
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
   virtual void do_MemBar         (MemBar*          x);
--- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -531,6 +531,7 @@
   void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   void do_ProfileCall    (ProfileCall*     x);
+  void do_ProfileReturnType (ProfileReturnType*  x);
   void do_ProfileInvoke  (ProfileInvoke*   x);
   void do_RuntimeCall    (RuntimeCall*     x);
   void do_MemBar         (MemBar*          x);
@@ -657,6 +658,8 @@
   void handle_Intrinsic       (Intrinsic* x);
   void handle_ExceptionObject (ExceptionObject* x);
   void handle_Phi             (Phi* x);
+  void handle_ProfileCall     (ProfileCall* x);
+  void handle_ProfileReturnType (ProfileReturnType* x);
 };
 
 
@@ -715,7 +718,9 @@
 void NullCheckVisitor::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {}
 void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
 void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
-void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check(); }
+void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check();
+                                                                nce()->handle_ProfileCall(x); }
+void NullCheckVisitor::do_ProfileReturnType (ProfileReturnType* x) { nce()->handle_ProfileReturnType(x); }
 void NullCheckVisitor::do_ProfileInvoke  (ProfileInvoke*   x) {}
 void NullCheckVisitor::do_RuntimeCall    (RuntimeCall*     x) {}
 void NullCheckVisitor::do_MemBar         (MemBar*          x) {}
@@ -1134,6 +1139,15 @@
   }
 }
 
+void NullCheckEliminator::handle_ProfileCall(ProfileCall* x) {
+  for (int i = 0; i < x->nb_profiled_args(); i++) {
+    x->set_arg_needs_null_check(i, !set_contains(x->profiled_arg_at(i)));
+  }
+}
+
+void NullCheckEliminator::handle_ProfileReturnType(ProfileReturnType* x) {
+  x->set_needs_null_check(!set_contains(x->ret()));
+}
 
 void Optimizer::eliminate_null_checks() {
   ResourceMark rm;
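
handle_ProfileCall and handle_ProfileReturnType above consult the eliminator's set of values already proven non-null, so the emitted profiling code can skip redundant null checks. A standalone sketch of the same bookkeeping, with std::set standing in for the eliminator's value set (hypothetical types):

    #include <cstdio>
    #include <set>
    #include <vector>

    typedef int ValueId;  // stand-in for a c1 Value*

    struct ProfiledCall {
      std::vector<ValueId> args;
      std::vector<bool>    arg_needs_null_check;
    };

    // For each profiled argument, a null check is needed only if the
    // value is not already in the set proven non-null on this path.
    static void handle_profile_call(const std::set<ValueId>& non_null,
                                    ProfiledCall* call) {
      call->arg_needs_null_check.resize(call->args.size());
      for (size_t i = 0; i < call->args.size(); i++) {
        call->arg_needs_null_check[i] = non_null.count(call->args[i]) == 0;
      }
    }

    int main() {
      std::set<ValueId> non_null = {1, 3};  // values already null-checked
      ProfiledCall call;
      call.args = {1, 2, 3};
      handle_profile_call(non_null, &call);
      for (size_t i = 0; i < call.args.size(); i++) {
        std::printf("arg %zu needs null check: %d\n", i,
                    (int)call.arg_needs_null_check[i]);
      }
      return 0;
    }
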
--- a/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -162,7 +162,8 @@
     void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ };
     void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
     void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ };
-    void do_ProfileInvoke  (ProfileInvoke*  x)  { /* nothing to do */ };
+    void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ };
+    void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
     void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
     void do_MemBar         (MemBar*          x) { /* nothing to do */ };
     void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -542,8 +542,7 @@
     // exception handler can cause class loading, which might throw an
     // exception and those fields are expected to be clear during
     // normal bytecode execution.
-    thread->set_exception_oop(NULL);
-    thread->set_exception_pc(NULL);
+    thread->clear_exception_oop_and_pc();
 
     continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
     // If an exception was thrown during exception dispatch, the exception oop may have changed
--- a/hotspot/src/share/vm/c1/c1_ValueMap.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_ValueMap.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -203,6 +203,7 @@
   void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ }
   void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
   void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ }
+  void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ }
   void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
   void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
   void do_MemBar         (MemBar*          x) { /* nothing to do */ };
--- a/hotspot/src/share/vm/ci/ciClassList.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciClassList.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -102,6 +102,7 @@
 friend class ciMethodHandle;           \
 friend class ciMethodType;             \
 friend class ciReceiverTypeData;       \
+friend class ciTypeEntries;            \
 friend class ciSymbol;                 \
 friend class ciArray;                  \
 friend class ciObjArray;               \
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1154,9 +1154,12 @@
   GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
 }
 
-void ciEnv::dump_replay_data(outputStream* out) {
-  VM_ENTRY_MARK;
-  MutexLocker ml(Compile_lock);
+// ------------------------------------------------------------------
+// ciEnv::dump_replay_data*
+
+// Does not change the thread state or acquire any locks.
+// Safe to call from the VM error reporter.
+void ciEnv::dump_replay_data_unsafe(outputStream* out) {
   ResourceMark rm;
 #if INCLUDE_JVMTI
   out->print_cr("JvmtiExport can_access_local_variables %d",     _jvmti_can_access_local_variables);
@@ -1181,3 +1184,10 @@
                 entry_bci, comp_level);
   out->flush();
 }
+
+void ciEnv::dump_replay_data(outputStream* out) {
+  GUARDED_VM_ENTRY(
+    MutexLocker ml(Compile_lock);
+    dump_replay_data_unsafe(out);
+  )
+}
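
The refactoring above splits dump_replay_data into a lock-free worker and a locking wrapper, so the VM error reporter can dump replay data without risking a deadlock. A minimal sketch of the pattern, with std::mutex standing in for Compile_lock (hypothetical names):

    #include <cstdio>
    #include <mutex>

    static std::mutex compile_lock;  // stand-in for Compile_lock

    // Worker: takes no locks and changes no global state, so it is safe
    // to call from a crash/error reporter where locking could deadlock.
    static void dump_replay_data_unsafe(std::FILE* out) {
      std::fprintf(out, "compile_id 42\n");
    }

    // Normal entry point: serializes against concurrent state changes,
    // then delegates to the unsafe worker.
    static void dump_replay_data(std::FILE* out) {
      std::lock_guard<std::mutex> guard(compile_lock);
      dump_replay_data_unsafe(out);
    }

    int main() {
      dump_replay_data(stdout);         // normal path
      dump_replay_data_unsafe(stdout);  // error-reporter path, no locking
      return 0;
    }
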
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -452,6 +452,7 @@
 
   // Dump the compilation replay data for the ciEnv to the stream.
   void dump_replay_data(outputStream* out);
+  void dump_replay_data_unsafe(outputStream* out);
 };
 
 #endif // SHARE_VM_CI_CIENV_HPP
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -671,7 +671,6 @@
 
 
 void ciInstanceKlass::dump_replay_data(outputStream* out) {
-  ASSERT_IN_VM;
   ResourceMark rm;
 
   InstanceKlass* ik = get_instanceKlass();
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -235,6 +235,13 @@
   bool is_instance_klass() const { return true; }
   bool is_java_klass() const     { return true; }
 
+  virtual ciKlass* exact_klass() {
+    if (is_loaded() && is_final() && !is_interface()) {
+      return this;
+    }
+    return NULL;
+  }
+
   // Dump the current state of this klass for compilation replay.
   virtual void dump_replay_data(outputStream* out);
 };
--- a/hotspot/src/share/vm/ci/ciKlass.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciKlass.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -66,7 +66,9 @@
 // ------------------------------------------------------------------
 // ciKlass::is_subtype_of
 bool ciKlass::is_subtype_of(ciKlass* that) {
-  assert(is_loaded() && that->is_loaded(), "must be loaded");
+  assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
+  assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
+
   // Check to see if the klasses are identical.
   if (this == that) {
     return true;
@@ -83,8 +85,8 @@
 // ------------------------------------------------------------------
 // ciKlass::is_subclass_of
 bool ciKlass::is_subclass_of(ciKlass* that) {
-  assert(is_loaded() && that->is_loaded(), "must be loaded");
-  // Check to see if the klasses are identical.
+  assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
+  assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
 
   VM_ENTRY_MARK;
   Klass* this_klass = get_Klass();
--- a/hotspot/src/share/vm/ci/ciKlass.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciKlass.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -41,6 +41,7 @@
   friend class ciEnv;
   friend class ciField;
   friend class ciMethod;
+  friend class ciMethodData;
   friend class ciObjArrayKlass;
 
 private:
@@ -121,6 +122,8 @@
   // What kind of ciObject is this?
   bool is_klass() const { return true; }
 
+  virtual ciKlass* exact_klass() = 0;
+
   void print_name_on(outputStream* st);
 };
 
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -846,7 +846,9 @@
 // Return true if allocation was successful or no MDO is required.
 bool ciMethod::ensure_method_data(methodHandle h_m) {
   EXCEPTION_CONTEXT;
-  if (is_native() || is_abstract() || h_m()->is_accessor()) return true;
+  if (is_native() || is_abstract() || h_m()->is_accessor()) {
+    return true;
+  }
   if (h_m()->method_data() == NULL) {
     Method::build_interpreter_method_data(h_m, THREAD);
     if (HAS_PENDING_EXCEPTION) {
@@ -903,22 +905,21 @@
 // NULL otherwise.
 ciMethodData* ciMethod::method_data_or_null() {
   ciMethodData *md = method_data();
-  if (md->is_empty()) return NULL;
+  if (md->is_empty()) {
+    return NULL;
+  }
   return md;
 }
 
 // ------------------------------------------------------------------
 // ciMethod::ensure_method_counters
 //
-address ciMethod::ensure_method_counters() {
+MethodCounters* ciMethod::ensure_method_counters() {
   check_is_loaded();
   VM_ENTRY_MARK;
   methodHandle mh(THREAD, get_Method());
-  MethodCounters *counter = mh->method_counters();
-  if (counter == NULL) {
-    counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL);
-  }
-  return (address)counter;
+  MethodCounters* method_counters = mh->get_method_counters(CHECK_NULL);
+  return method_counters;
 }
 
 // ------------------------------------------------------------------
@@ -1247,7 +1248,6 @@
 #undef FETCH_FLAG_FROM_VM
 
 void ciMethod::dump_replay_data(outputStream* st) {
-  ASSERT_IN_VM;
   ResourceMark rm;
   Method* method = get_Method();
   MethodCounters* mcs = method->method_counters();
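
ensure_method_counters now surfaces allocation failure as NULL, which the caller in c1_LIRGenerator.cpp above turns into a compilation bailout. A standalone sketch of that get-or-create contract (hypothetical types, not the HotSpot classes):

    #include <cstdio>
    #include <cstdlib>

    struct MethodCounters { long invocations; };

    struct Method {
      MethodCounters* counters;
      // Returns existing counters, or tries to allocate them; NULL means
      // allocation failed and the caller must be prepared to bail out.
      MethodCounters* get_method_counters() {
        if (counters == nullptr) {
          counters = static_cast<MethodCounters*>(
              std::calloc(1, sizeof(MethodCounters)));
        }
        return counters;  // may still be NULL on allocation failure
      }
    };

    int main() {
      Method m = {nullptr};
      MethodCounters* mcs = m.get_method_counters();
      if (mcs == nullptr) {
        std::printf("bailout: method counters allocation failed\n");
        return 1;
      }
      mcs->invocations++;
      std::printf("invocations = %ld\n", mcs->invocations);
      std::free(mcs);
      return 0;
    }
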
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -265,7 +265,7 @@
   bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const;
   bool check_call(int refinfo_index, bool is_static) const;
   bool ensure_method_data();  // make sure it exists in the VM also
-  address ensure_method_counters();
+  MethodCounters* ensure_method_counters();
   int instructions_size();
   int scale_count(int count, float prof_factor = 1.);  // make MDO count commensurate with IIC
 
--- a/hotspot/src/share/vm/ci/ciMethodData.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodData.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -78,7 +78,9 @@
 
 void ciMethodData::load_data() {
   MethodData* mdo = get_MethodData();
-  if (mdo == NULL) return;
+  if (mdo == NULL) {
+    return;
+  }
 
   // To do: don't copy the data if it is not "ripe" -- require a minimum #
   // of invocations.
@@ -123,7 +125,7 @@
 #endif
 }
 
-void ciReceiverTypeData::translate_receiver_data_from(ProfileData* data) {
+void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
   for (uint row = 0; row < row_limit(); row++) {
     Klass* k = data->as_ReceiverTypeData()->receiver(row);
     if (k != NULL) {
@@ -134,6 +136,18 @@
 }
 
 
+void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries* entries) {
+  for (int i = 0; i < _number_of_entries; i++) {
+    intptr_t k = entries->type(i);
+    TypeStackSlotEntries::set_type(i, translate_klass(k));
+  }
+}
+
+void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
+  intptr_t k = ret->type();
+  set_type(translate_klass(k));
+}
+
 // Get the data at an arbitrary (sort of) data index.
 ciProfileData* ciMethodData::data_at(int data_index) {
   if (out_of_bounds(data_index)) {
@@ -164,6 +178,10 @@
     return new ciMultiBranchData(data_layout);
   case DataLayout::arg_info_data_tag:
     return new ciArgInfoData(data_layout);
+  case DataLayout::call_type_data_tag:
+    return new ciCallTypeData(data_layout);
+  case DataLayout::virtual_call_type_data_tag:
+    return new ciVirtualCallTypeData(data_layout);
   };
 }
 
@@ -286,6 +304,34 @@
   }
 }
 
+void ciMethodData::set_argument_type(int bci, int i, ciKlass* k) {
+  VM_ENTRY_MARK;
+  MethodData* mdo = get_MethodData();
+  if (mdo != NULL) {
+    ProfileData* data = mdo->bci_to_data(bci);
+    if (data->is_CallTypeData()) {
+      data->as_CallTypeData()->set_argument_type(i, k->get_Klass());
+    } else {
+      assert(data->is_VirtualCallTypeData(), "no arguments!");
+      data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass());
+    }
+  }
+}
+
+void ciMethodData::set_return_type(int bci, ciKlass* k) {
+  VM_ENTRY_MARK;
+  MethodData* mdo = get_MethodData();
+  if (mdo != NULL) {
+    ProfileData* data = mdo->bci_to_data(bci);
+    if (data->is_CallTypeData()) {
+      data->as_CallTypeData()->set_return_type(k->get_Klass());
+    } else {
+      assert(data->is_VirtualCallTypeData(), "no arguments!");
+      data->as_VirtualCallTypeData()->set_return_type(k->get_Klass());
+    }
+  }
+}
+
 bool ciMethodData::has_escape_info() {
   return eflag_set(MethodData::estimated);
 }
@@ -373,7 +419,6 @@
 }
 
 void ciMethodData::dump_replay_data(outputStream* out) {
-  ASSERT_IN_VM;
   ResourceMark rm;
   MethodData* mdo = get_MethodData();
   Method* method = mdo->method();
@@ -477,7 +522,50 @@
   }
 }
 
-void ciReceiverTypeData::print_receiver_data_on(outputStream* st) {
+void ciTypeEntries::print_ciklass(outputStream* st, intptr_t k) {
+  if (TypeEntries::is_type_none(k)) {
+    st->print("none");
+  } else if (TypeEntries::is_type_unknown(k)) {
+    st->print("unknown");
+  } else {
+    valid_ciklass(k)->print_name_on(st);
+  }
+  if (TypeEntries::was_null_seen(k)) {
+    st->print(" (null seen)");
+  }
+}
+
+void ciTypeStackSlotEntries::print_data_on(outputStream* st) const {
+  for (int i = 0; i < _number_of_entries; i++) {
+    _pd->tab(st);
+    st->print("%d: stack (%u) ", i, stack_slot(i));
+    print_ciklass(st, type(i));
+    st->cr();
+  }
+}
+
+void ciReturnTypeEntry::print_data_on(outputStream* st) const {
+  _pd->tab(st);
+  st->print("ret ");
+  print_ciklass(st, type());
+  st->cr();
+}
+
+void ciCallTypeData::print_data_on(outputStream* st) const {
+  print_shared(st, "ciCallTypeData");
+  if (has_arguments()) {
+    tab(st, true);
+    st->print("argument types");
+    args()->print_data_on(st);
+  }
+  if (has_return()) {
+    tab(st, true);
+    st->print("return type");
+    ret()->print_data_on(st);
+  }
+}
+
+void ciReceiverTypeData::print_receiver_data_on(outputStream* st) const {
   uint row;
   int entries = 0;
   for (row = 0; row < row_limit(); row++) {
@@ -493,13 +581,28 @@
   }
 }
 
-void ciReceiverTypeData::print_data_on(outputStream* st) {
+void ciReceiverTypeData::print_data_on(outputStream* st) const {
   print_shared(st, "ciReceiverTypeData");
   print_receiver_data_on(st);
 }
 
-void ciVirtualCallData::print_data_on(outputStream* st) {
+void ciVirtualCallData::print_data_on(outputStream* st) const {
   print_shared(st, "ciVirtualCallData");
   rtd_super()->print_receiver_data_on(st);
 }
+
+void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
+  print_shared(st, "ciVirtualCallTypeData");
+  rtd_super()->print_receiver_data_on(st);
+  if (has_arguments()) {
+    tab(st, true);
+    st->print("argument types");
+    args()->print_data_on(st);
+  }
+  if (has_return()) {
+    tab(st, true);
+    st->print("return type");
+    ret()->print_data_on(st);
+  }
+}
 #endif
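
ciTypeEntries::with_status and valid_ciklass above pack a klass pointer and profiling status into a single intptr_t, exploiting the zero low bits of an aligned pointer. A standalone sketch of that encoding, assuming 8-byte alignment and made-up bit assignments (the real ones live in TypeEntries):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // The low bits of an aligned pointer are always zero, so they can
    // carry status flags alongside the pointer itself.
    static const intptr_t null_seen_bit = 1;  // a NULL was observed here
    static const intptr_t unknown_bit   = 2;  // conflicting types observed
    static const intptr_t status_mask   = null_seen_bit | unknown_bit;

    static intptr_t with_status(const void* klass, intptr_t status) {
      intptr_t p = reinterpret_cast<intptr_t>(klass);
      assert((p & status_mask) == 0 && "pointer must be aligned");
      return p | (status & status_mask);
    }

    static void* klass_part(intptr_t entry) {
      return reinterpret_cast<void*>(entry & ~status_mask);
    }

    static bool was_null_seen(intptr_t entry) {
      return (entry & null_seen_bit) != 0;
    }

    int main() {
      alignas(8) static int dummy_klass;
      intptr_t entry = with_status(&dummy_klass, null_seen_bit);
      std::printf("klass=%p null_seen=%d\n",
                  klass_part(entry), (int)was_null_seen(entry));
      return 0;
    }
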
--- a/hotspot/src/share/vm/ci/ciMethodData.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodData.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -41,6 +41,8 @@
 class ciArrayData;
 class ciMultiBranchData;
 class ciArgInfoData;
+class ciCallTypeData;
+class ciVirtualCallTypeData;
 
 typedef ProfileData ciProfileData;
 
@@ -59,6 +61,103 @@
   ciJumpData(DataLayout* layout) : JumpData(layout) {};
 };
 
+class ciTypeEntries {
+protected:
+  static intptr_t translate_klass(intptr_t k) {
+    Klass* v = TypeEntries::valid_klass(k);
+    if (v != NULL) {
+      ciKlass* klass = CURRENT_ENV->get_klass(v);
+      return with_status(klass, k);
+    }
+    return with_status(NULL, k);
+  }
+
+public:
+  static ciKlass* valid_ciklass(intptr_t k) {
+    if (!TypeEntries::is_type_none(k) &&
+        !TypeEntries::is_type_unknown(k)) {
+      return (ciKlass*)TypeEntries::klass_part(k);
+    } else {
+      return NULL;
+    }
+  }
+
+  static intptr_t with_status(ciKlass* k, intptr_t in) {
+    return TypeEntries::with_status((intptr_t)k, in);
+  }
+
+#ifndef PRODUCT
+  static void print_ciklass(outputStream* st, intptr_t k);
+#endif
+};
+
+class ciTypeStackSlotEntries : public TypeStackSlotEntries, ciTypeEntries {
+public:
+  void translate_type_data_from(const TypeStackSlotEntries* args);
+
+  ciKlass* valid_type(int i) const {
+    return valid_ciklass(type(i));
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+class ciReturnTypeEntry : public ReturnTypeEntry, ciTypeEntries {
+public:
+  void translate_type_data_from(const ReturnTypeEntry* ret);
+
+  ciKlass* valid_type() const {
+    return valid_ciklass(type());
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+class ciCallTypeData : public CallTypeData {
+public:
+  ciCallTypeData(DataLayout* layout) : CallTypeData(layout) {}
+
+  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)CallTypeData::args(); }
+  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)CallTypeData::ret(); }
+
+  void translate_type_data_from(const ProfileData* data) {
+    if (has_arguments()) {
+      args()->translate_type_data_from(data->as_CallTypeData()->args());
+    }
+    if (has_return()) {
+      ret()->translate_type_data_from(data->as_CallTypeData()->ret());
+    }
+  }
+
+  intptr_t argument_type(int i) const {
+    assert(has_arguments(), "no arg type profiling data");
+    return args()->type(i);
+  }
+
+  ciKlass* valid_argument_type(int i) const {
+    assert(has_arguments(), "no arg type profiling data");
+    return args()->valid_type(i);
+  }
+
+  intptr_t return_type() const {
+    assert(has_return(), "no ret type profiling data");
+    return ret()->type();
+  }
+
+  ciKlass* valid_return_type() const {
+    assert(has_return(), "no ret type profiling data");
+    return ret()->valid_type();
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
 class ciReceiverTypeData : public ReceiverTypeData {
 public:
   ciReceiverTypeData(DataLayout* layout) : ReceiverTypeData(layout) {};
@@ -69,7 +168,7 @@
                   (intptr_t) recv);
   }
 
-  ciKlass* receiver(uint row) {
+  ciKlass* receiver(uint row) const {
     assert((uint)row < row_limit(), "oob");
     ciKlass* recv = (ciKlass*)intptr_at(receiver0_offset + row * receiver_type_row_cell_count);
     assert(recv == NULL || recv->is_klass(), "wrong type");
@@ -77,19 +176,19 @@
   }
 
   // Copy & translate from oop based ReceiverTypeData
-  virtual void translate_from(ProfileData* data) {
+  virtual void translate_from(const ProfileData* data) {
     translate_receiver_data_from(data);
   }
-  void translate_receiver_data_from(ProfileData* data);
+  void translate_receiver_data_from(const ProfileData* data);
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
-  void print_receiver_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
+  void print_receiver_data_on(outputStream* st) const;
 #endif
 };
 
 class ciVirtualCallData : public VirtualCallData {
   // Fake multiple inheritance...  It's a ciReceiverTypeData also.
-  ciReceiverTypeData* rtd_super() { return (ciReceiverTypeData*) this; }
+  ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
 
 public:
   ciVirtualCallData(DataLayout* layout) : VirtualCallData(layout) {};
@@ -103,11 +202,65 @@
   }
 
   // Copy & translate from oop based VirtualCallData
-  virtual void translate_from(ProfileData* data) {
+  virtual void translate_from(const ProfileData* data) {
     rtd_super()->translate_receiver_data_from(data);
   }
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+class ciVirtualCallTypeData : public VirtualCallTypeData {
+private:
+  // Fake multiple inheritance...  It's a ciReceiverTypeData also.
+  ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
+public:
+  ciVirtualCallTypeData(DataLayout* layout) : VirtualCallTypeData(layout) {}
+
+  void set_receiver(uint row, ciKlass* recv) {
+    rtd_super()->set_receiver(row, recv);
+  }
+
+  ciKlass* receiver(uint row) const {
+    return rtd_super()->receiver(row);
+  }
+
+  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)VirtualCallTypeData::args(); }
+  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)VirtualCallTypeData::ret(); }
+
+  // Copy & translate from oop based VirtualCallData
+  virtual void translate_from(const ProfileData* data) {
+    rtd_super()->translate_receiver_data_from(data);
+    if (has_arguments()) {
+      args()->translate_type_data_from(data->as_VirtualCallTypeData()->args());
+    }
+    if (has_return()) {
+      ret()->translate_type_data_from(data->as_VirtualCallTypeData()->ret());
+    }
+  }
+
+  intptr_t argument_type(int i) const {
+    assert(has_arguments(), "no arg type profiling data");
+    return args()->type(i);
+  }
+
+  ciKlass* valid_argument_type(int i) const {
+    assert(has_arguments(), "no arg type profiling data");
+    return args()->valid_type(i);
+  }
+
+  intptr_t return_type() const {
+    assert(has_return(), "no ret type profiling data");
+    return ret()->type();
+  }
+
+  ciKlass* valid_return_type() const {
+    assert(has_return(), "no ret type profiling data");
+    return ret()->valid_type();
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -232,8 +385,6 @@
 public:
   bool is_method_data() const { return true; }
 
-  void set_mature() { _state = mature_state; }
-
   bool is_empty()  { return _state == empty_state; }
   bool is_mature() { return _state == mature_state; }
 
@@ -249,6 +400,10 @@
   // Also set the number of loops and blocks in the method.
   // Again, this is used to determine if a method is trivial.
   void set_compilation_stats(short loops, short blocks);
+  // If the compiler finds a profiled type that is statically known
+  // exactly, set it in the MethodData
+  void set_argument_type(int bci, int i, ciKlass* k);
+  void set_return_type(int bci, ciKlass* k);
 
   void load_data();
 
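
The rtd_super() casts in ciMethodData.hpp above rely on a documented idiom: the ci mirror classes add behavior but no fields, so an object can be reinterpreted as a sibling mirror over the same DataLayout. A standalone sketch of that idiom under the same no-extra-fields assumption (hypothetical types; the cast is only sound because the object layouts are identical):

    #include <cstdio>

    struct DataLayout { int receiver_count; };

    // The base holds the only data member; the views below add
    // behavior but no fields, so they all share one object layout.
    class ProfileView {
     protected:
      DataLayout* _layout;
     public:
      explicit ProfileView(DataLayout* l) : _layout(l) {}
    };

    class ReceiverView : public ProfileView {
     public:
      explicit ReceiverView(DataLayout* l) : ProfileView(l) {}
      int receiver_count() const { return _layout->receiver_count; }
    };

    class VirtualCallView : public ProfileView {
     public:
      explicit VirtualCallView(DataLayout* l) : ProfileView(l) {}
      // "Fake multiple inheritance": view this object as its sibling.
      const ReceiverView* rtd_super() const {
        return reinterpret_cast<const ReceiverView*>(this);
      }
    };

    int main() {
      DataLayout layout = {3};
      VirtualCallView v(&layout);
      std::printf("receivers = %d\n", v.rtd_super()->receiver_count());
      return 0;
    }
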
--- a/hotspot/src/share/vm/ci/ciObjArrayKlass.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciObjArrayKlass.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -179,3 +179,16 @@
 ciObjArrayKlass* ciObjArrayKlass::make(ciKlass* element_klass) {
   GUARDED_VM_ENTRY(return make_impl(element_klass);)
 }
+
+ciKlass* ciObjArrayKlass::exact_klass() {
+  ciType* base = base_element_type();
+  if (base->is_instance_klass()) {
+    ciInstanceKlass* ik = base->as_instance_klass();
+    if (ik->exact_klass() != NULL) {
+      return this;
+    }
+  } else if (base->is_primitive_type()) {
+    return this;
+  }
+  return NULL;
+}
--- a/hotspot/src/share/vm/ci/ciObjArrayKlass.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciObjArrayKlass.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -73,6 +73,8 @@
   bool is_obj_array_klass() const { return true; }
 
   static ciObjArrayKlass* make(ciKlass* element_klass);
+
+  virtual ciKlass* exact_klass();
 };
 
 #endif // SHARE_VM_CI_CIOBJARRAYKLASS_HPP
--- a/hotspot/src/share/vm/ci/ciReplay.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciReplay.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -965,14 +965,12 @@
     tty->cr();
   } else {
     EXCEPTION_CONTEXT;
-    MethodCounters* mcs = method->method_counters();
     // m->_instructions_size = rec->instructions_size;
     m->_instructions_size = -1;
     m->_interpreter_invocation_count = rec->interpreter_invocation_count;
     m->_interpreter_throwout_count = rec->interpreter_throwout_count;
-    if (mcs == NULL) {
-      mcs = Method::build_method_counters(method, CHECK_AND_CLEAR);
-    }
+    MethodCounters* mcs = method->get_method_counters(CHECK_AND_CLEAR);
+    guarantee(mcs != NULL, "method counters allocation failed");
     mcs->invocation_counter()->_counter = rec->invocation_counter;
     mcs->backedge_counter()->_counter = rec->backedge_counter;
   }
--- a/hotspot/src/share/vm/ci/ciStreams.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciStreams.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -277,11 +277,14 @@
 class ciSignatureStream : public StackObj {
 private:
   ciSignature* _sig;
-  int    _pos;
+  int          _pos;
+  // The method holder; if non-NULL, next_klass() yields it first as the receiver type
+  ciKlass*     _holder;
 public:
-  ciSignatureStream(ciSignature* signature) {
+  ciSignatureStream(ciSignature* signature, ciKlass* holder = NULL) {
     _sig = signature;
     _pos = 0;
+    _holder = holder;
   }
 
   bool at_return_type() { return _pos == _sig->count(); }
@@ -301,6 +304,23 @@
       return _sig->type_at(_pos);
     }
   }
+
+  // next klass in the signature
+  ciKlass* next_klass() {
+    ciKlass* sig_k;
+    if (_holder != NULL) {
+      sig_k = _holder;
+      _holder = NULL;
+    } else {
+      while (!type()->is_klass()) {
+        next();
+      }
+      assert(!at_return_type(), "passed end of signature");
+      sig_k = type()->as_klass();
+      next();
+    }
+    return sig_k;
+  }
 };
 
 
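
next_klass above yields the method holder first (standing in for the receiver) and then every reference type in the signature, skipping primitive slots. A standalone sketch over a deliberately simplified signature model (hypothetical; ciSignatureStream works on real descriptors):

    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    // Tiny model: a signature is a list of entries; an empty string
    // marks a primitive slot, otherwise the entry names a class.
    class SignatureKlasses {
      std::vector<std::string> _sig;
      size_t                   _pos;
      std::string              _holder;  // non-empty for instance methods
     public:
      SignatureKlasses(std::vector<std::string> sig, std::string holder)
          : _sig(std::move(sig)), _pos(0), _holder(std::move(holder)) {}

      // Holder (receiver type) first, then each class in the signature.
      bool next_klass(std::string* out) {
        if (!_holder.empty()) {
          *out = _holder;
          _holder.clear();
          return true;
        }
        while (_pos < _sig.size() && _sig[_pos].empty()) {
          _pos++;  // skip primitive slots
        }
        if (_pos == _sig.size()) return false;
        *out = _sig[_pos++];
        return true;
      }
    };

    int main() {
      // (ILjava/lang/String;J)V on an instance method of Foo
      SignatureKlasses s({"", "java/lang/String", ""}, "Foo");
      std::string k;
      while (s.next_klass(&k)) std::printf("%s\n", k.c_str());
      return 0;
    }
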
--- a/hotspot/src/share/vm/ci/ciTypeArrayKlass.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciTypeArrayKlass.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -57,6 +57,10 @@
 
   // Make an array klass corresponding to the specified primitive type.
   static ciTypeArrayKlass* make(BasicType type);
+
+  virtual ciKlass* exact_klass() {
+    return this;
+  }
 };
 
 #endif // SHARE_VM_CI_CITYPEARRAYKLASS_HPP
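
Taken together, the three exact_klass overrides (ciInstanceKlass, ciObjArrayKlass, and ciTypeArrayKlass above) encode when a static type fully determines the runtime type. A standalone decision sketch of those rules (hypothetical model; a NULL base element stands for a primitive base):

    #include <cstdio>

    enum Kind { INSTANCE, OBJ_ARRAY, TYPE_ARRAY };

    struct KlassDesc {
      Kind kind;
      bool is_loaded, is_final, is_interface;  // for INSTANCE
      const KlassDesc* base_element;           // for OBJ_ARRAY; NULL if
                                               // the base is primitive
    };

    // "Exact" means instances of this static type can have no other
    // runtime type, so profiling it would be redundant.
    static bool is_exact(const KlassDesc* k) {
      switch (k->kind) {
        case TYPE_ARRAY: return true;                       // int[], byte[], ...
        case OBJ_ARRAY:  return k->base_element == nullptr  // primitive base
                             || is_exact(k->base_element);  // or exact base
        default:         return k->is_loaded && k->is_final && !k->is_interface;
      }
    }

    int main() {
      KlassDesc string_k   = {INSTANCE, true, true, false, nullptr};
      KlassDesc object_k   = {INSTANCE, true, false, false, nullptr};
      KlassDesc string_arr = {OBJ_ARRAY, true, false, false, &string_k};
      KlassDesc object_arr = {OBJ_ARRAY, true, false, false, &object_k};
      std::printf("String %d String[] %d Object[] %d\n",
                  (int)is_exact(&string_k), (int)is_exact(&string_arr),
                  (int)is_exact(&object_arr));
      return 0;
    }
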
--- a/hotspot/src/share/vm/classfile/defaultMethods.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/classfile/defaultMethods.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -857,7 +857,6 @@
   m->set_max_locals(params);
   m->constMethod()->set_stackmap_data(NULL);
   m->set_code(code_start);
-  m->set_force_inline(true);
 
   return m;
 }
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -2439,19 +2439,19 @@
              && !ref_class_type.equals(current_type())
              && !ref_class_type.equals(VerificationType::reference_type(
                   current_class()->super()->name()))) {
-    bool subtype = ref_class_type.is_assignable_from(
-      current_type(), this, CHECK_VERIFY(this));
+    bool subtype = false;
+    if (!current_class()->is_anonymous()) {
+      subtype = ref_class_type.is_assignable_from(
+                 current_type(), this, CHECK_VERIFY(this));
+    } else {
+      subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
+                 current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
+    }
     if (!subtype) {
-      if (current_class()->is_anonymous()) {
-        subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
-                   current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
-      }
-      if (!subtype) {
-        verify_error(ErrorContext::bad_code(bci),
-            "Bad invokespecial instruction: "
-            "current class isn't assignable to reference class.");
-         return;
-      }
+      verify_error(ErrorContext::bad_code(bci),
+          "Bad invokespecial instruction: "
+          "current class isn't assignable to reference class.");
+       return;
     }
   }
   // Match method descriptor with operand stack
@@ -2470,17 +2470,13 @@
         if (!current_class()->is_anonymous()) {
           current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
         } else {
-          // anonymous class invokespecial calls: either the
-          // operand stack/objectref  is a subtype of the current class OR
-          // the objectref is a subtype of the host_klass of the current class
+          // anonymous class invokespecial calls: check if the
+          // objectref is a subtype of the host_klass of the current class
           // to allow an anonymous class to reference methods in the host_klass
           VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
-          bool subtype = current_type().is_assignable_from(top, this, CHECK_VERIFY(this));
-          if (!subtype) {
-            VerificationType hosttype =
-              VerificationType::reference_type(current_class()->host_klass()->name());
-            subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
-          }
+          VerificationType hosttype =
+            VerificationType::reference_type(current_class()->host_klass()->name());
+          bool subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
           if (!subtype) {
             verify_error( ErrorContext::bad_type(current_frame->offset(),
               current_frame->stack_top_ctx(),
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -245,8 +245,8 @@
 }
 
 
-void* BufferBlob::operator new(size_t s, unsigned size) throw() {
-  void* p = CodeCache::allocate(size);
+void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
+  void* p = CodeCache::allocate(size, is_critical);
   return p;
 }
 
@@ -277,7 +277,10 @@
   unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    blob = new (size) AdapterBlob(size, cb);
+    // The parameter 'true' marks this as a critical memory allocation,
+    // which may dip into the CodeCacheMinimumFreeSpace reserve if necessary
+    const bool is_critical = true;
+    blob = new (size, is_critical) AdapterBlob(size, cb);
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
@@ -299,7 +302,10 @@
   size += round_to(buffer_size, oopSize);
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    blob = new (size) MethodHandlesAdapterBlob(size);
+    // The parameter 'true' marks this as a critical memory allocation,
+    // which may dip into the CodeCacheMinimumFreeSpace reserve if necessary
+    const bool is_critical = true;
+    blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
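
The is_critical flag above lets adapter blobs dip into the headroom reserved by CodeCacheMinimumFreeSpace, since failing to allocate an adapter is fatal to the VM while an ordinary nmethod allocation can simply be refused. A standalone sketch of an allocator with a reserve that only critical requests may consume (hypothetical; the real accounting is inside CodeCache::allocate):

    #include <cstddef>
    #include <cstdio>

    class ReserveAllocator {
      size_t _free;     // bytes currently free
      size_t _reserve;  // headroom kept back for critical requests
     public:
      ReserveAllocator(size_t free, size_t reserve)
          : _free(free), _reserve(reserve) {}

      // Ordinary requests must leave the reserve untouched; critical
      // ones (e.g. adapters) may consume it.
      bool allocate(size_t size, bool is_critical) {
        size_t usable = is_critical
            ? _free
            : (_free > _reserve ? _free - _reserve : 0);
        if (size > usable) return false;
        _free -= size;
        return true;
      }
    };

    int main() {
      ReserveAllocator cache(1000, 400);
      std::printf("normal 700:   %d\n", (int)cache.allocate(700, false));
      std::printf("critical 700: %d\n", (int)cache.allocate(700, true));
      return 0;
    }
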
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -209,7 +209,7 @@
   BufferBlob(const char* name, int size);
   BufferBlob(const char* name, int size, CodeBuffer* cb);
 
-  void* operator new(size_t s, unsigned size) throw();
+  void* operator new(size_t s, unsigned size, bool is_critical = false) throw();
 
  public:
   // Creation
@@ -253,7 +253,6 @@
 class MethodHandlesAdapterBlob: public BufferBlob {
 private:
   MethodHandlesAdapterBlob(int size)                 : BufferBlob("MethodHandles adapters", size) {}
-  MethodHandlesAdapterBlob(int size, CodeBuffer* cb) : BufferBlob("MethodHandles adapters", size, cb) {}
 
 public:
   // Creation
--- a/hotspot/src/share/vm/compiler/abstractCompiler.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/compiler/abstractCompiler.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -24,41 +24,42 @@
 
 #include "precompiled.hpp"
 #include "compiler/abstractCompiler.hpp"
+#include "compiler/compileBroker.hpp"
 #include "runtime/mutexLocker.hpp"
-void AbstractCompiler::initialize_runtimes(initializer f, volatile int* state) {
-  if (*state != initialized) {
+
+bool AbstractCompiler::should_perform_init() {
+  if (_compiler_state != initialized) {
+    MutexLocker only_one(CompileThread_lock);
 
-    // We are thread in native here...
-    CompilerThread* thread = CompilerThread::current();
-    bool do_initialization = false;
-    {
-      ThreadInVMfromNative tv(thread);
-      ResetNoHandleMark rnhm;
-      MutexLocker only_one(CompileThread_lock, thread);
-      if ( *state == uninitialized) {
-        do_initialization = true;
-        *state = initializing;
-      } else {
-        while (*state == initializing ) {
-          CompileThread_lock->wait();
-        }
+    if (_compiler_state == uninitialized) {
+      _compiler_state = initializing;
+      return true;
+    } else {
+      while (_compiler_state == initializing) {
+        CompileThread_lock->wait();
       }
     }
-    if (do_initialization) {
-      // We can not hold any locks here since JVMTI events may call agents
+  }
+  return false;
+}
 
-      // Compiler(s) run as native
-
-      (*f)();
-
-      // To in_vm so we can use the lock
+bool AbstractCompiler::should_perform_shutdown() {
+  // Since this method can be called by multiple threads, the lock ensures atomicity of
+  // decrementing '_num_compiler_threads' and the following operations.
+  MutexLocker only_one(CompileThread_lock);
+  _num_compiler_threads--;
+  assert(CompileBroker::is_compilation_disabled_forever(), "Must be set, otherwise thread waits forever");
 
-      ThreadInVMfromNative tv(thread);
-      ResetNoHandleMark rnhm;
-      MutexLocker only_one(CompileThread_lock, thread);
-      assert(*state == initializing, "wrong state");
-      *state = initialized;
-      CompileThread_lock->notify_all();
-    }
+  // Only the last thread will perform shutdown operations
+  if (_num_compiler_threads == 0) {
+    return true;
   }
+  return false;
 }
+
+void AbstractCompiler::set_state(int state) {
+  // Ensure that the state is only set by one thread at a time
+  MutexLocker only_one(CompileThread_lock);
+  _compiler_state = state;
+  CompileThread_lock->notify_all();
+}
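
should_perform_init and set_state above implement a "first thread initializes, the rest wait" handshake on CompileThread_lock. A standalone sketch of the same handshake with std::mutex and std::condition_variable (hypothetical; HotSpot uses its own Monitor class):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    enum State { uninitialized, initializing, initialized };

    static std::mutex lock_;
    static std::condition_variable cv_;
    static State state_ = uninitialized;

    // Returns true for exactly one caller, which must then run the
    // initialization and publish the result via set_state().
    static bool should_perform_init() {
      std::unique_lock<std::mutex> guard(lock_);
      if (state_ == uninitialized) {
        state_ = initializing;
        return true;
      }
      while (state_ == initializing) cv_.wait(guard);
      return false;
    }

    static void set_state(State s) {
      std::lock_guard<std::mutex> guard(lock_);
      state_ = s;
      cv_.notify_all();
    }

    int main() {
      std::vector<std::thread> threads;
      for (int i = 0; i < 4; i++) {
        threads.emplace_back([] {
          if (should_perform_init()) {
            std::printf("initializing runtime once\n");
            set_state(initialized);
          }
        });
      }
      for (auto& t : threads) t.join();
      return 0;
    }
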
--- a/hotspot/src/share/vm/compiler/abstractCompiler.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/compiler/abstractCompiler.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -27,22 +27,25 @@
 
 #include "ci/compilerInterface.hpp"
 
-typedef void (*initializer)(void);
-
 class AbstractCompiler : public CHeapObj<mtCompiler> {
  private:
-  bool _is_initialized; // Mark whether compiler object is initialized
+  volatile int _num_compiler_threads;
 
  protected:
+  volatile int _compiler_state;
   // Used for tracking global state of compiler runtime initialization
-  enum { uninitialized, initializing, initialized };
+  enum { uninitialized, initializing, initialized, failed, shut_down };
 
-  // This method will call the initialization method "f" once (per compiler class/subclass)
-  // and do so without holding any locks
-  void initialize_runtimes(initializer f, volatile int* state);
+  // Returns true for the first compiler thread that reaches this method.
+  // This thread will initialize the compiler runtime.
+  bool should_perform_init();
 
  public:
-  AbstractCompiler() : _is_initialized(false)    {}
+  AbstractCompiler() : _compiler_state(uninitialized), _num_compiler_threads(0) {}
+
+  // This function determines the compiler thread that will perform the
+  // shutdown of the corresponding compiler runtime.
+  bool should_perform_shutdown();
 
   // Name of this compiler
   virtual const char* name() = 0;
@@ -74,17 +77,18 @@
 #endif // TIERED
 
   // Customization
-  virtual bool needs_stubs            ()         = 0;
+  virtual void initialize () = 0;
 
-  void mark_initialized()                        { _is_initialized = true; }
-  bool is_initialized()                          { return _is_initialized; }
+  void set_num_compiler_threads(int num) { _num_compiler_threads = num;  }
+  int num_compiler_threads()             { return _num_compiler_threads; }
 
-  virtual void initialize()                      = 0;
-
+  // Get/set state of compiler objects
+  bool is_initialized()           { return _compiler_state == initialized; }
+  bool is_failed     ()           { return _compiler_state == failed;}
+  void set_state     (int state);
+  void set_shut_down ()           { set_state(shut_down); }
   // Compilation entry point for methods
-  virtual void compile_method(ciEnv* env,
-                              ciMethod* target,
-                              int entry_bci) {
+  virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
     ShouldNotReachHere();
   }
 
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -186,7 +186,7 @@
 CompileQueue* CompileBroker::_c1_method_queue    = NULL;
 CompileTask*  CompileBroker::_task_free_list     = NULL;
 
-GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
+GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
 
 
 class CompilationLog : public StringEventLog {
@@ -587,9 +587,6 @@
 
 
 
-// ------------------------------------------------------------------
-// CompileQueue::add
-//
 // Add a CompileTask to a CompileQueue
 void CompileQueue::add(CompileTask* task) {
   assert(lock()->owned_by_self(), "must own lock");
@@ -626,6 +623,16 @@
   lock()->notify_all();
 }
 
+void CompileQueue::delete_all() {
+  assert(lock()->owned_by_self(), "must own lock");
+  if (_first != NULL) {
+    CompileTask* next = NULL;
+    for (CompileTask* task = _first; task != NULL; task = next) {
+      next = task->next();  // read the link before deleting the task
+      delete task;
+    }
+    _first = NULL;
+  }
+}
+
 // ------------------------------------------------------------------
 // CompileQueue::get
 //
@@ -640,6 +647,11 @@
   // case we perform code cache sweeps to free memory such that we can re-enable
   // compilation.
   while (_first == NULL) {
+    // Exit loop if compilation is disabled forever
+    if (CompileBroker::is_compilation_disabled_forever()) {
+      return NULL;
+    }
+
     if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
       // Wait a certain amount of time to possibly do another sweep.
       // We must wait until stack scanning has happened so that we can
@@ -664,9 +676,17 @@
       // remains unchanged. This behavior is desired, since we want to keep
       // the stable state, i.e., we do not want to evict methods from the
       // code cache if it is unnecessary.
-      lock()->wait();
+      // We need a timed wait here, since compiler threads can exit if compilation
+      // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
+      // is not critical and we do not want idle compiler threads to wake up too often.
+      lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
     }
   }
+
+  if (CompileBroker::is_compilation_disabled_forever()) {
+    return NULL;
+  }
+
   CompileTask* task = CompilationPolicy::policy()->select_task(this);
   remove(task);
   return task;
@@ -891,10 +911,8 @@
 }
 
 
-
-// ------------------------------------------------------------------
-// CompileBroker::make_compiler_thread
-CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS) {
+CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
+                                                    AbstractCompiler* comp, TRAPS) {
   CompilerThread* compiler_thread = NULL;
 
   Klass* k =
@@ -961,6 +979,7 @@
     java_lang_Thread::set_daemon(thread_oop());
 
     compiler_thread->set_threadObj(thread_oop());
+    compiler_thread->set_compiler(comp);
     Threads::add(compiler_thread);
     Thread::start(compiler_thread);
   }
@@ -972,25 +991,24 @@
 }
 
 
-// ------------------------------------------------------------------
-// CompileBroker::init_compiler_threads
-//
-// Initialize the compilation queue
 void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
   EXCEPTION_MARK;
 #if !defined(ZERO) && !defined(SHARK)
   assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
 #endif // !ZERO && !SHARK
+  // Initialize the compilation queue
   if (c2_compiler_count > 0) {
     _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
+    _compilers[1]->set_num_compiler_threads(c2_compiler_count);
   }
   if (c1_compiler_count > 0) {
     _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
+    _compilers[0]->set_num_compiler_threads(c1_compiler_count);
   }
 
   int compiler_count = c1_compiler_count + c2_compiler_count;
 
-  _method_threads =
+  _compiler_threads =
     new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
 
   char name_buffer[256];
@@ -998,21 +1016,22 @@
     // Create a name for our thread.
     sprintf(name_buffer, "C2 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK);
-    _method_threads->append(new_thread);
+    // Shark and C2
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK);
+    _compiler_threads->append(new_thread);
   }
 
   for (int i = c2_compiler_count; i < compiler_count; i++) {
     // Create a name for our thread.
     sprintf(name_buffer, "C1 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK);
-    _method_threads->append(new_thread);
+    // C1
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK);
+    _compiler_threads->append(new_thread);
   }
 
   if (UsePerfData) {
-    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes,
-                                     compiler_count, CHECK);
+    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
   }
 }
 
@@ -1029,27 +1048,6 @@
 }
 
 // ------------------------------------------------------------------
-// CompileBroker::is_idle
-bool CompileBroker::is_idle() {
-  if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) {
-    return false;
-  } else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) {
-    return false;
-  } else {
-    int num_threads = _method_threads->length();
-    for (int i=0; i<num_threads; i++) {
-      if (_method_threads->at(i)->task() != NULL) {
-        return false;
-      }
-    }
-
-    // No pending or active compilations.
-    return true;
-  }
-}
-
-
-// ------------------------------------------------------------------
 // CompileBroker::compile_method
 //
 // Request compilation of a method.
@@ -1551,6 +1549,101 @@
   free_task(task);
 }
 
+// Initialize compiler thread(s) + compiler object(s). The postcondition
+// of this function is that the compiler runtimes are initialized and that
+// compiler threads can start compiling.
+bool CompileBroker::init_compiler_runtime() {
+  CompilerThread* thread = CompilerThread::current();
+  AbstractCompiler* comp = thread->compiler();
+  // Final sanity check - the compiler object must exist
+  guarantee(comp != NULL, "Compiler object must exist");
+
+  int system_dictionary_modification_counter;
+  {
+    MutexLocker locker(Compile_lock, thread);
+    system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
+  }
+
+  {
+    // Must switch to native to allocate ci_env
+    ThreadToNativeFromVM ttn(thread);
+    ciEnv ci_env(NULL, system_dictionary_modification_counter);
+    // Cache Jvmti state
+    ci_env.cache_jvmti_state();
+    // Cache DTrace flags
+    ci_env.cache_dtrace_flags();
+
+    // Switch back to VM state to do compiler initialization
+    ThreadInVMfromNative tv(thread);
+    ResetNoHandleMark rnhm;
+
+    if (!comp->is_shark()) {
+      // Perform per-thread and global initializations
+      comp->initialize();
+    }
+  }
+
+  if (comp->is_failed()) {
+    disable_compilation_forever();
+    // If compiler initialization failed, no compiler thread that is specific to a
+    // particular compiler runtime will ever start to compile methods.
+
+    shutdown_compiler_runtime(comp, thread);
+    return false;
+  }
+
+  // C1 specific check
+  if (comp->is_c1() && (thread->get_buffer_blob() == NULL)) {
+    warning("Initialization of %s thread failed (no space to run compilers)", thread->name());
+    return false;
+  }
+
+  return true;
+}
+
+// If C1 and/or C2 initialization failed, we shut down all compilation.
+// We do this to keep things simple. This can be changed if it ever turns out to be
+// a problem.
+void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
+  // Free buffer blob, if allocated
+  if (thread->get_buffer_blob() != NULL) {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    CodeCache::free(thread->get_buffer_blob());
+  }
+
+  if (comp->should_perform_shutdown()) {
+    // There are two reasons for shutting down the compiler
+    // 1) compiler runtime initialization failed
+    // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
+    warning("Shutting down compiler %s (no space to run compilers)", comp->name());
+
+    // Only one thread per compiler runtime object enters here
+    // Set state to shut down
+    comp->set_shut_down();
+
+    MutexLocker mu(MethodCompileQueue_lock, thread);
+    CompileQueue* queue;
+    if (_c1_method_queue != NULL) {
+      _c1_method_queue->delete_all();
+      queue = _c1_method_queue;
+      _c1_method_queue = NULL;
+      delete queue;
+    }
+
+    if (_c2_method_queue != NULL) {
+      _c2_method_queue->delete_all();
+      queue = _c2_method_queue;
+      _c2_method_queue = NULL;
+      delete queue;
+    }
+
+    // We could delete compiler runtimes also. However, there are references to
+    // the compiler runtime(s) (e.g.,  nmethod::is_compiled_by_c1()) which then
+    // fail. This can be done later if necessary.
+  }
+}
+
 // ------------------------------------------------------------------
 // CompileBroker::compiler_thread_loop
 //
@@ -1558,7 +1651,6 @@
 void CompileBroker::compiler_thread_loop() {
   CompilerThread* thread = CompilerThread::current();
   CompileQueue* queue = thread->queue();
-
   // For the thread that initializes the ciObjectFactory
   // this resource mark holds all the shared objects
   ResourceMark rm;
@@ -1587,65 +1679,78 @@
     log->end_elem();
   }
 
-  while (true) {
-    {
-      // We need this HandleMark to avoid leaking VM handles.
-      HandleMark hm(thread);
+  // If compiler thread/runtime initialization fails, exit the compiler thread
+  if (!init_compiler_runtime()) {
+    return;
+  }
 
-      if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-        // the code cache is really full
-        handle_full_code_cache();
-      }
+  // Poll for new compilation tasks as long as the JVM runs. Compilation
+  // should only be disabled if something went wrong while initializing the
+  // compiler runtimes. This, in turn, should not happen. The only known case
+  // when compiler runtime initialization fails is if there is not enough free
+  // space in the code cache to generate the necessary stubs, etc.
+  while (!is_compilation_disabled_forever()) {
+    // We need this HandleMark to avoid leaking VM handles.
+    HandleMark hm(thread);
 
-      CompileTask* task = queue->get();
+    if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
+      // the code cache is really full
+      handle_full_code_cache();
+    }
 
-      // Give compiler threads an extra quanta.  They tend to be bursty and
-      // this helps the compiler to finish up the job.
-      if( CompilerThreadHintNoPreempt )
-        os::hint_no_preempt();
+    CompileTask* task = queue->get();
+    if (task == NULL) {
+      continue;
+    }
 
-      // trace per thread time and compile statistics
-      CompilerCounters* counters = ((CompilerThread*)thread)->counters();
-      PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
+    // Give compiler threads an extra quanta.  They tend to be bursty and
+    // this helps the compiler to finish up the job.
+    if( CompilerThreadHintNoPreempt )
+      os::hint_no_preempt();
 
-      // Assign the task to the current thread.  Mark this compilation
-      // thread as active for the profiler.
-      CompileTaskWrapper ctw(task);
-      nmethodLocker result_handle;  // (handle for the nmethod produced by this task)
-      task->set_code_handle(&result_handle);
-      methodHandle method(thread, task->method());
+    // trace per thread time and compile statistics
+    CompilerCounters* counters = ((CompilerThread*)thread)->counters();
+    PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
 
-      // Never compile a method if breakpoints are present in it
-      if (method()->number_of_breakpoints() == 0) {
-        // Compile the method.
-        if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
+    // Assign the task to the current thread.  Mark this compilation
+    // thread as active for the profiler.
+    CompileTaskWrapper ctw(task);
+    nmethodLocker result_handle;  // (handle for the nmethod produced by this task)
+    task->set_code_handle(&result_handle);
+    methodHandle method(thread, task->method());
+
+    // Never compile a method if breakpoints are present in it
+    if (method()->number_of_breakpoints() == 0) {
+      // Compile the method.
+      if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
 #ifdef COMPILER1
-          // Allow repeating compilations for the purpose of benchmarking
-          // compile speed. This is not useful for customers.
-          if (CompilationRepeat != 0) {
-            int compile_count = CompilationRepeat;
-            while (compile_count > 0) {
-              invoke_compiler_on_method(task);
-              nmethod* nm = method->code();
-              if (nm != NULL) {
-                nm->make_zombie();
-                method->clear_code();
-              }
-              compile_count--;
+        // Allow repeating compilations for the purpose of benchmarking
+        // compile speed. This is not useful for customers.
+        if (CompilationRepeat != 0) {
+          int compile_count = CompilationRepeat;
+          while (compile_count > 0) {
+            invoke_compiler_on_method(task);
+            nmethod* nm = method->code();
+            if (nm != NULL) {
+              nm->make_zombie();
+              method->clear_code();
             }
+            compile_count--;
           }
+        }
 #endif /* COMPILER1 */
-          invoke_compiler_on_method(task);
-        } else {
-          // After compilation is disabled, remove remaining methods from queue
-          method->clear_queued_for_compilation();
-        }
+        invoke_compiler_on_method(task);
+      } else {
+        // After compilation is disabled, remove remaining methods from queue
+        method->clear_queued_for_compilation();
       }
     }
   }
+
+  // Shut down compiler runtime
+  shutdown_compiler_runtime(thread->compiler(), thread);
 }
 
-
 // ------------------------------------------------------------------
 // CompileBroker::init_compiler_thread_log
 //
@@ -1953,11 +2058,14 @@
       // Since code cache is full, immediately stop new compiles
       if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
         NMethodSweeper::log_sweep("disable_compiler");
+
+        // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
+        // without having to consider the state in which the current thread is.
+        ThreadInVMfromUnknown in_vm;
         NMethodSweeper::possibly_sweep();
       }
     } else {
-      UseCompiler               = false;
-      AlwaysCompileLoopMethods  = false;
+      disable_compilation_forever();
     }
   }
   codecache_print(/* detailed= */ true);
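
compiler_thread_loop now polls the queue until compilation is disabled forever; CompileQueue::get() returns NULL in that case (using the timed wait added above) and the thread falls through to shutdown_compiler_runtime. A standalone sketch of such a worker loop with a timed wait and a sticky shutdown flag (hypothetical, std-library only):

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <queue>
    #include <thread>

    static std::atomic<bool> shutdown_forever(false);
    static std::mutex qlock;
    static std::condition_variable qcv;
    static std::queue<int> tasks;

    // Returns -1 once shutdown is requested; otherwise blocks (with a
    // timeout, so shutdown is noticed) until a task is available.
    static int get_task() {
      std::unique_lock<std::mutex> guard(qlock);
      while (tasks.empty()) {
        if (shutdown_forever.load()) return -1;
        qcv.wait_for(guard, std::chrono::seconds(5));
      }
      int t = tasks.front();
      tasks.pop();
      return t;
    }

    static void worker_loop() {
      while (!shutdown_forever.load()) {
        int task = get_task();
        if (task < 0) continue;  // re-check the loop condition
        std::printf("compiled task %d\n", task);
      }
      // last-thread shutdown of the runtime would happen here
    }

    int main() {
      std::thread t(worker_loop);
      { std::lock_guard<std::mutex> g(qlock); tasks.push(1); }
      qcv.notify_all();
      std::this_thread::sleep_for(std::chrono::milliseconds(100));
      shutdown_forever.store(true);
      qcv.notify_all();
      t.join();
      return 0;
    }
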
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -213,8 +213,12 @@
 
   // Redefine Classes support
   void mark_on_stack();
+  void delete_all();
+  void         print();
 
-  void         print();
+  ~CompileQueue() {
+    assert(is_empty(), "Compile Queue must be empty");
+  }
 };
 
 // CompileTaskWrapper
@@ -266,7 +270,7 @@
   static CompileQueue* _c1_method_queue;
   static CompileTask* _task_free_list;
 
-  static GrowableArray<CompilerThread*>* _method_threads;
+  static GrowableArray<CompilerThread*>* _compiler_threads;
 
   // performance counters
   static PerfCounter* _perf_total_compilation;
@@ -311,7 +315,7 @@
   static int _sum_nmethod_code_size;
   static long _peak_compilation_time;
 
-  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
+  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
   static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
@@ -351,6 +355,9 @@
     if (is_c1_compile(comp_level)) return _c1_method_queue;
     return NULL;
   }
+  static bool init_compiler_runtime();
+  static void shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread);
+
  public:
   enum {
     // The entry bci used for non-OSR compilations.
@@ -378,9 +385,7 @@
                                  const char* comment, Thread* thread);
 
   static void compiler_thread_loop();
-
   static uint get_compilation_id() { return _compilation_id; }
-  static bool is_idle();
 
   // Set _should_block.
   // Call this from the VM, with Threads_lock held and a safepoint requested.
@@ -391,8 +396,9 @@
 
   enum {
     // Flags for toggling compiler activity
-    stop_compilation = 0,
-    run_compilation  = 1
+    stop_compilation    = 0,
+    run_compilation     = 1,
+    shutdown_compilaton = 2
   };
 
   static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
@@ -401,6 +407,16 @@
     jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
     return (old == (1-new_state));
   }
+
+  static void disable_compilation_forever() {
+    UseCompiler               = false;
+    AlwaysCompileLoopMethods  = false;
+    Atomic::xchg(shutdown_compilaton, &_should_compile_new_jobs);
+  }
+
+  static bool is_compilation_disabled_forever() {
+    return _should_compile_new_jobs == shutdown_compilaton;
+  }
   static void handle_full_code_cache();
 
   // Return total compilation ticks
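
set_should_compile_new_jobs above toggles between run and stop with a compare-and-swap so exactly one caller wins each transition, while disable_compilation_forever writes the terminal shutdown value unconditionally. A standalone sketch with std::atomic (hypothetical names; HotSpot uses Atomic::cmpxchg and Atomic::xchg):

    #include <atomic>
    #include <cstdio>

    enum { stop_compilation = 0, run_compilation = 1, shutdown_compilation = 2 };

    static std::atomic<int> should_compile(run_compilation);

    // Returns true only for the caller that actually performed the
    // transition from the opposite state.
    static bool set_should_compile_new_jobs(int new_state) {
      int expected = 1 - new_state;  // run <-> stop are 1 and 0
      return should_compile.compare_exchange_strong(expected, new_state);
    }

    // Terminal state: once set, no transition back is attempted.
    static void disable_compilation_forever() {
      should_compile.exchange(shutdown_compilation);
    }

    int main() {
      std::printf("stop once:  %d\n", (int)set_should_compile_new_jobs(stop_compilation));
      std::printf("stop again: %d\n", (int)set_should_compile_new_jobs(stop_compilation));
      disable_compilation_forever();
      std::printf("disabled forever\n");
      return 0;
    }
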
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -344,6 +344,10 @@
     }
   }
 
+  if (FLAG_IS_CMDLINE(NewSize) && FLAG_IS_CMDLINE(MaxNewSize) && NewSize > MaxNewSize) {
+    vm_exit_during_initialization("Initial young gen size set larger than the maximum young gen size");
+  }
+
   if (FLAG_IS_CMDLINE(NewSize)) {
     _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                      1U);
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -211,7 +211,7 @@
   // a GC that freed space for the allocation.
   if (!MetadataAllocationFailALot) {
     _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-    }
+  }
 
   if (_result == NULL) {
     if (UseConcMarkSweepGC) {
@@ -223,9 +223,7 @@
         _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
     }
     if (_result == NULL) {
-      // Don't clear the soft refs.  This GC is for reclaiming metadata
-      // and is unrelated to the fullness of the Java heap which should
-      // be the criteria for clearing SoftReferences.
+      // Don't clear the soft refs yet.
       if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
         gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
       }
@@ -235,7 +233,7 @@
       _result =
         _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
     }
-    if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
+    if (_result == NULL) {
       // If still failing, allow the Metaspace to expand.
       // See delta_capacity_until_GC() for explanation of the
       // amount of the expansion.
@@ -243,7 +241,16 @@
       // or a MaxMetaspaceSize has been specified on the command line.
       _result =
         _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
-
+      if (_result == NULL) {
+        // If expansion failed, do a last-ditch collection and try allocating
+        // again.  A last-ditch collection will clear softrefs.  This
+        // behavior is similar to the last-ditch collection done for perm
+        // gen when it was full and a collection for failed allocation
+        // did not free perm gen space.
+        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
+        _result =
+          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+      }
     }
     if (Verbose && PrintGCDetails && _result == NULL) {
       gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -158,6 +159,22 @@
     index = vt->index_of_miranda(resolved_method->name(),
                                  resolved_method->signature());
     kind = CallInfo::vtable_call;
+  } else if (resolved_method->has_vtable_index()) {
+    // Can occur if an interface redeclares a method of Object.
+
+#ifdef ASSERT
+    // Ensure that this is really the case.
+    KlassHandle object_klass = SystemDictionary::Object_klass();
+    Method* object_resolved_method = object_klass()->vtable()->method_at(index);
+    assert(object_resolved_method->name() == resolved_method->name(),
+      err_msg("Object and interface method names should match at vtable index %d, %s != %s",
+      index, object_resolved_method->name()->as_C_string(), resolved_method->name()->as_C_string()));
+    assert(object_resolved_method->signature() == resolved_method->signature(),
+      err_msg("Object and interface method signatures should match at vtable index %d, %s != %s",
+      index, object_resolved_method->signature()->as_C_string(), resolved_method->signature()->as_C_string()));
+#endif // ASSERT
+
+    kind = CallInfo::vtable_call;
   } else {
     // A regular interface call.
     kind = CallInfo::itable_call;
@@ -454,7 +471,7 @@
     Symbol* method_name = vmSymbols::invoke_name();
     Symbol* method_signature = pool->signature_ref_at(index);
     KlassHandle  current_klass(THREAD, pool->pool_holder());
-    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, false, CHECK);
     return;
   }
 
@@ -476,22 +493,34 @@
 
   if (code == Bytecodes::_invokeinterface) {
     resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+  } else if (code == Bytecodes::_invokevirtual) {
+    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK);
   } else {
-    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, false, CHECK);
   }
 }
 
 void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass,
                                   Symbol* method_name, Symbol* method_signature,
-                                  KlassHandle current_klass, bool check_access, TRAPS) {
+                                  KlassHandle current_klass, bool check_access,
+                                  bool require_methodref, TRAPS) {
 
   Handle nested_exception;
 
-  // 1. lookup method in resolved klass and its super klasses
+  // 1. if a methodref (not an interfacemethodref) is required, check that resolved_klass is not an interface
+  if (require_methodref && resolved_klass->is_interface()) {
+    ResourceMark rm(THREAD);
+    char buf[200];
+    jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
+        resolved_klass()->external_name());
+    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+  }
+
+  // 2. lookup method in resolved klass and its super klasses
   lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
   if (resolved_method.is_null()) { // not found in the class hierarchy
-    // 2. lookup method in all the interfaces implemented by the resolved klass
+    // 3. lookup method in all the interfaces implemented by the resolved klass
     lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
     if (resolved_method.is_null()) {
@@ -505,7 +534,7 @@
     }
 
     if (resolved_method.is_null()) {
-      // 3. method lookup failed
+      // 4. method lookup failed
       ResourceMark rm(THREAD);
       THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
                       Method::name_and_sig_as_C_string(resolved_klass(),
@@ -515,15 +544,6 @@
     }
   }
 
-  // 4. check if klass is not interface
-  if (resolved_klass->is_interface() && resolved_method->is_abstract()) {
-    ResourceMark rm(THREAD);
-    char buf[200];
-    jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
-        resolved_klass()->external_name());
-    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
-  }
-
   // 5. check if method is concrete
   if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
     ResourceMark rm(THREAD);
@@ -833,7 +853,7 @@
                                                   Symbol* method_name, Symbol* method_signature,
                                                   KlassHandle current_klass, bool check_access, TRAPS) {
 
-  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, false, CHECK);
   assert(resolved_method->name() != vmSymbols::class_initializer_name(), "should have been checked in verifier");
 
   // check if static
@@ -867,7 +887,7 @@
   // and the selected method is recalculated relative to the direct superclass
   // superinterface.method, which explicitly does not check shadowing
 
-  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, false, CHECK);
 
   // check if method name is <init>, that it is found in same klass as static type
   if (resolved_method->name() == vmSymbols::object_initializer_name() &&
@@ -1013,7 +1033,7 @@
                                                    Symbol* method_name, Symbol* method_signature,
                                                    KlassHandle current_klass, bool check_access, TRAPS) {
   // normal method resolution
-  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, true, CHECK);
 
   assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier");
   assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");
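[Note] The net effect of require_methodref is to move the "interface where a class was expected" check ahead of method lookup and to key it off the constant-pool entry kind rather than off the resolved method happening to be abstract; of the callers above, only invokevirtual resolution passes true. A toy sketch of the reordered check, with hypothetical stand-in types (this is the shape of the logic, not the VM's code):

    #include <cstdio>
    #include <stdexcept>
    #include <string>

    struct Klass {
      std::string name;
      bool is_interface;
    };

    // When the call site carries a CONSTANT_Methodref (e.g. invokevirtual),
    // reject an interface holder *before* doing any method lookup, instead
    // of the old post-lookup check that only fired for abstract methods.
    void check_methodref(const Klass& resolved_klass, bool require_methodref) {
      if (require_methodref && resolved_klass.is_interface) {
        throw std::runtime_error(
            "java.lang.IncompatibleClassChangeError: Found interface " +
            resolved_klass.name + ", but class was expected");
      }
      // ... proceed with lookup in the klass, its supers, then its interfaces.
    }

    int main() {
      Klass iface{"com.example.I", true};  // hypothetical class name
      try {
        check_methodref(iface, /*require_methodref=*/true);
      } catch (const std::exception& e) {
        puts(e.what());  // prints the ICCE-style message
      }
      return 0;
    }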
--- a/hotspot/src/share/vm/interpreter/linkResolver.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -136,7 +136,7 @@
   static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
 
   static void resolve_interface_method(methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
-  static void resolve_method          (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
+  static void resolve_method          (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool require_methodref, TRAPS);
 
   static void linktime_resolve_static_method    (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
   static void linktime_resolve_special_method   (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -28,7 +28,6 @@
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/freeList.hpp"
 #include "memory/freeBlockDictionary.hpp"
-#include "memory/metablock.hpp"
 #include "memory/metachunk.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/ostream.hpp"
--- a/hotspot/src/share/vm/memory/freeBlockDictionary.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/freeBlockDictionary.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -28,7 +28,6 @@
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // INCLUDE_ALL_GCS
 #include "memory/freeBlockDictionary.hpp"
-#include "memory/metablock.hpp"
 #include "memory/metachunk.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/macros.hpp"
--- a/hotspot/src/share/vm/memory/freeList.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/freeList.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "memory/freeBlockDictionary.hpp"
 #include "memory/freeList.hpp"
-#include "memory/metablock.hpp"
 #include "memory/metachunk.hpp"
 #include "memory/sharedHeap.hpp"
 #include "runtime/globals.hpp"
--- a/hotspot/src/share/vm/memory/metablock.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "memory/allocation.hpp"
-#include "memory/metablock.hpp"
-#include "utilities/copy.hpp"
-#include "utilities/debug.hpp"
-
-// Blocks of space for metadata are allocated out of Metachunks.
-//
-// Metachunk are allocated out of MetadataVirtualspaces and once
-// allocated there is no explicit link between a Metachunk and
-// the MetadataVirtualspaces from which it was allocated.
-//
-// Each SpaceManager maintains a
-// list of the chunks it is using and the current chunk.  The current
-// chunk is the chunk from which allocations are done.  Space freed in
-// a chunk is placed on the free list of blocks (BlockFreelist) and
-// reused from there.
-//
-// Future modification
-//
-// The Metachunk can conceivable be replaced by the Chunk in
-// allocation.hpp.  Note that the latter Chunk is the space for
-// allocation (allocations from the chunk are out of the space in
-// the Chunk after the header for the Chunk) where as Metachunks
-// point to space in a VirtualSpace.  To replace Metachunks with
-// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
-size_t Metablock::_min_block_byte_size = sizeof(Metablock);
-
-// New blocks returned by the Metaspace are zero initialized.
-// We should fix the constructors to not assume this instead.
-Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
-  if (p == NULL) {
-    return NULL;
-  }
-
-  Metablock* result = (Metablock*) p;
-
-  // Clear the memory
-  Copy::fill_to_aligned_words((HeapWord*)result, word_size);
-#ifdef ASSERT
-  result->set_word_size(word_size);
-#endif
-  return result;
-}
--- a/hotspot/src/share/vm/memory/metablock.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#ifndef SHARE_VM_MEMORY_METABLOCK_HPP
-#define SHARE_VM_MEMORY_METABLOCK_HPP
-
-// Metablock are the unit of allocation from a Chunk.  It is initialized
-// with the size of the requested allocation.  That size is overwritten
-// once the allocation returns.
-//
-// A Metablock may be reused by its SpaceManager but are never moved between
-// SpaceManagers.  There is no explicit link to the Metachunk
-// from which it was allocated.  Metablock may be deallocated and
-// put on a freelist but the space is never freed, rather
-// the Metachunk it is a part of will be deallocated when it's
-// associated class loader is collected.
-
-class Metablock VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
- private:
-  // Used to align the allocation (see below).
-  union block_t {
-    void* _data[3];
-    struct header_t {
-      size_t _word_size;
-      Metablock* _next;
-      Metablock* _prev;
-    } _header;
-  } _block;
-  static size_t _min_block_byte_size;
-
-  typedef union block_t Block;
-  typedef struct header_t Header;
-  const Block* block() const { return &_block; }
-  const Block::header_t* header() const { return &(block()->_header); }
- public:
-
-  static Metablock* initialize(MetaWord* p, size_t word_size);
-
-  // This places the body of the block at a 2 word boundary
-  // because every block starts on a 2 word boundary.  Work out
-  // how to make the body on a 2 word boundary if the block
-  // starts on a arbitrary boundary.  JJJ
-
-  size_t word_size() const  { return header()->_word_size; }
-  void set_word_size(size_t v) { _block._header._word_size = v; }
-  size_t size() const volatile { return _block._header._word_size; }
-  void set_size(size_t v) { _block._header._word_size = v; }
-  Metablock* next() const { return header()->_next; }
-  void set_next(Metablock* v) { _block._header._next = v; }
-  Metablock* prev() const { return header()->_prev; }
-  void set_prev(Metablock* v) { _block._header._prev = v; }
-
-  static size_t min_block_byte_size() { return _min_block_byte_size; }
-
-  bool is_free()                 { return header()->_word_size != 0; }
-  void clear_next()              { set_next(NULL); }
-  void link_prev(Metablock* ptr) { set_prev(ptr); }
-  uintptr_t* end()              { return ((uintptr_t*) this) + size(); }
-  bool cantCoalesce() const     { return false; }
-  void link_next(Metablock* ptr) { set_next(ptr); }
-  void link_after(Metablock* ptr){
-    link_next(ptr);
-    if (ptr != NULL) ptr->link_prev(this);
-  }
-
-  // Should not be needed in a free list of Metablocks
-  void markNotFree()            { ShouldNotReachHere(); }
-
-  // Debug support
-#ifdef ASSERT
-  void* prev_addr() const { return (void*)&_block._header._prev; }
-  void* next_addr() const { return (void*)&_block._header._next; }
-  void* size_addr() const { return (void*)&_block._header._word_size; }
-#endif
-  bool verify_chunk_in_free_list(Metablock* tc) const { return true; }
-  bool verify_par_locked() { return true; }
-
-  void assert_is_mangled() const {/* Don't check "\*/}
-};
-#endif // SHARE_VM_MEMORY_METABLOCK_HPP
--- a/hotspot/src/share/vm/memory/metachunk.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metachunk.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,42 +29,39 @@
 #include "utilities/debug.hpp"
 
 class VirtualSpaceNode;
-//
-// Future modification
-//
-// The Metachunk can conceivable be replaced by the Chunk in
-// allocation.hpp.  Note that the latter Chunk is the space for
-// allocation (allocations from the chunk are out of the space in
-// the Chunk after the header for the Chunk) where as Metachunks
-// point to space in a VirtualSpace.  To replace Metachunks with
-// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
 
 const size_t metadata_chunk_initialize = 0xf7f7f7f7;
 
-size_t Metachunk::_overhead =
-  Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
+size_t Metachunk::object_alignment() {
+  // Must align pointers and sizes to 8,
+  // so that 64 bit types get correctly aligned.
+  const size_t alignment = 8;
+
+  // Make sure that the Klass alignment also agrees.
+  STATIC_ASSERT(alignment == (size_t)KlassAlignmentInBytes);
+
+  return alignment;
+}
+
+size_t Metachunk::overhead() {
+  return align_size_up(sizeof(Metachunk), object_alignment()) / BytesPerWord;
+}
 
 // Metachunk methods
 
 Metachunk::Metachunk(size_t word_size,
-                     VirtualSpaceNode* container) :
-    _word_size(word_size),
-    _bottom(NULL),
-    _end(NULL),
+                     VirtualSpaceNode* container)
+    : Metabase<Metachunk>(word_size),
     _top(NULL),
-    _next(NULL),
-    _prev(NULL),
     _container(container)
 {
-  _bottom = (MetaWord*)this;
-  _top = (MetaWord*)this + _overhead;
-  _end = (MetaWord*)this + word_size;
+  _top = initial_top();
 #ifdef ASSERT
-  set_is_free(false);
+  set_is_tagged_free(false);
   size_t data_word_size = pointer_delta(end(),
-                                        top(),
+                                        _top,
                                         sizeof(MetaWord));
-  Copy::fill_to_words((HeapWord*) top(),
+  Copy::fill_to_words((HeapWord*)_top,
                       data_word_size,
                       metadata_chunk_initialize);
 #endif
@@ -82,22 +79,18 @@
 
 // _bottom points to the start of the chunk including the overhead.
 size_t Metachunk::used_word_size() const {
-  return pointer_delta(_top, _bottom, sizeof(MetaWord));
+  return pointer_delta(_top, bottom(), sizeof(MetaWord));
 }
 
 size_t Metachunk::free_word_size() const {
-  return pointer_delta(_end, _top, sizeof(MetaWord));
-}
-
-size_t Metachunk::capacity_word_size() const {
-  return pointer_delta(_end, _bottom, sizeof(MetaWord));
+  return pointer_delta(end(), _top, sizeof(MetaWord));
 }
 
 void Metachunk::print_on(outputStream* st) const {
   st->print_cr("Metachunk:"
                " bottom " PTR_FORMAT " top " PTR_FORMAT
                " end " PTR_FORMAT " size " SIZE_FORMAT,
-               bottom(), top(), end(), word_size());
+               bottom(), _top, end(), word_size());
   if (Verbose) {
     st->print_cr("    used " SIZE_FORMAT " free " SIZE_FORMAT,
                  used_word_size(), free_word_size());
@@ -109,8 +102,8 @@
   // Mangle the payload of the chunk and not the links that
   // maintain list of chunks.
   HeapWord* start = (HeapWord*)(bottom() + overhead());
-  size_t word_size = capacity_word_size() - overhead();
-  Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
+  size_t size = word_size() - overhead();
+  Copy::fill_to_words(start, size, metadata_chunk_initialize);
 }
 #endif // PRODUCT
 
@@ -118,9 +111,68 @@
 #ifdef ASSERT
   // Cannot walk through the blocks unless the blocks have
   // headers with sizes.
-  assert(_bottom <= _top &&
-         _top <= _end,
+  assert(bottom() <= _top &&
+         _top <= (MetaWord*)end(),
          "Chunk has been smashed");
 #endif
   return;
 }
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestMetachunk {
+ public:
+  static void test() {
+    size_t size = 2 * 1024 * 1024;
+    void* memory = malloc(size);
+    assert(memory != NULL, "Failed to malloc 2MB");
+
+    Metachunk* metachunk = ::new (memory) Metachunk(size / BytesPerWord, NULL);
+
+    assert(metachunk->bottom() == (MetaWord*)metachunk, "assert");
+    assert(metachunk->end() == (uintptr_t*)metachunk + metachunk->size(), "assert");
+
+    // Check sizes
+    assert(metachunk->size() == metachunk->word_size(), "assert");
+    assert(metachunk->word_size() == pointer_delta(metachunk->end(), metachunk->bottom(),
+        sizeof(MetaWord*)), "assert");
+
+    // Check usage
+    assert(metachunk->used_word_size() == metachunk->overhead(), "assert");
+    assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
+    assert(metachunk->top() == metachunk->initial_top(), "assert");
+    assert(metachunk->is_empty(), "assert");
+
+    // Allocate
+    size_t alloc_size = 64; // Words
+    assert(is_size_aligned(alloc_size, Metachunk::object_alignment()), "assert");
+
+    MetaWord* mem = metachunk->allocate(alloc_size);
+
+    // Check post alloc
+    assert(mem == metachunk->initial_top(), "assert");
+    assert(mem + alloc_size == metachunk->top(), "assert");
+    assert(metachunk->used_word_size() == metachunk->overhead() + alloc_size, "assert");
+    assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
+    assert(!metachunk->is_empty(), "assert");
+
+    // Clear chunk
+    metachunk->reset_empty();
+
+    // Check post clear
+    assert(metachunk->used_word_size() == metachunk->overhead(), "assert");
+    assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
+    assert(metachunk->top() == metachunk->initial_top(), "assert");
+    assert(metachunk->is_empty(), "assert");
+
+    free(memory);
+  }
+};
+
+void TestMetachunk_test() {
+  TestMetachunk::test();
+}
+
+#endif
--- a/hotspot/src/share/vm/memory/metachunk.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metachunk.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,89 +24,44 @@
 #ifndef SHARE_VM_MEMORY_METACHUNK_HPP
 #define SHARE_VM_MEMORY_METACHUNK_HPP
 
-//  Metachunk - Quantum of allocation from a Virtualspace
-//    Metachunks are reused (when freed are put on a global freelist) and
-//    have no permanent association to a SpaceManager.
-
-//            +--------------+ <- end
-//            |              |          --+       ---+
-//            |              |            | free     |
-//            |              |            |          |
-//            |              |            |          | capacity
-//            |              |            |          |
-//            |              | <- top   --+          |
-//            |              |           ---+        |
-//            |              |              | used   |
-//            |              |              |        |
-//            |              |              |        |
-//            +--------------+ <- bottom ---+     ---+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 class VirtualSpaceNode;
 
-class Metachunk VALUE_OBJ_CLASS_SPEC {
-  // link to support lists of chunks
-  Metachunk* _next;
-  Metachunk* _prev;
-  VirtualSpaceNode* _container;
-
-  MetaWord* _bottom;
-  MetaWord* _end;
-  MetaWord* _top;
+// Super class of Metablock and Metachunk to allow them to
+// be put on the FreeList and in the BinaryTreeDictionary.
+template <class T>
+class Metabase VALUE_OBJ_CLASS_SPEC {
   size_t _word_size;
-  // Used in a guarantee() so included in the Product builds
-  // even through it is only for debugging.
-  bool _is_free;
+  T*     _next;
+  T*     _prev;
 
-  // Metachunks are allocated out of a MetadataVirtualSpace and
-  // and use some of its space to describe itself (plus alignment
-  // considerations).  Metadata is allocated in the rest of the chunk.
-  // This size is the overhead of maintaining the Metachunk within
-  // the space.
-  static size_t _overhead;
+ protected:
+  Metabase(size_t word_size) : _word_size(word_size), _next(NULL), _prev(NULL) {}
 
  public:
-  Metachunk(size_t word_size , VirtualSpaceNode* container);
-
-  // Used to add a Metachunk to a list of Metachunks
-  void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
-  void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
-  void set_container(VirtualSpaceNode* v) { _container = v; }
-
-  MetaWord* allocate(size_t word_size);
+  T* next() const         { return _next; }
+  T* prev() const         { return _prev; }
+  void set_next(T* v)     { _next = v; assert(v != this, "Boom");}
+  void set_prev(T* v)     { _prev = v; assert(v != this, "Boom");}
+  void clear_next()       { set_next(NULL); }
+  void clear_prev()       { set_prev(NULL); }
 
-  // Accessors
-  Metachunk* next() const { return _next; }
-  Metachunk* prev() const { return _prev; }
-  VirtualSpaceNode* container() const { return _container; }
-  MetaWord* bottom() const { return _bottom; }
-  MetaWord* end() const { return _end; }
-  MetaWord* top() const { return _top; }
-  size_t word_size() const { return _word_size; }
   size_t size() const volatile { return _word_size; }
   void set_size(size_t v) { _word_size = v; }
-  bool is_free() { return _is_free; }
-  void set_is_free(bool v) { _is_free = v; }
-  static size_t overhead() { return _overhead; }
-  void clear_next()              { set_next(NULL); }
-  void link_prev(Metachunk* ptr) { set_prev(ptr); }
-  uintptr_t* end()              { return ((uintptr_t*) this) + size(); }
-  bool cantCoalesce() const     { return false; }
-  void link_next(Metachunk* ptr) { set_next(ptr); }
-  void link_after(Metachunk* ptr){
+
+  void link_next(T* ptr)  { set_next(ptr); }
+  void link_prev(T* ptr)  { set_prev(ptr); }
+  void link_after(T* ptr) {
     link_next(ptr);
-    if (ptr != NULL) ptr->link_prev(this);
+    if (ptr != NULL) ptr->link_prev((T*)this);
   }
 
-  // Reset top to bottom so chunk can be reused.
-  void reset_empty() { _top = (_bottom + _overhead); _next = NULL; _prev = NULL; }
-  bool is_empty() { return _top == (_bottom + _overhead); }
+  uintptr_t* end() const        { return ((uintptr_t*) this) + size(); }
 
-  // used (has been allocated)
-  // free (available for future allocations)
-  // capacity (total size of chunk)
-  size_t used_word_size() const;
-  size_t free_word_size() const;
-  size_t capacity_word_size()const;
+  bool cantCoalesce() const     { return false; }
 
   // Debug support
 #ifdef ASSERT
@@ -114,14 +69,99 @@
   void* next_addr() const { return (void*)&_next; }
   void* size_addr() const { return (void*)&_word_size; }
 #endif
-  bool verify_chunk_in_free_list(Metachunk* tc) const { return true; }
+  bool verify_chunk_in_free_list(T* tc) const { return true; }
   bool verify_par_locked() { return true; }
 
   void assert_is_mangled() const {/* Don't check "\*/}
 
+  bool is_free()                 { return true; }
+};
+
+//  Metachunk - Quantum of allocation from a Virtualspace
+//    Metachunks are reused (when freed are put on a global freelist) and
+//    have no permanent association to a SpaceManager.
+
+//            +--------------+ <- end    --+       --+
+//            |              |             |         |
+//            |              |             | free    |
+//            |              |             |         |
+//            |              |             |         | size | capacity
+//            |              |             |         |
+//            |              | <- top   -- +         |
+//            |              |             |         |
+//            |              |             | used    |
+//            |              |             |         |
+//            |              |             |         |
+//            +--------------+ <- bottom --+       --+
+
+class Metachunk : public Metabase<Metachunk> {
+  friend class TestMetachunk;
+  // The VirtualSpaceNode containing this chunk.
+  VirtualSpaceNode* _container;
+
+  // Current allocation top.
+  MetaWord* _top;
+
+  DEBUG_ONLY(bool _is_tagged_free;)
+
+  MetaWord* initial_top() const { return (MetaWord*)this + overhead(); }
+  MetaWord* top() const         { return _top; }
+
+ public:
+  // Metachunks are allocated out of a MetadataVirtualSpace
+  // and use some of its space to describe itself (plus alignment
+  // considerations).  Metadata is allocated in the rest of the chunk.
+  // This size is the overhead of maintaining the Metachunk within
+  // the space.
+
+  // Alignment of each allocation in the chunks.
+  static size_t object_alignment();
+
+  // Size of the Metachunk header, including alignment.
+  static size_t overhead();
+
+  Metachunk(size_t word_size, VirtualSpaceNode* container);
+
+  MetaWord* allocate(size_t word_size);
+
+  VirtualSpaceNode* container() const { return _container; }
+
+  MetaWord* bottom() const { return (MetaWord*) this; }
+
+  // Reset top to bottom so chunk can be reused.
+  void reset_empty() { _top = initial_top(); clear_next(); clear_prev(); }
+  bool is_empty() { return _top == initial_top(); }
+
+  // used (has been allocated)
+  // free (available for future allocations)
+  size_t word_size() const { return size(); }
+  size_t used_word_size() const;
+  size_t free_word_size() const;
+
+#ifdef ASSERT
+  bool is_tagged_free() { return _is_tagged_free; }
+  void set_is_tagged_free(bool v) { _is_tagged_free = v; }
+#endif
+
   NOT_PRODUCT(void mangle();)
 
   void print_on(outputStream* st) const;
   void verify();
 };
+
+// Metablock is the unit of allocation from a Chunk.
+//
+// A Metablock may be reused by its SpaceManager but is never moved between
+// SpaceManagers.  There is no explicit link to the Metachunk
+// from which it was allocated.  A Metablock may be deallocated and
+// put on a freelist, but the space is never freed; rather,
+// the Metachunk it is a part of will be deallocated when its
+// associated class loader is collected.
+
+class Metablock : public Metabase<Metablock> {
+  friend class VMStructs;
+ public:
+  Metablock(size_t word_size) : Metabase<Metablock>(word_size) {}
+};
+
 #endif  // SHARE_VM_MEMORY_METACHUNK_HPP
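[Note] Metabase<T> above is a curiously-recurring-template base: because the next/prev links are stored as T*, both Metablock and Metachunk can sit on FreeList<T> and in the BinaryTreeDictionary while list traversal still yields the derived type without casts. A minimal stand-alone sketch of the same shape (simplified; the real class also carries the freelist support methods shown above):

    #include <cassert>
    #include <cstddef>

    // CRTP base: the links are typed T*, so next()/prev() return the
    // derived type directly and no downcasts are needed at call sites.
    template <class T>
    class Metabase {
      size_t _word_size;
      T*     _next;
      T*     _prev;
     protected:
      explicit Metabase(size_t word_size)
        : _word_size(word_size), _next(nullptr), _prev(nullptr) {}
     public:
      T* next() const     { return _next; }
      T* prev() const     { return _prev; }
      void set_next(T* v) { assert(v != this); _next = v; }
      void set_prev(T* v) { assert(v != this); _prev = v; }
      size_t size() const { return _word_size; }
      void link_after(T* ptr) {
        set_next(ptr);
        if (ptr != nullptr) ptr->set_prev(static_cast<T*>(this));
      }
    };

    // Stand-in for Metablock: nothing but the shared header.
    class Block : public Metabase<Block> {
     public:
      explicit Block(size_t word_size) : Metabase<Block>(word_size) {}
    };

    int main() {
      Block a(4), b(8);
      a.link_after(&b);
      assert(a.next() == &b && b.prev() == &a);
      assert(a.next()->size() == 8);  // Block* comes back, no cast
      return 0;
    }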
--- a/hotspot/src/share/vm/memory/metadataFactory.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metadataFactory.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,6 +65,7 @@
   static void free_array(ClassLoaderData* loader_data, Array<T>* data) {
     if (data != NULL) {
       assert(loader_data != NULL, "shouldn't pass null");
+      assert(!data->is_shared(), "cannot deallocate array in shared spaces");
       int size = data->size();
       if (DumpSharedSpaces) {
         loader_data->ro_metaspace()->deallocate((MetaWord*)data, size, false);
@@ -83,6 +84,7 @@
       // Call metadata's deallocate function which will call deallocate fields
       assert(!DumpSharedSpaces, "cannot deallocate metadata when dumping CDS archive");
       assert(!md->on_stack(), "can't deallocate things on stack");
+      assert(!md->is_shared(), "cannot deallocate if in shared spaces");
       md->deallocate_contents(loader_data);
       loader_data->metaspace_non_null()->deallocate((MetaWord*)md, size, md->is_klass());
     }
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -30,7 +30,6 @@
 #include "memory/filemap.hpp"
 #include "memory/freeList.hpp"
 #include "memory/gcLocker.hpp"
-#include "memory/metablock.hpp"
 #include "memory/metachunk.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/metaspaceShared.hpp"
@@ -49,13 +48,10 @@
 
 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
-// Define this macro to enable slow integrity checking of
-// the free chunk lists
+
+// Set this constant to enable slow integrity checking of the free chunk lists
 const bool metaspace_slow_verify = false;
 
-// Parameters for stress mode testing
-const uint metadata_deallocate_a_lot_block = 10;
-const uint metadata_deallocate_a_lock_chunk = 3;
 size_t const allocation_from_dictionary_limit = 4 * K;
 
 MetaWord* last_allocated = 0;
@@ -92,24 +88,9 @@
 uint MetaspaceGC::_shrink_factor = 0;
 bool MetaspaceGC::_should_concurrent_collect = false;
 
-// Blocks of space for metadata are allocated out of Metachunks.
-//
-// Metachunk are allocated out of MetadataVirtualspaces and once
-// allocated there is no explicit link between a Metachunk and
-// the MetadataVirtualspaces from which it was allocated.
-//
-// Each SpaceManager maintains a
-// list of the chunks it is using and the current chunk.  The current
-// chunk is the chunk from which allocations are done.  Space freed in
-// a chunk is placed on the free list of blocks (BlockFreelist) and
-// reused from there.
-
 typedef class FreeList<Metachunk> ChunkList;
 
 // Manages the global free lists of chunks.
-// Has three lists of free chunks, and a total size and
-// count that includes all three
-
 class ChunkManager : public CHeapObj<mtInternal> {
 
   // Free list of chunks of different sizes.
@@ -119,7 +100,6 @@
   //   HumongousChunk
   ChunkList _free_chunks[NumberOfFreeLists];
 
-
   //   HumongousChunk
   ChunkTreeDictionary _humongous_dictionary;
 
@@ -166,7 +146,6 @@
 
   // add or delete (return) a chunk to the global freelist.
   Metachunk* chunk_freelist_allocate(size_t word_size);
-  void chunk_freelist_deallocate(Metachunk* chunk);
 
   // Map a size to a list index assuming that there are lists
   // for special, small, medium, and humongous chunks.
@@ -200,9 +179,7 @@
   // Returns the list for the given chunk word size.
   ChunkList* find_free_chunks_list(size_t word_size);
 
-  // Add and remove from a list by size.  Selects
-  // list based on size of chunk.
-  void free_chunks_put(Metachunk* chuck);
+  // Remove from a list by size.  Selects list based on size of chunk.
   Metachunk* free_chunks_get(size_t chunk_word_size);
 
   // Debug support
@@ -230,7 +207,6 @@
 // to the allocation of a quantum of metadata).
 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
   BlockTreeDictionary* _dictionary;
-  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
 
   // Only allocate and split from freelist if the size of the allocation
   // is at least 1/4th the size of the available block.
@@ -258,6 +234,7 @@
   void print_on(outputStream* st) const;
 };
 
+// A VirtualSpaceList node.
 class VirtualSpaceNode : public CHeapObj<mtClass> {
   friend class VirtualSpaceList;
 
@@ -414,13 +391,13 @@
   Metachunk* chunk = first_chunk();
   Metachunk* invalid_chunk = (Metachunk*) top();
   while (chunk < invalid_chunk ) {
-    assert(chunk->is_free(), "Should be marked free");
-      MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
-      chunk_manager->remove_chunk(chunk);
-      assert(chunk->next() == NULL &&
-             chunk->prev() == NULL,
-             "Was not removed from its list");
-      chunk = (Metachunk*) next;
+    assert(chunk->is_tagged_free(), "Should be tagged free");
+    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+    chunk_manager->remove_chunk(chunk);
+    assert(chunk->next() == NULL &&
+           chunk->prev() == NULL,
+           "Was not removed from its list");
+    chunk = (Metachunk*) next;
   }
 }
 
@@ -434,7 +411,7 @@
     // Don't count the chunks on the free lists.  Those are
     // still part of the VirtualSpaceNode but not currently
     // counted.
-    if (!chunk->is_free()) {
+    if (!chunk->is_tagged_free()) {
       count++;
     }
     chunk = (Metachunk*) next;
@@ -550,44 +527,16 @@
 
 class Metadebug : AllStatic {
   // Debugging support for Metaspaces
-  static int _deallocate_block_a_lot_count;
-  static int _deallocate_chunk_a_lot_count;
   static int _allocation_fail_alot_count;
 
  public:
-  static int deallocate_block_a_lot_count() {
-    return _deallocate_block_a_lot_count;
-  }
-  static void set_deallocate_block_a_lot_count(int v) {
-    _deallocate_block_a_lot_count = v;
-  }
-  static void inc_deallocate_block_a_lot_count() {
-    _deallocate_block_a_lot_count++;
-  }
-  static int deallocate_chunk_a_lot_count() {
-    return _deallocate_chunk_a_lot_count;
-  }
-  static void reset_deallocate_chunk_a_lot_count() {
-    _deallocate_chunk_a_lot_count = 1;
-  }
-  static void inc_deallocate_chunk_a_lot_count() {
-    _deallocate_chunk_a_lot_count++;
-  }
 
   static void init_allocation_fail_alot_count();
 #ifdef ASSERT
   static bool test_metadata_failure();
 #endif
-
-  static void deallocate_chunk_a_lot(SpaceManager* sm,
-                                     size_t chunk_word_size);
-  static void deallocate_block_a_lot(SpaceManager* sm,
-                                     size_t chunk_word_size);
-
 };
 
-int Metadebug::_deallocate_block_a_lot_count = 0;
-int Metadebug::_deallocate_chunk_a_lot_count = 0;
 int Metadebug::_allocation_fail_alot_count = 0;
 
 //  SpaceManager - used by Metaspace to handle allocations
@@ -753,14 +702,11 @@
 #endif
 
   size_t get_raw_word_size(size_t word_size) {
-    // If only the dictionary is going to be used (i.e., no
-    // indexed free list), then there is a minimum size requirement.
-    // MinChunkSize is a placeholder for the real minimum size JJJ
     size_t byte_size = word_size * BytesPerWord;
 
-    size_t raw_bytes_size = MAX2(byte_size,
-                                 Metablock::min_block_byte_size());
-    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
+    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
+    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
+
     size_t raw_word_size = raw_bytes_size / BytesPerWord;
     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 
@@ -813,17 +759,8 @@
   }
 }
 
-Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
-  Metablock* block = (Metablock*) p;
-  block->set_word_size(word_size);
-  block->set_prev(NULL);
-  block->set_next(NULL);
-
-  return block;
-}
-
 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
-  Metablock* free_chunk = initialize_free_chunk(p, word_size);
+  Metablock* free_chunk = ::new (p) Metablock(word_size);
   if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
   }
@@ -1069,7 +1006,7 @@
   }
 
   // Chunk is being removed from the chunks free list.
-  dec_free_chunks_total(chunk->capacity_word_size());
+  dec_free_chunks_total(chunk->word_size());
 }
 
 // Walk the list of VirtualSpaceNodes and delete
@@ -1563,54 +1500,6 @@
 
 // Metadebug methods
 
-void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
-                                       size_t chunk_word_size){
-#ifdef ASSERT
-  VirtualSpaceList* vsl = sm->vs_list();
-  if (MetaDataDeallocateALot &&
-      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
-    Metadebug::reset_deallocate_chunk_a_lot_count();
-    for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
-      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
-      if (dummy_chunk == NULL) {
-        break;
-      }
-      sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
-
-      if (TraceMetadataChunkAllocation && Verbose) {
-        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
-                               sm->sum_count_in_chunks_in_use());
-        dummy_chunk->print_on(gclog_or_tty);
-        gclog_or_tty->print_cr("  Free chunks total %d  count %d",
-                               sm->chunk_manager()->free_chunks_total_words(),
-                               sm->chunk_manager()->free_chunks_count());
-      }
-    }
-  } else {
-    Metadebug::inc_deallocate_chunk_a_lot_count();
-  }
-#endif
-}
-
-void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
-                                       size_t raw_word_size){
-#ifdef ASSERT
-  if (MetaDataDeallocateALot &&
-        Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
-    Metadebug::set_deallocate_block_a_lot_count(0);
-    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
-      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
-      if (dummy_block == 0) {
-        break;
-      }
-      sm->deallocate(dummy_block, raw_word_size);
-    }
-  } else {
-    Metadebug::inc_deallocate_block_a_lot_count();
-  }
-#endif
-}
-
 void Metadebug::init_allocation_fail_alot_count() {
   if (MetadataAllocationFailALot) {
     _allocation_fail_alot_count =
@@ -1754,31 +1643,6 @@
   return free_chunks(index);
 }
 
-void ChunkManager::free_chunks_put(Metachunk* chunk) {
-  assert_lock_strong(SpaceManager::expand_lock());
-  ChunkList* free_list = find_free_chunks_list(chunk->word_size());
-  chunk->set_next(free_list->head());
-  free_list->set_head(chunk);
-  // chunk is being returned to the chunk free list
-  inc_free_chunks_total(chunk->capacity_word_size());
-  slow_locked_verify();
-}
-
-void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
-  // The deallocation of a chunk originates in the freelist
-  // manangement code for a Metaspace and does not hold the
-  // lock.
-  assert(chunk != NULL, "Deallocating NULL");
-  assert_lock_strong(SpaceManager::expand_lock());
-  slow_locked_verify();
-  if (TraceMetadataChunkAllocation) {
-    gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
-                           PTR_FORMAT "  size " SIZE_FORMAT,
-                           chunk, chunk->word_size());
-  }
-  free_chunks_put(chunk);
-}
-
 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
 
@@ -1822,7 +1686,7 @@
   }
 
   // Chunk is being removed from the chunks free list.
-  dec_free_chunks_total(chunk->capacity_word_size());
+  dec_free_chunks_total(chunk->word_size());
 
   // Remove it from the links to this freelist
   chunk->set_next(NULL);
@@ -1830,7 +1694,7 @@
 #ifdef ASSERT
   // Chunk is no longer on any freelist. Setting to false make container_count_slow()
   // work.
-  chunk->set_is_free(false);
+  chunk->set_is_tagged_free(false);
 #endif
   chunk->container()->inc_container_count();
 
@@ -1962,7 +1826,7 @@
     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
       Metachunk* chunk = chunks_in_use(i);
       while (chunk != NULL) {
-        sum += chunk->capacity_word_size();
+        sum += chunk->word_size();
         chunk = chunk->next();
       }
     }
@@ -2098,10 +1962,6 @@
   size_t grow_chunks_by_words = calc_chunk_size(word_size);
   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
 
-  if (next != NULL) {
-    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
-  }
-
   MetaWord* mem = NULL;
 
   // If a chunk was available, add it to the in-use chunk list
@@ -2210,7 +2070,7 @@
     // Capture the next link before it is changed
     // by the call to return_chunk_at_head();
     Metachunk* next = cur->next();
-    cur->set_is_free(true);
+    DEBUG_ONLY(cur->set_is_tagged_free(true);)
     list->return_chunk_at_head(cur);
     cur = next;
   }
@@ -2282,7 +2142,7 @@
 
   while (humongous_chunks != NULL) {
 #ifdef ASSERT
-    humongous_chunks->set_is_free(true);
+    humongous_chunks->set_is_tagged_free(true);
 #endif
     if (TraceMetadataChunkAllocation && Verbose) {
       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
@@ -2446,7 +2306,6 @@
   if (p == NULL) {
     p = allocate_work(raw_word_size);
   }
-  Metadebug::deallocate_block_a_lot(this, raw_word_size);
 
   return p;
 }
@@ -2545,7 +2404,7 @@
       curr->print_on(out);
       curr_total += curr->word_size();
       used += curr->used_word_size();
-      capacity += curr->capacity_word_size();
+      capacity += curr->word_size();
       waste += curr->free_word_size() + curr->overhead();
     }
   }
@@ -3396,7 +3255,7 @@
 }
 
 
-Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
+MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                               bool read_only, MetaspaceObj::Type type, TRAPS) {
   if (HAS_PENDING_EXCEPTION) {
     assert(false, "Should not allocate with exception pending");
@@ -3415,10 +3274,14 @@
     MetaWord* result = space->allocate(word_size, NonClassType);
     if (result == NULL) {
       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
-    } else {
-      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
     }
-    return Metablock::initialize(result, word_size);
+
+    space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+
+    // Zero initialize.
+    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
+
+    return result;
   }
 
   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
@@ -3443,7 +3306,10 @@
     return NULL;
   }
 
-  return Metablock::initialize(result, word_size);
+  // Zero initialize.
+  Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
+
+  return result;
 }
 
 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
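[Note] With Metablock reduced to its Metabase header, get_raw_word_size() above rounds every request up to at least sizeof(Metablock) and then to Metachunk::object_alignment(), replacing the old Metablock::min_block_byte_size()/ARENA_ALIGN pair. A worked stand-alone version under LP64 assumptions (the constants here are illustrative stand-ins):

    #include <cassert>
    #include <cstdio>

    const size_t BytesPerWord     = 8;                   // LP64 word
    const size_t object_alignment = 8;                   // Metachunk::object_alignment()
    const size_t min_block_bytes  = 3 * sizeof(void*);   // stand-in for sizeof(Metablock)

    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);  // alignment is a power of two
    }

    static size_t get_raw_word_size(size_t word_size) {
      size_t byte_size = word_size * BytesPerWord;
      // Never hand out less than a Metablock header, so a freed block
      // can be threaded onto the block freelist in place.
      size_t raw = byte_size > min_block_bytes ? byte_size : min_block_bytes;
      raw = align_size_up(raw, object_alignment);
      assert(raw % BytesPerWord == 0);
      return raw / BytesPerWord;
    }

    int main() {
      printf("%zu\n", get_raw_word_size(1));  // 1 word  -> 3 words (24-byte minimum)
      printf("%zu\n", get_raw_word_size(5));  // 5 words -> 5 words (already aligned)
      return 0;
    }

The same minimum is what keeps the placement new in return_block, ::new (p) Metablock(word_size), well-defined: every block handed out is at least a Metablock header in size.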
--- a/hotspot/src/share/vm/memory/metaspace.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -139,7 +139,6 @@
   // Allocate space for metadata of type mdtype. This is space
   // within a Metachunk and is used by
   //   allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
-  // which returns a Metablock.
   MetaWord* allocate(size_t word_size, MetadataType mdtype);
 
   // Virtual Space lists for both classes and other metadata
@@ -217,8 +216,8 @@
   size_t used_bytes_slow(MetadataType mdtype) const;
   size_t capacity_bytes_slow(MetadataType mdtype) const;
 
-  static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size,
-                             bool read_only, MetaspaceObj::Type type, TRAPS);
+  static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
+                            bool read_only, MetaspaceObj::Type type, TRAPS);
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
 
   MetaWord* expand_and_allocate(size_t size,
--- a/hotspot/src/share/vm/oops/constantPool.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -869,18 +869,9 @@
 bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,
        int index2, TRAPS) {
 
-  jbyte t1 = tag_at(index1).value();
-  jbyte t2 = cp2->tag_at(index2).value();
-
-
-  // JVM_CONSTANT_UnresolvedClassInError is equal to JVM_CONSTANT_UnresolvedClass
-  // when comparing
-  if (t1 == JVM_CONSTANT_UnresolvedClassInError) {
-    t1 = JVM_CONSTANT_UnresolvedClass;
-  }
-  if (t2 == JVM_CONSTANT_UnresolvedClassInError) {
-    t2 = JVM_CONSTANT_UnresolvedClass;
-  }
+  // The error tags are equivalent to non-error tags when comparing
+  jbyte t1 = tag_at(index1).non_error_value();
+  jbyte t2 = cp2->tag_at(index2).non_error_value();
 
   if (t1 != t2) {
     // Not the same entry type so there is nothing else to check. Note
@@ -1001,8 +992,8 @@
 
   case JVM_CONSTANT_MethodType:
   {
-    int k1 = method_type_index_at(index1);
-    int k2 = cp2->method_type_index_at(index2);
+    int k1 = method_type_index_at_error_ok(index1);
+    int k2 = cp2->method_type_index_at_error_ok(index2);
     bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
     if (match) {
       return true;
@@ -1011,11 +1002,11 @@
 
   case JVM_CONSTANT_MethodHandle:
   {
-    int k1 = method_handle_ref_kind_at(index1);
-    int k2 = cp2->method_handle_ref_kind_at(index2);
+    int k1 = method_handle_ref_kind_at_error_ok(index1);
+    int k2 = cp2->method_handle_ref_kind_at_error_ok(index2);
     if (k1 == k2) {
-      int i1 = method_handle_index_at(index1);
-      int i2 = cp2->method_handle_index_at(index2);
+      int i1 = method_handle_index_at_error_ok(index1);
+      int i2 = cp2->method_handle_index_at_error_ok(index2);
       bool match = compare_entry_to(i1, cp2, i2, CHECK_false);
       if (match) {
         return true;
@@ -1329,14 +1320,6 @@
     }
   } break;
 
-  case JVM_CONSTANT_UnresolvedClassInError:
-  {
-    Symbol* k = from_cp->unresolved_klass_at(from_i);
-    to_cp->unresolved_klass_at_put(to_i, k);
-    to_cp->tag_at_put(to_i, JVM_CONSTANT_UnresolvedClassInError);
-  } break;
-
-
   case JVM_CONSTANT_String:
   {
     Symbol* s = from_cp->unresolved_string_at(from_i);
@@ -1352,15 +1335,17 @@
   } break;
 
   case JVM_CONSTANT_MethodType:
+  case JVM_CONSTANT_MethodTypeInError:
   {
-    jint k = from_cp->method_type_index_at(from_i);
+    jint k = from_cp->method_type_index_at_error_ok(from_i);
     to_cp->method_type_index_at_put(to_i, k);
   } break;
 
   case JVM_CONSTANT_MethodHandle:
+  case JVM_CONSTANT_MethodHandleInError:
   {
-    int k1 = from_cp->method_handle_ref_kind_at(from_i);
-    int k2 = from_cp->method_handle_index_at(from_i);
+    int k1 = from_cp->method_handle_ref_kind_at_error_ok(from_i);
+    int k2 = from_cp->method_handle_index_at_error_ok(from_i);
     to_cp->method_handle_index_at_put(to_i, k1, k2);
   } break;
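[Note] The compare/copy changes above lean on constantTag's non_error_value(), which folds each ...InError tag back to the tag the entry had before resolution failed, so an entry that resolved with an error still matches its unresolved counterpart. A toy version of that mapping (tag values here are illustrative, not the spec constants):

    #include <cassert>

    // Illustrative tag values; the real ones live in the constantTag code.
    enum Tag {
      UnresolvedClass        = 100,
      UnresolvedClassInError = 101,
      MethodHandle           = 15,
      MethodHandleInError    = 102,
      MethodType             = 16,
      MethodTypeInError      = 103
    };

    // Map an error tag back to the tag it was before resolution failed.
    static int non_error_value(int t) {
      switch (t) {
        case UnresolvedClassInError: return UnresolvedClass;
        case MethodHandleInError:    return MethodHandle;
        case MethodTypeInError:      return MethodType;
        default:                     return t;
      }
    }

    int main() {
      // An entry that failed resolution still compares equal to its
      // unresolved counterpart, as in compare_entry_to above.
      assert(non_error_value(UnresolvedClassInError) == non_error_value(UnresolvedClass));
      assert(non_error_value(MethodType) == MethodType);
      return 0;
    }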
 
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -320,7 +320,8 @@
 
 void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data,
                                        Array<Method*>* methods) {
-  if (methods != NULL && methods != Universe::the_empty_method_array()) {
+  if (methods != NULL && methods != Universe::the_empty_method_array() &&
+      !methods->is_shared()) {
     for (int i = 0; i < methods->length(); i++) {
       Method* method = methods->at(i);
       if (method == NULL) continue;  // maybe null if error processing
@@ -344,13 +345,14 @@
     // check that the interfaces don't come from super class
     Array<Klass*>* sti = (super_klass == NULL) ? NULL :
                     InstanceKlass::cast(super_klass)->transitive_interfaces();
-    if (ti != sti) {
+    if (ti != sti && ti != NULL && !ti->is_shared()) {
       MetadataFactory::free_array<Klass*>(loader_data, ti);
     }
   }
 
   // local interfaces can be empty
-  if (local_interfaces != Universe::the_empty_klass_array()) {
+  if (local_interfaces != Universe::the_empty_klass_array() &&
+      local_interfaces != NULL && !local_interfaces->is_shared()) {
     MetadataFactory::free_array<Klass*>(loader_data, local_interfaces);
   }
 }
@@ -380,21 +382,25 @@
   deallocate_methods(loader_data, methods());
   set_methods(NULL);
 
-  if (method_ordering() != Universe::the_empty_int_array()) {
+  if (method_ordering() != NULL &&
+      method_ordering() != Universe::the_empty_int_array() &&
+      !method_ordering()->is_shared()) {
     MetadataFactory::free_array<int>(loader_data, method_ordering());
   }
   set_method_ordering(NULL);
 
   // default methods can be empty
   if (default_methods() != NULL &&
-      default_methods() != Universe::the_empty_method_array()) {
+      default_methods() != Universe::the_empty_method_array() &&
+      !default_methods()->is_shared()) {
     MetadataFactory::free_array<Method*>(loader_data, default_methods());
   }
   // Do NOT deallocate the default methods, they are owned by superinterfaces.
   set_default_methods(NULL);
 
   // default methods vtable indices can be empty
-  if (default_vtable_indices() != NULL) {
+  if (default_vtable_indices() != NULL &&
+      !default_vtable_indices()->is_shared()) {
     MetadataFactory::free_array<int>(loader_data, default_vtable_indices());
   }
   set_default_vtable_indices(NULL);
@@ -403,8 +409,10 @@
   // This array is in Klass, but remove it with the InstanceKlass since
   // this place would be the only caller and it can share memory with transitive
   // interfaces.
-  if (secondary_supers() != Universe::the_empty_klass_array() &&
-      secondary_supers() != transitive_interfaces()) {
+  if (secondary_supers() != NULL &&
+      secondary_supers() != Universe::the_empty_klass_array() &&
+      secondary_supers() != transitive_interfaces() &&
+      !secondary_supers()->is_shared()) {
     MetadataFactory::free_array<Klass*>(loader_data, secondary_supers());
   }
   set_secondary_supers(NULL);
@@ -413,24 +421,32 @@
   set_transitive_interfaces(NULL);
   set_local_interfaces(NULL);
 
-  MetadataFactory::free_array<jushort>(loader_data, fields());
+  if (fields() != NULL && !fields()->is_shared()) {
+    MetadataFactory::free_array<jushort>(loader_data, fields());
+  }
   set_fields(NULL, 0);
 
   // If a method from a redefined class is using this constant pool, don't
   // delete it, yet.  The new class's previous version will point to this.
   if (constants() != NULL) {
     assert (!constants()->on_stack(), "shouldn't be called if anything is onstack");
-    MetadataFactory::free_metadata(loader_data, constants());
+    if (!constants()->is_shared()) {
+      MetadataFactory::free_metadata(loader_data, constants());
+    }
     set_constants(NULL);
   }
 
-  if (inner_classes() != Universe::the_empty_short_array()) {
+  if (inner_classes() != NULL &&
+      inner_classes() != Universe::the_empty_short_array() &&
+      !inner_classes()->is_shared()) {
     MetadataFactory::free_array<jushort>(loader_data, inner_classes());
   }
   set_inner_classes(NULL);
 
-  // We should deallocate the Annotations instance
-  MetadataFactory::free_metadata(loader_data, annotations());
+  // We should deallocate the Annotations instance if it's not in shared spaces.
+  if (annotations() != NULL && !annotations()->is_shared()) {
+    MetadataFactory::free_metadata(loader_data, annotations());
+  }
   set_annotations(NULL);
 }
 
--- a/hotspot/src/share/vm/oops/method.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -805,6 +805,7 @@
  private:
   void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
 
+ public:
   MethodCounters* get_method_counters(TRAPS) {
     if (_method_counters == NULL) {
       build_method_counters(this, CHECK_AND_CLEAR_NULL);
@@ -812,7 +813,6 @@
     return _method_counters;
   }
 
- public:
   bool   is_not_c1_compilable() const         { return access_flags().is_not_c1_compilable();  }
   void  set_not_c1_compilable()               {       _access_flags.set_not_c1_compilable();   }
   void clear_not_c1_compilable()              {       _access_flags.clear_not_c1_compilable(); }
--- a/hotspot/src/share/vm/oops/methodData.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -56,6 +56,11 @@
   if (needs_array_len(tag)) {
     set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
   }
+  if (tag == call_type_data_tag) {
+    CallTypeData::initialize(this, cell_count);
+  } else if (tag == virtual_call_type_data_tag) {
+    VirtualCallTypeData::initialize(this, cell_count);
+  }
 }
 
 void DataLayout::clean_weak_klass_links(BoolObjectClosure* cl) {
@@ -76,7 +81,7 @@
 }
 
 #ifndef PRODUCT
-void ProfileData::print_shared(outputStream* st, const char* name) {
+void ProfileData::print_shared(outputStream* st, const char* name) const {
   st->print("bci: %d", bci());
   st->fill_to(tab_width_one);
   st->print("%s", name);
@@ -91,8 +96,8 @@
     st->print("flags(%d) ", flags);
 }
 
-void ProfileData::tab(outputStream* st) {
-  st->fill_to(tab_width_two);
+void ProfileData::tab(outputStream* st, bool first) const {
+  st->fill_to(first ? tab_width_one : tab_width_two);
 }
 #endif // !PRODUCT
 
@@ -104,7 +109,7 @@
 
 
 #ifndef PRODUCT
-void BitData::print_data_on(outputStream* st) {
+void BitData::print_data_on(outputStream* st) const {
   print_shared(st, "BitData");
 }
 #endif // !PRODUCT
@@ -115,7 +120,7 @@
 // A CounterData corresponds to a simple counter.
 
 #ifndef PRODUCT
-void CounterData::print_data_on(outputStream* st) {
+void CounterData::print_data_on(outputStream* st) const {
   print_shared(st, "CounterData");
   st->print_cr("count(%u)", count());
 }
@@ -145,12 +150,207 @@
 }
 
 #ifndef PRODUCT
-void JumpData::print_data_on(outputStream* st) {
+void JumpData::print_data_on(outputStream* st) const {
   print_shared(st, "JumpData");
   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 }
 #endif // !PRODUCT
 
+int TypeStackSlotEntries::compute_cell_count(Symbol* signature, int max) {
+  ResourceMark rm;
+  SignatureStream ss(signature);
+  int args_count = MIN2(ss.reference_parameter_count(), max);
+  return args_count * per_arg_cell_count;
+}
+
+int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
+  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
+  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
+  Bytecode_invoke inv(stream->method(), stream->bci());
+  int args_cell = 0;
+  if (arguments_profiling_enabled()) {
+    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), TypeProfileArgsLimit);
+  }
+  int ret_cell = 0;
+  if (return_profiling_enabled() && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {
+    ret_cell = ReturnTypeEntry::static_cell_count();
+  }
+  int header_cell = 0;
+  if (args_cell + ret_cell > 0) {
+    header_cell = header_cell_count();
+  }
+
+  return header_cell + args_cell + ret_cell;
+}
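
As a worked example of the count above (the signature and the TypeProfileArgsLimit value are assumptions for illustration): an invoke of (ILjava/lang/String;[I)Ljava/lang/Object; has two reference parameters and an object return, so the data needs 1 header + 4 argument + 1 return = 6 cells.

    #include <cassert>

    int main() {
      const int per_arg_cell_count = 2;  // one stack-slot cell + one type cell
      const int args_limit = 8;          // assumed value of TypeProfileArgsLimit
      int reference_params = 2;          // String and int[] in the signature above
      int args_cell = (reference_params < args_limit ? reference_params : args_limit)
                      * per_arg_cell_count;                    // 4
      int ret_cell = 1;                  // object return: one ReturnTypeEntry cell
      int header_cell = (args_cell + ret_cell > 0) ? 1 : 0;    // 1
      assert(header_cell + args_cell + ret_cell == 6);
    }
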
+
+class ArgumentOffsetComputer : public SignatureInfo {
+private:
+  int _max;
+  GrowableArray<int> _offsets;
+
+  void set(int size, BasicType type) { _size += size; }
+  void do_object(int begin, int end) {
+    if (_offsets.length() < _max) {
+      _offsets.push(_size);
+    }
+    SignatureInfo::do_object(begin, end);
+  }
+  void do_array (int begin, int end) {
+    if (_offsets.length() < _max) {
+      _offsets.push(_size);
+    }
+    SignatureInfo::do_array(begin, end);
+  }
+
+public:
+  ArgumentOffsetComputer(Symbol* signature, int max)
+    : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
+  }
+
+  int total() { lazy_iterate_parameters(); return _size; }
+
+  int off_at(int i) const { return _offsets.at(i); }
+};
+
+void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver) {
+  ResourceMark rm;
+  ArgumentOffsetComputer aos(signature, _number_of_entries);
+  aos.total();
+  for (int i = 0; i < _number_of_entries; i++) {
+    set_stack_slot(i, aos.off_at(i) + (has_receiver ? 1 : 0));
+    set_type(i, type_none());
+  }
+}
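
For illustration, consider (ILjava/lang/Object;D)V: the int occupies slot 0, the Object slot 1 and the double slots 2 and 3, so the only recorded offset is 1; with a receiver every slot shifts by one and the stored stack slot becomes 2. A standalone re-computation (slot sizes per the JVM spec, the framing is hypothetical):

    #include <cassert>
    #include <vector>

    int main() {
      struct Param { int slots; bool is_ref; };
      // (ILjava/lang/Object;D)V: int = 1 slot, Object = 1 slot, double = 2 slots
      std::vector<Param> params = { {1, false}, {1, true}, {2, false} };
      std::vector<int> ref_offsets;
      int size = 0;
      for (const Param& p : params) {
        if (p.is_ref) ref_offsets.push_back(size); // record slot of each reference
        size += p.slots;
      }
      bool has_receiver = true;                    // 'this' shifts every slot by one
      assert(ref_offsets.size() == 1);
      assert(ref_offsets[0] + (has_receiver ? 1 : 0) == 2);
    }
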
+
+void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
+  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
+  Bytecode_invoke inv(stream->method(), stream->bci());
+
+  SignatureStream ss(inv.signature());
+  if (has_arguments()) {
+#ifdef ASSERT
+    ResourceMark rm;
+    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
+    assert(count > 0, "room for args type but none found?");
+    check_number_of_arguments(count);
+#endif
+    _args.post_initialize(inv.signature(), inv.has_receiver());
+  }
+
+  if (has_return()) {
+    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
+    _ret.post_initialize();
+  }
+}
+
+void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
+  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
+  Bytecode_invoke inv(stream->method(), stream->bci());
+
+  if (has_arguments()) {
+#ifdef ASSERT
+    ResourceMark rm;
+    SignatureStream ss(inv.signature());
+    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
+    assert(count > 0, "room for args type but none found?");
+    check_number_of_arguments(count);
+#endif
+    _args.post_initialize(inv.signature(), inv.has_receiver());
+  }
+
+  if (has_return()) {
+    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
+    _ret.post_initialize();
+  }
+}
+
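+// Returns true when the entry records a klass whose class loader is no
+// longer alive (callers then clear the entry).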
+bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
+  return !is_type_none(p) &&
+    !((Klass*)klass_part(p))->is_loader_alive(is_alive_cl);
+}
+
+void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
+  for (int i = 0; i < _number_of_entries; i++) {
+    intptr_t p = type(i);
+    if (is_loader_alive(is_alive_cl, p)) {
+      set_type(i, type_none());
+    }
+  }
+}
+
+void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
+  intptr_t p = type();
+  if (is_loader_alive(is_alive_cl, p)) {
+    set_type(type_none());
+  }
+}
+
+bool TypeEntriesAtCall::return_profiling_enabled() {
+  return MethodData::profile_return();
+}
+
+bool TypeEntriesAtCall::arguments_profiling_enabled() {
+  return MethodData::profile_arguments();
+}
+
+#ifndef PRODUCT
+void TypeEntries::print_klass(outputStream* st, intptr_t k) {
+  if (is_type_none(k)) {
+    st->print("none");
+  } else if (is_type_unknown(k)) {
+    st->print("unknown");
+  } else {
+    valid_klass(k)->print_value_on(st);
+  }
+  if (was_null_seen(k)) {
+    st->print(" (null seen)");
+  }
+}
+
+void TypeStackSlotEntries::print_data_on(outputStream* st) const {
+  for (int i = 0; i < _number_of_entries; i++) {
+    _pd->tab(st);
+    st->print("%d: stack(%u) ", i, stack_slot(i));
+    print_klass(st, type(i));
+    st->cr();
+  }
+}
+
+void ReturnTypeEntry::print_data_on(outputStream* st) const {
+  _pd->tab(st);
+  print_klass(st, type());
+  st->cr();
+}
+
+void CallTypeData::print_data_on(outputStream* st) const {
+  CounterData::print_data_on(st);
+  if (has_arguments()) {
+    tab(st, true);
+    st->print("argument types");
+    _args.print_data_on(st);
+  }
+  if (has_return()) {
+    tab(st, true);
+    st->print("return type");
+    _ret.print_data_on(st);
+  }
+}
+
+void VirtualCallTypeData::print_data_on(outputStream* st) const {
+  VirtualCallData::print_data_on(st);
+  if (has_arguments()) {
+    tab(st, true);
+    st->print("argument types");
+    _args.print_data_on(st);
+  }
+  if (has_return()) {
+    tab(st, true);
+    st->print("return type");
+    _ret.print_data_on(st);
+  }
+}
+#endif
+
 // ==================================================================
 // ReceiverTypeData
 //
@@ -169,7 +369,7 @@
 }
 
 #ifndef PRODUCT
-void ReceiverTypeData::print_receiver_data_on(outputStream* st) {
+void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
   uint row;
   int entries = 0;
   for (row = 0; row < row_limit(); row++) {
@@ -190,11 +390,11 @@
     }
   }
 }
-void ReceiverTypeData::print_data_on(outputStream* st) {
+void ReceiverTypeData::print_data_on(outputStream* st) const {
   print_shared(st, "ReceiverTypeData");
   print_receiver_data_on(st);
 }
-void VirtualCallData::print_data_on(outputStream* st) {
+void VirtualCallData::print_data_on(outputStream* st) const {
   print_shared(st, "VirtualCallData");
   print_receiver_data_on(st);
 }
@@ -246,7 +446,7 @@
 
 
 #ifndef PRODUCT
-void RetData::print_data_on(outputStream* st) {
+void RetData::print_data_on(outputStream* st) const {
   print_shared(st, "RetData");
   uint row;
   int entries = 0;
@@ -281,7 +481,7 @@
 }
 
 #ifndef PRODUCT
-void BranchData::print_data_on(outputStream* st) {
+void BranchData::print_data_on(outputStream* st) const {
   print_shared(st, "BranchData");
   st->print_cr("taken(%u) displacement(%d)",
                taken(), displacement());
@@ -355,7 +555,7 @@
 }
 
 #ifndef PRODUCT
-void MultiBranchData::print_data_on(outputStream* st) {
+void MultiBranchData::print_data_on(outputStream* st) const {
   print_shared(st, "MultiBranchData");
   st->print_cr("default_count(%u) displacement(%d)",
                default_count(), default_displacement());
@@ -369,7 +569,7 @@
 #endif
 
 #ifndef PRODUCT
-void ArgInfoData::print_data_on(outputStream* st) {
+void ArgInfoData::print_data_on(outputStream* st) const {
   print_shared(st, "ArgInfoData");
   int nargs = number_of_args();
   for (int i = 0; i < nargs; i++) {
@@ -407,7 +607,11 @@
     }
   case Bytecodes::_invokespecial:
   case Bytecodes::_invokestatic:
-    return CounterData::static_cell_count();
+    if (MethodData::profile_arguments() || MethodData::profile_return()) {
+      return variable_cell_count;
+    } else {
+      return CounterData::static_cell_count();
+    }
   case Bytecodes::_goto:
   case Bytecodes::_goto_w:
   case Bytecodes::_jsr:
@@ -415,9 +619,17 @@
     return JumpData::static_cell_count();
   case Bytecodes::_invokevirtual:
   case Bytecodes::_invokeinterface:
-    return VirtualCallData::static_cell_count();
+    if (MethodData::profile_arguments() || MethodData::profile_return()) {
+      return variable_cell_count;
+    } else {
+      return VirtualCallData::static_cell_count();
+    }
   case Bytecodes::_invokedynamic:
-    return CounterData::static_cell_count();
+    if (MethodData::profile_arguments() || MethodData::profile_return()) {
+      return variable_cell_count;
+    } else {
+      return CounterData::static_cell_count();
+    }
   case Bytecodes::_ret:
     return RetData::static_cell_count();
   case Bytecodes::_ifeq:
@@ -453,7 +665,36 @@
     return 0;
   }
   if (cell_count == variable_cell_count) {
-    cell_count = MultiBranchData::compute_cell_count(stream);
+    switch (stream->code()) {
+    case Bytecodes::_lookupswitch:
+    case Bytecodes::_tableswitch:
+      cell_count = MultiBranchData::compute_cell_count(stream);
+      break;
+    case Bytecodes::_invokespecial:
+    case Bytecodes::_invokestatic:
+    case Bytecodes::_invokedynamic:
+      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
+      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+          profile_return_for_invoke(stream->method(), stream->bci())) {
+        cell_count = CallTypeData::compute_cell_count(stream);
+      } else {
+        cell_count = CounterData::static_cell_count();
+      }
+      break;
+    case Bytecodes::_invokevirtual:
+    case Bytecodes::_invokeinterface: {
+      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
+      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+          profile_return_for_invoke(stream->method(), stream->bci())) {
+        cell_count = VirtualCallTypeData::compute_cell_count(stream);
+      } else {
+        cell_count = VirtualCallData::static_cell_count();
+      }
+      break;
+    }
+    default:
+      fatal("unexpected bytecode for var length profile data");
+    }
   }
   // Note:  cell_count might be zero, meaning that there is just
   //        a DataLayout header, with no extra cells.
@@ -499,6 +740,7 @@
   // Add a cell to record information about modified arguments.
   int arg_size = method->size_of_parameters();
   object_size += DataLayout::compute_size_in_bytes(arg_size+1);
+
   return object_size;
 }
 
@@ -534,10 +776,21 @@
     }
     break;
   case Bytecodes::_invokespecial:
-  case Bytecodes::_invokestatic:
-    cell_count = CounterData::static_cell_count();
-    tag = DataLayout::counter_data_tag;
+  case Bytecodes::_invokestatic: {
+    int counter_data_cell_count = CounterData::static_cell_count();
+    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+        profile_return_for_invoke(stream->method(), stream->bci())) {
+      cell_count = CallTypeData::compute_cell_count(stream);
+    } else {
+      cell_count = counter_data_cell_count;
+    }
+    if (cell_count > counter_data_cell_count) {
+      tag = DataLayout::call_type_data_tag;
+    } else {
+      tag = DataLayout::counter_data_tag;
+    }
     break;
+  }
   case Bytecodes::_goto:
   case Bytecodes::_goto_w:
   case Bytecodes::_jsr:
@@ -546,15 +799,37 @@
     tag = DataLayout::jump_data_tag;
     break;
   case Bytecodes::_invokevirtual:
-  case Bytecodes::_invokeinterface:
-    cell_count = VirtualCallData::static_cell_count();
-    tag = DataLayout::virtual_call_data_tag;
+  case Bytecodes::_invokeinterface: {
+    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
+    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+        profile_return_for_invoke(stream->method(), stream->bci())) {
+      cell_count = VirtualCallTypeData::compute_cell_count(stream);
+    } else {
+      cell_count = virtual_call_data_cell_count;
+    }
+    if (cell_count > virtual_call_data_cell_count) {
+      tag = DataLayout::virtual_call_type_data_tag;
+    } else {
+      tag = DataLayout::virtual_call_data_tag;
+    }
     break;
-  case Bytecodes::_invokedynamic:
+  }
+  case Bytecodes::_invokedynamic: {
     // %%% should make a type profile for any invokedynamic that takes a ref argument
-    cell_count = CounterData::static_cell_count();
-    tag = DataLayout::counter_data_tag;
+    int counter_data_cell_count = CounterData::static_cell_count();
+    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+        profile_return_for_invoke(stream->method(), stream->bci())) {
+      cell_count = CallTypeData::compute_cell_count(stream);
+    } else {
+      cell_count = counter_data_cell_count;
+    }
+    if (cell_count > counter_data_cell_count) {
+      tag = DataLayout::call_type_data_tag;
+    } else {
+      tag = DataLayout::counter_data_tag;
+    }
     break;
+  }
   case Bytecodes::_ret:
     cell_count = RetData::static_cell_count();
     tag = DataLayout::ret_data_tag;
@@ -585,6 +860,11 @@
     break;
   }
   assert(tag == DataLayout::multi_branch_data_tag ||
+         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
+          (tag == DataLayout::call_type_data_tag ||
+           tag == DataLayout::counter_data_tag ||
+           tag == DataLayout::virtual_call_type_data_tag ||
+           tag == DataLayout::virtual_call_data_tag)) ||
          cell_count == bytecode_cell_count(c), "cell counts must agree");
   if (cell_count >= 0) {
     assert(tag != DataLayout::no_tag, "bad tag");
@@ -631,6 +911,10 @@
     return new MultiBranchData(this);
   case DataLayout::arg_info_data_tag:
     return new ArgInfoData(this);
+  case DataLayout::call_type_data_tag:
+    return new CallTypeData(this);
+  case DataLayout::virtual_call_type_data_tag:
+    return new VirtualCallTypeData(this);
   };
 }
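
The switch above is a tag-dispatched factory: the layout tag stored in the data header selects the wrapper class that knows how to interpret the cells. A generic sketch of that shape (types hypothetical, not the HotSpot classes):

    #include <memory>

    enum Tag { counter_data_tag, call_type_data_tag };

    struct ProfileDataBase { virtual ~ProfileDataBase() {} };
    struct CounterDataSketch  : ProfileDataBase {};
    struct CallTypeDataSketch : ProfileDataBase {};

    // The layout tag picks the wrapper that knows how to read its cells.
    static std::unique_ptr<ProfileDataBase> data_for(Tag t) {
      switch (t) {
      case counter_data_tag:   return std::unique_ptr<ProfileDataBase>(new CounterDataSketch());
      case call_type_data_tag: return std::unique_ptr<ProfileDataBase>(new CallTypeDataSketch());
      }
      return nullptr;
    }

    int main() { data_for(call_type_data_tag); }
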
 
@@ -898,3 +1182,70 @@
   NEEDS_CLEANUP;
   // not yet implemented.
 }
+
+bool MethodData::profile_jsr292(methodHandle m, int bci) {
+  if (m->is_compiled_lambda_form()) {
+    return true;
+  }
+
+  Bytecode_invoke inv(m, bci);
+  return inv.is_invokedynamic() || inv.is_invokehandle();
+}
+
+int MethodData::profile_arguments_flag() {
+  return TypeProfileLevel % 10;
+}
+
+bool MethodData::profile_arguments() {
+  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
+}
+
+bool MethodData::profile_arguments_jsr292_only() {
+  return profile_arguments_flag() == type_profile_jsr292;
+}
+
+bool MethodData::profile_all_arguments() {
+  return profile_arguments_flag() == type_profile_all;
+}
+
+bool MethodData::profile_arguments_for_invoke(methodHandle m, int bci) {
+  if (!profile_arguments()) {
+    return false;
+  }
+
+  if (profile_all_arguments()) {
+    return true;
+  }
+
+  assert(profile_arguments_jsr292_only(), "inconsistent");
+  return profile_jsr292(m, bci);
+}
+
+int MethodData::profile_return_flag() {
+  return TypeProfileLevel / 10;
+}
+
+bool MethodData::profile_return() {
+  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
+}
+
+bool MethodData::profile_return_jsr292_only() {
+  return profile_return_flag() == type_profile_jsr292;
+}
+
+bool MethodData::profile_all_return() {
+  return profile_return_flag() == type_profile_all;
+}
+
+bool MethodData::profile_return_for_invoke(methodHandle m, int bci) {
+  if (!profile_return()) {
+    return false;
+  }
+
+  if (profile_all_return()) {
+    return true;
+  }
+
+  assert(profile_return_jsr292_only(), "inconsistent");
+  return profile_jsr292(m, bci);
+}
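
The two flag helpers read TypeProfileLevel as two decimal digits, each 0 (off), 1 (JSR-292 call sites only) or 2 (all invokes): the ones digit governs argument profiling and the tens digit return-value profiling. A quick standalone check:

    #include <cassert>

    int main() {
      int TypeProfileLevel = 12;                    // e.g. -XX:TypeProfileLevel=12
      int arguments_flag = TypeProfileLevel % 10;   // ones digit: 2 = all invokes
      int return_flag    = TypeProfileLevel / 10;   // tens digit: 1 = JSR-292 only
      assert(arguments_flag == 2 && return_flag == 1);
    }
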
--- a/hotspot/src/share/vm/oops/methodData.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/methodData.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -117,7 +117,9 @@
     ret_data_tag,
     branch_data_tag,
     multi_branch_data_tag,
-    arg_info_data_tag
+    arg_info_data_tag,
+    call_type_data_tag,
+    virtual_call_type_data_tag
   };
 
   enum {
@@ -165,7 +167,7 @@
   // occurred, and the MDO shows N occurrences of X, we make the
   // simplifying assumption that all N occurrences can be blamed
   // on that BCI.
-  int trap_state() {
+  int trap_state() const {
     return ((_header._struct._flags >> trap_shift) & trap_mask);
   }
 
@@ -175,11 +177,11 @@
     _header._struct._flags = (new_state << trap_shift) | old_flags;
   }
 
-  u1 flags() {
+  u1 flags() const {
     return _header._struct._flags;
   }
 
-  u2 bci() {
+  u2 bci() const {
     return _header._struct._bci;
   }
 
@@ -198,7 +200,7 @@
   void release_set_cell_at(int index, intptr_t value) {
     OrderAccess::release_store_ptr(&_cells[index], value);
   }
-  intptr_t cell_at(int index) {
+  intptr_t cell_at(int index) const {
     return _cells[index];
   }
 
@@ -206,7 +208,7 @@
     assert(flag_number < flag_limit, "oob");
     _header._struct._flags |= (0x1 << flag_number);
   }
-  bool flag_at(int flag_number) {
+  bool flag_at(int flag_number) const {
     assert(flag_number < flag_limit, "oob");
     return (_header._struct._flags & (0x1 << flag_number)) != 0;
   }
@@ -254,19 +256,23 @@
 class     CounterData;
 class       ReceiverTypeData;
 class         VirtualCallData;
+class           VirtualCallTypeData;
 class       RetData;
+class       CallTypeData;
 class   JumpData;
 class     BranchData;
 class   ArrayData;
 class     MultiBranchData;
 class     ArgInfoData;
 
-
 // ProfileData
 //
 // A ProfileData object is created to refer to a section of profiling
 // data in a structured way.
 class ProfileData : public ResourceObj {
+  friend class TypeEntries;
+  friend class ReturnTypeEntry;
+  friend class TypeStackSlotEntries;
 private:
 #ifndef PRODUCT
   enum {
@@ -280,6 +286,7 @@
 
 protected:
   DataLayout* data() { return _data; }
+  const DataLayout* data() const { return _data; }
 
   enum {
     cell_size = DataLayout::cell_size
@@ -287,7 +294,7 @@
 
 public:
   // How many cells are in this?
-  virtual int cell_count() {
+  virtual int cell_count() const {
     ShouldNotReachHere();
     return -1;
   }
@@ -307,7 +314,7 @@
     assert(0 <= index && index < cell_count(), "oob");
     data()->release_set_cell_at(index, value);
   }
-  intptr_t intptr_at(int index) {
+  intptr_t intptr_at(int index) const {
     assert(0 <= index && index < cell_count(), "oob");
     return data()->cell_at(index);
   }
@@ -317,7 +324,7 @@
   void release_set_uint_at(int index, uint value) {
     release_set_intptr_at(index, (intptr_t) value);
   }
-  uint uint_at(int index) {
+  uint uint_at(int index) const {
     return (uint)intptr_at(index);
   }
   void set_int_at(int index, int value) {
@@ -326,23 +333,23 @@
   void release_set_int_at(int index, int value) {
     release_set_intptr_at(index, (intptr_t) value);
   }
-  int int_at(int index) {
+  int int_at(int index) const {
     return (int)intptr_at(index);
   }
-  int int_at_unchecked(int index) {
+  int int_at_unchecked(int index) const {
     return (int)data()->cell_at(index);
   }
   void set_oop_at(int index, oop value) {
     set_intptr_at(index, cast_from_oop<intptr_t>(value));
   }
-  oop oop_at(int index) {
+  oop oop_at(int index) const {
     return cast_to_oop(intptr_at(index));
   }
 
   void set_flag_at(int flag_number) {
     data()->set_flag_at(flag_number);
   }
-  bool flag_at(int flag_number) {
+  bool flag_at(int flag_number) const {
     return data()->flag_at(flag_number);
   }
 
@@ -362,7 +369,7 @@
   // Constructor for invalid ProfileData.
   ProfileData();
 
-  u2 bci() {
+  u2 bci() const {
     return data()->bci();
   }
 
@@ -370,7 +377,7 @@
     return (address)_data;
   }
 
-  int trap_state() {
+  int trap_state() const {
     return data()->trap_state();
   }
   void set_trap_state(int new_state) {
@@ -378,58 +385,68 @@
   }
 
   // Type checking
-  virtual bool is_BitData()         { return false; }
-  virtual bool is_CounterData()     { return false; }
-  virtual bool is_JumpData()        { return false; }
-  virtual bool is_ReceiverTypeData(){ return false; }
-  virtual bool is_VirtualCallData() { return false; }
-  virtual bool is_RetData()         { return false; }
-  virtual bool is_BranchData()      { return false; }
-  virtual bool is_ArrayData()       { return false; }
-  virtual bool is_MultiBranchData() { return false; }
-  virtual bool is_ArgInfoData()     { return false; }
+  virtual bool is_BitData()         const { return false; }
+  virtual bool is_CounterData()     const { return false; }
+  virtual bool is_JumpData()        const { return false; }
+  virtual bool is_ReceiverTypeData()const { return false; }
+  virtual bool is_VirtualCallData() const { return false; }
+  virtual bool is_RetData()         const { return false; }
+  virtual bool is_BranchData()      const { return false; }
+  virtual bool is_ArrayData()       const { return false; }
+  virtual bool is_MultiBranchData() const { return false; }
+  virtual bool is_ArgInfoData()     const { return false; }
+  virtual bool is_CallTypeData()    const { return false; }
+  virtual bool is_VirtualCallTypeData()const { return false; }
 
 
-  BitData* as_BitData() {
+  BitData* as_BitData() const {
     assert(is_BitData(), "wrong type");
     return is_BitData()         ? (BitData*)        this : NULL;
   }
-  CounterData* as_CounterData() {
+  CounterData* as_CounterData() const {
     assert(is_CounterData(), "wrong type");
     return is_CounterData()     ? (CounterData*)    this : NULL;
   }
-  JumpData* as_JumpData() {
+  JumpData* as_JumpData() const {
     assert(is_JumpData(), "wrong type");
     return is_JumpData()        ? (JumpData*)       this : NULL;
   }
-  ReceiverTypeData* as_ReceiverTypeData() {
+  ReceiverTypeData* as_ReceiverTypeData() const {
     assert(is_ReceiverTypeData(), "wrong type");
     return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
   }
-  VirtualCallData* as_VirtualCallData() {
+  VirtualCallData* as_VirtualCallData() const {
     assert(is_VirtualCallData(), "wrong type");
     return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
   }
-  RetData* as_RetData() {
+  RetData* as_RetData() const {
     assert(is_RetData(), "wrong type");
     return is_RetData()         ? (RetData*)        this : NULL;
   }
-  BranchData* as_BranchData() {
+  BranchData* as_BranchData() const {
     assert(is_BranchData(), "wrong type");
     return is_BranchData()      ? (BranchData*)     this : NULL;
   }
-  ArrayData* as_ArrayData() {
+  ArrayData* as_ArrayData() const {
     assert(is_ArrayData(), "wrong type");
     return is_ArrayData()       ? (ArrayData*)      this : NULL;
   }
-  MultiBranchData* as_MultiBranchData() {
+  MultiBranchData* as_MultiBranchData() const {
     assert(is_MultiBranchData(), "wrong type");
     return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
   }
-  ArgInfoData* as_ArgInfoData() {
+  ArgInfoData* as_ArgInfoData() const {
     assert(is_ArgInfoData(), "wrong type");
     return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
   }
+  CallTypeData* as_CallTypeData() const {
+    assert(is_CallTypeData(), "wrong type");
+    return is_CallTypeData() ? (CallTypeData*)this : NULL;
+  }
+  VirtualCallTypeData* as_VirtualCallTypeData() const {
+    assert(is_VirtualCallTypeData(), "wrong type");
+    return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
+  }
 
 
   // Subclass specific initialization
@@ -443,15 +460,15 @@
   // an oop in a ProfileData to the ci equivalent. Generally speaking,
   // most ProfileData don't require any translation, so we provide the null
   // translation here, and the required translators are in the ci subclasses.
-  virtual void translate_from(ProfileData* data) {}
+  virtual void translate_from(const ProfileData* data) {}
 
-  virtual void print_data_on(outputStream* st) {
+  virtual void print_data_on(outputStream* st) const {
     ShouldNotReachHere();
   }
 
 #ifndef PRODUCT
-  void print_shared(outputStream* st, const char* name);
-  void tab(outputStream* st);
+  void print_shared(outputStream* st, const char* name) const;
+  void tab(outputStream* st, bool first = false) const;
 #endif
 };
 
@@ -470,13 +487,13 @@
   BitData(DataLayout* layout) : ProfileData(layout) {
   }
 
-  virtual bool is_BitData() { return true; }
+  virtual bool is_BitData() const { return true; }
 
   static int static_cell_count() {
     return bit_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
@@ -498,7 +515,7 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -514,18 +531,18 @@
 public:
   CounterData(DataLayout* layout) : BitData(layout) {}
 
-  virtual bool is_CounterData() { return true; }
+  virtual bool is_CounterData() const { return true; }
 
   static int static_cell_count() {
     return counter_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
   // Direct accessor
-  uint count() {
+  uint count() const {
     return uint_at(count_off);
   }
 
@@ -542,7 +559,7 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -570,18 +587,18 @@
       layout->tag() == DataLayout::branch_data_tag, "wrong type");
   }
 
-  virtual bool is_JumpData() { return true; }
+  virtual bool is_JumpData() const { return true; }
 
   static int static_cell_count() {
     return jump_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
   // Direct accessor
-  uint taken() {
+  uint taken() const {
     return uint_at(taken_off_set);
   }
 
@@ -598,7 +615,7 @@
     return cnt;
   }
 
-  int displacement() {
+  int displacement() const {
     return int_at(displacement_off_set);
   }
 
@@ -615,7 +632,418 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+// Entries in a ProfileData object to record types: an entry is either
+// none (no profile yet), unknown (conflicting profile data) or a klass if
+// a single one is seen. Whether a null reference was seen is also
+// recorded. No counter is associated with the type and a single type
+// is tracked (unlike VirtualCallData).
+class TypeEntries {
+
+public:
+
+  // A single cell is used to record information for a type:
+  // - the cell is initialized to 0
+  // - when a type is discovered it is stored in the cell
+  // - bit 0 of the cell records whether a null reference was
+  // encountered or not
+  // - bit 1 is set to record a conflict in the type information
+
+  enum {
+    null_seen = 1,
+    type_mask = ~null_seen,
+    type_unknown = 2,
+    status_bits = null_seen | type_unknown,
+    type_klass_mask = ~status_bits
+  };
+
+  // what to initialize a cell to
+  static intptr_t type_none() {
+    return 0;
+  }
+
+  // null seen = bit 0 set?
+  static bool was_null_seen(intptr_t v) {
+    return (v & null_seen) != 0;
+  }
+
+  // conflicting type information = bit 1 set?
+  static bool is_type_unknown(intptr_t v) {
+    return (v & type_unknown) != 0;
+  }
+
+  // no type information yet = all bits cleared, ignoring bit 0?
+  static bool is_type_none(intptr_t v) {
+    return (v & type_mask) == 0;
+  }
+
+  // recorded type: cell with status bits 0 and 1 masked out
+  static intptr_t klass_part(intptr_t v) {
+    intptr_t r = v & type_klass_mask;
+    assert (r != 0, "invalid");
+    return r;
+  }
+
+  // type recorded
+  static Klass* valid_klass(intptr_t k) {
+    if (!is_type_none(k) &&
+        !is_type_unknown(k)) {
+      return (Klass*)klass_part(k);
+    } else {
+      return NULL;
+    }
+  }
+
+  static intptr_t with_status(intptr_t k, intptr_t in) {
+    return k | (in & status_bits);
+  }
+
+  static intptr_t with_status(Klass* k, intptr_t in) {
+    return with_status((intptr_t)k, in);
+  }
+
+#ifndef PRODUCT
+  static void print_klass(outputStream* st, intptr_t k);
+#endif
+
+  // GC support
+  static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
+
+protected:
+  // ProfileData object these entries are part of
+  ProfileData* _pd;
+  // offset within the ProfileData object where the entries start
+  const int _base_off;
+
+  TypeEntries(int base_off)
+    : _pd(NULL), _base_off(base_off) {}
+
+  void set_intptr_at(int index, intptr_t value) {
+    _pd->set_intptr_at(index, value);
+  }
+
+  intptr_t intptr_at(int index) const {
+    return _pd->intptr_at(index);
+  }
+
+public:
+  void set_profile_data(ProfileData* pd) {
+    _pd = pd;
+  }
+};
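
The encoding above is a tagged pointer: Klass addresses are at least 4-byte aligned, so the two low bits are free to carry the null_seen and type_unknown status. A minimal sketch of the same bit manipulation, independent of HotSpot:

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t null_seen    = 1;           // bit 0: a null was observed
      const intptr_t type_unknown = 2;           // bit 1: conflicting types seen
      const intptr_t status_bits  = null_seen | type_unknown;
      const intptr_t klass_mask   = ~status_bits;

      alignas(8) static int fake_klass;          // stands in for a Klass*
      intptr_t cell = 0;                         // type_none: all bits clear
      cell = (intptr_t)&fake_klass | null_seen;  // record the type plus null-seen
      assert((cell & null_seen) != 0);           // was_null_seen(cell)
      assert((cell & type_unknown) == 0);        // not is_type_unknown(cell)
      assert((cell & klass_mask) == (intptr_t)&fake_klass); // klass_part(cell)
    }
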
+
+// Type entries used for arguments passed at a call and parameters on
+// method entry. 2 cells per entry: one for the type encoded as in
+// TypeEntries and one initialized with the stack slot where the
+// profiled object is to be found so that the interpreter can locate
+// it quickly.
+class TypeStackSlotEntries : public TypeEntries {
+
+private:
+  enum {
+    stack_slot_entry,
+    type_entry,
+    per_arg_cell_count
+  };
+
+  // offset of cell for stack slot for entry i within ProfileData object
+  int stack_slot_offset(int i) const {
+    return _base_off + stack_slot_local_offset(i);
+  }
+
+protected:
+  const int _number_of_entries;
+
+  // offset of cell for type for entry i within ProfileData object
+  int type_offset(int i) const {
+    return _base_off + type_local_offset(i);
+  }
+
+public:
+
+  TypeStackSlotEntries(int base_off, int nb_entries)
+    : TypeEntries(base_off), _number_of_entries(nb_entries) {}
+
+  static int compute_cell_count(Symbol* signature, int max);
+
+  void post_initialize(Symbol* signature, bool has_receiver);
+
+  // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
+  static int stack_slot_local_offset(int i) {
+    return i * per_arg_cell_count + stack_slot_entry;
+  }
+
+  // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
+  static int type_local_offset(int i) {
+    return i * per_arg_cell_count + type_entry;
+  }
+
+  // stack slot for entry i
+  uint stack_slot(int i) const {
+    assert(i >= 0 && i < _number_of_entries, "oob");
+    return _pd->uint_at(stack_slot_offset(i));
+  }
+
+  // set stack slot for entry i
+  void set_stack_slot(int i, uint num) {
+    assert(i >= 0 && i < _number_of_entries, "oob");
+    _pd->set_uint_at(stack_slot_offset(i), num);
+  }
+
+  // type for entry i
+  intptr_t type(int i) const {
+    assert(i >= 0 && i < _number_of_entries, "oob");
+    return _pd->intptr_at(type_offset(i));
+  }
+
+  // set type for entry i
+  void set_type(int i, intptr_t k) {
+    assert(i >= 0 && i < _number_of_entries, "oob");
+    _pd->set_intptr_at(type_offset(i), k);
+  }
+
+  static ByteSize per_arg_size() {
+    return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
+  }
+
+  static int per_arg_count() {
+    return per_arg_cell_count;
+  }
+
+  // GC support
+  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
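
Each entry therefore occupies per_arg_cell_count == 2 consecutive cells after _base_off: the stack slot of entry i at 2*i and its type at 2*i + 1. A tiny check of that layout arithmetic (the base offset is assumed):

    #include <cassert>

    int main() {
      const int per_arg_cell_count = 2;      // { stack_slot_entry, type_entry }
      const int base_off = 3;                // assumed start of the entries
      int i = 2;                             // third profiled argument
      int stack_slot_off = base_off + i * per_arg_cell_count + 0;  // 7
      int type_off       = base_off + i * per_arg_cell_count + 1;  // 8
      assert(stack_slot_off == 7 && type_off == 8);
    }
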
+
+// Type entry used for return from a call. A single cell to record the
+// type.
+class ReturnTypeEntry : public TypeEntries {
+
+private:
+  enum {
+    cell_count = 1
+  };
+
+public:
+  ReturnTypeEntry(int base_off)
+    : TypeEntries(base_off) {}
+
+  void post_initialize() {
+    set_type(type_none());
+  }
+
+  intptr_t type() const {
+    return _pd->intptr_at(_base_off);
+  }
+
+  void set_type(intptr_t k) {
+    _pd->set_intptr_at(_base_off, k);
+  }
+
+  static int static_cell_count() {
+    return cell_count;
+  }
+
+  static ByteSize size() {
+    return in_ByteSize(cell_count * DataLayout::cell_size);
+  }
+
+  ByteSize type_offset() {
+    return DataLayout::cell_offset(_base_off);
+  }
+
+  // GC support
+  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+// Entries to collect type information at a call: contains arguments
+// (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
+// number of cells. Because the number of cells for the return type is
+// smaller than the number of cells for the type of an argument, the
+// number of cells is used to tell how many arguments are profiled and
+// whether a return value is profiled. See has_arguments() and
+// has_return().
+class TypeEntriesAtCall {
+private:
+  static int stack_slot_local_offset(int i) {
+    return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
+  }
+
+  static int argument_type_local_offset(int i) {
+    return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
+  }
+
+public:
+
+  static int header_cell_count() {
+    return 1;
+  }
+
+  static int cell_count_local_offset() {
+    return 0;
+  }
+
+  static int compute_cell_count(BytecodeStream* stream);
+
+  static void initialize(DataLayout* dl, int base, int cell_count) {
+    int off = base + cell_count_local_offset();
+    dl->set_cell_at(off, cell_count - base - header_cell_count());
+  }
+
+  static bool arguments_profiling_enabled();
+  static bool return_profiling_enabled();
+
+  // Code generation support
+  static ByteSize cell_count_offset() {
+    return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
+  }
+
+  static ByteSize args_data_offset() {
+    return in_ByteSize(header_cell_count() * DataLayout::cell_size);
+  }
+
+  static ByteSize stack_slot_offset(int i) {
+    return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
+  }
+
+  static ByteSize argument_type_offset(int i) {
+    return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
+  }
+};
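
The single header cell makes the block self-describing: initialize() stores the number of payload cells that follow it, so the data can later be walked without reparsing the signature. A standalone rendition (the cell-layout values are assumed):

    #include <cassert>
    #include <vector>

    int main() {
      const int base = 1;          // e.g. CounterData's single counter cell
      const int header_cells = 1;  // TypeEntriesAtCall::header_cell_count()
      const int total_cells = 7;   // base + header + 2 args * 2 cells + 1 return
      std::vector<long> cells(total_cells, 0);
      cells[base] = total_cells - base - header_cells;  // what initialize() stores
      assert(cells[base] == 5);    // 2*2 argument cells + 1 return cell
    }
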
+
+// CallTypeData
+//
+// A CallTypeData is used to access profiling information about a non
+// virtual call for which we collect type information about arguments
+// and return value.
+class CallTypeData : public CounterData {
+private:
+  // entries for arguments if any
+  TypeStackSlotEntries _args;
+  // entry for return type if any
+  ReturnTypeEntry _ret;
+
+  int cell_count_global_offset() const {
+    return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
+  }
+
+  // number of cells not counting the header
+  int cell_count_no_header() const {
+    return uint_at(cell_count_global_offset());
+  }
+
+  void check_number_of_arguments(int total) {
+    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
+  }
+
+protected:
+  // An entry for a return value takes less space than an entry for an
+  // argument so if the number of cells exceeds the number of cells
+  // needed for an argument, this object contains type information for
+  // at least one argument.
+  bool has_arguments() const {
+    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
+    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
+    return res;
+  }
+
+public:
+  CallTypeData(DataLayout* layout) :
+    CounterData(layout),
+    _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
+    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
+  {
+    assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
+    // Some compilers (VC++) don't want this passed in member initialization list
+    _args.set_profile_data(this);
+    _ret.set_profile_data(this);
+  }
+
+  const TypeStackSlotEntries* args() const {
+    assert(has_arguments(), "no profiling of arguments");
+    return &_args;
+  }
+
+  const ReturnTypeEntry* ret() const {
+    assert(has_return(), "no profiling of return value");
+    return &_ret;
+  }
+
+  virtual bool is_CallTypeData() const { return true; }
+
+  static int static_cell_count() {
+    return -1;
+  }
+
+  static int compute_cell_count(BytecodeStream* stream) {
+    return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
+  }
+
+  static void initialize(DataLayout* dl, int cell_count) {
+    TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
+  }
+
+  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
+
+  virtual int cell_count() const {
+    return CounterData::static_cell_count() +
+      TypeEntriesAtCall::header_cell_count() +
+      int_at_unchecked(cell_count_global_offset());
+  }
+
+  int number_of_arguments() const {
+    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
+  }
+
+  void set_argument_type(int i, Klass* k) {
+    assert(has_arguments(), "no arguments!");
+    intptr_t current = _args.type(i);
+    _args.set_type(i, TypeEntries::with_status(k, current));
+  }
+
+  void set_return_type(Klass* k) {
+    assert(has_return(), "no return!");
+    intptr_t current = _ret.type();
+    _ret.set_type(TypeEntries::with_status(k, current));
+  }
+
+  // An entry for a return value takes less space than an entry for an
+  // argument, so if the remainder of the number of cells divided by
+  // the number of cells for an argument is non-zero, a return value
+  // is profiled in this object.
+  bool has_return() const {
+    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
+    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
+    return res;
+  }
+
+  // Code generation support
+  static ByteSize args_data_offset() {
+    return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
+  }
+
+  // GC support
+  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
+    if (has_arguments()) {
+      _args.clean_weak_klass_links(is_alive_closure);
+    }
+    if (has_return()) {
+      _ret.clean_weak_klass_links(is_alive_closure);
+    }
+  }
+
+#ifndef PRODUCT
+  virtual void print_data_on(outputStream* st) const;
 #endif
 };
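
Decoding is pure arithmetic on that stored count: with two cells per argument and one for the return, count / 2 gives number_of_arguments() and a non-zero count % 2 means has_return(). For instance:

    #include <cassert>

    int main() {
      const int per_arg = 2;         // TypeStackSlotEntries::per_arg_count()
      int cell_count_no_header = 5;  // as stored by initialize() in the sketch above
      int number_of_arguments = cell_count_no_header / per_arg;  // 2
      bool has_return = (cell_count_no_header % per_arg) != 0;   // 5 % 2 == 1
      assert(number_of_arguments == 2 && has_return);
    }
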
 
@@ -636,16 +1064,17 @@
 public:
   ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
     assert(layout->tag() == DataLayout::receiver_type_data_tag ||
-           layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
+           layout->tag() == DataLayout::virtual_call_data_tag ||
+           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
   }
 
-  virtual bool is_ReceiverTypeData() { return true; }
+  virtual bool is_ReceiverTypeData() const { return true; }
 
   static int static_cell_count() {
     return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
@@ -660,7 +1089,7 @@
     return count0_offset + row * receiver_type_row_cell_count;
   }
 
-  Klass* receiver(uint row) {
+  Klass* receiver(uint row) const {
     assert(row < row_limit(), "oob");
 
     Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
@@ -673,7 +1102,7 @@
     set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
   }
 
-  uint receiver_count(uint row) {
+  uint receiver_count(uint row) const {
     assert(row < row_limit(), "oob");
     return uint_at(receiver_count_cell_index(row));
   }
@@ -721,8 +1150,8 @@
   virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
 
 #ifndef PRODUCT
-  void print_receiver_data_on(outputStream* st);
-  void print_data_on(outputStream* st);
+  void print_receiver_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -733,10 +1162,11 @@
 class VirtualCallData : public ReceiverTypeData {
 public:
   VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
-    assert(layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
+    assert(layout->tag() == DataLayout::virtual_call_data_tag ||
+           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
   }
 
-  virtual bool is_VirtualCallData() { return true; }
+  virtual bool is_VirtualCallData() const { return true; }
 
   static int static_cell_count() {
     // At this point we could add more profile state, e.g., for arguments.
@@ -744,7 +1174,7 @@
     return ReceiverTypeData::static_cell_count();
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
@@ -754,7 +1184,134 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+// VirtualCallTypeData
+//
+// A VirtualCallTypeData is used to access profiling information about
+// a virtual call for which we collect type information about
+// arguments and return value.
+class VirtualCallTypeData : public VirtualCallData {
+private:
+  // entries for arguments if any
+  TypeStackSlotEntries _args;
+  // entry for return type if any
+  ReturnTypeEntry _ret;
+
+  int cell_count_global_offset() const {
+    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
+  }
+
+  // number of cells not counting the header
+  int cell_count_no_header() const {
+    return uint_at(cell_count_global_offset());
+  }
+
+  void check_number_of_arguments(int total) {
+    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
+  }
+
+protected:
+  // An entry for a return value takes less space than an entry for an
+  // argument so if the number of cells exceeds the number of cells
+  // needed for an argument, this object contains type information for
+  // at least one argument.
+  bool has_arguments() const {
+    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
+    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
+    return res;
+  }
+
+public:
+  VirtualCallTypeData(DataLayout* layout) :
+    VirtualCallData(layout),
+    _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
+    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
+  {
+    assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
+    // Some compilers (VC++) don't want this passed in member initialization list
+    _args.set_profile_data(this);
+    _ret.set_profile_data(this);
+  }
+
+  const TypeStackSlotEntries* args() const {
+    assert(has_arguments(), "no profiling of arguments");
+    return &_args;
+  }
+
+  const ReturnTypeEntry* ret() const {
+    assert(has_return(), "no profiling of return value");
+    return &_ret;
+  }
+
+  virtual bool is_VirtualCallTypeData() const { return true; }
+
+  static int static_cell_count() {
+    return -1;
+  }
+
+  static int compute_cell_count(BytecodeStream* stream) {
+    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
+  }
+
+  static void initialize(DataLayout* dl, int cell_count) {
+    TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
+  }
+
+  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
+
+  virtual int cell_count() const {
+    return VirtualCallData::static_cell_count() +
+      TypeEntriesAtCall::header_cell_count() +
+      int_at_unchecked(cell_count_global_offset());
+  }
+
+  int number_of_arguments() const {
+    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
+  }
+
+  void set_argument_type(int i, Klass* k) {
+    assert(has_arguments(), "no arguments!");
+    intptr_t current = _args.type(i);
+    _args.set_type(i, TypeEntries::with_status(k, current));
+  }
+
+  void set_return_type(Klass* k) {
+    assert(has_return(), "no return!");
+    intptr_t current = _ret.type();
+    _ret.set_type(TypeEntries::with_status(k, current));
+  }
+
+  // An entry for a return value takes less space than an entry for an
+  // argument, so if the remainder of the number of cells divided by
+  // the number of cells for an argument is non-zero, a return value
+  // is profiled in this object.
+  bool has_return() const {
+    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
+    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
+    return res;
+  }
+
+  // Code generation support
+  static ByteSize args_data_offset() {
+    return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
+  }
+
+  // GC support
+  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
+    ReceiverTypeData::clean_weak_klass_links(is_alive_closure);
+    if (has_arguments()) {
+      _args.clean_weak_klass_links(is_alive_closure);
+    }
+    if (has_return()) {
+      _ret.clean_weak_klass_links(is_alive_closure);
+    }
+  }
+
+#ifndef PRODUCT
+  virtual void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -797,7 +1354,7 @@
     assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
   }
 
-  virtual bool is_RetData() { return true; }
+  virtual bool is_RetData() const { return true; }
 
   enum {
     no_bci = -1 // value of bci when bci1/2 are not in use.
@@ -807,7 +1364,7 @@
     return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
@@ -825,13 +1382,13 @@
   }
 
   // Direct accessors
-  int bci(uint row) {
+  int bci(uint row) const {
     return int_at(bci_cell_index(row));
   }
-  uint bci_count(uint row) {
+  uint bci_count(uint row) const {
     return uint_at(bci_count_cell_index(row));
   }
-  int bci_displacement(uint row) {
+  int bci_displacement(uint row) const {
     return int_at(bci_displacement_cell_index(row));
   }
 
@@ -853,7 +1410,7 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -878,18 +1435,18 @@
     assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
   }
 
-  virtual bool is_BranchData() { return true; }
+  virtual bool is_BranchData() const { return true; }
 
   static int static_cell_count() {
     return branch_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
   // Direct accessor
-  uint not_taken() {
+  uint not_taken() const {
     return uint_at(not_taken_off_set);
   }
 
@@ -917,7 +1474,7 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -935,15 +1492,15 @@
     array_start_off_set
   };
 
-  uint array_uint_at(int index) {
+  uint array_uint_at(int index) const {
     int aindex = index + array_start_off_set;
     return uint_at(aindex);
   }
-  int array_int_at(int index) {
+  int array_int_at(int index) const {
     int aindex = index + array_start_off_set;
     return int_at(aindex);
   }
-  oop array_oop_at(int index) {
+  oop array_oop_at(int index) const {
     int aindex = index + array_start_off_set;
     return oop_at(aindex);
   }
@@ -960,17 +1517,17 @@
 public:
   ArrayData(DataLayout* layout) : ProfileData(layout) {}
 
-  virtual bool is_ArrayData() { return true; }
+  virtual bool is_ArrayData() const { return true; }
 
   static int static_cell_count() {
     return -1;
   }
 
-  int array_len() {
+  int array_len() const {
     return int_at_unchecked(array_len_off_set);
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return array_len() + 1;
   }
 
@@ -1017,29 +1574,29 @@
     assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
   }
 
-  virtual bool is_MultiBranchData() { return true; }
+  virtual bool is_MultiBranchData() const { return true; }
 
   static int compute_cell_count(BytecodeStream* stream);
 
-  int number_of_cases() {
+  int number_of_cases() const {
     int alen = array_len() - 2; // get rid of default case here.
     assert(alen % per_case_cell_count == 0, "must be even");
     return (alen / per_case_cell_count);
   }
 
-  uint default_count() {
+  uint default_count() const {
     return array_uint_at(default_count_off_set);
   }
-  int default_displacement() {
+  int default_displacement() const {
     return array_int_at(default_disaplacement_off_set);
   }
 
-  uint count_at(int index) {
+  uint count_at(int index) const {
     return array_uint_at(case_array_start +
                          index * per_case_cell_count +
                          relative_count_off_set);
   }
-  int displacement_at(int index) {
+  int displacement_at(int index) const {
     return array_int_at(case_array_start +
                         index * per_case_cell_count +
                         relative_displacement_off_set);
@@ -1074,7 +1631,7 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -1085,14 +1642,14 @@
     assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
   }
 
-  virtual bool is_ArgInfoData() { return true; }
+  virtual bool is_ArgInfoData() const { return true; }
 
 
-  int number_of_args() {
+  int number_of_args() const {
     return array_len();
   }
 
-  uint arg_modified(int arg) {
+  uint arg_modified(int arg) const {
     return array_uint_at(arg);
   }
 
@@ -1101,7 +1658,7 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -1271,6 +1828,21 @@
   // return the argument info cell
   ArgInfoData *arg_info();
 
+  enum {
+    no_type_profile = 0,
+    type_profile_jsr292 = 1,
+    type_profile_all = 2
+  };
+
+  static bool profile_jsr292(methodHandle m, int bci);
+  static int profile_arguments_flag();
+  static bool profile_arguments_jsr292_only();
+  static bool profile_all_arguments();
+  static bool profile_arguments_for_invoke(methodHandle m, int bci);
+  static int profile_return_flag();
+  static bool profile_all_return();
+  static bool profile_return_for_invoke(methodHandle m, int bci);
+
 public:
   static int header_size() {
     return sizeof(MethodData)/wordSize;
@@ -1510,6 +2082,10 @@
   // verification
   void verify_on(outputStream* st);
   void verify_data_on(outputStream* st);
+
+  static bool profile_arguments();
+  static bool profile_return();
+  static bool profile_return_jsr292_only();
 };
 
 #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -197,6 +197,7 @@
 // negative filter: should callee NOT be inlined?
 bool InlineTree::should_not_inline(ciMethod *callee_method,
                                    ciMethod* caller_method,
+                                   JVMState* jvms,
                                    WarmCallInfo* wci_result) {
 
   const char* fail_msg = NULL;
@@ -226,7 +227,7 @@
     // don't inline exception code unless the top method belongs to an
     // exception class
     if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
-      ciMethod* top_method = caller_jvms() ? caller_jvms()->of_depth(1)->method() : method();
+      ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
       if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
         wci_result->set_profit(wci_result->profit() * 0.1);
       }
@@ -328,7 +329,7 @@
 // return true if ok
 // Relocated from "InliningClosure::try_to_inline"
 bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
-                               int caller_bci, ciCallProfile& profile,
+                               int caller_bci, JVMState* jvms, ciCallProfile& profile,
                                WarmCallInfo* wci_result, bool& should_delay) {
 
    // Old algorithm had funny accumulating BC-size counters
@@ -346,7 +347,7 @@
                      wci_result)) {
     return false;
   }
-  if (should_not_inline(callee_method, caller_method, wci_result)) {
+  if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
     return false;
   }
 
@@ -397,24 +398,35 @@
   }
 
   // detect direct and indirect recursive inlining
-  if (!callee_method->is_compiled_lambda_form()) {
+  {
     // count the current method and the callee
-    int inline_level = (method() == callee_method) ? 1 : 0;
-    if (inline_level > MaxRecursiveInlineLevel) {
-      set_msg("recursively inlining too deep");
-      return false;
+    const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
+    int inline_level = 0;
+    if (!is_compiled_lambda_form) {
+      if (method() == callee_method) {
+        inline_level++;
+      }
     }
     // count callers of current method and callee
-    JVMState* jvms = caller_jvms();
-    while (jvms != NULL && jvms->has_method()) {
-      if (jvms->method() == callee_method) {
-        inline_level++;
-        if (inline_level > MaxRecursiveInlineLevel) {
-          set_msg("recursively inlining too deep");
-          return false;
+    Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
+    for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
+      if (j->method() == callee_method) {
+        if (is_compiled_lambda_form) {
+          // Since compiled lambda forms are heavily reused we allow recursive inlining. If it is truly
+          // a recursion (using the same "receiver") we limit inlining; otherwise we can easily blow the
+          // compiler stack.
+          Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
+          if (caller_argument0 == callee_argument0) {
+            inline_level++;
+          }
+        } else {
+          inline_level++;
         }
       }
-      jvms = jvms->caller();
+    }
+    if (inline_level > MaxRecursiveInlineLevel) {
+      set_msg("recursive inlining is too deep");
+      return false;
     }
   }
 
@@ -536,7 +548,7 @@
   // Check if inlining policy says no.
   WarmCallInfo wci = *(initial_wci);
   bool success = try_to_inline(callee_method, caller_method, caller_bci,
-                               profile, &wci, should_delay);
+                               jvms, profile, &wci, should_delay);
 
 #ifndef PRODUCT
   if (UseOldInlining && InlineWarmCalls
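The reworked check above walks the JVMState chain once and counts a frame as recursive only when a compiled lambda form is re-entered with the same receiver (argument 0). A minimal standalone sketch of that counting rule; Frame, receiver and max_level are illustrative stand-ins for JVMState, argument(0) and MaxRecursiveInlineLevel, not the HotSpot types:

    struct Frame {
        const void* method;    // method executing in this frame
        const void* receiver;  // first argument of the frame (uncast)
        const Frame* caller;   // next frame up the inlining chain
    };

    // True when inlining 'callee' would exceed 'max_level'.
    static bool too_deep(const void* callee, const void* callee_receiver,
                         bool is_lambda_form, const Frame* jvms, int max_level) {
        int level = 0;
        // The current frame only counts for ordinary direct recursion.
        if (!is_lambda_form && jvms->method == callee) {
            level++;
        }
        for (const Frame* f = jvms->caller; f != nullptr; f = f->caller) {
            if (f->method != callee) continue;
            // Compiled lambda forms are heavily shared, so a matching frame
            // counts only when it is applied to the same receiver.
            if (!is_lambda_form || f->receiver == callee_receiver) {
                level++;
            }
        }
        return level > max_level;
    }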
--- a/hotspot/src/share/vm/opto/c2compiler.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/c2compiler.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -44,9 +44,6 @@
 # include "adfiles/ad_ppc.hpp"
 #endif
 
-
-volatile int C2Compiler::_runtimes = uninitialized;
-
 // register information defined by ADLC
 extern const char register_save_policy[];
 extern const int  register_save_type[];
@@ -57,7 +54,7 @@
 const char* C2Compiler::retry_no_escape_analysis() {
   return "retry without escape analysis";
 }
-void C2Compiler::initialize_runtime() {
+bool C2Compiler::init_c2_runtime() {
 
   // Check assumptions used while running ADLC
   Compile::adlc_verification();
@@ -90,41 +87,31 @@
 
   CompilerThread* thread = CompilerThread::current();
 
-  HandleMark  handle_mark(thread);
-
-  OptoRuntime::generate(thread->env());
-
+  HandleMark handle_mark(thread);
+  return OptoRuntime::generate(thread->env());
 }
 
 
 void C2Compiler::initialize() {
-
-  // This method can only be called once per C2Compiler object
   // The first compiler thread that gets here will initialize the
-  // small amount of global state (and runtime stubs) that c2 needs.
+  // small amount of global state (and runtime stubs) that C2 needs.
 
   // There is a race possible once at startup and then we're fine
 
   // Note that this is being called from a compiler thread not the
   // main startup thread.
-
-  if (_runtimes != initialized) {
-    initialize_runtimes( initialize_runtime, &_runtimes);
+  if (should_perform_init()) {
+    bool successful = C2Compiler::init_c2_runtime();
+    int new_state = (successful) ? initialized : failed;
+    set_state(new_state);
   }
-
-  // Mark this compiler object as ready to roll
-  mark_initialized();
 }
 
-void C2Compiler::compile_method(ciEnv* env,
-                                ciMethod* target,
-                                int entry_bci) {
-  if (!is_initialized()) {
-    initialize();
-  }
+void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
+  assert(is_initialized(), "Compiler thread must be initialized");
+
   bool subsume_loads = SubsumeLoads;
-  bool do_escape_analysis = DoEscapeAnalysis &&
-    !env->jvmti_can_access_local_variables();
+  bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables();
   bool eliminate_boxing = EliminateAutoBox;
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
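initialize() now relies on AbstractCompiler's should_perform_init()/set_state() (not part of this changeset) so that exactly one compiler thread generates the runtime stubs and records whether that succeeded. A minimal sketch of the first-thread-wins pattern, assuming a simple atomic state word; the state names and init_runtime() are placeholders:

    #include <atomic>

    enum State { uninitialized, initializing, initialized, failed };
    static std::atomic<int> g_state{uninitialized};

    static bool init_runtime() { return true; }  // stands in for init_c2_runtime()

    static void initialize_once() {
        int expected = uninitialized;
        // Only the thread that wins the CAS runs the one-time initialization;
        // all others later observe either 'initialized' or 'failed'.
        if (g_state.compare_exchange_strong(expected, initializing)) {
            g_state.store(init_runtime() ? initialized : failed);
        }
    }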
--- a/hotspot/src/share/vm/opto/c2compiler.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/c2compiler.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -28,24 +28,17 @@
 #include "compiler/abstractCompiler.hpp"
 
 class C2Compiler : public AbstractCompiler {
-private:
-
-  static void initialize_runtime();
+ private:
+  static bool init_c2_runtime();
 
 public:
   // Name
   const char *name() { return "C2"; }
 
-  static volatile int _runtimes;
-
 #ifdef TIERED
   virtual bool is_c2() { return true; };
 #endif // TIERED
 
-  // Customization
-  bool needs_adapters         () { return true; }
-  bool needs_stubs            () { return true; }
-
   void initialize();
 
   // Compilation entry point for methods
--- a/hotspot/src/share/vm/opto/chaitin.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -52,6 +52,7 @@
 class LRG : public ResourceObj {
   friend class VMStructs;
 public:
+  static const uint AllStack_size = 0xFFFFF; // This mask size is used to tell that the mask of this LRG supports stack positions
   enum { SPILL_REG=29999 };     // Register number of a spilled LRG
 
   double _cost;                 // 2 for loads/1 for stores times block freq
@@ -80,14 +81,21 @@
 private:
   uint _eff_degree;             // Effective degree: Sum of neighbors _num_regs
 public:
-  int degree() const { assert( _degree_valid, "" ); return _eff_degree; }
+  int degree() const { assert( _degree_valid , "" ); return _eff_degree; }
   // Degree starts not valid and any change to the IFG neighbor
   // set makes it not valid.
-  void set_degree( uint degree ) { _eff_degree = degree; debug_only(_degree_valid = 1;) }
+  void set_degree( uint degree ) {
+    _eff_degree = degree;
+    debug_only(_degree_valid = 1;)
+    assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
+  }
   // Made a change that hammered degree
   void invalid_degree() { debug_only(_degree_valid=0;) }
   // Incrementally modify degree.  If it was correct, it should remain correct
-  void inc_degree( uint mod ) { _eff_degree += mod; }
+  void inc_degree( uint mod ) {
+    _eff_degree += mod;
+    assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
+  }
   // Compute the degree between 2 live ranges
   int compute_degree( LRG &l ) const;
 
@@ -95,9 +103,9 @@
   RegMask _mask;                // Allowed registers for this LRG
   uint _mask_size;              // cache of _mask.Size();
 public:
-  int compute_mask_size() const { return _mask.is_AllStack() ? 65535 : _mask.Size(); }
+  int compute_mask_size() const { return _mask.is_AllStack() ? AllStack_size : _mask.Size(); }
   void set_mask_size( int size ) {
-    assert((size == 65535) || (size == (int)_mask.Size()), "");
+    assert((size == (int)AllStack_size) || (size == (int)_mask.Size()), "");
     _mask_size = size;
 #ifdef ASSERT
     _msize_valid=1;
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -47,6 +47,7 @@
 #include "opto/machnode.hpp"
 #include "opto/macro.hpp"
 #include "opto/matcher.hpp"
+#include "opto/mathexactnode.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/node.hpp"
@@ -2986,6 +2987,32 @@
       n->set_req(MemBarNode::Precedent, top());
     }
     break;
+    // Must set a control edge on all nodes that produce a FlagsProj
+    // so they can't escape the block that consumes the flags.
+    // Must also set the non-throwing branch as the control
+    // for all nodes that depend on the result, unless the node
+    // already has a control that isn't the control of the
+    // flag producer.
+  case Op_FlagsProj:
+    {
+      MathExactNode* math = (MathExactNode*)  n->in(0);
+      Node* ctrl = math->control_node();
+      Node* non_throwing = math->non_throwing_branch();
+      math->set_req(0, ctrl);
+
+      Node* result = math->result_node();
+      if (result != NULL) {
+        for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
+          Node* out = result->fast_out(j);
+          if (out->in(0) == NULL || out->in(0) == ctrl) {
+            out->set_req(0, non_throwing);
+          }
+        }
+      }
+    }
+    break;
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
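The Op_FlagsProj case pins the math-exact node on the control of its guarding If and then re-points every user of the result that is unpinned, or pinned on the old control, to the non-throwing projection so it cannot float above the overflow check. A toy version of that rewiring loop; Node here is a simplified stand-in for the C2 class:

    #include <vector>

    struct Node {
        Node* ctrl = nullptr;     // plays the role of in(0)
        std::vector<Node*> outs;  // users of this node's value
    };

    static void pin_result_users(Node* result, Node* old_ctrl, Node* non_throwing) {
        if (result == nullptr) return;
        for (Node* use : result->outs) {
            // Unpinned users, and users hanging off the old control, move
            // under the non-throwing branch.
            if (use->ctrl == nullptr || use->ctrl == old_ctrl) {
                use->ctrl = non_throwing;
            }
        }
    }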
--- a/hotspot/src/share/vm/opto/escape.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/escape.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -780,6 +780,7 @@
       }
     } else {  // Allocate instance
       if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
+          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
          !cik->is_instance_klass() || // StressReflectiveCode
           cik->as_instance_klass()->has_finalizer()) {
         es = PointsToNode::GlobalEscape;
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -2122,7 +2122,7 @@
 // Null check oop.  Set null-path control into Region in slot 3.
 // Make a cast-not-nullness use the other not-null control.  Return cast.
 Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
-                               bool never_see_null) {
+                               bool never_see_null, bool safe_for_replace) {
   // Initial NULL check taken path
   (*null_control) = top();
   Node* cast = null_check_common(value, T_OBJECT, false, null_control);
@@ -2140,6 +2140,9 @@
                   Deoptimization::Action_make_not_entrant);
     (*null_control) = top();    // NULL path is dead
   }
+  if ((*null_control) == top() && safe_for_replace) {
+    replace_in_map(value, cast);
+  }
 
   // Cast away null-ness on the result
   return cast;
@@ -2634,15 +2637,17 @@
   C->set_has_split_ifs(true); // Has chance for split-if optimization
 
   ciProfileData* data = NULL;
+  bool safe_for_replace = false;
   if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
     data = method()->method_data()->bci_to_data(bci());
+    safe_for_replace = true;
   }
   bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
                          && seems_never_null(obj, data));
 
   // Null check; get casted pointer; set region slot 3
   Node* null_ctl = top();
-  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
+  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
 
   // If not_null_obj is dead, only null-path is taken
   if (stopped()) {              // Doing instance-of on a NULL?
@@ -2723,11 +2728,13 @@
   }
 
   ciProfileData* data = NULL;
+  bool safe_for_replace = false;
   if (failure_control == NULL) {        // use MDO in regular case only
     assert(java_bc() == Bytecodes::_aastore ||
            java_bc() == Bytecodes::_checkcast,
            "interpreter profiles type checks only for these BCs");
     data = method()->method_data()->bci_to_data(bci());
+    safe_for_replace = true;
   }
 
   // Make the merge point
@@ -2742,7 +2749,7 @@
 
   // Null check; get casted pointer; set region slot 3
   Node* null_ctl = top();
-  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
+  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
 
   // If not_null_obj is dead, only null-path is taken
   if (stopped()) {              // Doing instance-of on a NULL?
@@ -3608,7 +3615,7 @@
   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
 
   // if (!marking)
-  __ if_then(marking, BoolTest::ne, zero); {
+  __ if_then(marking, BoolTest::ne, zero, unlikely); {
     BasicType index_bt = TypeX_X->basic_type();
     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
     Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -378,8 +378,10 @@
   // Return a cast-not-null node which depends on the not-null control.
   // If never_see_null, use an uncommon trap (*null_control sees a top).
   // The cast is not valid along the null path; keep a copy of the original.
+  // If safe_for_replace, then we can replace the value with the cast
+  // in the parsing map (the cast is guaranteed to dominate the map)
   Node* null_check_oop(Node* value, Node* *null_control,
-                       bool never_see_null = false);
+                       bool never_see_null = false, bool safe_for_replace = false);
 
   // Check the null_seen bit.
   bool seems_never_null(Node* obj, ciProfileData* data);
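The point of safe_for_replace is that once the null path is dead, every occurrence of the original (possibly null) value in the parser's map can be replaced by the not-null cast, so later bytecodes see the sharper type. A toy illustration of that substitution with the map modeled as a flat vector of slots (the real JVM state map is considerably richer):

    #include <vector>

    struct Value { bool maybe_null; };

    static void replace_in_map(std::vector<Value*>& map, Value* val, Value* cast) {
        for (Value*& slot : map) {
            if (slot == val) slot = cast;  // all later uses now see the cast
        }
    }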
--- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -616,7 +616,11 @@
       buffer[0] = 0;
       _chaitin->dump_register(node, buffer);
       print_prop("reg", buffer);
-      print_prop("lrg", _chaitin->_lrg_map.live_range_id(node));
+      uint lrg_id = 0;
+      if (node->_idx < _chaitin->_lrg_map.size()) {
+        lrg_id = _chaitin->_lrg_map.live_range_id(node);
+      }
+      print_prop("lrg", lrg_id);
     }
 
     node->_in_dump_cnt--;
--- a/hotspot/src/share/vm/opto/ifg.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/ifg.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -677,7 +677,7 @@
             } else {            // Common case: size 1 bound removal
               if( lrg.mask().Member(r_reg) ) {
                 lrg.Remove(r_reg);
-                lrg.set_mask_size(lrg.mask().is_AllStack() ? 65535:old_size-1);
+                lrg.set_mask_size(lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1);
               }
             }
             // If 'l' goes completely dry, it must spill.
--- a/hotspot/src/share/vm/opto/ifnode.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/ifnode.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -689,6 +689,7 @@
         ctrl->in(0)->in(1)->is_Bool() &&
         ctrl->in(0)->in(1)->in(1)->Opcode() == Op_CmpI &&
         ctrl->in(0)->in(1)->in(1)->in(2)->is_Con() &&
+        ctrl->in(0)->in(1)->in(1)->in(2) != phase->C->top() &&
         ctrl->in(0)->in(1)->in(1)->in(1) == n) {
       IfNode* dom_iff = ctrl->in(0)->as_If();
       Node* otherproj = dom_iff->proj_out(!ctrl->as_Proj()->_con);
--- a/hotspot/src/share/vm/opto/mathexactnode.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/mathexactnode.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -25,9 +25,10 @@
 #include "precompiled.hpp"
 #include "memory/allocation.inline.hpp"
 #include "opto/addnode.hpp"
+#include "opto/cfgnode.hpp"
 #include "opto/machnode.hpp"
+#include "opto/matcher.hpp"
 #include "opto/mathexactnode.hpp"
-#include "opto/matcher.hpp"
 #include "opto/subnode.hpp"
 
 MathExactNode::MathExactNode(Node* ctrl, Node* n1, Node* n2) : MultiNode(3) {
@@ -36,6 +37,33 @@
   init_req(2, n2);
 }
 
+BoolNode* MathExactNode::bool_node() const {
+  Node* flags = flags_node();
+  BoolNode* boolnode = flags->unique_out()->as_Bool();
+  assert(boolnode != NULL, "must have BoolNode");
+  return boolnode;
+}
+
+IfNode* MathExactNode::if_node() const {
+  BoolNode* boolnode = bool_node();
+  IfNode* ifnode = boolnode->unique_out()->as_If();
+  assert(ifnode != NULL, "must have IfNode");
+  return ifnode;
+}
+
+Node* MathExactNode::control_node() const {
+  IfNode* ifnode = if_node();
+  return ifnode->in(0);
+}
+
+Node* MathExactNode::non_throwing_branch() const {
+  IfNode* ifnode = if_node();
+  if (bool_node()->_test._test == BoolTest::overflow) {
+    return ifnode->proj_out(0);
+  }
+  return ifnode->proj_out(1);
+}
+
 Node* AddExactINode::match(const ProjNode* proj, const Matcher* m) {
   uint ideal_reg = proj->ideal_reg();
   RegMask rm;
@@ -62,15 +90,15 @@
     }
 
     if (flags != NULL) {
-      BoolNode* bolnode = (BoolNode *) flags->unique_out();
-      switch (bolnode->_test._test) {
+      BoolNode* boolnode = bool_node();
+      switch (boolnode->_test._test) {
         case BoolTest::overflow:
           // if the check is for overflow - never taken
-          igvn->replace_node(bolnode, phase->intcon(0));
+          igvn->replace_node(boolnode, phase->intcon(0));
           break;
         case BoolTest::no_overflow:
           // if the check is for no overflow - always taken
-          igvn->replace_node(bolnode, phase->intcon(1));
+          igvn->replace_node(boolnode, phase->intcon(1));
           break;
         default:
           fatal("Unexpected value of BoolTest");
--- a/hotspot/src/share/vm/opto/mathexactnode.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/mathexactnode.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -27,8 +27,11 @@
 
 #include "opto/multnode.hpp"
 #include "opto/node.hpp"
+#include "opto/subnode.hpp"
 #include "opto/type.hpp"
 
+class BoolNode;
+class IfNode;
 class Node;
 
 class PhaseGVN;
@@ -49,9 +52,13 @@
   virtual bool is_CFG() const { return false; }
   virtual uint ideal_reg() const { return NotAMachineReg; }
 
-  ProjNode* result_node() { return proj_out(result_proj_node); }
-  ProjNode* flags_node() { return proj_out(flags_proj_node); }
+  ProjNode* result_node() const { return proj_out(result_proj_node); }
+  ProjNode* flags_node() const { return proj_out(flags_proj_node); }
+  Node* control_node() const;
+  Node* non_throwing_branch() const;
 protected:
+  IfNode* if_node() const;
+  BoolNode* bool_node() const;
   Node* no_overflow(PhaseGVN *phase, Node* new_result);
 };
 
--- a/hotspot/src/share/vm/opto/parse.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/parse.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -73,6 +73,7 @@
   bool        try_to_inline(ciMethod* callee_method,
                             ciMethod* caller_method,
                             int caller_bci,
+                            JVMState* jvms,
                             ciCallProfile& profile,
                             WarmCallInfo* wci_result,
                             bool& should_delay);
@@ -83,6 +84,7 @@
                             WarmCallInfo* wci_result);
   bool        should_not_inline(ciMethod* callee_method,
                                 ciMethod* caller_method,
+                                JVMState* jvms,
                                 WarmCallInfo* wci_result);
   void        print_inlining(ciMethod* callee_method, int caller_bci,
                              bool success) const;
--- a/hotspot/src/share/vm/opto/parse2.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -268,7 +268,7 @@
     return adjoinRange(value, value, dest, table_index);
   }
 
-  void print(ciEnv* env) {
+  void print() {
     if (is_singleton())
       tty->print(" {%d}=>%d", lo(), dest());
     else if (lo() == min_jint)
@@ -471,8 +471,8 @@
   // These are the switch destinations hanging off the jumpnode
   int i = 0;
   for (SwitchRange* r = lo; r <= hi; r++) {
-    for (int j = r->lo(); j <= r->hi(); j++, i++) {
-      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), j - lowval));
+    for (int64 j = r->lo(); j <= r->hi(); j++, i++) {
+      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
       {
         PreserveJVMState pjvms(this);
         set_control(input);
@@ -632,7 +632,7 @@
     }
     tty->print("   ");
     for( r = lo; r <= hi; r++ ) {
-      r->print(env());
+      r->print();
     }
     tty->print_cr("");
   }
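The switch to a 64-bit loop index above matters when a range ends at max_jint: with a plain int counter, j++ overflows instead of making j <= r->hi() false, so the loop never terminates (and the signed overflow is undefined behavior). A self-contained demonstration of the fix; int64_t plays the role of HotSpot's int64:

    #include <cstdint>
    #include <climits>

    // Safe even when hi == INT_MAX: int64_t can represent INT_MAX + 1,
    // so the comparison eventually fails and the loop exits.
    static int count_range(int lo, int hi) {
        int count = 0;
        for (int64_t j = lo; j <= hi; j++) {
            count++;
        }
        return count;
    }
    // count_range(INT_MAX - 2, INT_MAX) == 3; with 'int j' it would spin.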
--- a/hotspot/src/share/vm/opto/parseHelper.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -343,10 +343,14 @@
 
   // Get the Method* node.
   ciMethod* m = method();
-  address counters_adr = m->ensure_method_counters();
+  MethodCounters* counters_adr = m->ensure_method_counters();
+  if (counters_adr == NULL) {
+    C->record_failure("method counters allocation failed");
+    return;
+  }
 
   Node* ctrl = control();
-  const TypePtr* adr_type = TypeRawPtr::make(counters_adr);
+  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
   Node *counters_node = makecon(adr_type);
   Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
     MethodCounters::interpreter_invocation_counter_offset_in_bytes());
--- a/hotspot/src/share/vm/opto/reg_split.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/reg_split.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -375,6 +375,7 @@
       }
 
       if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
+        assert(Reachblock != NULL, "Reachblock must be non-NULL");
         Node *rdef = Reachblock[lrg2reach[lidx]];
         if (rdef) {
           spill->set_req(i, rdef);
@@ -1336,7 +1337,8 @@
                _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
           insert--;
         }
-        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
+        // Since the def cannot contain any live range input, we can pass in NULL as the Reachblock parameter
+        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, NULL, false);
         if (!def) {
           return 0;    // Bail out
         }
--- a/hotspot/src/share/vm/opto/runtime.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -138,9 +138,10 @@
 
 
 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \
-  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc)
+  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc); \
+  if (var == NULL) { return false; }
 
-void OptoRuntime::generate(ciEnv* env) {
+bool OptoRuntime::generate(ciEnv* env) {
 
   generate_exception_blob();
 
@@ -158,7 +159,7 @@
   gen(env, _multianewarrayN_Java           , multianewarrayN_Type         , multianewarrayN_C               ,    0 , true , false, false);
   gen(env, _g1_wb_pre_Java                 , g1_wb_pre_Type               , SharedRuntime::g1_wb_pre        ,    0 , false, false, false);
   gen(env, _g1_wb_post_Java                , g1_wb_post_Type              , SharedRuntime::g1_wb_post       ,    0 , false, false, false);
-  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C      ,    0 , false, false, false);
+  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C, 0, false, false, false);
   gen(env, _rethrow_Java                   , rethrow_Type                 , rethrow_C                       ,    2 , true , false, true );
 
   gen(env, _slow_arraycopy_Java            , slow_arraycopy_Type          , SharedRuntime::slow_arraycopy_C ,    0 , false, false, false);
@@ -168,7 +169,7 @@
   gen(env, _zap_dead_Java_locals_Java      , zap_dead_locals_Type         , zap_dead_Java_locals_C          ,    0 , false, true , false );
   gen(env, _zap_dead_native_locals_Java    , zap_dead_locals_Type         , zap_dead_native_locals_C        ,    0 , false, true , false );
 # endif
-
+  return true;
 }
 
 #undef gen
@@ -976,30 +977,36 @@
   address handler_address = NULL;
 
   Handle exception(thread, thread->exception_oop());
+  address pc = thread->exception_pc();
+
+  // Clear out the exception oop and pc since looking up an
+  // exception handler can cause class loading, which might throw an
+  // exception and those fields are expected to be clear during
+  // normal bytecode execution.
+  thread->clear_exception_oop_and_pc();
 
   if (TraceExceptions) {
-    trace_exception(exception(), thread->exception_pc(), "");
+    trace_exception(exception(), pc, "");
   }
+
   // for AbortVMOnException flag
   NOT_PRODUCT(Exceptions::debug_check_abort(exception));
 
-  #ifdef ASSERT
-    if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
-      // should throw an exception here
-      ShouldNotReachHere();
-    }
-  #endif
-
+#ifdef ASSERT
+  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
+    // should throw an exception here
+    ShouldNotReachHere();
+  }
+#endif
 
   // new exception handling: this method is entered only from adapters
   // exceptions from compiled java methods are handled in compiled code
   // using rethrow node
 
-  address pc = thread->exception_pc();
   nm = CodeCache::find_nmethod(pc);
   assert(nm != NULL, "No NMethod found");
   if (nm->is_native_method()) {
-    fatal("Native mathod should not have path to exception handling");
+    fatal("Native method should not have path to exception handling");
   } else {
     // we are switching to old paradigm: search for exception handler in caller_frame
     // instead in exception handler of caller_frame.sender()
@@ -1346,7 +1353,8 @@
   tty->print(" in ");
   CodeBlob* blob = CodeCache::find_blob(exception_pc);
   if (blob->is_nmethod()) {
-    ((nmethod*)blob)->method()->print_value();
+    nmethod* nm = blob->as_nmethod_or_null();
+    nm->method()->print_value();
   } else if (blob->is_runtime_stub()) {
     tty->print("<runtime-stub>");
   } else {
--- a/hotspot/src/share/vm/opto/runtime.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/runtime.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -203,8 +203,10 @@
 
   static bool is_callee_saved_register(MachRegisterNumbers reg);
 
-  // One time only generate runtime code stubs
-  static void generate(ciEnv* env);
+  // One time only generate runtime code stubs. Returns true
+  // when runtime stubs have been generated successfully and
+  // false otherwise.
+  static bool generate(ciEnv* env);
 
   // Returns the name of a stub
   static const char* stub_name(address entry);
--- a/hotspot/src/share/vm/opto/stringopts.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/stringopts.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,10 +50,11 @@
   Node*               _arguments;      // The list of arguments to be concatenated
   GrowableArray<int>  _mode;           // into a String along with a mode flag
                                        // indicating how to treat the value.
-
+  Node_List           _constructors;   // List of constructors (many in case of stacked concat)
   Node_List           _control;        // List of control nodes that will be deleted
   Node_List           _uncommon_traps; // Uncommon traps that need to be rewritten
                                        // to restart at the initial JVMState.
+
  public:
   // Mode for converting arguments to Strings
   enum {
@@ -73,6 +74,7 @@
     _arguments->del_req(0);
   }
 
+  bool validate_mem_flow();
   bool validate_control_flow();
 
   void merge_add() {
@@ -189,6 +191,10 @@
     assert(!_control.contains(ctrl), "only push once");
     _control.push(ctrl);
   }
+  void add_constructor(Node* init) {
+    assert(!_constructors.contains(init), "only push once");
+    _constructors.push(init);
+  }
   CallStaticJavaNode* end() { return _end; }
   AllocateNode* begin() { return _begin; }
   Node* string_alloc() { return _string_alloc; }
@@ -301,6 +307,12 @@
     }
   }
   result->set_allocation(other->_begin);
+  for (uint i = 0; i < _constructors.size(); i++) {
+    result->add_constructor(_constructors.at(i));
+  }
+  for (uint i = 0; i < other->_constructors.size(); i++) {
+    result->add_constructor(other->_constructors.at(i));
+  }
   result->_multiple = true;
   return result;
 }
@@ -510,7 +522,8 @@
       sc->add_control(constructor);
       sc->add_control(alloc);
       sc->set_allocation(alloc);
-      if (sc->validate_control_flow()) {
+      sc->add_constructor(constructor);
+      if (sc->validate_control_flow() && sc->validate_mem_flow()) {
         return sc;
       } else {
         return NULL;
@@ -620,7 +633,7 @@
 #endif
 
             StringConcat* merged = sc->merge(other, arg);
-            if (merged->validate_control_flow()) {
+            if (merged->validate_control_flow() && merged->validate_mem_flow()) {
 #ifndef PRODUCT
               if (PrintOptimizeStringConcat) {
                 tty->print_cr("stacking would succeed");
@@ -708,6 +721,139 @@
 }
 
 
+bool StringConcat::validate_mem_flow() {
+  Compile* C = _stringopts->C;
+
+  for (uint i = 0; i < _control.size(); i++) {
+#ifndef PRODUCT
+    Node_List path;
+#endif
+    Node* curr = _control.at(i);
+    if (curr->is_Call() && curr != _begin) { // For all calls except the first allocation
+      // Now here's the main invariant in our case:
+      // For memory between the constructor, and appends, and toString we should only see bottom memory,
+      // produced by the previous call we know about.
+      if (!_constructors.contains(curr)) {
+        NOT_PRODUCT(path.push(curr);)
+        Node* mem = curr->in(TypeFunc::Memory);
+        assert(mem != NULL, "calls should have memory edge");
+        assert(!mem->is_Phi(), "should be handled by control flow validation");
+        NOT_PRODUCT(path.push(mem);)
+        while (mem->is_MergeMem()) {
+          for (uint i = 1; i < mem->req(); i++) {
+            if (i != Compile::AliasIdxBot && mem->in(i) != C->top()) {
+#ifndef PRODUCT
+              if (PrintOptimizeStringConcat) {
+                tty->print("fusion has incorrect memory flow (side effects) for ");
+                _begin->jvms()->dump_spec(tty); tty->cr();
+                path.dump();
+              }
+#endif
+              return false;
+            }
+          }
+          // skip through a potential MergeMem chain, linked through Bot
+          mem = mem->in(Compile::AliasIdxBot);
+          NOT_PRODUCT(path.push(mem);)
+        }
+        // now let it fall through, and see if we have a projection
+        if (mem->is_Proj()) {
+          // Should point to a previous known call
+          Node *prev = mem->in(0);
+          NOT_PRODUCT(path.push(prev);)
+          if (!prev->is_Call() || !_control.contains(prev)) {
+#ifndef PRODUCT
+            if (PrintOptimizeStringConcat) {
+              tty->print("fusion has incorrect memory flow (unknown call) for ");
+              _begin->jvms()->dump_spec(tty); tty->cr();
+              path.dump();
+            }
+#endif
+            return false;
+          }
+        } else {
+          assert(mem->is_Store() || mem->is_LoadStore(), err_msg_res("unexpected node type: %s", mem->Name()));
+#ifndef PRODUCT
+          if (PrintOptimizeStringConcat) {
+            tty->print("fusion has incorrect memory flow (unexpected source) for ");
+            _begin->jvms()->dump_spec(tty); tty->cr();
+            path.dump();
+          }
+#endif
+          return false;
+        }
+      } else {
+        // For memory that feeds into constructors it's more complicated.
+        // However, the advantage is that any side effect that happens between the Allocate/Initialize and
+        // the constructor will have to be control-dependent on Initialize.
+        // So we actually don't have to do anything, since it's going to be caught by the control flow
+        // analysis.
+#ifdef ASSERT
+        // Do a quick verification of the control pattern between the constructor and the initialize node
+        assert(curr->is_Call(), "constructor should be a call");
+        // Go up the control starting from the constructor call
+        Node* ctrl = curr->in(0);
+        IfNode* iff = NULL;
+        RegionNode* copy = NULL;
+
+        while (true) {
+          // skip known check patterns
+          if (ctrl->is_Region()) {
+            if (ctrl->as_Region()->is_copy()) {
+              copy = ctrl->as_Region();
+              ctrl = copy->is_copy();
+            } else { // a cast
+              assert(ctrl->req() == 3 &&
+                     ctrl->in(1) != NULL && ctrl->in(1)->is_Proj() &&
+                     ctrl->in(2) != NULL && ctrl->in(2)->is_Proj() &&
+                     ctrl->in(1)->in(0) == ctrl->in(2)->in(0) &&
+                     ctrl->in(1)->in(0) != NULL && ctrl->in(1)->in(0)->is_If(),
+                     "must be a simple diamond");
+              Node* true_proj = ctrl->in(1)->is_IfTrue() ? ctrl->in(1) : ctrl->in(2);
+              for (SimpleDUIterator i(true_proj); i.has_next(); i.next()) {
+                Node* use = i.get();
+                assert(use == ctrl || use->is_ConstraintCast(),
+                       err_msg_res("unexpected user: %s", use->Name()));
+              }
+
+              iff = ctrl->in(1)->in(0)->as_If();
+              ctrl = iff->in(0);
+            }
+          } else if (ctrl->is_IfTrue()) { // null checks, class checks
+            iff = ctrl->in(0)->as_If();
+            assert(iff->is_If(), "must be if");
+            // Verify that the other arm is an uncommon trap
+            Node* otherproj = iff->proj_out(1 - ctrl->as_Proj()->_con);
+            CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
+            assert(strcmp(call->_name, "uncommon_trap") == 0, "must be uncommon trap");
+            ctrl = iff->in(0);
+          } else {
+            break;
+          }
+        }
+
+        assert(ctrl->is_Proj(), "must be a projection");
+        assert(ctrl->in(0)->is_Initialize(), "should be initialize");
+        for (SimpleDUIterator i(ctrl); i.has_next(); i.next()) {
+          Node* use = i.get();
+          assert(use == copy || use == iff || use == curr || use->is_CheckCastPP() || use->is_Load(),
+                 err_msg_res("unexpected user: %s", use->Name()));
+        }
+#endif // ASSERT
+      }
+    }
+  }
+
+#ifndef PRODUCT
+  if (PrintOptimizeStringConcat) {
+    tty->print("fusion has correct memory flow for ");
+    _begin->jvms()->dump_spec(tty); tty->cr();
+    tty->cr();
+  }
+#endif
+  return true;
+}
+
 bool StringConcat::validate_control_flow() {
   // We found all the calls and arguments now lets see if it's
   // safe to transform the graph as we would expect.
@@ -753,7 +899,7 @@
     }
   }
 
-  // Skip backwards through the control checking for unexpected contro flow
+  // Skip backwards through the control checking for unexpected control flow
   Node* ptr = _end;
   bool fail = false;
   while (ptr != _begin) {
@@ -936,7 +1082,7 @@
   if (PrintOptimizeStringConcat && !fail) {
     ttyLocker ttyl;
     tty->cr();
-    tty->print("fusion would succeed (%d %d) for ", null_check_count, _uncommon_traps.size());
+    tty->print("fusion has correct control flow (%d %d) for ", null_check_count, _uncommon_traps.size());
     _begin->jvms()->dump_spec(tty); tty->cr();
     for (int i = 0; i < num_arguments(); i++) {
       argument(i)->dump();
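validate_mem_flow enforces one invariant between the calls of a fused concat: a MergeMem on a call's memory input may only carry top in its non-bottom slices, and following the bottom slice must end at a projection of a call the fusion already knows about. A toy walk over a simplified memory graph; slices[0] stands in for Compile::AliasIdxBot and the node layout is illustrative:

    #include <set>
    #include <vector>

    struct MNode {
        bool is_merge = false;
        std::vector<MNode*> slices;      // slices[0] models the AliasIdxBot slice
        MNode* producing_call = nullptr; // set on memory projections of calls
    };

    static bool only_bottom_memory(const MNode* mem, const MNode* top,
                                   const std::set<const MNode*>& known_calls) {
        while (mem->is_merge) {
            // Any live non-bottom slice is an unexpected side effect.
            for (size_t i = 1; i < mem->slices.size(); i++) {
                if (mem->slices[i] != top) return false;
            }
            mem = mem->slices[0];  // keep following bottom memory
        }
        // Memory must come straight from a call this fusion already tracks.
        return mem->producing_call != nullptr &&
               known_calls.count(mem->producing_call) != 0;
    }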
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -5059,6 +5059,7 @@
 void TestReserveMemorySpecial_test();
 void TestVirtualSpace_test();
 void TestMetaspaceAux_test();
+void TestMetachunk_test();
 #if INCLUDE_ALL_GCS
 void TestG1BiasedArray_test();
 #endif
@@ -5070,6 +5071,7 @@
     run_unit_test(TestReserveMemorySpecial_test());
     run_unit_test(TestVirtualSpace_test());
     run_unit_test(TestMetaspaceAux_test());
+    run_unit_test(TestMetachunk_test());
     run_unit_test(GlobalDefinitions::test_globals());
     run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -2694,8 +2694,9 @@
       FLAG_SET_CMDLINE(uintx, MaxHeapSize, (uintx)long_max_heap_size);
     // Xmaxf
     } else if (match_option(option, "-Xmaxf", &tail)) {
-      int maxf = (int)(atof(tail) * 100);
-      if (maxf < 0 || maxf > 100) {
+      char* err;
+      int maxf = (int)(strtod(tail, &err) * 100);
+      if (*err != '\0' || maxf < 0 || maxf > 100) {
         jio_fprintf(defaultStream::error_stream(),
                     "Bad max heap free percentage size: %s\n",
                     option->optionString);
@@ -2705,8 +2706,9 @@
       }
     // Xminf
     } else if (match_option(option, "-Xminf", &tail)) {
-      int minf = (int)(atof(tail) * 100);
-      if (minf < 0 || minf > 100) {
+      char* err;
+      int minf = (int)(strtod(tail, &err) * 100);
+      if (*err != '\0' || minf < 0 || minf > 100) {
         jio_fprintf(defaultStream::error_stream(),
                     "Bad min heap free percentage size: %s\n",
                     option->optionString);
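Unlike atof, strtod reports where parsing stopped, so the options above can now reject trailing garbage such as -Xmaxf0.9x instead of silently accepting it. A small standalone version of the validation (the surrounding option plumbing is omitted):

    #include <cstdio>
    #include <cstdlib>

    // Returns the percentage in [0, 100], or -1 for malformed input.
    static int parse_heap_free_percentage(const char* tail) {
        char* err;
        int pct = (int)(strtod(tail, &err) * 100);
        if (*err != '\0' || pct < 0 || pct > 100) {
            return -1;  // trailing characters or out-of-range value
        }
        return pct;
    }

    int main() {
        printf("%d\n", parse_heap_free_percentage("0.4"));   // 40
        printf("%d\n", parse_heap_free_percentage("0.4x"));  // -1; atof would accept
    }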
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1979,13 +1979,6 @@
   develop(uintx, MetadataAllocationFailALotInterval, 1000,                  \
           "Metadata allocation failure a lot interval")                     \
                                                                             \
-  develop(bool, MetaDataDeallocateALot, false,                              \
-          "Deallocation bunches of metadata at intervals controlled by "    \
-          "MetaDataAllocateALotInterval")                                   \
-                                                                            \
-  develop(uintx, MetaDataDeallocateALotInterval, 100,                       \
-          "Metadata deallocation alot interval")                            \
-                                                                            \
   develop(bool, TraceMetadataChunkAllocation, false,                        \
           "Trace chunk metadata allocations")                               \
                                                                             \
@@ -2175,7 +2168,7 @@
           "Minimum ratio of young generation/survivor space size")          \
                                                                             \
   product(uintx, InitialSurvivorRatio, 8,                                   \
-          "Initial ratio of eden/survivor space size")                      \
+          "Initial ratio of young generation/survivor space size")          \
                                                                             \
   product(uintx, BaseFootPrintEstimate, 256*M,                              \
           "Estimate of footprint other than Java Heap")                     \
@@ -2677,6 +2670,14 @@
   product(bool, AggressiveOpts, false,                                      \
           "Enable aggressive optimizations - see arguments.cpp")            \
                                                                             \
+  product_pd(uintx, TypeProfileLevel,                                       \
+          "=XY, with Y, Type profiling of arguments at call"                \
+          "          X, Type profiling of return value at call"             \
+          "X and Y in 0->off ; 1->js292 only; 2->all methods")              \
+                                                                            \
+  product(intx, TypeProfileArgsLimit,     2,                                \
+          "max number of call arguments to consider for type profiling")    \
+                                                                            \
   /* statistics */                                                          \
   develop(bool, CountCompiledCalls, false,                                  \
           "Count method invocations")                                       \
@@ -3125,10 +3126,14 @@
           "class pointers are used")                                        \
                                                                             \
   product(uintx, MinHeapFreeRatio,    40,                                   \
-          "The minimum percentage of heap free after GC to avoid expansion")\
+          "The minimum percentage of heap free after GC to avoid expansion."\
+          " For most GCs this applies to the old generation. In G1 it"      \
+          " applies to the whole heap. Not supported by ParallelGC.")       \
                                                                             \
   product(uintx, MaxHeapFreeRatio,    70,                                   \
-          "The maximum percentage of heap free after GC to avoid shrinking")\
+          "The maximum percentage of heap free after GC to avoid shrinking."\
+          " For most GCs this applies to the old generation. In G1 it"      \
+          " applies to the whole heap. Not supported by ParallelGC.")       \
                                                                             \
   product(intx, SoftRefLRUPolicyMSPerMB, 1000,                              \
           "Number of milliseconds per MB of free space in the heap")        \
@@ -3823,7 +3828,6 @@
   product(bool, UseLockedTracing, false,                                    \
           "Use locked-tracing when doing event-based tracing")
 
-
 /*
  *  Macros for factoring of globals
  */
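TypeProfileLevel packs two decimal digits: the ones digit (Y) selects argument profiling and the tens digit (X) selects return-value profiling, with 0 = off, 1 = jsr292 methods only, 2 = all methods. The MethodData accessors declared earlier (profile_arguments_flag(), profile_return_flag()) presumably decode it along these lines; this is a sketch of the digit arithmetic, not the committed implementation:

    static int profile_arguments_flag(unsigned level) { return level % 10; }        // Y digit
    static int profile_return_flag(unsigned level)    { return (level / 10) % 10; } // X digit

    // Example: -XX:TypeProfileLevel=12
    //   arguments (Y=2): profiled for all methods
    //   returns   (X=1): profiled for jsr292 methods only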
--- a/hotspot/src/share/vm/runtime/java.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/java.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -183,6 +183,7 @@
   collected_profiled_methods->sort(&compare_methods);
 
   int count = collected_profiled_methods->length();
+  int total_size = 0;
   if (count > 0) {
     for (int index = 0; index < count; index++) {
       Method* m = collected_profiled_methods->at(index);
@@ -190,10 +191,13 @@
       tty->print_cr("------------------------------------------------------------------------");
       //m->print_name(tty);
       m->print_invocation_count();
+      tty->print_cr("  mdo size: %d bytes", m->method_data()->size_in_bytes());
       tty->cr();
       m->print_codes();
+      total_size += m->method_data()->size_in_bytes();
     }
     tty->print_cr("------------------------------------------------------------------------");
+    tty->print_cr("Total MDO size: %d bytes", total_size);
   }
 }
 
--- a/hotspot/src/share/vm/runtime/signature.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/signature.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -378,6 +378,16 @@
   return result;
 }
 
+int SignatureStream::reference_parameter_count() {
+  int args_count = 0;
+  for ( ; !at_return_type(); next()) {
+    if (is_object()) {
+      args_count++;
+    }
+  }
+  return args_count;
+}
+
 bool SignatureVerifier::is_valid_signature(Symbol* sig) {
   const char* signature = (const char*)sig->bytes();
   ssize_t len = sig->utf8_length();
--- a/hotspot/src/share/vm/runtime/signature.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/signature.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -401,6 +401,9 @@
 
   // return same as_symbol except allocation of new symbols is avoided.
   Symbol* as_symbol_or_null();
+
+  // count the number of references in the signature
+  int reference_parameter_count();
 };
 
 class SignatureVerifier : public StackObj {
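reference_parameter_count() makes a single pass over the signature and counts the parameters that carry references. In descriptor terms those are class types (Lpkg/Name;) and, assuming SignatureStream::is_object() treats arrays as objects, array types as well. A standalone descriptor scanner with that counting rule:

    #include <cstdio>

    // Counts reference parameters in a JVM method descriptor, e.g.
    // "(ILjava/lang/String;[JD)V" -> 2 (the String and the long[]).
    static int reference_parameter_count(const char* sig) {
        int count = 0;
        const char* p = sig + 1;       // skip '('
        while (*p != ')') {
            if (*p == '[') {           // array parameter: counts as a reference
                count++;
                while (*p == '[') p++; // skip the dimensions
                if (*p == 'L') { while (*p != ';') p++; }
                p++;                   // past the element type (or its ';')
            } else if (*p == 'L') {    // class type
                count++;
                while (*p != ';') p++;
                p++;
            } else {
                p++;                   // primitive
            }
        }
        return count;
    }

    int main() {
        printf("%d\n", reference_parameter_count("(ILjava/lang/String;[JD)V")); // 2
    }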
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1454,7 +1454,6 @@
   _interp_only_mode    = 0;
   _special_runtime_exit_condition = _no_async_condition;
   _pending_async_exception = NULL;
-  _is_compiling = false;
   _thread_stat = NULL;
   _thread_stat = new ThreadStatistics();
   _blocked_on_compilation = false;
@@ -1815,7 +1814,8 @@
     // Call Thread.exit(). We try 3 times in case we got another Thread.stop during
     // the execution of the method. If that is not enough, then we don't really care. Thread.stop
     // is deprecated anyhow.
-    { int count = 3;
+    if (!is_Compiler_thread()) {
+      int count = 3;
       while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) {
         EXCEPTION_MARK;
         JavaValue result(T_VOID);
@@ -1828,7 +1828,6 @@
         CLEAR_PENDING_EXCEPTION;
       }
     }
-
     // notify JVMTI
     if (JvmtiExport::should_post_thread_life()) {
       JvmtiExport::post_thread_end(this);
@@ -3239,6 +3238,7 @@
   _counters = counters;
   _buffer_blob = NULL;
   _scanned_nmethod = NULL;
+  _compiler = NULL;
 
 #ifndef PRODUCT
   _ideal_graph_printer = NULL;
@@ -3255,6 +3255,7 @@
   }
 }
 
+
 // ======= Threads ========
 
 // The Threads class links together all active threads, and provides
@@ -3275,8 +3276,6 @@
 // All JavaThreads
 #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
 
-void os_stream();
-
 // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
 void Threads::threads_do(ThreadClosure* tc) {
   assert_locked_or_safepoint(Threads_lock);
--- a/hotspot/src/share/vm/runtime/thread.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -923,9 +923,6 @@
   volatile address _exception_handler_pc;        // PC for handler of exception
   volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
 
-  // support for compilation
-  bool    _is_compiling;                         // is true if a compilation is active inthis thread (one compilation per thread possible)
-
   // support for JNI critical regions
   jint    _jni_active_critical;                  // count of entries into JNI critical region
 
@@ -1005,10 +1002,6 @@
   // Testers
   virtual bool is_Java_thread() const            { return true;  }
 
-  // compilation
-  void set_is_compiling(bool f)                  { _is_compiling = f; }
-  bool is_compiling() const                      { return _is_compiling; }
-
   // Thread chain operations
   JavaThread* next() const                       { return _next; }
   void set_next(JavaThread* p)                   { _next = p; }
@@ -1283,6 +1276,11 @@
   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
   void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
 
+  void clear_exception_oop_and_pc() {
+    set_exception_oop(NULL);
+    set_exception_pc(NULL);
+  }
+
   // Stack overflow support
   inline size_t stack_available(address cur_sp);
   address stack_yellow_zone_base()
@@ -1811,13 +1809,14 @@
  private:
   CompilerCounters* _counters;
 
-  ciEnv*        _env;
-  CompileLog*   _log;
-  CompileTask*  _task;
-  CompileQueue* _queue;
-  BufferBlob*   _buffer_blob;
+  ciEnv*            _env;
+  CompileLog*       _log;
+  CompileTask*      _task;
+  CompileQueue*     _queue;
+  BufferBlob*       _buffer_blob;
 
-  nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
+  nmethod*          _scanned_nmethod;  // nmethod being scanned by the sweeper
+  AbstractCompiler* _compiler;
 
  public:
 
@@ -1829,14 +1828,17 @@
   // Hide this compiler thread from external view.
   bool is_hidden_from_external_view() const      { return true; }
 
-  CompileQueue* queue()                          { return _queue; }
-  CompilerCounters* counters()                   { return _counters; }
+  void set_compiler(AbstractCompiler* c)         { _compiler = c; }
+  AbstractCompiler* compiler() const             { return _compiler; }
+
+  CompileQueue* queue()        const             { return _queue; }
+  CompilerCounters* counters() const             { return _counters; }
 
   // Get/set the thread's compilation environment.
   ciEnv*        env()                            { return _env; }
   void          set_env(ciEnv* env)              { _env = env; }
 
-  BufferBlob*   get_buffer_blob()                { return _buffer_blob; }
+  BufferBlob*   get_buffer_blob() const          { return _buffer_blob; }
   void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; };
 
   // Get/set the thread's logging information
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -58,7 +58,7 @@
 #include "memory/generation.hpp"
 #include "memory/generationSpec.hpp"
 #include "memory/heap.hpp"
-#include "memory/metablock.hpp"
+#include "memory/metachunk.hpp"
 #include "memory/referenceType.hpp"
 #include "memory/space.hpp"
 #include "memory/tenuredGeneration.hpp"
@@ -917,7 +917,6 @@
   volatile_nonstatic_field(JavaThread,         _exception_oop,                                oop)                                   \
   volatile_nonstatic_field(JavaThread,         _exception_pc,                                 address)                               \
   volatile_nonstatic_field(JavaThread,         _is_method_handle_return,                      int)                                   \
-  nonstatic_field(JavaThread,                  _is_compiling,                                 bool)                                  \
   nonstatic_field(JavaThread,                  _special_runtime_exit_condition,               JavaThread::AsyncRequests)             \
   nonstatic_field(JavaThread,                  _saved_exception_pc,                           address)                               \
    volatile_nonstatic_field(JavaThread,        _thread_state,                                 JavaThreadState)                       \
--- a/hotspot/src/share/vm/services/heapDumper.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/services/heapDumper.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1545,7 +1545,9 @@
 
 // writes a HPROF_GC_CLASS_DUMP record for the given class
 void VM_HeapDumper::do_class_dump(Klass* k) {
-  DumperSupport::dump_class_and_array_classes(writer(), k);
+  if (k->oop_is_instance()) {
+    DumperSupport::dump_class_and_array_classes(writer(), k);
+  }
 }
 
 // writes a HPROF_GC_CLASS_DUMP records for a given basic type
@@ -1722,7 +1724,7 @@
   SymbolTable::symbols_do(&sym_dumper);
 
   // write HPROF_LOAD_CLASS records
-  SystemDictionary::classes_do(&do_load_class);
+  ClassLoaderDataGraph::classes_do(&do_load_class);
   Universe::basic_type_classes_do(&do_load_class);
 
   // write HPROF_FRAME and HPROF_TRACE records
@@ -1733,7 +1735,7 @@
   write_dump_header();
 
   // Writes HPROF_GC_CLASS_DUMP records
-  SystemDictionary::classes_do(&do_class_dump);
+  ClassLoaderDataGraph::classes_do(&do_class_dump);
   Universe::basic_type_classes_do(&do_basic_type_array_class_dump);
   check_segment_length();
 
--- a/hotspot/src/share/vm/services/runtimeService.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/services/runtimeService.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -119,7 +119,7 @@
 #endif /* USDT2 */
 
   // Print the time interval in which the app was executing
-  if (PrintGCApplicationConcurrentTime) {
+  if (PrintGCApplicationConcurrentTime && _app_timer.is_updated()) {
     gclog_or_tty->date_stamp(PrintGCDateStamps);
     gclog_or_tty->stamp(PrintGCTimeStamps);
     gclog_or_tty->print_cr("Application time: %3.7f seconds",
--- a/hotspot/src/share/vm/shark/sharkCompiler.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/shark/sharkCompiler.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -133,11 +133,10 @@
     exit(1);
   }
 
-  execution_engine()->addModule(
-    _native_context->module());
+  execution_engine()->addModule(_native_context->module());
 
   // All done
-  mark_initialized();
+  set_state(initialized);
 }
 
 void SharkCompiler::initialize() {
--- a/hotspot/src/share/vm/shark/sharkCompiler.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/shark/sharkCompiler.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -50,10 +50,6 @@
     return ! (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form());
   }
 
-  // Customization
-  bool needs_adapters()  { return false; }
-  bool needs_stubs()     { return false; }
-
   // Initialization
   void initialize();
 
--- a/hotspot/src/share/vm/utilities/constantTag.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/constantTag.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -51,7 +51,9 @@
     case JVM_CONSTANT_ClassIndex :
     case JVM_CONSTANT_StringIndex :
     case JVM_CONSTANT_MethodHandle :
+    case JVM_CONSTANT_MethodHandleInError :
     case JVM_CONSTANT_MethodType :
+    case JVM_CONSTANT_MethodTypeInError :
       return T_OBJECT;
     default:
       ShouldNotReachHere();
@@ -60,6 +62,19 @@
 }
 
 
+jbyte constantTag::non_error_value() const {
+  switch (_tag) {
+  case JVM_CONSTANT_UnresolvedClassInError:
+    return JVM_CONSTANT_UnresolvedClass;
+  case JVM_CONSTANT_MethodHandleInError:
+    return JVM_CONSTANT_MethodHandle;
+  case JVM_CONSTANT_MethodTypeInError:
+    return JVM_CONSTANT_MethodType;
+  default:
+    return _tag;
+  }
+}
+
 
 const char* constantTag::internal_name() const {
   switch (_tag) {
--- a/hotspot/src/share/vm/utilities/constantTag.hpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/constantTag.hpp	Fri Oct 18 21:30:42 2013 -0700
@@ -108,7 +108,8 @@
     _tag = tag;
   }
 
-  jbyte value()                      { return _tag; }
+  jbyte value() const                { return _tag; }
+  jbyte non_error_value() const;
 
   BasicType basic_type() const;        // if used with ldc, what kind of value gets pushed?
 
--- a/hotspot/src/share/vm/utilities/ostream.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/ostream.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -465,7 +465,7 @@
 }
 
 // log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
-// in log_name, %p => pipd1234 and
+// in log_name, %p => pid1234 and
 //              %t => YYYY-MM-DD_HH-MM-SS
 static const char* make_log_name(const char* log_name, const char* force_directory) {
   char timestr[32];
@@ -792,7 +792,7 @@
 
 void defaultStream::init_log() {
   // %%% Need a MutexLocker?
-  const char* log_name = LogFile != NULL ? LogFile : "hotspot_pid%p.log";
+  const char* log_name = LogFile != NULL ? LogFile : "hotspot_%p.log";
   const char* try_name = make_log_name(log_name, NULL);
   fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
   if (!file->is_open()) {
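The comment fix above records that %p expands to pid<number>, which also explains the new default: with the old "hotspot_pid%p.log" the expansion would have produced a doubled prefix such as hotspot_pidpid1234.log. A tiny model of the substitution; the real make_log_name also handles %t and manages its buffers differently:

    #include <cstdio>
    #include <string>

    static std::string expand_p(const std::string& name, int pid) {
        std::string out;
        for (size_t i = 0; i < name.size(); i++) {
            if (name[i] == '%' && i + 1 < name.size() && name[i + 1] == 'p') {
                char buf[32];
                snprintf(buf, sizeof(buf), "pid%d", pid);  // %p => pid1234
                out += buf;
                i++;  // skip the 'p'
            } else {
                out += name[i];
            }
        }
        return out;
    }

    int main() {
        printf("%s\n", expand_p("hotspot_%p.log", 1234).c_str());     // hotspot_pid1234.log
        printf("%s\n", expand_p("hotspot_pid%p.log", 1234).c_str());  // hotspot_pidpid1234.log
    }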
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Fri Oct 18 21:30:42 2013 -0700
@@ -1050,7 +1050,7 @@
         FILE* replay_data_file = os::open(fd, "w");
         if (replay_data_file != NULL) {
           fileStream replay_data_stream(replay_data_file, /*need_close=*/true);
-          env->dump_replay_data(&replay_data_stream);
+          env->dump_replay_data_unsafe(&replay_data_stream);
           out.print_raw("#\n# Compiler replay data is saved as:\n# ");
           out.print_raw_cr(buffer);
         } else {
--- a/hotspot/test/TEST.groups	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/test/TEST.groups	Fri Oct 18 21:30:42 2013 -0700
@@ -27,7 +27,7 @@
 # - compact1, compact2, compact3, full JRE, JDK
 #
 # In addition they support testing of the minimal VM on compact1 and compact2.
-# Essentially this defines groups based around the specified API's and VM 
+# Essentially this defines groups based around the specified API's and VM
 # services available in the runtime.
 #
 # The groups are defined hierarchically in two forms:
@@ -44,9 +44,9 @@
 # by listing the top-level test directories.
 #
 # To use a group simply list it on the jtreg command line eg:
-#   jtreg :jdk    
+#   jtreg :jdk
 # runs all tests. While
-#   jtreg :compact2  
+#   jtreg :compact2
 # runs those tests that only require compact1 and compact2 API's.
 #
 
@@ -69,6 +69,7 @@
   runtime/7107135/Test7107135.sh \
   runtime/7158988/FieldMonitor.java \
   runtime/7194254/Test7194254.java \
+  runtime/8026365/InvokeSpecialAnonTest.java \
   runtime/jsig/Test8017498.sh \
   runtime/Metaspace/FragmentMetaspace.java \
   runtime/NMT/BaselineWithParameter.java \
@@ -124,7 +125,7 @@
   compiler/whitebox/IsMethodCompilableTest.java \
   gc/6581734/Test6581734.java \
   gc/7072527/TestFullGCCount.java \
-  gc/7168848/HumongousAlloc.java \
+  gc/g1/TestHumongousAllocInitialMark.java \
   gc/arguments/TestG1HeapRegionSize.java \
   gc/metaspace/TestMetaspaceMemoryPool.java \
   runtime/InternalApi/ThreadCpuTimesDeadlock.java \
@@ -140,7 +141,7 @@
  -:needs_jdk
 
 # Tests that require compact2 API's and a full VM
-#  
+#
 needs_full_vm_compact2 =
 
 # Compact 1 adds full VM tests
--- a/hotspot/test/compiler/8013496/Test8013496.sh	Thu Oct 17 09:40:51 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-#!/bin/sh
-# 
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-# 
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-# 
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-# 
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-# 
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-# 
-#
-# @test
-# @bug 8013496
-# @summary Test checks that the order in which ReversedCodeCacheSize and 
-#          InitialCodeCacheSize are passed to the VM is irrelevant.  
-# @run shell Test8013496.sh
-#
-#
-## some tests require path to find test source dir
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-set -x
-
-${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:ReservedCodeCacheSize=2m -XX:InitialCodeCacheSize=500K -version > 1.out 2>&1
-${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:InitialCodeCacheSize=500K -XX:ReservedCodeCacheSize=2m -version > 2.out 2>&1
-
-diff 1.out 2.out
-
-result=$?
-if [ $result -eq 0 ] ; then  
-  echo "Test Passed"
-  exit 0
-else
-  echo "Test Failed"
-  exit 1
-fi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java	Fri Oct 18 21:30:42 2013 -0700
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8013496
+ * @summary Test checks that the order in which ReservedCodeCacheSize and
+ *          InitialCodeCacheSize are passed to the VM is irrelevant.
+ * @library /testlibrary
+ *
+ */
+import com.oracle.java.testlibrary.*;
+
+public class CheckReservedInitialCodeCacheSizeArgOrder {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb1,  pb2;
+    OutputAnalyzer out1, out2;
+
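+    // Launch two VMs that differ only in the order of the two code cache
+    // flags; their -version output should be identical.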
+    pb1 = ProcessTools.createJavaProcessBuilder("-XX:InitialCodeCacheSize=4m", "-XX:ReservedCodeCacheSize=8m", "-version");
+    pb2 = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=8m", "-XX:InitialCodeCacheSize=4m", "-version");
+
+    out1 = new OutputAnalyzer(pb1.start());
+    out2 = new OutputAnalyzer(pb2.start());
+
+    // Check that the outputs are equal
+    if (out1.getStdout().compareTo(out2.getStdout()) != 0) {
+      throw new RuntimeException("Test failed");
+    }
+
+    out1.shouldHaveExitValue(0);
+    out2.shouldHaveExitValue(0);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/mathexact/RepeatTest.java	Fri Oct 18 21:30:42 2013 -0700
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8025657
+ * @summary Test repeating addExact
+ * @compile RepeatTest.java
+ * @run main RepeatTest
+ *
+ */
+
+
+public class RepeatTest {
+  public static void main(String[] args) {
+    java.util.Random rnd = new java.util.Random();
+    for (int i = 0; i < 50000; ++i) {
+      int x = Integer.MAX_VALUE - 10;
+      int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5);
+
+      int c = rnd.nextInt() / 2;
+      int d = rnd.nextInt() / 2;
+
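+      // x + y overflows int, so every Math.addExact call inside addExact()
+      // throws; the try/catch accumulation yields 5+1+6+2+7+3+8+4 = 36.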
+      int a = addExact(x, y);
+
+      if (a != 36) {
+        throw new RuntimeException("a != 36 : " + a);
+      }
+
+      int b = nonExact(c, d);
+      int n = addExact2(c, d);
+
+
+      if (n != b) {
+        throw new RuntimeException("n != b : " + n + " != " + b);
+      }
+    }
+  }
+
+  public static int addExact2(int x, int y) {
+      int result = 0;
+      result += java.lang.Math.addExact(x, y);
+      result += java.lang.Math.addExact(x, y);
+      result += java.lang.Math.addExact(x, y);
+      result += java.lang.Math.addExact(x, y);
+      return result;
+  }
+
+  public static int addExact(int x, int y) {
+    int result = 0;
+    try {
+        result += 5;
+        result = java.lang.Math.addExact(x, y);
+    } catch (ArithmeticException e) {
+        result += 1;
+    }
+    try {
+        result += 6;
+
+        result += java.lang.Math.addExact(x, y);
+    } catch (ArithmeticException e) {
+        result += 2;
+    }
+    try {
+        result += 7;
+        result += java.lang.Math.addExact(x, y);
+    } catch (ArithmeticException e) {
+        result += 3;
+    }
+    try {
+        result += 8;
+        result += java.lang.Math.addExact(x, y);
+    } catch (ArithmeticException e) {
+        result += 4;
+    }
+    return result;
+  }
+
+  public static int nonExact(int x, int y) {
+    int result = x + y;
+    result += x + y;
+    result += x + y;
+    result += x + y;
+    return result;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java	Fri Oct 18 21:30:42 2013 -0700
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8026124
+ * @summary JavaScript file provoked assertion failure in linkResolver.cpp
+ *
+ * @run main/othervm CreatesInterfaceDotEqualsCallInfo
+ */
+
+public class CreatesInterfaceDotEqualsCallInfo {
+  public static void main(String[] args) throws java.io.IOException {
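+    // Run the Nashorn shell on the companion script; before the fix this
+    // provoked the linkResolver.cpp assertion described in 8026124.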
+    String[] jsargs = { System.getProperty("test.src", ".") +
+                        "/createsInterfaceDotEqualsCallInfo.js" };
+    jdk.nashorn.tools.Shell.main(System.in, System.out, System.err, jsargs);
+    System.out.println("PASS, did not crash running Javascript");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jsr292/createsInterfaceDotEqualsCallInfo.js	Fri Oct 18 21:30:42 2013 -0700
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
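+// Companion script for CreatesInterfaceDotEqualsCallInfo: a simple interface
+// call made from script code was enough to trigger the assertion.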
+var path = new java.io.File("/Users/someone").toPath();
+path.toString();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/startup/SmallCodeCacheStartup.java	Fri Oct 18 21:30:42 2013 -0700
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8023014
+ * @summary Test ensures that there is no crash when compiler initialization fails
+ * @library /testlibrary
+ *
+ */
+import com.oracle.java.testlibrary.*;
+
+public class SmallCodeCacheStartup {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb;
+    OutputAnalyzer out;
+
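+    // 3m of reserved code cache cannot accommodate 64 compiler threads, so
+    // compiler initialization fails; the VM must report it and exit cleanly.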
+    pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m", "-XX:CICompilerCount=64", "-version");
+    out = new OutputAnalyzer(pb.start());
+    out.shouldContain("no space to run compiler");
+    out.shouldHaveExitValue(0);
+  }
+}
--- a/hotspot/test/gc/7168848/HumongousAlloc.java	Thu Oct 17 09:40:51 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test Humongous.java
- * @bug 7168848
- * @summary G1: humongous object allocations should initiate marking cycles when necessary
- * @run main/othervm -Xms100m -Xmx100m -XX:+PrintGC -XX:G1HeapRegionSize=1m -XX:+UseG1GC  HumongousAlloc
- *
- */
-import java.lang.management.GarbageCollectorMXBean;
-import java.lang.management.ManagementFactory;
-import java.util.List;
-
-public class HumongousAlloc {
-
-    public static byte[] dummy;
-    private static int sleepFreq = 40;
-    private static int sleepTime = 1000;
-    private static double size = 0.75;
-    private static int iterations = 50;
-    private static int MB = 1024 * 1024;
-
-    public static void allocate(int size, int sleepTime, int sleepFreq) throws InterruptedException {
-        System.out.println("Will allocate objects of size: " + size
-                + " bytes and sleep for " + sleepTime
-                + " ms after every " + sleepFreq + "th allocation.");
-        int count = 0;
-        while (count < iterations) {
-            for (int i = 0; i < sleepFreq; i++) {
-                dummy = new byte[size - 16];
-            }
-            Thread.sleep(sleepTime);
-            count++;
-        }
-    }
-
-    public static void main(String[] args) throws InterruptedException {
-        allocate((int) (size * MB), sleepTime, sleepFreq);
-        List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
-        for (GarbageCollectorMXBean collector : collectors) {
-            if (collector.getName().contains("G1 Old")) {
-               long count = collector.getCollectionCount();
-                if (count > 0) {
-                    throw new RuntimeException("Failed: FullGCs should not have happened. The number of FullGC run is " + count);
-                }
-                else {
-                    System.out.println("Passed.");
-                }
-            }
-        }
-    }
-}
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestHeapFreeRatio.java	Fri Oct 18 21:30:42 2013 -0700
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestHeapFreeRatio
+ * @key gc
+ * @bug 8025661
+ * @summary Test parsing of -Xminf and -Xmaxf
+ * @library /testlibrary
+ * @run main/othervm TestHeapFreeRatio
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class TestHeapFreeRatio {
+
+  enum Validation {
+    VALID,
+    MIN_INVALID,
+    MAX_INVALID,
+    COMBINATION_INVALID
+  }
+
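+  // -Xminf and -Xmaxf correspond to MinHeapFreeRatio and MaxHeapFreeRatio;
+  // each case below launches a VM and checks how argument parsing reacts.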
+  private static void testMinMaxFreeRatio(String min, String max, Validation type) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+        "-Xminf" + min,
+        "-Xmaxf" + max,
+        "-version");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    switch (type) {
+    case VALID:
+      output.shouldNotContain("Error");
+      output.shouldHaveExitValue(0);
+      break;
+    case MIN_INVALID:
+      output.shouldContain("Bad min heap free percentage size: -Xminf" + min);
+      output.shouldContain("Error");
+      output.shouldHaveExitValue(1);
+      break;
+    case MAX_INVALID:
+      output.shouldContain("Bad max heap free percentage size: -Xmaxf" + max);
+      output.shouldContain("Error");
+      output.shouldHaveExitValue(1);
+      break;
+    case COMBINATION_INVALID:
+      output.shouldContain("must be less than or equal to MaxHeapFreeRatio");
+      output.shouldContain("Error");
+      output.shouldHaveExitValue(1);
+      break;
+    default:
+      throw new IllegalStateException("Must specify expected validation type");
+    }
+
+    System.out.println(output.getOutput());
+  }
+
+  public static void main(String args[]) throws Exception {
+    testMinMaxFreeRatio( "0.1", "0.5", Validation.VALID);
+    testMinMaxFreeRatio(  ".1",  ".5", Validation.VALID);
+    testMinMaxFreeRatio( "0.5", "0.5", Validation.VALID);
+
+    testMinMaxFreeRatio("-0.1", "0.5", Validation.MIN_INVALID);
+    testMinMaxFreeRatio( "1.1", "0.5", Validation.MIN_INVALID);
+    testMinMaxFreeRatio("=0.1", "0.5", Validation.MIN_INVALID);
+    testMinMaxFreeRatio("0.1f", "0.5", Validation.MIN_INVALID);
+    testMinMaxFreeRatio("INVALID", "0.5", Validation.MIN_INVALID);
+    testMinMaxFreeRatio("2147483647", "0.5", Validation.MIN_INVALID);
+
+    testMinMaxFreeRatio( "0.1", "-0.5", Validation.MAX_INVALID);
+    testMinMaxFreeRatio( "0.1",  "1.5", Validation.MAX_INVALID);
+    testMinMaxFreeRatio( "0.1", "0.5f", Validation.MAX_INVALID);
+    testMinMaxFreeRatio( "0.1", "=0.5", Validation.MAX_INVALID);
+    testMinMaxFreeRatio( "0.1", "INVALID", Validation.MAX_INVALID);
+    testMinMaxFreeRatio( "0.1", "2147483647", Validation.MAX_INVALID);
+
+    testMinMaxFreeRatio( "0.5",  "0.1", Validation.COMBINATION_INVALID);
+    testMinMaxFreeRatio(  ".5",  ".10", Validation.COMBINATION_INVALID);
+    testMinMaxFreeRatio("0.12","0.100", Validation.COMBINATION_INVALID);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestHumongousAllocInitialMark.java	Fri Oct 18 21:30:42 2013 -0700
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestHumongousAllocInitialMark
+ * @bug 7168848
+ * @summary G1: humongous object allocations should initiate marking cycles when necessary
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class TestHumongousAllocInitialMark {
+    private static final int heapSize                       = 200; // MB
+    private static final int heapRegionSize                 = 1;   // MB
+    private static final int initiatingHeapOccupancyPercent = 50;  // %
+
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UseG1GC",
+            "-Xms" + heapSize + "m",
+            "-Xmx" + heapSize + "m",
+            "-XX:G1HeapRegionSize=" + heapRegionSize + "m",
+            "-XX:InitiatingHeapOccupancyPercent=" + initiatingHeapOccupancyPercent,
+            "-XX:+PrintGC",
+            HumongousObjectAllocator.class.getName());
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("GC pause (G1 Humongous Allocation) (young) (initial-mark)");
+        output.shouldNotContain("Full GC");
+        output.shouldHaveExitValue(0);
+    }
+
+    static class HumongousObjectAllocator {
+        private static byte[] dummy;
+
+        public static void main(String [] args) throws Exception {
+            // Make object size 75% of region size
+            final int humongousObjectSize =
+                (int)(heapRegionSize * 1024 * 1024 * 0.75);
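+
+            // G1 classifies objects of at least half a region size as
+            // humongous, so 75% of a region guarantees a humongous allocation.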
+
+            // Number of objects to allocate to go above IHOP
+            final int humongousObjectAllocations =
+                (int)((heapSize * initiatingHeapOccupancyPercent / 100.0) / heapRegionSize) + 1;
+
+            // Allocate
+            for (int i = 1; i <= humongousObjectAllocations; i++) {
+                System.out.println("Allocating humongous object " + i + "/" + humongousObjectAllocations +
+                                   " of size " + humongousObjectSize + " bytes");
+                dummy = new byte[humongousObjectSize];
+            }
+        }
+    }
+}
+
--- a/hotspot/test/gc/startup_warnings/TestCMS.java	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/test/gc/startup_warnings/TestCMS.java	Fri Oct 18 21:30:42 2013 -0700
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/hotspot/test/gc/startup_warnings/TestCMSNoIncrementalMode.java	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/test/gc/startup_warnings/TestCMSNoIncrementalMode.java	Fri Oct 18 21:30:42 2013 -0700
@@ -37,7 +37,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:-CMSIncrementalMode", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/hotspot/test/gc/startup_warnings/TestG1.java	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/test/gc/startup_warnings/TestG1.java	Fri Oct 18 21:30:42 2013 -0700
@@ -37,7 +37,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/hotspot/test/gc/startup_warnings/TestParNewCMS.java	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/test/gc/startup_warnings/TestParNewCMS.java	Fri Oct 18 21:30:42 2013 -0700
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-XX:+UseConcMarkSweepGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/hotspot/test/gc/startup_warnings/TestParallelGC.java	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/test/gc/startup_warnings/TestParallelGC.java	Fri Oct 18 21:30:42 2013 -0700
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParallelGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/hotspot/test/gc/startup_warnings/TestParallelScavengeSerialOld.java	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/test/gc/startup_warnings/TestParallelScavengeSerialOld.java	Fri Oct 18 21:30:42 2013 -0700
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParallelGC", "-XX:-UseParallelOldGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/hotspot/test/gc/startup_warnings/TestSerialGC.java	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/test/gc/startup_warnings/TestSerialGC.java	Fri Oct 18 21:30:42 2013 -0700
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseSerialGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/hotspot/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Fri Oct 18 21:30:42 2013 -0700
@@ -59,7 +59,7 @@
         // If we are on MacOSX, test if JMap tool is signed, otherwise return
         // since test will fail with privilege error.
         if (Platform.isOSX()) {
-            String jmapToolPath = JDKToolFinder.getCurrentJDKTool("jmap");
+            String jmapToolPath = JDKToolFinder.getTestJDKTool("jmap");
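+            // Look up jmap in the JDK under test (not the compile JDK) so the
+            // codesign check matches the binary launched later in the test.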
             ProcessBuilder codesignProcessBuilder = new ProcessBuilder(
                     "codesign", "-v", jmapToolPath);
             Process codesignProcess = codesignProcessBuilder.start();
@@ -107,7 +107,7 @@
             System.out.println("Extracted pid: " + pid);
 
             JDKToolLauncher jMapLauncher = JDKToolLauncher
-                    .create("jmap", false);
+                    .createUsingTestJDK("jmap");
             jMapLauncher.addToolArg("-dump:format=b,file=" + pid + "-"
                     + HEAP_DUMP_FILE_NAME);
             jMapLauncher.addToolArg(String.valueOf(pid));
--- a/hotspot/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java	Thu Oct 17 09:40:51 2013 -0700
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java	Fri Oct 18 21:30:42 2013 -0700
@@ -56,7 +56,7 @@
         if (useCompilerJDK) {
             executable = JDKToolFinder.getJDKTool(tool);
         } else {
-            executable = JDKToolFinder.getCurrentJDKTool(tool);
+            executable = JDKToolFinder.getTestJDKTool(tool);
         }
         vmArgs.addAll(Arrays.asList(ProcessTools.getPlatformSpecificVMArgs()));
     }
@@ -74,17 +74,15 @@
     }
 
     /**
-     * Creates a new JDKToolLauncher for the specified tool.
+     * Creates a new JDKToolLauncher for the specified tool in the tested JDK.
      *
      * @param tool
      *            The name of the tool
-     * @param useCompilerPath
-     *            If true use the compiler JDK path, otherwise use the tested
-     *            JDK path.
+     *
      * @return A new JDKToolLauncher
      */
-    public static JDKToolLauncher create(String tool, boolean useCompilerJDK) {
-        return new JDKToolLauncher(tool, useCompilerJDK);
+    public static JDKToolLauncher createUsingTestJDK(String tool) {
+        return new JDKToolLauncher(tool, false);
     }
 
     /**