Merge
author lana
date Sat, 28 Jan 2012 20:41:27 -0800
changeset 11688:092073c222f1
parent 11687:f13cadbb0bb5 (current diff)
parent 11616:7d3720d8c595 (diff)
child 11689:ffbfda5c3886
Merge
hotspot/src/os/bsd/vm/decoder_bsd.cpp
--- a/.hgtags	Sat Jan 28 10:46:46 2012 -0800
+++ b/.hgtags	Sat Jan 28 20:41:27 2012 -0800
@@ -143,3 +143,5 @@
 0ff7113a0882ec82d642cb9f0297b4e497807ced jdk8-b19
 6561530ea757c3f3a6fb171c9cc7b3885cdeca85 jdk8-b20
 b3a426170188f52981cf4573a2f14d487fddab0d jdk8-b21
+e8f03541af27e38aafb619b96863e17f65ffe53b jdk8-b22
+498124337041ad53cbaa7eb110f3d7acd6d4eac4 jdk8-b23
--- a/.hgtags-top-repo	Sat Jan 28 10:46:46 2012 -0800
+++ b/.hgtags-top-repo	Sat Jan 28 20:41:27 2012 -0800
@@ -143,3 +143,5 @@
 237bc29afbfc6f56a4fe4a6008e2befb59c44bac jdk8-b19
 5a5eaf6374bcbe23530899579fed17a05b7705f3 jdk8-b20
 cc771d92284f71765eca14d6d08703c4af254c04 jdk8-b21
+7ad075c809952e355d25030605da6af30456ed74 jdk8-b22
+60d6f64a86b1e511169d264727f6d51415978df0 jdk8-b23
--- a/corba/.hgtags	Sat Jan 28 10:46:46 2012 -0800
+++ b/corba/.hgtags	Sat Jan 28 20:41:27 2012 -0800
@@ -143,3 +143,5 @@
 e1366c5d84ef984095a332bcee70b3938232d07d jdk8-b19
 51d8b6cb18c0978ecfa4f33e1537d35ee01b69fa jdk8-b20
 f157fc2a71a38ce44007a6f18d5b011824dce705 jdk8-b21
+a11d0062c445d5f36651c78650ab88aa594bcbff jdk8-b22
+5218eb256658442b62b05295aafa5b5f35252972 jdk8-b23
--- a/hotspot/.hgtags	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/.hgtags	Sat Jan 28 20:41:27 2012 -0800
@@ -211,3 +211,7 @@
 fe2c8764998112b7fefcd7d41599714813ae4327 jdk8-b20
 9952d1c439d64c5fd4ad1236a63a62bd5a49d4c3 jdk8-b21
 513351373923f74a7c91755748b95c9771e59f96 hs23-b10
+24727fb37561779077fdfa5a33342246f20e5c0f jdk8-b22
+dcc292399a39113957eebbd3e487b7e05e2c79fc hs23-b11
+e850d8e7ea54b91c7aa656e297f0f9f38dd4c296 jdk8-b23
+9e177d44b10fe92ecffa965fef9c5ac5433c1b46 hs23-b12
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,8 +49,12 @@
     static private long g1CommittedFieldOffset;
     // size_t _summary_bytes_used;
     static private CIntegerField summaryBytesUsedField;
-    // G1MonitoringSupport* _g1mm
+    // G1MonitoringSupport* _g1mm;
     static private AddressField g1mmField;
+    // MasterOldRegionSet _old_set;
+    static private long oldSetFieldOffset;
+    // MasterHumongousRegionSet _humongous_set;
+    static private long humongousSetFieldOffset;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -67,12 +71,14 @@
         g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
         summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
         g1mmField = type.getAddressField("_g1mm");
+        oldSetFieldOffset = type.getField("_old_set").getOffset();
+        humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
     }
 
     public long capacity() {
         Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
-        MemRegion g1_committed = new MemRegion(g1CommittedAddr);
-        return g1_committed.byteSize();
+        MemRegion g1Committed = new MemRegion(g1CommittedAddr);
+        return g1Committed.byteSize();
     }
 
     public long used() {
@@ -94,6 +100,18 @@
         return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
     }
 
+    public HeapRegionSetBase oldSet() {
+        Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
+        return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
+                                                             oldSetAddr);
+    }
+
+    public HeapRegionSetBase humongousSet() {
+        Address humongousSetAddr = addr.addOffsetTo(humongousSetFieldOffset);
+        return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
+                                                             humongousSetAddr);
+    }
+
     private Iterator<HeapRegion> heapRegionIterator() {
         return hrs().heapRegionIterator();
     }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1MonitoringSupport.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1MonitoringSupport.java	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,6 +77,10 @@
         return edenUsedField.getValue(addr);
     }
 
+    public long edenRegionNum() {
+        return edenUsed() / HeapRegion.grainBytes();
+    }
+
     public long survivorCommitted() {
         return survivorCommittedField.getValue(addr);
     }
@@ -85,6 +89,10 @@
         return survivorUsedField.getValue(addr);
     }
 
+    public long survivorRegionNum() {
+        return survivorUsed() / HeapRegion.grainBytes();
+    }
+
     public long oldCommitted() {
         return oldCommittedField.getValue(addr);
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegionSetBase. Represents a group of regions.
+
+public class HeapRegionSetBase extends VMObject {
+    // size_t _length;
+    static private CIntegerField lengthField;
+    // size_t _region_num;
+    static private CIntegerField regionNumField;
+    // size_t _total_used_bytes;
+    static private CIntegerField totalUsedBytesField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegionSetBase");
+
+        lengthField         = type.getCIntegerField("_length");
+        regionNumField      = type.getCIntegerField("_region_num");
+        totalUsedBytesField = type.getCIntegerField("_total_used_bytes");
+    }
+
+    public long length() {
+        return lengthField.getValue(addr);
+    }
+
+    public long regionNum() {
+        return regionNumField.getValue(addr);
+    }
+
+    public long totalUsedBytes() {
+        return totalUsedBytesField.getValue(addr);
+    }
+
+    public HeapRegionSetBase(Address addr) {
+        super(addr);
+    }
+}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,6 +67,7 @@
       printValue("SurvivorRatio    = ", getFlagValue("SurvivorRatio", flagMap));
       printValMB("PermSize         = ", getFlagValue("PermSize", flagMap));
       printValMB("MaxPermSize      = ", getFlagValue("MaxPermSize", flagMap));
+      printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes());
 
       System.out.println();
       System.out.println("Heap Usage:");
@@ -100,11 +101,20 @@
          } else if (sharedHeap instanceof G1CollectedHeap) {
              G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
              G1MonitoringSupport g1mm = g1h.g1mm();
-             System.out.println("G1 Young Generation");
-             printG1Space("Eden Space:", g1mm.edenUsed(), g1mm.edenCommitted());
-             printG1Space("From Space:", g1mm.survivorUsed(), g1mm.survivorCommitted());
-             printG1Space("To Space:", 0, 0);
-             printG1Space("G1 Old Generation", g1mm.oldUsed(), g1mm.oldCommitted());
+             long edenRegionNum = g1mm.edenRegionNum();
+             long survivorRegionNum = g1mm.survivorRegionNum();
+             HeapRegionSetBase oldSet = g1h.oldSet();
+             HeapRegionSetBase humongousSet = g1h.humongousSet();
+             long oldRegionNum = oldSet.regionNum() + humongousSet.regionNum();
+             printG1Space("G1 Heap:", g1h.n_regions(),
+                          g1h.used(), g1h.capacity());
+             System.out.println("G1 Young Generation:");
+             printG1Space("Eden Space:", edenRegionNum,
+                          g1mm.edenUsed(), g1mm.edenCommitted());
+             printG1Space("Survivor Space:", survivorRegionNum,
+                          g1mm.survivorUsed(), g1mm.survivorCommitted());
+             printG1Space("G1 Old Generation:", oldRegionNum,
+                          g1mm.oldUsed(), g1mm.oldCommitted());
          } else {
              throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
          }
@@ -216,9 +226,11 @@
       System.out.println(alignment +  (double)space.used() * 100.0 / space.capacity() + "% used");
    }
 
-   private void printG1Space(String spaceName, long used, long capacity) {
+   private void printG1Space(String spaceName, long regionNum,
+                             long used, long capacity) {
       long free = capacity - used;
       System.out.println(spaceName);
+      printValue("regions  = ", regionNum);
       printValMB("capacity = ", capacity);
       printValMB("used     = ", used);
       printValMB("free     = ", free);
--- a/hotspot/make/Makefile	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/Makefile	Sat Jan 28 20:41:27 2012 -0800
@@ -367,7 +367,7 @@
 $(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
 	$(install-file)
 
-# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
+# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h, jfr.h)
 $(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/%
 	$(install-file)
 
@@ -384,6 +384,16 @@
 $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/services/%
 	$(install-file)
 
+JFR_EXISTS=$(shell if [ -d $(HS_ALT_SRC) ]; then echo 1; else echo 0; fi)
+# export jfr.h
+ifeq ($(JFR_EXISTS),1)
+$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/agent/%
+	$(install-file)
+else
+$(EXPORT_INCLUDE_DIR)/jfr.h:
+	
+endif
+
 # Doc files (jvmti.html)
 $(EXPORT_DOCS_DIR)/platform/jvmti/%: $(DOCS_DIR)/%
 	$(install-file)
--- a/hotspot/make/bsd/Makefile	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/bsd/Makefile	Sat Jan 28 20:41:27 2012 -0800
@@ -208,7 +208,7 @@
 TARGETS_SHARK     = $(addsuffix shark,$(TARGETS))
 
 BUILDTREE_MAKE    = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
-BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
+BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) LIBRARY_SUFFIX=$(LIBRARY_SUFFIX)
 BUILDTREE_VARS   += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
 
 BUILDTREE         = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
--- a/hotspot/make/bsd/makefiles/buildtree.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/bsd/makefiles/buildtree.make	Sat Jan 28 20:41:27 2012 -0800
@@ -162,20 +162,6 @@
   endif
 endif
 
-ifeq ($(OS_VENDOR), Darwin)
-  # MACOSX FIXME: we should be able to run test_gamma (see MACOSX_PORT-214)
-  ifeq ($(ALWAYS_PASS_TEST_GAMMA),)
-    # ALWAYS_PASS_TEST_GAMMA wasn't set so we default to true on MacOS X
-    # until MACOSX_PORT-214 is fixed
-    ALWAYS_PASS_TEST_GAMMA=true
-  endif
-endif
-ifeq ($(ALWAYS_PASS_TEST_GAMMA), true)
-  TEST_GAMMA_STATUS= echo 'exit 0';
-else
-  TEST_GAMMA_STATUS=
-endif
-
 BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION=  JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
 
 BUILDTREE	= \
@@ -353,12 +339,10 @@
 	$(BUILDTREE_COMMENT); \
 	[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
 	{ \
-	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
-	echo "DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
 	echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
 	} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
 	echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
-	echo "export JAVA_HOME LD_LIBRARY_PATH DYLD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
+	echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \
 	) > $@
 
 env.csh: env.sh
@@ -412,7 +396,7 @@
 JAVA_FLAG/64 = -d64
 
 WRONG_DATA_MODE_MSG = \
-	echo "JAVA_HOME must point to $(DATA_MODE)bit JDK."
+	echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
 
 CROSS_COMPILING_MSG = \
 	echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
@@ -420,20 +404,78 @@
 test_gamma:  $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
-	echo '#!/bin/sh'; \
+	echo "#!/bin/sh"; \
+	echo ""; \
 	$(BUILDTREE_COMMENT); \
-	echo '. ./env.sh'; \
-	echo "if [ \"$(CROSS_COMPILE_ARCH)\" != \"\" ]; then { $(CROSS_COMPILING_MSG); exit 0; }; fi"; \
-	echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \
-	echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \
-	echo "then"; \
-	echo "  $(WRONG_DATA_MODE_MSG); exit 0;"; \
+	echo ""; \
+	echo "# Include environment settings for gamma run"; \
+	echo ""; \
+	echo ". ./env.sh"; \
+	echo ""; \
+	echo "# Do not run gamma test for cross compiles"; \
+	echo ""; \
+	echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
+	echo "  $(CROSS_COMPILING_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
+	echo ""; \
+	echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
+	echo "  $(NO_JAVA_HOME_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Check JAVA_HOME version to be used for the test"; \
+	echo ""; \
+	echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
+	echo "if [ \$$? -ne 0 ]; then "; \
+	echo "  $(WRONG_DATA_MODE_MSG)"; \
+	echo "  exit 0"; \
 	echo "fi"; \
+	echo ""; \
+	echo "# Use gamma_g if it exists"; \
+	echo ""; \
+	echo "GAMMA_PROG=gamma"; \
+	echo "if [ -f gamma_g ]; then "; \
+	echo "  GAMMA_PROG=gamma_g"; \
+	echo "fi"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  # Ensure architecture for gamma and JAVA_HOME is the same."; \
+	echo "  # NOTE: gamma assumes the OpenJDK directory layout."; \
+	echo ""; \
+	echo "  GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
+	echo "  JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  if [ ! -f \$${JVM_LIB} ]; then"; \
+	echo "    JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  fi"; \
+	echo "  if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
+	echo "    $(WRONG_DATA_MODE_MSG)"; \
+	echo "    exit 0"; \
+	echo "  fi"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Compile Queens program for test"; \
+	echo ""; \
 	echo "rm -f Queens.class"; \
 	echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
-	echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \
-	echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \
-	$(TEST_GAMMA_STATUS) \
+	echo ""; \
+	echo "# Set library path solely for gamma launcher test run"; \
+	echo ""; \
+	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "export LD_LIBRARY_PATH"; \
+	echo "unset LD_LIBRARY_PATH_32"; \
+	echo "unset LD_LIBRARY_PATH_64"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "  export DYLD_LIBRARY_PATH"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
+	echo ""; \
+	echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
 	) > $@
 	$(QUIETLY) chmod +x $@
 
--- a/hotspot/make/bsd/makefiles/defs.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/bsd/makefiles/defs.make	Sat Jan 28 20:41:27 2012 -0800
@@ -142,6 +142,7 @@
 # client and server subdirectories have symbolic links to ../libjsig.so
 EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
 
 ifndef BUILD_CLIENT_ONLY
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
@@ -150,7 +151,6 @@
 
 ifneq ($(ZERO_BUILD), true)
   ifeq ($(ARCH_DATA_MODEL), 32)
-    EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
   endif
--- a/hotspot/make/bsd/makefiles/launcher.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/bsd/makefiles/launcher.make	Sat Jan 28 20:41:27 2012 -0800
@@ -50,7 +50,24 @@
   LIBS_LAUNCHER             += $(STATIC_STDCXX) $(LIBS)
 else
   LAUNCHER.o                 = launcher.o
-  LFLAGS_LAUNCHER           += -L`pwd`
+  LFLAGS_LAUNCHER           += -L`pwd` 
+
+  # The gamma launcher runs the JDK from $JAVA_HOME, overriding the JVM with a
+  # freshly built JVM at ./libjvm.{so|dylib}.  This is accomplished by setting 
+  # the library searchpath using ({DY}LD_LIBRARY_PATH) to find the local JVM 
+  # first.  Gamma dlopen()s libjava from $JAVA_HOME/jre/lib{/$arch}, which is
+  # statically linked with CoreFoundation framework libs. Unfortunately, gamma's
+  # unique searchpath results in some unresolved symbols in the framework 
+  # libraries, because JDK libraries are inadvertently discovered first on the
+  # searchpath, e.g. libjpeg.  On Mac OS X, filenames are case *insensitive*.
+  # So, the actual filename collision is libjpeg.dylib and libJPEG.dylib.
+  # To resolve this, gamma needs to also statically link with the CoreFoundation 
+  # framework libraries.
+
+  ifeq ($(OS_VENDOR),Darwin)
+    LFLAGS_LAUNCHER         += -framework CoreFoundation 
+  endif
+
   LIBS_LAUNCHER             += -l$(JVM) $(LIBS)
 endif
 
--- a/hotspot/make/bsd/makefiles/vm.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/bsd/makefiles/vm.make	Sat Jan 28 20:41:27 2012 -0800
@@ -96,6 +96,10 @@
 CPPFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\""
 endif
 
+ifndef JAVASE_EMBEDDED
+CFLAGS += -DINCLUDE_TRACE
+endif
+
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 CFLAGS += $(CFLAGS_WARN/BYFILE)
 
@@ -147,6 +151,12 @@
 SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
 
+ifndef JAVASE_EMBEDDED
+SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
+  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
+  fi)
+endif
+
 CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
 CORE_PATHS+=$(GENERATED)/jvmtifiles
 
@@ -327,8 +337,8 @@
 $(LIBJVM).dSYM: $(LIBJVM)
 	dsymutil $(LIBJVM)
 
-# no launcher or libjvm_db for macosx
-build: $(LIBJVM) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM
+# no libjvm_db for macosx
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM
 	echo "Doing vm.make build:"
 else
 build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
--- a/hotspot/make/defs.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/defs.make	Sat Jan 28 20:41:27 2012 -0800
@@ -294,3 +294,7 @@
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jni.h
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
+
+ifndef JAVASE_EMBEDDED
+EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
+endif
--- a/hotspot/make/hotspot_version	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/hotspot_version	Sat Jan 28 20:41:27 2012 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=23
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=10
+HS_BUILD_NUMBER=12
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/make/jprt.properties	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/jprt.properties	Sat Jan 28 20:41:27 2012 -0800
@@ -174,6 +174,10 @@
 jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
 
+jprt.my.macosx.x64.jdk8=macosx_x64_10.7
+jprt.my.macosx.x64.jdk7=macosx_x64_10.7
+jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
+
 jprt.my.windows.i586.jdk8=windows_i586_5.1
 jprt.my.windows.i586.jdk7=windows_i586_5.1
 jprt.my.windows.i586.jdk7b107=windows_i586_5.0
@@ -211,6 +215,7 @@
     ${jprt.my.solaris.x64}-{product|fastdebug|debug}, \
     ${jprt.my.linux.i586}-{product|fastdebug|debug}, \
     ${jprt.my.linux.x64}-{product|fastdebug}, \
+    ${jprt.my.macosx.x64}-{product|fastdebug|debug}, \
     ${jprt.my.windows.i586}-{product|fastdebug|debug}, \
     ${jprt.my.windows.x64}-{product|fastdebug|debug}
 
@@ -416,6 +421,30 @@
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
 
+jprt.my.macosx.x64.test.targets = \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_default, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_default, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
+
 jprt.my.windows.i586.test.targets = \
     ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
     ${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
@@ -492,6 +521,7 @@
   ${jprt.my.solaris.x64.test.targets}, \
   ${jprt.my.linux.i586.test.targets}, \
   ${jprt.my.linux.x64.test.targets}, \
+  ${jprt.my.macosx.x64.test.targets}, \
   ${jprt.my.windows.i586.test.targets}, \
   ${jprt.my.windows.x64.test.targets}, \
   ${jprt.test.targets.open}
@@ -538,6 +568,7 @@
   ${jprt.my.solaris.x64}-*-c2-servertest, \
   ${jprt.my.linux.i586}-*-c2-servertest, \
   ${jprt.my.linux.x64}-*-c2-servertest, \
+  ${jprt.my.macosx.x64}-*-c2-servertest, \
   ${jprt.my.windows.i586}-*-c2-servertest, \
   ${jprt.my.windows.x64}-*-c2-servertest
 
@@ -548,6 +579,7 @@
   ${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
   ${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
   ${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
+  ${jprt.my.macosx.x64}-fastdebug-c2-internalvmtests, \
   ${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
   ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
   
--- a/hotspot/make/linux/makefiles/buildtree.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/linux/makefiles/buildtree.make	Sat Jan 28 20:41:27 2012 -0800
@@ -326,11 +326,10 @@
 	$(BUILDTREE_COMMENT); \
 	[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
 	{ \
-	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
 	echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
 	} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
 	echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
-	echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
+	echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \
 	) > $@
 
 env.csh: env.sh
@@ -384,7 +383,7 @@
 JAVA_FLAG/64 = -d64
 
 WRONG_DATA_MODE_MSG = \
-	echo "JAVA_HOME must point to $(DATA_MODE)bit JDK."
+	echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
 
 CROSS_COMPILING_MSG = \
 	echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
@@ -392,19 +391,78 @@
 test_gamma:  $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
-	echo '#!/bin/sh'; \
+	echo "#!/bin/sh"; \
+	echo ""; \
 	$(BUILDTREE_COMMENT); \
-	echo '. ./env.sh'; \
-	echo "if [ \"$(CROSS_COMPILE_ARCH)\" != \"\" ]; then { $(CROSS_COMPILING_MSG); exit 0; }; fi"; \
-	echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \
-	echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \
-	echo "then"; \
-	echo "  $(WRONG_DATA_MODE_MSG); exit 0;"; \
+	echo ""; \
+	echo "# Include environment settings for gamma run"; \
+	echo ""; \
+	echo ". ./env.sh"; \
+	echo ""; \
+	echo "# Do not run gamma test for cross compiles"; \
+	echo ""; \
+	echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
+	echo "  $(CROSS_COMPILING_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
+	echo ""; \
+	echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
+	echo "  $(NO_JAVA_HOME_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Check JAVA_HOME version to be used for the test"; \
+	echo ""; \
+	echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
+	echo "if [ \$$? -ne 0 ]; then "; \
+	echo "  $(WRONG_DATA_MODE_MSG)"; \
+	echo "  exit 0"; \
 	echo "fi"; \
+	echo ""; \
+	echo "# Use gamma_g if it exists"; \
+	echo ""; \
+	echo "GAMMA_PROG=gamma"; \
+	echo "if [ -f gamma_g ]; then "; \
+	echo "  GAMMA_PROG=gamma_g"; \
+	echo "fi"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  # Ensure architecture for gamma and JAVA_HOME is the same."; \
+	echo "  # NOTE: gamma assumes the OpenJDK directory layout."; \
+	echo ""; \
+	echo "  GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
+	echo "  JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  if [ ! -f \$${JVM_LIB} ]; then"; \
+	echo "    JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  fi"; \
+	echo "  if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
+	echo "    $(WRONG_DATA_MODE_MSG)"; \
+	echo "    exit 0"; \
+	echo "  fi"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Compile Queens program for test"; \
+	echo ""; \
 	echo "rm -f Queens.class"; \
 	echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
-	echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \
-	echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \
+	echo ""; \
+	echo "# Set library path solely for gamma launcher test run"; \
+	echo ""; \
+	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "export LD_LIBRARY_PATH"; \
+	echo "unset LD_LIBRARY_PATH_32"; \
+	echo "unset LD_LIBRARY_PATH_64"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "  export DYLD_LIBRARY_PATH"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
+	echo ""; \
+	echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
 	) > $@
 	$(QUIETLY) chmod +x $@
 
--- a/hotspot/make/linux/makefiles/vm.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/linux/makefiles/vm.make	Sat Jan 28 20:41:27 2012 -0800
@@ -98,6 +98,10 @@
   ${JRE_VERSION}     \
   ${VM_DISTRO}
 
+ifndef JAVASE_EMBEDDED
+CFLAGS += -DINCLUDE_TRACE
+endif
+
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 CFLAGS += $(CFLAGS_WARN/BYFILE)
 
@@ -143,6 +147,12 @@
 SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
 
+ifndef JAVASE_EMBEDDED
+SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
+  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
+  fi)
+endif
+
 CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
 CORE_PATHS+=$(GENERATED)/jvmtifiles
 
--- a/hotspot/make/solaris/makefiles/buildtree.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/solaris/makefiles/buildtree.make	Sat Jan 28 20:41:27 2012 -0800
@@ -118,7 +118,7 @@
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
 BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \
-        env.ksh env.csh jdkpath.sh .dbxrc test_gamma
+        env.sh env.csh jdkpath.sh .dbxrc test_gamma
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -313,22 +313,19 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
-env.ksh: $(BUILDTREE_MAKE)
+env.sh: $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
 	$(BUILDTREE_COMMENT); \
 	[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
 	{ \
-	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
-	echo "unset LD_LIBRARY_PATH_32"; \
-	echo "unset LD_LIBRARY_PATH_64"; \
 	echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
 	} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
 	echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
 	echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
 	) > $@
 
-env.csh: env.ksh
+env.csh: env.sh
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
 	$(BUILDTREE_COMMENT); \
@@ -384,23 +381,86 @@
 JAVA_FLAG/64 = -d64
 
 WRONG_DATA_MODE_MSG = \
-	echo "JAVA_HOME must point to $(DATA_MODE)bit JDK."
+	echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
+
+CROSS_COMPILING_MSG = \
+	echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
 
 test_gamma:  $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
-	echo '#!/bin/ksh'; \
+	echo "#!/bin/sh"; \
+	echo ""; \
 	$(BUILDTREE_COMMENT); \
-	echo '. ./env.ksh'; \
-	echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \
-	echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \
-	echo "then"; \
-	echo "  $(WRONG_DATA_MODE_MSG); exit 0;"; \
+	echo ""; \
+	echo "# Include environment settings for gamma run"; \
+	echo ""; \
+	echo ". ./env.sh"; \
+	echo ""; \
+	echo "# Do not run gamma test for cross compiles"; \
+	echo ""; \
+	echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
+	echo "  $(CROSS_COMPILING_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
+	echo ""; \
+	echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
+	echo "  $(NO_JAVA_HOME_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Check JAVA_HOME version to be used for the test"; \
+	echo ""; \
+	echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
+	echo "if [ \$$? -ne 0 ]; then "; \
+	echo "  $(WRONG_DATA_MODE_MSG)"; \
+	echo "  exit 0"; \
 	echo "fi"; \
+	echo ""; \
+	echo "# Use gamma_g if it exists"; \
+	echo ""; \
+	echo "GAMMA_PROG=gamma"; \
+	echo "if [ -f gamma_g ]; then "; \
+	echo "  GAMMA_PROG=gamma_g"; \
+	echo "fi"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  # Ensure architecture for gamma and JAVA_HOME is the same."; \
+	echo "  # NOTE: gamma assumes the OpenJDK directory layout."; \
+	echo ""; \
+	echo "  GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
+	echo "  JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  if [ ! -f \$${JVM_LIB} ]; then"; \
+	echo "    JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  fi"; \
+	echo "  if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
+	echo "    $(WRONG_DATA_MODE_MSG)"; \
+	echo "    exit 0"; \
+	echo "  fi"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Compile Queens program for test"; \
+	echo ""; \
 	echo "rm -f Queens.class"; \
 	echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
-	echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \
-	echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \
+	echo ""; \
+	echo "# Set library path solely for gamma launcher test run"; \
+	echo ""; \
+	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "export LD_LIBRARY_PATH"; \
+	echo "unset LD_LIBRARY_PATH_32"; \
+	echo "unset LD_LIBRARY_PATH_64"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "  export DYLD_LIBRARY_PATH"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
+	echo ""; \
+	echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
 	) > $@
 	$(QUIETLY) chmod +x $@
 
--- a/hotspot/make/solaris/makefiles/vm.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/solaris/makefiles/vm.make	Sat Jan 28 20:41:27 2012 -0800
@@ -93,7 +93,7 @@
 CFLAGS += $(CFLAGS/NOEX)
 
 # Extra flags from gnumake's invocation or environment
-CFLAGS += $(EXTRA_CFLAGS)
+CFLAGS += $(EXTRA_CFLAGS) -DINCLUDE_TRACE
 
 # Math Library (libm.so), do not use -lm.
 #    There might be two versions of libm.so on the build system:
@@ -160,6 +160,10 @@
 SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
 
+SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
+  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
+  fi)
+
 CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
 CORE_PATHS+=$(GENERATED)/jvmtifiles
 
--- a/hotspot/make/windows/build.bat	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/windows/build.bat	Sat Jan 28 20:41:27 2012 -0800
@@ -35,6 +35,8 @@
 if %errorlevel% == 0 goto isia64
 cl 2>&1 | grep "AMD64" >NUL
 if %errorlevel% == 0 goto amd64
+cl 2>&1 | grep "x64" >NUL
+if %errorlevel% == 0 goto amd64
 set ARCH=x86
 set BUILDARCH=i486
 set Platform_arch=x86
--- a/hotspot/make/windows/create_obj_files.sh	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/windows/create_obj_files.sh	Sat Jan 28 20:41:27 2012 -0800
@@ -73,6 +73,13 @@
 
 BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles"
 
+if [ -d "${ALTSRC}/share/vm/jfr" ]; then
+  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent"
+  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent/isolated_deps/util"
+  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/jvm"
+  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
+fi
+
 CORE_PATHS="${BASE_PATHS}"
 # shared is already in BASE_PATHS. Should add vm/memory but that one is also in BASE_PATHS.
 if [ -d "${ALTSRC}/share/vm/gc_implementation" ]; then
--- a/hotspot/make/windows/makefiles/projectcreator.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/windows/makefiles/projectcreator.make	Sat Jan 28 20:41:27 2012 -0800
@@ -58,7 +58,8 @@
         -absoluteInclude $(HOTSPOTBUILDSPACE)/%f/generated \
         -ignorePath $(HOTSPOTBUILDSPACE)/%f/generated \
         -ignorePath src\share\vm\adlc \
-        -ignorePath src\share\vm\shark
+        -ignorePath src\share\vm\shark \
+        -ignorePath posix
 
 # This is referenced externally by both the IDE and batch builds
 ProjectCreatorOptions=
@@ -88,7 +89,7 @@
         -jdkTargetRoot $(HOTSPOTJDKDIST) \
         -define ALIGN_STACK_FRAMES \
         -define VM_LITTLE_ENDIAN \
-        -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \
+        -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	set JAVA_HOME=$(HOTSPOTJDKDIST)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \
         -postbuild "" "Building hotspot.exe..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \
         -ignoreFile jsig.c \
         -ignoreFile jvmtiEnvRecommended.cpp \
--- a/hotspot/make/windows/makefiles/vm.make	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/make/windows/makefiles/vm.make	Sat Jan 28 20:41:27 2012 -0800
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 # Resource file containing VERSIONINFO
@@ -30,7 +30,7 @@
 COMMONSRC=$(WorkSpace)\src
 ALTSRC=$(WorkSpace)\src\closed
 
-!ifdef RELEASE 
+!ifdef RELEASE
 !ifdef DEVELOP
 CPP_FLAGS=$(CPP_FLAGS) /D "DEBUG"
 !else
@@ -74,6 +74,10 @@
 CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
 CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""
 
+!ifndef JAVASE_EMBEDDED
+CPP_FLAGS=$(CPP_FLAGS) /D "INCLUDE_TRACE"
+!endif
+
 CPP_FLAGS=$(CPP_FLAGS) $(CPP_INCLUDE_DIRS)
 
 # Define that so jni.h is on correct side
@@ -97,7 +101,7 @@
 !endif
 
 # If you modify exports below please do the corresponding changes in
-# src/share/tools/ProjectCreator/WinGammaPlatformVC7.java 
+# src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
 LINK_FLAGS=$(LINK_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
   /export:JNI_GetDefaultJavaVMInitArgs       \
   /export:JNI_CreateJavaVM                   \
@@ -170,6 +174,7 @@
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/prims
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/runtime
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/services
+VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/trace
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/utilities
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/libadt
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/os/windows/vm
@@ -177,6 +182,13 @@
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/cpu/$(Platform_arch)/vm
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto
 
+!if exists($(ALTSRC)\share\vm\jfr)
+VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent
+VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent/isolated_deps/util
+VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/jvm
+VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr
+!endif
+
 VM_PATH={$(VM_PATH)}
 
 # Special case files not using precompiled header files.
@@ -263,6 +275,9 @@
 {$(COMMONSRC)\share\vm\services}.cpp.obj::
         $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
 
+{$(COMMONSRC)\share\vm\trace}.cpp.obj::
+        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+
 {$(COMMONSRC)\share\vm\utilities}.cpp.obj::
         $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
 
@@ -340,6 +355,9 @@
 {$(ALTSRC)\share\vm\services}.cpp.obj::
         $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
 
+{$(ALTSRC)\share\vm\trace}.cpp.obj::
+        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+
 {$(ALTSRC)\share\vm\utilities}.cpp.obj::
         $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
 
@@ -371,6 +389,18 @@
 {..\generated\jvmtifiles}.cpp.obj::
         $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
 
+{$(ALTSRC)\share\vm\jfr}.cpp.obj::
+        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+
+{$(ALTSRC)\share\vm\jfr\agent}.cpp.obj::
+        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+
+{$(ALTSRC)\share\vm\jfr\agent\isolated_deps\util}.cpp.obj::
+        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+
+{$(ALTSRC)\share\vm\jfr\jvm}.cpp.obj::
+        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+
 default::
 
 _build_pch_file.obj:
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -391,7 +391,7 @@
   __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
   __ should_not_reach_here();
-  assert(code_offset() - offset <= exception_handler_size, "overflow");
+  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
   __ end_a_stub();
 
   return offset;
@@ -474,8 +474,7 @@
   AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
   __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
   __ delayed()->nop();
-  assert(code_offset() - offset <= deopt_handler_size, "overflow");
-  debug_only(__ stop("should have gone to the caller");)
+  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
   __ end_a_stub();
 
   return offset;
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -69,7 +69,7 @@
 #else
          call_stub_size = 20,
 #endif // _LP64
-         exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(10*4),
-         deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(10*4) };
+         exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
+         deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)  };
 
 #endif // CPU_SPARC_VM_C1_LIRASSEMBLER_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -810,7 +810,7 @@
 }
 
 
-#ifdef ASSERT
+#ifndef PRODUCT
 
 #define DESCRIBE_FP_OFFSET(name) \
   values.describe(frame_no, fp() + frame::name##_offset, #name)
@@ -820,11 +820,19 @@
     values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
   }
 
-  if (is_interpreted_frame()) {
+  if (is_ricochet_frame()) {
+    MethodHandles::RicochetFrame::describe(this, values, frame_no);
+  } else if (is_interpreted_frame()) {
     DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
     DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
     DESCRIBE_FP_OFFSET(interpreter_frame_padding);
     DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);
+
+    // esp, according to Lesp (e.g. not depending on bci), if it seems valid
+    intptr_t* esp = *interpreter_frame_esp_addr();
+    if ((esp >= sp()) && (esp < fp())) {
+      values.describe(-1, esp, "*Lesp");
+    }
   }
 
   if (!is_compiled_frame()) {
@@ -844,4 +852,3 @@
   // unused... but returns fp() to minimize changes introduced by 7087445
   return fp();
 }
-
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,8 @@
 
 inline intptr_t*    frame::sender_sp() const  { return fp(); }
 
+inline intptr_t* frame::real_fp() const { return fp(); }
+
 // Used only in frame::oopmapreg_to_location
 // This return a value in VMRegImpl::slot_size
 inline int frame::pd_oop_map_offset_adjustment() const {
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -177,7 +177,7 @@
   BLOCK_COMMENT("ricochet_blob.bounce");
 
   if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
-  trace_method_handle(_masm, "ricochet_blob.bounce");
+  trace_method_handle(_masm, "return/ricochet_blob.bounce");
 
   __ JMP(L1_continuation, 0);
   __ delayed()->nop();
@@ -268,14 +268,16 @@
 }
 
 // Emit code to verify that FP is pointing at a valid ricochet frame.
-#ifdef ASSERT
+#ifndef PRODUCT
 enum {
   ARG_LIMIT = 255, SLOP = 45,
   // use this parameter for checking for garbage stack movements:
   UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
   // the slop defends against false alarms due to fencepost errors
 };
+#endif
 
+#ifdef ASSERT
 void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
   // The stack should look like this:
   //    ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF]
@@ -1001,31 +1003,142 @@
 }
 
 #ifndef PRODUCT
+void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no)  {
+    RicochetFrame* rf = new RicochetFrame(*fr);
+
+    // ricochet slots (kept in registers for sparc)
+    values.describe(frame_no, rf->register_addr(I5_savedSP), err_msg("exact_sender_sp reg for #%d", frame_no));
+    values.describe(frame_no, rf->register_addr(L5_conversion), err_msg("conversion reg for #%d", frame_no));
+    values.describe(frame_no, rf->register_addr(L4_saved_args_base), err_msg("saved_args_base reg for #%d", frame_no));
+    values.describe(frame_no, rf->register_addr(L3_saved_args_layout), err_msg("saved_args_layout reg for #%d", frame_no));
+    values.describe(frame_no, rf->register_addr(L2_saved_target), err_msg("saved_target reg for #%d", frame_no));
+    values.describe(frame_no, rf->register_addr(L1_continuation), err_msg("continuation reg for #%d", frame_no));
+
+    // relevant ricochet targets (in caller frame)
+    values.describe(-1, rf->saved_args_base(),  err_msg("*saved_args_base for #%d", frame_no));
+    values.describe(-1, (intptr_t *)(STACK_BIAS+(uintptr_t)rf->exact_sender_sp()),  err_msg("*exact_sender_sp+STACK_BIAS for #%d", frame_no));
+}
+#endif // PRODUCT
+
+#ifndef PRODUCT
 extern "C" void print_method_handle(oop mh);
 void trace_method_handle_stub(const char* adaptername,
                               oopDesc* mh,
-                              intptr_t* saved_sp) {
+                              intptr_t* saved_sp,
+                              intptr_t* args,
+                              intptr_t* tracing_fp) {
   bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have mh
-  tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp);
-  if (has_mh)
+
+  tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp, args);
+
+  if (Verbose) {
+    // dumping last frame with frame::describe
+
+    JavaThread* p = JavaThread::active();
+
+    ResourceMark rm;
+    PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
+    FrameValues values;
+
+    // Note: We want to allow trace_method_handle from any call site.
+    // While trace_method_handle creates a frame, it may be entered
+    // without a valid return PC in O7 (e.g. not just after a call).
+    // Walking that frame could lead to failures due to that invalid PC.
+    // => carefully detect that frame when doing the stack walking
+
+    // walk up to the right frame using the "tracing_fp" argument
+    intptr_t* cur_sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
+    frame cur_frame(cur_sp, frame::unpatchable, NULL);
+
+    while (cur_frame.fp() != (intptr_t *)(STACK_BIAS+(uintptr_t)tracing_fp)) {
+      cur_frame = os::get_sender_for_C_frame(&cur_frame);
+    }
+
+    // safely create a frame and call frame::describe
+    intptr_t *dump_sp = cur_frame.sender_sp();
+    intptr_t *dump_fp = cur_frame.link();
+
+    bool walkable = has_mh; // whether the traced frame should be walkable
+
+    // the sender for cur_frame is the caller of trace_method_handle
+    if (walkable) {
+      // The previous definition of walkable may have to be refined
+      // if new call sites cause the next frame constructor to start
+      // failing. Alternatively, frame constructors could be
+      // modified to support the current or future non walkable
+      // frames (but this is more intrusive and is not considered as
+      // part of this RFE, which will instead use a simpler output).
+      frame dump_frame = frame(dump_sp,
+                               cur_frame.sp(), // younger_sp
+                               false); // no adaptation
+      dump_frame.describe(values, 1);
+    } else {
+      // Robust dump for frames which cannot be constructed from sp/younger_sp
+      // Add descriptions without building a Java frame to avoid issues
+      values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
+      values.describe(-1, dump_sp, "sp");
+    }
+
+    bool has_args = has_mh; // whether Gargs is meaningful
+
+    // mark args, if they seem valid (may not be valid for some adapters)
+    if (has_args) {
+      if ((args >= dump_sp) && (args < dump_fp)) {
+        values.describe(-1, args, "*G4_args");
+      }
+    }
+
+    // mark saved_sp, if it seems valid (may not be valid for some adapters)
+    intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp);
+    if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) {
+      values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS");
+    }
+
+    // Note: the unextended_sp may not be correct
+    tty->print_cr("  stack layout:");
+    values.print(p);
+  }
+
+  if (has_mh) {
     print_method_handle(mh);
+  }
 }
+
 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
   if (!TraceMethodHandles)  return;
   BLOCK_COMMENT("trace_method_handle {");
   // save: Gargs, O5_savedSP
-  __ save_frame(16);
+  __ save_frame(16); // need space for saving required FPU state
+
   __ set((intptr_t) adaptername, O0);
   __ mov(G3_method_handle, O1);
   __ mov(I5_savedSP, O2);
+  __ mov(Gargs, O3);
+  __ mov(I6, O4); // frame identifier for safe stack walking
+
+  // Save scratched registers that might be needed. Robustness is more
+  // important than optimizing the saves for this debug only code.
+
+  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
+  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
+  __ stf(FloatRegisterImpl::D, Ftos_d, d_save);
+  // Safely save all globals but G2 (handled by call_VM_leaf) and G7
+  // (OS reserved).
   __ mov(G3_method_handle, L3);
   __ mov(Gargs, L4);
   __ mov(G5_method_type, L5);
-  __ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
+  __ mov(G6, L6);
+  __ mov(G1, L1);
+
+  __ call_VM_leaf(L2 /* for G2 */, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
 
   __ mov(L3, G3_method_handle);
   __ mov(L4, Gargs);
   __ mov(L5, G5_method_type);
+  __ mov(L6, G6);
+  __ mov(L1, G1);
+  __ ldf(FloatRegisterImpl::D, d_save, Ftos_d);
+
   __ restore();
   BLOCK_COMMENT("} trace_method_handle");
 }
@@ -1045,7 +1158,7 @@
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
           // OP_COLLECT_ARGS is below...
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
-         |(!UseRicochetFrames ? 0 :
+         |(
            java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
            ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
            |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
@@ -1250,7 +1363,7 @@
         move_typed_arg(_masm, arg_type, false,
                        prim_value_addr,
                        Address(O0_argslot, 0),
-                       O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
+                      O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
       }
 
       if (direct_to_method) {
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -145,6 +145,8 @@
   }
 
   static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
+
+  static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
 };
 
 // Additional helper methods for MethodHandles code generation:
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -406,7 +406,7 @@
   // search an exception handler (rax: exception oop, rdx: throwing pc)
   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
   __ should_not_reach_here();
-  assert(code_offset() - offset <= exception_handler_size, "overflow");
+  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
   __ end_a_stub();
 
   return offset;
@@ -490,8 +490,7 @@
 
   __ pushptr(here.addr());
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-
-  assert(code_offset() - offset <= deopt_handler_size, "overflow");
+  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
   __ end_a_stub();
 
   return offset;
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -651,13 +651,15 @@
   return &interpreter_frame_tos_address()[index];
 }
 
-#ifdef ASSERT
+#ifndef PRODUCT
 
 #define DESCRIBE_FP_OFFSET(name) \
   values.describe(frame_no, fp() + frame::name##_offset, #name)
 
 void frame::describe_pd(FrameValues& values, int frame_no) {
-  if (is_interpreted_frame()) {
+  if (is_ricochet_frame()) {
+    MethodHandles::RicochetFrame::describe(this, values, frame_no);
+  } else if (is_interpreted_frame()) {
     DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
     DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
     DESCRIBE_FP_OFFSET(interpreter_frame_method);
@@ -667,7 +669,6 @@
     DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
     DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
   }
-
 }
 #endif
 
@@ -675,3 +676,21 @@
   // used to reset the saved FP
   return fp();
 }
+
+intptr_t* frame::real_fp() const {
+  if (_cb != NULL) {
+    // use the frame size if valid
+    int size = _cb->frame_size();
+    if ((size > 0) &&
+        (! is_ricochet_frame())) {
+      // Work-around: ricochet frames are explicitly excluded because the
+      // frame size is not constant for the ricochet blob, yet its
+      // frame_size could not, for some reason, be declared as <= 0.
+      // This potentially confusing size declaration should be fixed in
+      // another CR.
+      return unextended_sp() + size;
+    }
+  }
+  // else rely on fp()
+  assert(! is_compiled_frame(), "unknown compiled frame size");
+  return fp();
+}
--- a/hotspot/src/cpu/x86/vm/frame_x86.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/x86/vm/frame_x86.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -188,6 +188,7 @@
   frame(intptr_t* sp, intptr_t* fp);
 
   // accessors for the instance variables
+  // Note: not necessarily the real 'frame pointer' (see real_fp)
   intptr_t*   fp() const { return _fp; }
 
   inline address* sender_pc_addr() const;
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -279,14 +279,16 @@
 }
 
 // Emit code to verify that RBP is pointing at a valid ricochet frame.
-#ifdef ASSERT
+#ifndef PRODUCT
 enum {
   ARG_LIMIT = 255, SLOP = 4,
   // use this parameter for checking for garbage stack movements:
   UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
   // the slop defends against false alarms due to fencepost errors
 };
+#endif
 
+#ifdef ASSERT
 void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
   // The stack should look like this:
   //    ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args |
@@ -990,6 +992,26 @@
   BLOCK_COMMENT("} move_return_value");
 }
 
+#ifndef PRODUCT
+#define DESCRIBE_RICOCHET_OFFSET(rf, name) \
+  values.describe(frame_no, (intptr_t *) (((uintptr_t)rf) + MethodHandles::RicochetFrame::name##_offset_in_bytes()), #name)
+
+void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no)  {
+    address bp = (address) fr->fp();
+    RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes());
+
+    // ricochet slots
+    DESCRIBE_RICOCHET_OFFSET(rf, exact_sender_sp);
+    DESCRIBE_RICOCHET_OFFSET(rf, conversion);
+    DESCRIBE_RICOCHET_OFFSET(rf, saved_args_base);
+    DESCRIBE_RICOCHET_OFFSET(rf, saved_args_layout);
+    DESCRIBE_RICOCHET_OFFSET(rf, saved_target);
+    DESCRIBE_RICOCHET_OFFSET(rf, continuation);
+
+    // relevant ricochet targets (in caller frame)
+    values.describe(-1, rf->saved_args_base(),  err_msg("*saved_args_base for #%d", frame_no));
+}
+#endif // PRODUCT
 
 #ifndef PRODUCT
 extern "C" void print_method_handle(oop mh);
@@ -1001,11 +1023,12 @@
                               intptr_t* saved_bp) {
   // called as a leaf from native code: do not block the JVM!
   bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have rcx_mh
+
   intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
   intptr_t* base_sp = last_sp;
   typedef MethodHandles::RicochetFrame RicochetFrame;
   RicochetFrame* rfp = (RicochetFrame*)((address)saved_bp - RicochetFrame::sender_link_offset_in_bytes());
-  if (!UseRicochetFrames || Universe::heap()->is_in((address) rfp->saved_args_base())) {
+  if (Universe::heap()->is_in((address) rfp->saved_args_base())) {
     // Probably an interpreter frame.
     base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
   }
@@ -1030,13 +1053,64 @@
     tty->cr();
     if (last_sp != saved_sp && last_sp != NULL)
       tty->print_cr("*** last_sp="PTR_FORMAT, (intptr_t)last_sp);
-    int stack_dump_count = 16;
-    if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
-      stack_dump_count = (int)(saved_bp + 2 - saved_sp);
-    if (stack_dump_count > 64)  stack_dump_count = 48;
-    for (i = 0; i < stack_dump_count; i += 4) {
-      tty->print_cr(" dump at SP[%d] "PTR_FORMAT": "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT,
-                    i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
+
+    {
+      // dumping last frame with frame::describe
+
+      JavaThread* p = JavaThread::active();
+
+      ResourceMark rm;
+      PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
+      FrameValues values;
+
+      // Note: We want to allow trace_method_handle from any call site.
+      // While trace_method_handle creates a frame, it may be entered
+      // without a PC on the stack top (e.g. not just after a call).
+      // Walking that frame could lead to failures due to that invalid PC.
+      // => carefully detect that frame when doing the stack walking
+
+      // Current C frame
+      frame cur_frame = os::current_frame();
+
+      // Robust search of trace_calling_frame (independent of inlining).
+      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
+      assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
+      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
+      while (trace_calling_frame.fp() < saved_regs) {
+        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
+      }
+
+      // safely create a frame and call frame::describe
+      intptr_t *dump_sp = trace_calling_frame.sender_sp();
+      intptr_t *dump_fp = trace_calling_frame.link();
+
+      bool walkable = has_mh; // whether the traced frame should be walkable
+
+      if (walkable) {
+        // The previous definition of walkable may have to be refined
+        // if new call sites cause the next frame constructor to start
+        // failing. Alternatively, frame constructors could be
+        // modified to support the current or future non-walkable
+        // frames (but that is more intrusive and is not considered
+        // part of this RFE, which will instead use a simpler output).
+        frame dump_frame = frame(dump_sp, dump_fp);
+        dump_frame.describe(values, 1);
+      } else {
+        // Stack may not be walkable (invalid PC above FP):
+        // Add descriptions without building a Java frame to avoid issues
+        values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
+        values.describe(-1, dump_sp, "sp for #1");
+      }
+
+      // mark saved_sp, if it seems valid
+      if (has_mh) {
+        if ((saved_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (saved_sp < dump_fp)) {
+          values.describe(-1, saved_sp, "*saved_sp");
+        }
+      }
+
+      tty->print_cr("  stack layout:");
+      values.print(p);
     }
     if (has_mh)
       print_method_handle(mh);
@@ -1066,26 +1140,49 @@
 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
   if (!TraceMethodHandles)  return;
   BLOCK_COMMENT("trace_method_handle {");
-  __ push(rax);
-  __ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp
+  __ enter();
+  __ andptr(rsp, -16); // align stack if needed for FPU state
   __ pusha();
-  __ mov(rbx, rsp);
-  __ enter();
+  __ mov(rbx, rsp); // for retrieving saved_regs
+  // Note: saved_regs must be in the entered frame for the
+  // robust stack walking implemented in trace_method_handle_stub.
+
+  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
+  __ increment(rsp, -2 * wordSize);
+  if (UseSSE >= 2) {
+    __ movdbl(Address(rsp, 0), xmm0);
+  } else if (UseSSE == 1) {
+    __ movflt(Address(rsp, 0), xmm0);
+  } else {
+    __ fst_d(Address(rsp, 0));
+  }
+
   // incoming state:
   // rcx: method handle
   // r13 or rsi: saved sp
   // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead.
+  // Note: fix the increment below if pushing more arguments
   __ push(rbp);               // saved_bp
-  __ push(rsi);               // saved_sp
-  __ push(rax);               // entry_sp
+  __ push(saved_last_sp_register()); // saved_sp
+  __ push(rbp);               // entry_sp (with extra align space)
   __ push(rbx);               // pusha saved_regs
   __ push(rcx);               // mh
-  __ push(rcx);               // adaptername
+  __ push(rcx);               // slot for adaptername
   __ movptr(Address(rsp, 0), (intptr_t) adaptername);
   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
-  __ leave();
+  __ increment(rsp, 6 * wordSize); // MethodHandleStubArguments
+
+  if (UseSSE >= 2) {
+    __ movdbl(xmm0, Address(rsp, 0));
+  } else if (UseSSE == 1) {
+    __ movflt(xmm0, Address(rsp, 0));
+  } else {
+    __ fld_d(Address(rsp, 0));
+  }
+  __ increment(rsp, 2 * wordSize);
+
   __ popa();
-  __ pop(rax);
+  __ leave();
   BLOCK_COMMENT("} trace_method_handle");
 }
 #endif //PRODUCT
@@ -1104,7 +1201,7 @@
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
           //OP_COLLECT_ARGS is below...
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
-         |(!UseRicochetFrames ? 0 :
+         |(
            java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
            ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
            |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -224,6 +224,8 @@
   }
 
   static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
+
+  static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
 };
 
 // Additional helper methods for MethodHandles code generation:
--- a/hotspot/src/cpu/zero/vm/frame_zero.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/frame_zero.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -418,7 +418,7 @@
   }
 }
 
-#ifdef ASSERT
+#ifndef PRODUCT
 
 void frame::describe_pd(FrameValues& values, int frame_no) {
 
--- a/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -72,6 +72,10 @@
   return fp() + 1;
 }
 
+inline intptr_t* frame::real_fp() const {
+  return fp();
+}
+
 inline intptr_t* frame::link() const {
   ShouldNotCallThis();
 }
--- a/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -29,43 +29,3 @@
   adapter_code_size = 0
 };
 
-#define TARGET_ARCH_NYI_6939861 1
-// ..#ifdef TARGET_ARCH_NYI_6939861
-// ..  // Here are some backward compatible declarations until the 6939861 ports are updated.
-// ..  #define _adapter_flyby    (_EK_LIMIT + 10)
-// ..  #define _adapter_ricochet (_EK_LIMIT + 11)
-// ..  #define _adapter_opt_spread_1    _adapter_opt_spread_1_ref
-// ..  #define _adapter_opt_spread_more _adapter_opt_spread_ref
-// ..  enum {
-// ..    _INSERT_NO_MASK   = -1,
-// ..    _INSERT_REF_MASK  = 0,
-// ..    _INSERT_INT_MASK  = 1,
-// ..    _INSERT_LONG_MASK = 3
-// ..  };
-// ..  static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) {
-// ..    arg_type = ek_bound_mh_arg_type(ek);
-// ..    arg_mask = 0;
-// ..    arg_slots = type2size[arg_type];;
-// ..  }
-// ..  static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) {
-// ..    int swap_slots = ek_adapter_opt_swap_slots(ek);
-// ..    rotate = ek_adapter_opt_swap_mode(ek);
-// ..    swap_bytes = swap_slots * Interpreter::stackElementSize;
-// ..  }
-// ..  static int get_ek_adapter_opt_spread_info(EntryKind ek) {
-// ..    return ek_adapter_opt_spread_count(ek);
-// ..  }
-// ..
-// ..  static void insert_arg_slots(MacroAssembler* _masm,
-// ..                               RegisterOrConstant arg_slots,
-// ..                               int arg_mask,
-// ..                               Register argslot_reg,
-// ..                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
-// ..
-// ..  static void remove_arg_slots(MacroAssembler* _masm,
-// ..                               RegisterOrConstant arg_slots,
-// ..                               Register argslot_reg,
-// ..                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
-// ..
-// ..  static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
-// ..#endif //TARGET_ARCH_NYI_6939861
--- a/hotspot/src/os/bsd/vm/decoder_bsd.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "prims/jvm.h"
-#include "utilities/decoder.hpp"
-
-#include <cxxabi.h>
-
-#ifdef __APPLE__
-
-void Decoder::initialize() {
-  _initialized = true;
-}
-
-void Decoder::uninitialize() {
-  _initialized = false;
-}
-
-bool Decoder::can_decode_C_frame_in_vm() {
-  return false;
-}
-
-Decoder::decoder_status Decoder::decode(address addr, const char* filepath, char *buf, int buflen, int *offset) {
-  return symbol_not_found;
-}
-
-
-#endif
-
-bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
-  int   status;
-  char* result;
-  size_t size = (size_t)buflen;
-
-  // Don't pass buf to __cxa_demangle. In case of the 'buf' is too small,
-  // __cxa_demangle will call system "realloc" for additional memory, which
-  // may use different malloc/realloc mechanism that allocates 'buf'.
-  if ((result = abi::__cxa_demangle(symbol, NULL, NULL, &status)) != NULL) {
-    jio_snprintf(buf, buflen, "%s", result);
-      // call c library's free
-      ::free(result);
-      return true;
-  }
-  return false;
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os/bsd/vm/decoder_machO.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#ifdef __APPLE__
+#include "decoder_machO.hpp"
+#endif
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os/bsd/vm/decoder_machO.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_BSD_VM_DECODER_MACHO_HPP
+#define OS_BSD_VM_DECODER_MACHO_HPP
+
+#ifdef __APPLE__
+
+#include "utilities/decoder.hpp"
+
+// Just a placeholder for now
+class MachODecoder: public NullDecoder {
+public:
+  MachODecoder() { }
+  ~MachODecoder() { }
+};
+
+#endif
+
+#endif // OS_BSD_VM_DECODER_MACHO_HPP
+
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -301,6 +301,12 @@
 #error Add appropriate cpu_arch setting
 #endif
 
+// Compiler variant
+#ifdef COMPILER2
+#define COMPILER_VARIANT "server"
+#else
+#define COMPILER_VARIANT "client"
+#endif
 
 #ifndef _ALLBSD_SOURCE
 // pid_t gettid()
@@ -1920,7 +1926,7 @@
     return true;
   } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
     if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-       dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
+       buf, buflen, offset, dlinfo.dli_fname)) {
        return true;
     }
   }
@@ -2507,7 +2513,7 @@
 
 static char saved_jvm_path[MAXPATHLEN] = {0};
 
-// Find the full path to the current module, libjvm.so or libjvm_g.so
+// Find the full path to the current module, libjvm or libjvm_g
 void os::jvm_path(char *buf, jint buflen) {
   // Error checking.
   if (buflen < MAXPATHLEN) {
@@ -2532,11 +2538,11 @@
 
   if (Arguments::created_by_gamma_launcher()) {
     // Support for the gamma launcher.  Typical value for buf is
-    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
+    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm".  If "/jre/lib/" appears at
     // the right place in the string, then assume we are installed in a JDK and
-    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
-    // up the path so it looks like libjvm.so is installed there (append a
-    // fake suffix hotspot/libjvm.so).
+    // we're done.  Otherwise, check for a JAVA_HOME environment variable and
+    // construct a path to the JVM being overridden.
+
     const char *p = buf + strlen(buf) - 1;
     for (int count = 0; p > buf && count < 5; ++count) {
       for (--p; p > buf && *p != '/'; --p)
@@ -2550,7 +2556,7 @@
         char* jrelib_p;
         int len;
 
-        // Check the current module name "libjvm.so" or "libjvm_g.so".
+        // Check the current module name "libjvm" or "libjvm_g".
         p = strrchr(buf, '/');
         assert(strstr(p, "/libjvm") == p, "invalid library name");
         p = strstr(p, "_g") ? "_g" : "";
@@ -2563,19 +2569,32 @@
         // modules image doesn't have "jre" subdirectory
         len = strlen(buf);
         jrelib_p = buf + len;
-        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
+
+        // Add the appropriate library subdir
+        snprintf(jrelib_p, buflen-len, "/jre/lib");
         if (0 != access(buf, F_OK)) {
-          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
+          snprintf(jrelib_p, buflen-len, "/lib");
         }
 
+        // Add the appropriate client or server subdir
+        len = strlen(buf);
+        jrelib_p = buf + len;
+        snprintf(jrelib_p, buflen-len, "/%s", COMPILER_VARIANT);
+        if (0 != access(buf, F_OK)) {
+          snprintf(jrelib_p, buflen-len, "");
+        }
+
+        // If the path exists within JAVA_HOME, add the JVM library name
+        // to complete the path to JVM being overridden.  Otherwise fallback
+        // to the path to the current library.
         if (0 == access(buf, F_OK)) {
-          // Use current module name "libjvm[_g].so" instead of
-          // "libjvm"debug_only("_g")".so" since for fastdebug version
-          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
+          // Use current module name "libjvm[_g]" instead of
+          // "libjvm"debug_only("_g")"" since for fastdebug version
+          // we should have "libjvm" but debug_only("_g") adds "_g"!
           len = strlen(buf);
-          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
+          snprintf(buf + len, buflen-len, "/libjvm%s%s", p, JNI_LIB_SUFFIX);
         } else {
-          // Go back to path of .so
+          // Fall back to path of current library
           rp = realpath(dli_fname, buf);
           if (rp == NULL)
             return;
@@ -3570,26 +3589,28 @@
 // It is only used when ThreadPriorityPolicy=1 and requires root privilege.
 
 #if defined(_ALLBSD_SOURCE) && !defined(__APPLE__)
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
   19,              // 0 Entry should never be used
 
    0,              // 1 MinPriority
    3,              // 2
    6,              // 3
 
-   10,              // 4
-   15,              // 5 NormPriority
-   18,              // 6
-
-   21,              // 7
-   25,              // 8
-   28,              // 9 NearMaxPriority
-
-   31              // 10 MaxPriority
+  10,              // 4
+  15,              // 5 NormPriority
+  18,              // 6
+
+  21,              // 7
+  25,              // 8
+  28,              // 9 NearMaxPriority
+
+  31,              // 10 MaxPriority
+
+  31               // 11 CriticalPriority
 };
 #elif defined(__APPLE__)
 /* Using Mach high-level priority assignments */
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
    0,              // 0 Entry should never be used (MINPRI_USER)
 
   27,              // 1 MinPriority
@@ -3604,10 +3625,12 @@
   34,              // 8
   35,              // 9 NearMaxPriority
 
-  36               // 10 MaxPriority
+  36,              // 10 MaxPriority
+
+  36               // 11 CriticalPriority
 };
 #else
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
   19,              // 0 Entry should never be used
 
    4,              // 1 MinPriority
@@ -3622,7 +3645,9 @@
   -3,              // 8
   -4,              // 9 NearMaxPriority
 
-  -5               // 10 MaxPriority
+  -5,              // 10 MaxPriority
+
+  -5               // 11 CriticalPriority
 };
 #endif
 
@@ -3638,6 +3663,9 @@
       ThreadPriorityPolicy = 0;
     }
   }
+  if (UseCriticalJavaThreadPriority) {
+    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
+  }
   return 0;
 }
 
--- a/hotspot/src/os/linux/vm/decoder_linux.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/os/linux/vm/decoder_linux.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -23,11 +23,11 @@
  */
 
 #include "prims/jvm.h"
-#include "utilities/decoder.hpp"
+#include "utilities/decoder_elf.hpp"
 
 #include <cxxabi.h>
 
-bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
+bool ElfDecoder::demangle(const char* symbol, char *buf, int buflen) {
   int   status;
   char* result;
   size_t size = (size_t)buflen;
@@ -43,3 +43,4 @@
   }
   return false;
 }
+
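
Aside from the ElfDecoder rename, the demangling logic keeps the usual __cxa_demangle contract: pass a NULL output buffer so the runtime mallocs the result, copy it into the caller's buffer, then release it with the C library's free (mixing a caller-provided buffer with __cxa_demangle's internal realloc is exactly what the comment in the removed BSD decoder warns against). A self-contained sketch of that pattern, independent of HotSpot:

// Standalone illustration of the __cxa_demangle pattern used above.
#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

static bool demangle_to(const char* symbol, char* buf, int buflen) {
  int status = 0;
  // Pass NULL for the output buffer so __cxa_demangle mallocs the result
  // itself instead of realloc'ing a buffer it did not allocate.
  char* result = abi::__cxa_demangle(symbol, NULL, NULL, &status);
  if (result == NULL) return false;
  snprintf(buf, (size_t)buflen, "%s", result);
  ::free(result);  // release with the C library's free
  return true;
}

int main() {
  char buf[256];
  if (demangle_to("_ZNSt6vectorIiSaIiEE9push_backERKi", buf, sizeof(buf)))
    printf("%s\n", buf);  // e.g. std::vector<int, ...>::push_back(int const&)
  return 0;
}
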
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1732,7 +1732,7 @@
     return true;
   } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
     if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-       dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
+        buf, buflen, offset, dlinfo.dli_fname)) {
        return true;
     }
   }
@@ -3383,7 +3383,7 @@
 // this reason, the code should not be used as default (ThreadPriorityPolicy=0).
 // It is only used when ThreadPriorityPolicy=1 and requires root privilege.
 
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
   19,              // 0 Entry should never be used
 
    4,              // 1 MinPriority
@@ -3398,7 +3398,9 @@
   -3,              // 8
   -4,              // 9 NearMaxPriority
 
-  -5               // 10 MaxPriority
+  -5,              // 10 MaxPriority
+
+  -5               // 11 CriticalPriority
 };
 
 static int prio_init() {
@@ -3413,6 +3415,9 @@
       ThreadPriorityPolicy = 0;
     }
   }
+  if (UseCriticalJavaThreadPriority) {
+    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
+  }
   return 0;
 }
 
--- a/hotspot/src/os/posix/launcher/java_md.c	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/os/posix/launcher/java_md.c	Sat Jan 28 20:41:27 2012 -0800
@@ -701,6 +701,14 @@
     char libjava[MAXPATHLEN];
 
     if (GetApplicationHome(path, pathsize)) {
+
+        /* Is the JRE universal, i.e. no arch dir? */
+        sprintf(libjava, "%s/jre/lib/" JAVA_DLL, path);
+        if (access(libjava, F_OK) == 0) {
+            strcat(path, "/jre");
+            goto found;
+        }
+
         /* Is JRE co-located with the application? */
         sprintf(libjava, "%s/lib/%s/" JAVA_DLL, path, arch);
         if (access(libjava, F_OK) == 0) {
@@ -734,7 +742,7 @@
     ifn->GetDefaultJavaVMInitArgs = JNI_GetDefaultJavaVMInitArgs;
     return JNI_TRUE;
 #else
-   Dl_info dlinfo;
+    Dl_info dlinfo;
     void *libjvm;
 
     if (_launcher_debug) {
--- a/hotspot/src/os/solaris/vm/decoder_solaris.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/os/solaris/vm/decoder_solaris.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -22,10 +22,11 @@
  *
  */
 
-#include "utilities/decoder.hpp"
+#include "utilities/decoder_elf.hpp"
 
 #include <demangle.h>
 
-bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
+bool ElfDecoder::demangle(const char* symbol, char *buf, int buflen) {
   return !cplus_demangle(symbol, buf, (size_t)buflen);
 }
+
--- a/hotspot/src/os/solaris/vm/osThread_solaris.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/os/solaris/vm/osThread_solaris.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,17 +28,17 @@
 // This is embedded via include into the class OSThread
 
  private:
-
-  thread_t _thread_id;      // Solaris thread id
-  unsigned int  _lwp_id;    // lwp ID, only used with bound threads
-  sigset_t _caller_sigmask; // Caller's signal mask
-  bool _vm_created_thread;  // true if the VM create this thread
-                            // false if primary thread or attached thread
+  thread_t _thread_id;         // Solaris thread id
+  uint     _lwp_id;            // lwp ID, only used with bound threads
+  int      _native_priority;   // Saved native priority when starting
+                               // a bound thread
+  sigset_t _caller_sigmask;    // Caller's signal mask
+  bool     _vm_created_thread; // true if the VM created this thread,
+                               // false if primary thread or attached thread
  public:
-
-  thread_t thread_id() const      { return _thread_id; }
-
-  unsigned int lwp_id() const     { return _lwp_id; }
+  thread_t thread_id() const       { return _thread_id; }
+  uint     lwp_id() const          { return _lwp_id; }
+  int      native_priority() const { return _native_priority; }
 
   // Set and get state of _vm_created_thread flag
   void set_vm_created()           { _vm_created_thread = true; }
@@ -62,8 +62,9 @@
     return true;
   }
 #endif
-  void set_thread_id(thread_t id) { _thread_id = id;   }
-  void set_lwp_id(unsigned int id){ _lwp_id = id;   }
+  void set_thread_id(thread_t id)    { _thread_id = id; }
+  void set_lwp_id(uint id)           { _lwp_id = id; }
+  void set_native_priority(int prio) { _native_priority = prio; }
 
  // ***************************************************************
  // interrupt support.  interrupts (using signals) are used to get
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,6 +114,7 @@
 # include <sys/rtpriocntl.h>
 # include <sys/tspriocntl.h>
 # include <sys/iapriocntl.h>
+# include <sys/fxpriocntl.h>
 # include <sys/loadavg.h>
 # include <string.h>
 # include <stdio.h>
@@ -129,8 +130,8 @@
 #ifdef _GNU_SOURCE
 // See bug #6514594
 extern "C" int madvise(caddr_t, size_t, int);
-extern "C"  int memcntl(caddr_t addr, size_t len, int cmd, caddr_t  arg,
-     int attr, int mask);
+extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
+                       int attr, int mask);
 #endif //_GNU_SOURCE
 
 /*
@@ -215,8 +216,9 @@
 #define MaximumPriority 127
 
 // Values for ThreadPriorityPolicy == 1
-int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64,
-                                        80, 96, 112, 124, 127 };
+int prio_policy1[CriticalPriority+1] = {
+  -99999,  0, 16,  32,  48,  64,
+          80, 96, 112, 124, 127, 127 };
 
 // System parameters used internally
 static clock_t clock_tics_per_sec = 100;
@@ -1048,15 +1050,22 @@
   }
 
   // If the creator called set priority before we started,
-  // we need to call set priority now that we have an lwp.
-  // Get the priority from libthread and set the priority
-  // for the new Solaris lwp.
+  // we need to call set_native_priority now that we have an lwp.
+  // We used to get the priority from thr_getprio (we called
+  // thr_setprio way back in create_thread) and pass it to
+  // set_native_priority, but Solaris scales the priority
+  // in java_to_os_priority, so when we read it back here,
+  // we pass trash to set_native_priority instead of what's
+  // in java_to_os_priority. So we save the native priority
+  // in the osThread and recall it here.
+
   if ( osthr->thread_id() != -1 ) {
     if ( UseThreadPriorities ) {
-      thr_getprio(osthr->thread_id(), &prio);
+      int prio = osthr->native_priority();
       if (ThreadPriorityVerbose) {
-        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
-                      osthr->thread_id(), osthr->lwp_id(), prio );
+        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
+                      INTPTR_FORMAT ", setting priority: %d\n",
+                      osthr->thread_id(), osthr->lwp_id(), prio);
       }
       os::set_native_priority(thread, prio);
     }
@@ -1353,13 +1362,12 @@
   // Remember that we created this thread so we can set priority on it
   osthread->set_vm_created();
 
-  // Set the default thread priority otherwise use NormalPriority
-
-  if ( UseThreadPriorities ) {
-     thr_setprio(tid, (DefaultThreadPriority == -1) ?
+  // Set the default thread priority.  If using bound threads, setting
+  // lwp priority will be delayed until thread start.
+  set_native_priority(thread,
+                      DefaultThreadPriority == -1 ?
                         java_to_os_priority[NormPriority] :
                         DefaultThreadPriority);
-  }
 
   // Initial thread state is INITIALIZED, not SUSPENDED
   osthread->set_state(INITIALIZED);
@@ -1997,7 +2005,7 @@
       }
       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-          dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
+           buf, buflen, offset, dlinfo.dli_fname)) {
           return true;
         }
       }
@@ -2015,7 +2023,7 @@
         return true;
       } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-          dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
+          buf, buflen, offset, dlinfo.dli_fname)) {
           return true;
         }
       }
@@ -3728,7 +3736,7 @@
 } SchedInfo;
 
 
-static SchedInfo tsLimits, iaLimits, rtLimits;
+static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
 
 #ifdef ASSERT
 static int  ReadBackValidate = 1;
@@ -3739,6 +3747,8 @@
 static int  myCur       = 0;
 static bool priocntl_enable = false;
 
+static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
+static int java_MaxPriority_to_os_priority = 0; // Saved mapping
 
 // Call the version of priocntl suitable for all supported versions
 // of Solaris. We need to call through this wrapper so that we can
@@ -3783,19 +3793,27 @@
   if (os::Solaris::T2_libthread() || UseBoundThreads) {
     // If ThreadPriorityPolicy is 1, switch tables
     if (ThreadPriorityPolicy == 1) {
-      for (i = 0 ; i < MaxPriority+1; i++)
+      for (i = 0 ; i < CriticalPriority+1; i++)
         os::java_to_os_priority[i] = prio_policy1[i];
     }
+    if (UseCriticalJavaThreadPriority) {
+      // MaxPriority always maps to the FX scheduling class and criticalPrio.
+      // See set_native_priority() and set_lwp_class_and_priority().
+      // Save original MaxPriority mapping in case attempt to
+      // use critical priority fails.
+      java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
+      // Set negative to distinguish from other priorities
+      os::java_to_os_priority[MaxPriority] = -criticalPrio;
+    }
   }
   // Not using Bound Threads, set to ThreadPolicy 1
   else {
-    for ( i = 0 ; i < MaxPriority+1; i++ ) {
+    for ( i = 0 ; i < CriticalPriority+1; i++ ) {
       os::java_to_os_priority[i] = prio_policy1[i];
     }
     return 0;
   }
 
-
   // Get IDs for a set of well-known scheduling classes.
   // TODO-FIXME: GETCLINFO returns the current # of classes in the
   // the system.  We should have a loop that iterates over the
@@ -3828,24 +3846,33 @@
   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
   rtLimits.minPrio = 0;
 
+  strcpy(ClassInfo.pc_clname, "FX");
+  ClassInfo.pc_cid = -1;
+  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
+  if (rslt < 0) return errno;
+  assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
+  fxLimits.schedPolicy = ClassInfo.pc_cid;
+  fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
+  fxLimits.minPrio = 0;
 
   // Query our "current" scheduling class.
-  // This will normally be IA,TS or, rarely, RT.
-  memset (&ParmInfo, 0, sizeof(ParmInfo));
+  // This will normally be IA, TS or, rarely, FX or RT.
+  memset(&ParmInfo, 0, sizeof(ParmInfo));
   ParmInfo.pc_cid = PC_CLNULL;
-  rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo );
-  if ( rslt < 0 ) return errno;
+  rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
+  if (rslt < 0) return errno;
   myClass = ParmInfo.pc_cid;
 
   // We now know our scheduling classId, get specific information
-  // the class.
+  // about the class.
   ClassInfo.pc_cid = myClass;
   ClassInfo.pc_clname[0] = 0;
-  rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo );
-  if ( rslt < 0 ) return errno;
-
-  if (ThreadPriorityVerbose)
-    tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
+  rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
+  if (rslt < 0) return errno;
+
+  if (ThreadPriorityVerbose) {
+    tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
+  }
 
   memset(&ParmInfo, 0, sizeof(pcparms_t));
   ParmInfo.pc_cid = PC_CLNULL;
@@ -3865,6 +3892,11 @@
     myMin = tsLimits.minPrio;
     myMax = tsLimits.maxPrio;
     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
+  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
+    fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
+    myMin = fxLimits.minPrio;
+    myMax = fxLimits.maxPrio;
+    myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
   } else {
     // No clue - punt
     if (ThreadPriorityVerbose)
@@ -3872,8 +3904,9 @@
     return EINVAL;      // no clue, punt
   }
 
-  if (ThreadPriorityVerbose)
-        tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
+  if (ThreadPriorityVerbose) {
+    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
+  }
 
   priocntl_enable = true;  // Enable changing priorities
   return 0;
@@ -3882,6 +3915,7 @@
 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
+#define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
 
 
 // scale_to_lwp_priority
@@ -3900,13 +3934,13 @@
 }
 
 
-// set_lwp_priority
+// set_lwp_class_and_priority
 //
-// Set the priority of the lwp.  This call should only be made
-// when using bound threads (T2 threads are bound by default).
+// Set the class and priority of the lwp.  This call should only
+// be made when using bound threads (T2 threads are bound by default).
 //
-int     set_lwp_priority (int ThreadID, int lwpid, int newPrio )
-{
+int set_lwp_class_and_priority(int ThreadID, int lwpid,
+                               int newPrio, int new_class, bool scale) {
   int rslt;
   int Actual, Expected, prv;
   pcparms_t ParmInfo;                   // for GET-SET
@@ -3927,19 +3961,20 @@
     return EINVAL;
   }
 
-
   // If lwp hasn't started yet, just return
   // the _start routine will call us again.
   if ( lwpid <= 0 ) {
     if (ThreadPriorityVerbose) {
-      tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set",
+      tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
+                     INTPTR_FORMAT " to %d, lwpid not set",
                      ThreadID, newPrio);
     }
     return 0;
   }
 
   if (ThreadPriorityVerbose) {
-    tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
+    tty->print_cr ("set_lwp_class_and_priority("
+                   INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                    ThreadID, lwpid, newPrio);
   }
 
@@ -3948,40 +3983,70 @@
   rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
   if (rslt < 0) return errno;
 
-  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
+  int cur_class = ParmInfo.pc_cid;
+  ParmInfo.pc_cid = (id_t)new_class;
+
+  if (new_class == rtLimits.schedPolicy) {
     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
-    rtInfo->rt_pri     = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio);
+    rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
+                                                       rtLimits.maxPrio, newPrio)
+                               : newPrio;
     rtInfo->rt_tqsecs  = RT_NOCHANGE;
     rtInfo->rt_tqnsecs = RT_NOCHANGE;
     if (ThreadPriorityVerbose) {
       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
     }
-  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
-    iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
-    int maxClamped     = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
-    iaInfo->ia_upri    = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
-    iaInfo->ia_uprilim = IA_NOCHANGE;
+  } else if (new_class == iaLimits.schedPolicy) {
+    iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
+    int maxClamped     = MIN2(iaLimits.maxPrio,
+                              cur_class == new_class
+                                ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
+    iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
+                                                       maxClamped, newPrio)
+                               : newPrio;
+    iaInfo->ia_uprilim = cur_class == new_class
+                           ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
     iaInfo->ia_mode    = IA_NOCHANGE;
+    iaInfo->ia_nice    = cur_class == new_class ? IA_NOCHANGE : NZERO;
     if (ThreadPriorityVerbose) {
-      tty->print_cr ("IA: [%d...%d] %d->%d\n",
-               iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
+      tty->print_cr("IA: [%d...%d] %d->%d\n",
+                    iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
     }
-  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
-    tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
-    int maxClamped     = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim);
-    prv                = tsInfo->ts_upri;
-    tsInfo->ts_upri    = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio);
-    tsInfo->ts_uprilim = IA_NOCHANGE;
+  } else if (new_class == tsLimits.schedPolicy) {
+    tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
+    int maxClamped     = MIN2(tsLimits.maxPrio,
+                              cur_class == new_class
+                                ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
+    tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
+                                                       maxClamped, newPrio)
+                               : newPrio;
+    tsInfo->ts_uprilim = cur_class == new_class
+                           ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
     if (ThreadPriorityVerbose) {
-      tty->print_cr ("TS: %d [%d...%d] %d->%d\n",
-               prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
+      tty->print_cr("TS: [%d...%d] %d->%d\n",
+                    tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
     }
-    if (prv == tsInfo->ts_upri) return 0;
+  } else if (new_class == fxLimits.schedPolicy) {
+    fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
+    int maxClamped     = MIN2(fxLimits.maxPrio,
+                              cur_class == new_class
+                                ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
+    fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
+                                                       maxClamped, newPrio)
+                               : newPrio;
+    fxInfo->fx_uprilim = cur_class == new_class
+                           ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
+    fxInfo->fx_tqsecs  = FX_NOCHANGE;
+    fxInfo->fx_tqnsecs = FX_NOCHANGE;
+    if (ThreadPriorityVerbose) {
+      tty->print_cr("FX: [%d...%d] %d->%d\n",
+                    fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
+    }
   } else {
-    if ( ThreadPriorityVerbose ) {
-      tty->print_cr ("Unknown scheduling class\n");
+    if (ThreadPriorityVerbose) {
+      tty->print_cr("Unknown new scheduling class %d\n", new_class);
     }
-      return EINVAL;    // no clue, punt
+    return EINVAL;    // no clue, punt
   }
 
   rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
@@ -4016,16 +4081,20 @@
   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
     Actual   = TSPRI(ReadBack)->ts_upri;
     Expected = TSPRI(ParmInfo)->ts_upri;
+  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
+    Actual   = FXPRI(ReadBack)->fx_upri;
+    Expected = FXPRI(ParmInfo)->fx_upri;
   } else {
-    if ( ThreadPriorityVerbose ) {
-      tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid);
+    if (ThreadPriorityVerbose) {
+      tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
+                    ParmInfo.pc_cid);
     }
   }
 
   if (Actual != Expected) {
-    if ( ThreadPriorityVerbose ) {
-      tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
-             lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
+    if (ThreadPriorityVerbose) {
+      tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
+                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
     }
   }
 #endif
@@ -4033,8 +4102,6 @@
   return 0;
 }
 
-
-
 // Solaris only gives access to 128 real priorities at a time,
 // so we expand Java's ten to fill this range.  This would be better
 // if we dynamically adjusted relative priorities.
@@ -4055,8 +4122,7 @@
 // which do not explicitly alter their thread priorities.
 //
 
-
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
   -99999,         // 0 Entry should never be used
 
   0,              // 1 MinPriority
@@ -4071,17 +4137,51 @@
   127,            // 8
   127,            // 9 NearMaxPriority
 
-  127             // 10 MaxPriority
+  127,            // 10 MaxPriority
+
+  -criticalPrio   // 11 CriticalPriority
 };
 
-
 OSReturn os::set_native_priority(Thread* thread, int newpri) {
+  OSThread* osthread = thread->osthread();
+
+  // Save requested priority in case the thread hasn't been started
+  osthread->set_native_priority(newpri);
+
+  // Check for critical priority request
+  bool fxcritical = false;
+  if (newpri == -criticalPrio) {
+    fxcritical = true;
+    newpri = criticalPrio;
+  }
+
   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
-  if ( !UseThreadPriorities ) return OS_OK;
-  int status = thr_setprio(thread->osthread()->thread_id(), newpri);
-  if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
-    status |= (set_lwp_priority (thread->osthread()->thread_id(),
-                    thread->osthread()->lwp_id(), newpri ));
+  if (!UseThreadPriorities) return OS_OK;
+
+  int status = 0;
+
+  if (!fxcritical) {
+    // Use thr_setprio only if we have a priority that thr_setprio understands
+    status = thr_setprio(thread->osthread()->thread_id(), newpri);
+  }
+
+  if (os::Solaris::T2_libthread() ||
+      (UseBoundThreads && osthread->is_vm_created())) {
+    int lwp_status =
+      set_lwp_class_and_priority(osthread->thread_id(),
+                                 osthread->lwp_id(),
+                                 newpri,
+                                 fxcritical ? fxLimits.schedPolicy : myClass,
+                                 !fxcritical);
+    if (lwp_status != 0 && fxcritical) {
+      // Try again, this time without changing the scheduling class
+      newpri = java_MaxPriority_to_os_priority;
+      lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
+                                              osthread->lwp_id(),
+                                              newpri, myClass, false);
+    }
+    status |= lwp_status;
+  }
   return (status == 0) ? OS_OK : OS_ERR;
 }
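
In the Solaris changes above, a critical-priority request travels through the normal java_to_os_priority table as the negative sentinel -criticalPrio and is unfolded again in os::set_native_priority, with a fallback to the saved MaxPriority mapping if the FX/60 assignment fails. A minimal standalone sketch of that sentinel pattern (the constants and helper name are illustrative; the patch uses criticalPrio = 60 and java_MaxPriority_to_os_priority as the fallback):

// Standalone sketch of the negative-sentinel pattern used above.
#include <cstdio>

static const int kCriticalPrio = 60;   // FX/60 in the patch (assumption for this sketch)

// Returns the priority to program and whether the FX class is requested.
static int resolve_priority(int table_value, bool* use_fx_class) {
  if (table_value == -kCriticalPrio) { // sentinel: critical-priority request
    *use_fx_class = true;
    return kCriticalPrio;
  }
  *use_fx_class = false;
  return table_value;
}

int main() {
  bool fx;
  int prio = resolve_priority(-kCriticalPrio, &fx);
  printf("prio=%d fx=%d\n", prio, fx);  // prio=60 fx=1
  // If programming FX/60 fails, the patch retries with the saved
  // MaxPriority mapping and the thread's current scheduling class.
  return 0;
}
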
 
--- a/hotspot/src/os/windows/vm/decoder_windows.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/os/windows/vm/decoder_windows.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,22 +24,24 @@
 
 #include "precompiled.hpp"
 #include "prims/jvm.h"
-#include "runtime/os.hpp"
-#include "utilities/decoder.hpp"
+#include "decoder_windows.hpp"
+
+WindowsDecoder::WindowsDecoder() {
+  _dbghelp_handle = NULL;
+  _can_decode_in_vm = false;
+  _pfnSymGetSymFromAddr64 = NULL;
+  _pfnUndecorateSymbolName = NULL;
 
-HMODULE                   Decoder::_dbghelp_handle = NULL;
-bool                      Decoder::_can_decode_in_vm = false;
-pfn_SymGetSymFromAddr64   Decoder::_pfnSymGetSymFromAddr64 = NULL;
-pfn_UndecorateSymbolName  Decoder::_pfnUndecorateSymbolName = NULL;
+  _decoder_status = no_error;
+  initialize();
+}
 
-void Decoder::initialize() {
-  if (!_initialized) {
-    _initialized = true;
-
-    HINSTANCE handle = os::win32::load_Windows_dll("dbghelp.dll", NULL, 0);
+void WindowsDecoder::initialize() {
+  if (!has_error() && _dbghelp_handle == NULL) {
+    HMODULE handle = ::LoadLibrary("dbghelp.dll");
     if (!handle) {
       _decoder_status = helper_not_found;
-        return;
+      return;
     }
 
     _dbghelp_handle = handle;
@@ -70,32 +72,29 @@
 
      // find out if jvm.dll contains private symbols, by decoding
      // current function and comparing the result
-     address addr = (address)Decoder::initialize;
+     address addr = (address)Decoder::decode;
      char buf[MAX_PATH];
-     if (decode(addr, buf, sizeof(buf), NULL) == no_error) {
-       _can_decode_in_vm = !strcmp(buf, "Decoder::initialize");
+     if (decode(addr, buf, sizeof(buf), NULL)) {
+       _can_decode_in_vm = !strcmp(buf, "Decoder::decode");
      }
   }
 }
 
-void Decoder::uninitialize() {
-  assert(_initialized, "Decoder not yet initialized");
+void WindowsDecoder::uninitialize() {
   _pfnSymGetSymFromAddr64 = NULL;
   _pfnUndecorateSymbolName = NULL;
   if (_dbghelp_handle != NULL) {
     ::FreeLibrary(_dbghelp_handle);
   }
-  _initialized = false;
+  _dbghelp_handle = NULL;
 }
 
-bool Decoder::can_decode_C_frame_in_vm() {
-  initialize();
-  return  _can_decode_in_vm;
+bool WindowsDecoder::can_decode_C_frame_in_vm() const {
+  return  (!has_error() && _can_decode_in_vm);
 }
 
 
-Decoder::decoder_status Decoder::decode(address addr, char *buf, int buflen, int *offset) {
-  assert(_initialized, "Decoder not yet initialized");
+bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath)  {
   if (_pfnSymGetSymFromAddr64 != NULL) {
     PIMAGEHLP_SYMBOL64 pSymbol;
     char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];
@@ -105,19 +104,20 @@
     DWORD64 displacement;
     if (_pfnSymGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
       if (buf != NULL) {
-        if (!demangle(pSymbol->Name, buf, buflen)) {
+        if (demangle(pSymbol->Name, buf, buflen)) {
           jio_snprintf(buf, buflen, "%s", pSymbol->Name);
         }
       }
-      if (offset != NULL) *offset = (int)displacement;
-      return no_error;
+      if (offset != NULL) *offset = (int)displacement;
+      return true;
     }
   }
-  return helper_not_found;
+  if (buf != NULL && buflen > 0) buf[0] = '\0';
+  if (offset != NULL) *offset = -1;
+  return false;
 }
 
-bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
-  assert(_initialized, "Decoder not yet initialized");
+bool WindowsDecoder::demangle(const char* symbol, char *buf, int buflen) {
   return _pfnUndecorateSymbolName != NULL &&
          _pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os/windows/vm/decoder_windows.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_WINDOWS_VM_DECODER_WINDOWS_HPP
+#define OS_WINDOWS_VM_DECODER_WINDOWS_HPP
+
+#include <windows.h>
+#include <imagehlp.h>
+
+#include "utilities/decoder.hpp"
+
+// functions needed for decoding symbols
+typedef DWORD (WINAPI *pfn_SymSetOptions)(DWORD);
+typedef BOOL  (WINAPI *pfn_SymInitialize)(HANDLE, PCTSTR, BOOL);
+typedef BOOL  (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
+typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWORD);
+
+class WindowsDecoder: public NullDecoder {
+
+public:
+  WindowsDecoder();
+  ~WindowsDecoder() { uninitialize(); };
+
+  bool can_decode_C_frame_in_vm() const;
+  bool demangle(const char* symbol, char *buf, int buflen);
+  bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath = NULL);
+
+private:
+  void initialize();
+  void uninitialize();
+
+private:
+  HMODULE                   _dbghelp_handle;
+  bool                      _can_decode_in_vm;
+  pfn_SymGetSymFromAddr64   _pfnSymGetSymFromAddr64;
+  pfn_UndecorateSymbolName  _pfnUndecorateSymbolName;
+};
+
+#endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
+
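
The new WindowsDecoder above resolves symbols by loading dbghelp.dll at runtime and calling SymGetSymFromAddr64 through a function pointer resolved with GetProcAddress. A minimal standalone sketch (not part of this changeset) of that lookup pattern follows; the helper name resolve_symbol and its error handling are illustrative only.

#include <windows.h>
#include <imagehlp.h>
#include <cstdio>
#include <cstring>

typedef DWORD (WINAPI *pfn_SymSetOptions)(DWORD);
typedef BOOL  (WINAPI *pfn_SymInitialize)(HANDLE, PCSTR, BOOL);
typedef BOOL  (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);

// Decode 'addr' into a symbol name using the same dbghelp sequence as above.
static bool resolve_symbol(void* addr, char* buf, int buflen) {
  HMODULE handle = ::LoadLibraryA("dbghelp.dll");
  if (handle == NULL) return false;                          // "helper_not_found"

  pfn_SymSetOptions set_opts =
    (pfn_SymSetOptions)::GetProcAddress(handle, "SymSetOptions");
  pfn_SymInitialize sym_init =
    (pfn_SymInitialize)::GetProcAddress(handle, "SymInitialize");
  pfn_SymGetSymFromAddr64 from_addr =
    (pfn_SymGetSymFromAddr64)::GetProcAddress(handle, "SymGetSymFromAddr64");
  if (set_opts == NULL || sym_init == NULL || from_addr == NULL) {
    ::FreeLibrary(handle);
    return false;
  }

  set_opts(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS);          // ask for readable names
  if (!sym_init(::GetCurrentProcess(), NULL, TRUE)) {
    ::FreeLibrary(handle);
    return false;
  }

  char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];
  PIMAGEHLP_SYMBOL64 pSymbol = (PIMAGEHLP_SYMBOL64)symbolInfo;
  pSymbol->MaxNameLength = MAX_PATH;
  pSymbol->SizeOfStruct  = sizeof(IMAGEHLP_SYMBOL64);
  DWORD64 displacement = 0;

  bool ok = from_addr(::GetCurrentProcess(), (DWORD64)addr,
                      &displacement, pSymbol) != FALSE;
  if (ok) {
    strncpy(buf, pSymbol->Name, buflen - 1);
    buf[buflen - 1] = '\0';
  }
  ::FreeLibrary(handle);
  return ok;
}

int main() {
  char name[MAX_PATH];
  if (resolve_symbol((void*)&resolve_symbol, name, sizeof(name))) {
    printf("decoded as: %s\n", name);
  }
  return 0;
}
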
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1391,7 +1391,7 @@
 
 bool os::dll_address_to_function_name(address addr, char *buf,
                                       int buflen, int *offset) {
-  if (Decoder::decode(addr, buf, buflen, offset) == Decoder::no_error) {
+  if (Decoder::decode(addr, buf, buflen, offset)) {
     return true;
   }
   if (offset != NULL)  *offset  = -1;
@@ -3296,7 +3296,7 @@
 // so we compress Java's ten down to seven.  It would be better
 // if we dynamically adjusted relative priorities.
 
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
   THREAD_PRIORITY_LOWEST,                       // 2
@@ -3307,10 +3307,11 @@
   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
-  THREAD_PRIORITY_HIGHEST                       // 10 MaxPriority
+  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
+  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
 };
 
-int prio_policy1[MaxPriority + 1] = {
+int prio_policy1[CriticalPriority + 1] = {
   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
   THREAD_PRIORITY_LOWEST,                       // 2
@@ -3321,17 +3322,21 @@
   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
   THREAD_PRIORITY_HIGHEST,                      // 8
   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
-  THREAD_PRIORITY_TIME_CRITICAL                 // 10 MaxPriority
+  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
+  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
 };
 
 static int prio_init() {
   // If ThreadPriorityPolicy is 1, switch tables
   if (ThreadPriorityPolicy == 1) {
     int i;
-    for (i = 0; i < MaxPriority + 1; i++) {
+    for (i = 0; i < CriticalPriority + 1; i++) {
       os::java_to_os_priority[i] = prio_policy1[i];
     }
   }
+  if (UseCriticalJavaThreadPriority) {
+    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
+  }
   return 0;
 }
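
The os_windows.cpp change above extends both Java-to-OS priority tables by one CriticalPriority slot and, when the critical-priority flag is set, remaps MaxPriority onto that slot during initialization. A standalone sketch (not part of this changeset) of that remapping idea, with made-up stand-in OS priority values rather than the real THREAD_PRIORITY_* constants:

#include <cstdio>

enum { MinPriority = 1, NormPriority = 5, NearMaxPriority = 9,
       MaxPriority = 10, CriticalPriority = 11 };

// Stand-in OS priority values (illustrative, not the Windows constants).
static int java_to_os_priority[CriticalPriority + 1] = {
  0,                 // 0  never used
  1, 1, 2, 2, 3, 3,  // 1..6
  4, 4,              // 7..8
  5,                 // 9  NearMaxPriority
  5,                 // 10 MaxPriority
  6                  // 11 CriticalPriority (e.g. a "time critical" level)
};

static void prio_init(bool use_critical_java_thread_priority) {
  if (use_critical_java_thread_priority) {
    // Java code can still only request MaxPriority; the remap lets that
    // request reach the critical OS priority.
    java_to_os_priority[MaxPriority] = java_to_os_priority[CriticalPriority];
  }
}

int main() {
  prio_init(true);
  printf("MaxPriority now maps to OS priority %d\n",
         java_to_os_priority[MaxPriority]);
  return 0;
}
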
 
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1354,9 +1354,10 @@
   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
 
  public:
-  LIR_OpBranch(LIR_Condition cond, Label* lbl)
+  LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
     : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
     , _cond(cond)
+    , _type(type)
     , _label(lbl)
     , _block(NULL)
     , _ublock(NULL)
@@ -2053,7 +2054,7 @@
   void jump(CodeStub* stub) {
     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
   }
-  void branch(LIR_Condition cond, Label* lbl)        { append(new LIR_OpBranch(cond, lbl)); }
+  void branch(LIR_Condition cond, BasicType type, Label* lbl)        { append(new LIR_OpBranch(cond, type, lbl)); }
   void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
     append(new LIR_OpBranch(cond, type, block));
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -2350,7 +2350,7 @@
     } else {
       LabelObj* L = new LabelObj();
       __ cmp(lir_cond_less, value, low_key);
-      __ branch(lir_cond_less, L->label());
+      __ branch(lir_cond_less, T_INT, L->label());
       __ cmp(lir_cond_lessEqual, value, high_key);
       __ branch(lir_cond_lessEqual, T_INT, dest);
       __ branch_destination(L->label());
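
The C1 change above threads a BasicType through every conditional branch, so the label-branch helper gains a type argument (T_INT at the switch-range call site shown). A much-simplified standalone sketch (stand-in types, not the real C1 classes) of that shape:

#include <cstdio>
#include <vector>

enum BasicType     { T_INT, T_FLOAT, T_DOUBLE, T_ILLEGAL };
enum LIR_Condition { lir_cond_less, lir_cond_lessEqual, lir_cond_always };

struct LIR_OpBranch {
  LIR_Condition cond;
  BasicType     type;     // now carried on every branch
  int           label_id;
};

struct LIRList {
  std::vector<LIR_OpBranch> ops;
  void branch(LIR_Condition cond, BasicType type, int label_id) {
    ops.push_back(LIR_OpBranch{cond, type, label_id});
  }
};

int main() {
  LIRList l;
  l.branch(lir_cond_less, T_INT, /*label*/ 1);   // was branch(cond, label)
  printf("%zu branch op(s), first type = %d\n", l.ops.size(), (int)l.ops[0].type);
  return 0;
}
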
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -413,8 +413,9 @@
     }
     bci = branch_bci + offset;
   }
-
+  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
   osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
+  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
   return osr_nm;
 }
 
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1347,7 +1347,13 @@
     return _backtrace();
   }
 
-  inline void push(methodOop method, short bci, TRAPS) {
+  inline void push(methodOop method, int bci, TRAPS) {
+    // Smear the -1 bci to 0 since the array only holds unsigned
+    // shorts.  The later line number lookup would just smear the -1
+    // to a 0 even if it could be recorded.
+    if (bci == SynchronizationEntryBCI) bci = 0;
+    assert(bci == (jushort)bci, "doesn't fit");
+
     if (_index >= trace_chunk_size) {
       methodHandle mhandle(THREAD, method);
       expand(CHECK);
@@ -1574,8 +1580,13 @@
   int chunk_count = 0;
 
   for (;!st.at_end(); st.next()) {
-    // add element
-    bcis->ushort_at_put(chunk_count, st.bci());
+    // Add entry and smear the -1 bci to 0 since the array only holds
+    // unsigned shorts.  The later line number lookup would just smear
+    // the -1 to a 0 even if it could be recorded.
+    int bci = st.bci();
+    if (bci == SynchronizationEntryBCI) bci = 0;
+    assert(bci == (jushort)bci, "doesn't fit");
+    bcis->ushort_at_put(chunk_count, bci);
     methods->obj_at_put(chunk_count, st.method());
 
     chunk_count++;
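
The javaClasses.cpp change above stores backtrace bcis in an unsigned-short array, so the synthetic bci of -1 used for synchronization entry frames is smeared to 0 before being recorded. A tiny standalone sketch of that guard (the -1 value of SynchronizationEntryBCI is assumed here, not taken from this changeset):

#include <cassert>
#include <cstdint>
#include <cstdio>

static const int SynchronizationEntryBCI = -1;   // assumed value of the HotSpot constant

// Store a bci the way the backtrace code above does: smear -1 to 0, then
// check that the value fits in an unsigned short before narrowing.
static uint16_t store_bci(int bci) {
  if (bci == SynchronizationEntryBCI) bci = 0;
  assert(bci == (uint16_t)bci && "bci doesn't fit in an unsigned short");
  return (uint16_t)bci;
}

int main() {
  printf("%u %u\n", (unsigned)store_bci(-1), (unsigned)store_bci(42));   // prints "0 42"
  return 0;
}
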
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -204,6 +204,24 @@
   return s;
 }
 
+// Look up the address of the literal in the SymbolTable for this Symbol*
+// Do not create any new symbols
+// Do not increment the reference count to keep this alive
+Symbol** SymbolTable::lookup_symbol_addr(Symbol* sym){
+  unsigned int hash = hash_symbol((char*)sym->bytes(), sym->utf8_length());
+  int index = the_table()->hash_to_index(hash);
+
+  for (HashtableEntry<Symbol*>* e = the_table()->bucket(index); e != NULL; e = e->next()) {
+    if (e->hash() == hash) {
+      Symbol* literal_sym = e->literal();
+      if (sym == literal_sym) {
+        return e->literal_addr();
+      }
+    }
+  }
+  return NULL;
+}
+
 // Suggestion: Push unicode-based lookup all the way into the hashing
 // and probing logic, so there is no need for convert_to_utf8 until
 // an actual new Symbol* is created.
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -144,6 +144,9 @@
 
   static void release(Symbol* sym);
 
+  // Look up the address of the literal in the SymbolTable for this Symbol*
+  static Symbol** lookup_symbol_addr(Symbol* sym);
+
   // jchar (utf16) version of lookups
   static Symbol* lookup_unicode(const jchar* name, int len, TRAPS);
   static Symbol* lookup_only_unicode(const jchar* name, int len, unsigned int& hash);
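
The new SymbolTable::lookup_symbol_addr() above walks only the bucket for the symbol's hash and hands back the address of the stored Symbol* slot when the identical pointer is found; it creates nothing and leaves the reference count alone. A simplified standalone sketch of that identity-only lookup, with std types standing in for HotSpot's hashtable:

#include <cstddef>
#include <functional>
#include <string>
#include <vector>

// Simplified stand-ins for Symbol and HashtableEntry<Symbol*>.
struct Symbol { std::string utf8; };
struct Entry  { size_t hash; Symbol* literal; Entry* next; };

struct TinySymbolTable {
  std::vector<Entry*> buckets;
  TinySymbolTable() : buckets(64, nullptr) {}

  static size_t hash_of(const Symbol* s) { return std::hash<std::string>()(s->utf8); }

  // Return the address of the stored Symbol* slot, or nullptr if this exact
  // Symbol* is not in the table.  Nothing is created, no refcount is touched.
  Symbol** lookup_symbol_addr(Symbol* sym) {
    size_t hash = hash_of(sym);
    for (Entry* e = buckets[hash % buckets.size()]; e != nullptr; e = e->next) {
      if (e->hash == hash && e->literal == sym) {   // identity, not equality
        return &e->literal;
      }
    }
    return nullptr;
  }
};

int main() {
  TinySymbolTable table;
  Symbol* sym = new Symbol{"java/lang/Object"};
  size_t h = TinySymbolTable::hash_of(sym);
  table.buckets[h % table.buckets.size()] = new Entry{h, sym, nullptr};

  Symbol other{"java/lang/Object"};               // equal text, different object
  return (table.lookup_symbol_addr(sym) != nullptr &&
          table.lookup_symbol_addr(&other) == nullptr) ? 0 : 1;
}
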
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -2131,6 +2131,12 @@
     }
   }
 
+  // Assign a classid if one has not already been assigned.  The
+  // counter does not need to be atomically incremented since this
+  // is only done while holding the SystemDictionary_lock.
+  // All loaded classes get a unique ID.
+  TRACE_INIT_ID(k);
+
   // Check for a placeholder. If there, remove it and make a
   // new system dictionary entry.
   placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -855,23 +855,23 @@
     // Note that this only sets the JavaThread _priority field, which by
     // definition is limited to Java priorities and not OS priorities.
     // The os-priority is set in the CompilerThread startup code itself
+
     java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
-    // CLEANUP PRIORITIES: This -if- statement hids a bug whereby the compiler
-    // threads never have their OS priority set.  The assumption here is to
-    // enable the Performance group to do flag tuning, figure out a suitable
-    // CompilerThreadPriority, and then remove this 'if' statement (and
-    // comment) and unconditionally set the priority.
+
+    // Note that we cannot call os::set_priority because it expects Java
+    // priorities and we are *explicitly* using OS priorities so that it's
+    // possible to set the compiler thread priority higher than any Java
+    // thread.
 
-    // Compiler Threads should be at the highest Priority
-    if ( CompilerThreadPriority != -1 )
-      os::set_native_priority( compiler_thread, CompilerThreadPriority );
-    else
-      os::set_native_priority( compiler_thread, os::java_to_os_priority[NearMaxPriority]);
-
-      // Note that I cannot call os::set_priority because it expects Java
-      // priorities and I am *explicitly* using OS priorities so that it's
-      // possible to set the compiler thread priority higher than any Java
-      // thread.
+    int native_prio = CompilerThreadPriority;
+    if (native_prio == -1) {
+      if (UseCriticalCompilerThreadPriority) {
+        native_prio = os::java_to_os_priority[CriticalPriority];
+      } else {
+        native_prio = os::java_to_os_priority[NearMaxPriority];
+      }
+    }
+    os::set_native_priority(compiler_thread, native_prio);
 
     java_lang_Thread::set_daemon(thread_oop());
 
@@ -879,6 +879,7 @@
     Threads::add(compiler_thread);
     Thread::start(compiler_thread);
   }
+
   // Let go of Threads_lock before yielding
   os::yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
 
@@ -961,7 +962,7 @@
                                         methodHandle hot_method,
                                         int hot_count,
                                         const char* comment,
-                                        TRAPS) {
+                                        Thread* thread) {
   // do nothing if compiler thread(s) is not available
   if (!_initialized ) {
     return;
@@ -1037,7 +1038,7 @@
 
   // Acquire our lock.
   {
-    MutexLocker locker(queue->lock(), THREAD);
+    MutexLocker locker(queue->lock(), thread);
 
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
@@ -1119,7 +1120,7 @@
 nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
                                        int comp_level,
                                        methodHandle hot_method, int hot_count,
-                                       const char* comment, TRAPS) {
+                                       const char* comment, Thread* THREAD) {
   // make sure arguments make sense
   assert(method->method_holder()->klass_part()->oop_is_instance(), "not an instance method");
   assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
@@ -1173,10 +1174,10 @@
   assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
   // some prerequisites that are compiler specific
   if (compiler(comp_level)->is_c2() || compiler(comp_level)->is_shark()) {
-    method->constants()->resolve_string_constants(CHECK_0);
+    method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NULL);
     // Resolve all classes seen in the signature of the method
     // we are compiling.
-    methodOopDesc::load_signature_classes(method, CHECK_0);
+    methodOopDesc::load_signature_classes(method, CHECK_AND_CLEAR_NULL);
   }
 
   // If the method is native, do the lookup in the thread requesting
@@ -1230,7 +1231,7 @@
       return NULL;
     }
   } else {
-    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, CHECK_0);
+    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, THREAD);
   }
 
   // return requested nmethod
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -333,7 +333,7 @@
                                   methodHandle hot_method,
                                   int hot_count,
                                   const char* comment,
-                                  TRAPS);
+                                  Thread* thread);
   static CompileQueue* compile_queue(int comp_level) {
     if (is_c2_compile(comp_level)) return _c2_method_queue;
     if (is_c1_compile(comp_level)) return _c1_method_queue;
@@ -363,7 +363,7 @@
                                  int comp_level,
                                  methodHandle hot_method,
                                  int hot_count,
-                                 const char* comment, TRAPS);
+                                 const char* comment, Thread* thread);
 
   static void compiler_thread_loop();
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,10 +75,25 @@
   set_name("Concurrent Mark-Sweep GC Thread");
 
   if (os::create_thread(this, os::cgc_thread)) {
-    // XXX: need to set this to low priority
-    // unless "agressive mode" set; priority
-    // should be just less than that of VMThread.
-    os::set_priority(this, NearMaxPriority);
+    // An old comment here said: "Priority should be just less
+    // than that of VMThread".  Since the VMThread runs at
+    // NearMaxPriority, the old comment was inaccurate, but
+    // changing the default priority to NearMaxPriority-1
+    // could change current behavior, so the default of
+    // NearMaxPriority stays in place.
+    //
+    // Note that there's a possibility of the VMThread
+    // starving if UseCriticalCMSThreadPriority is on.
+    // That won't happen on Solaris for various reasons,
+    // but may well happen on non-Solaris platforms.
+    int native_prio;
+    if (UseCriticalCMSThreadPriority) {
+      native_prio = os::java_to_os_priority[CriticalPriority];
+    } else {
+      native_prio = os::java_to_os_priority[NearMaxPriority];
+    }
+    os::set_native_priority(this, native_prio);
+
     if (!DisableStartThread) {
       os::start_thread(this);
     }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -42,8 +42,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 
-//
-// CMS Bit Map Wrapper
+// Concurrent marking bit map wrapper
 
 CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
   _bm((uintptr_t*)NULL,0),
@@ -53,13 +52,13 @@
   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
 
-  guarantee(brs.is_reserved(), "couldn't allocate CMS bit map");
+  guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map");
   // For now we'll just commit all of the bit map up fromt.
   // Later on we'll try to be more parsimonious with swap.
   guarantee(_virtual_space.initialize(brs, brs.size()),
-            "couldn't reseve backing store for CMS bit map");
+            "couldn't reserve backing store for concurrent marking bit map");
   assert(_virtual_space.committed_size() == brs.size(),
-         "didn't reserve backing store for all of CMS bit map?");
+         "didn't reserve backing store for all of concurrent marking bit map?");
   _bm.set_map((uintptr_t*)_virtual_space.low());
   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
          _bmWordSize, "inconsistency in bit map sizing");
@@ -104,17 +103,6 @@
   return (int) (diff >> _shifter);
 }
 
-bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
-  HeapWord* left  = MAX2(_bmStartWord, mr.start());
-  HeapWord* right = MIN2(_bmStartWord + _bmWordSize, mr.end());
-  if (right > left) {
-    // Right-open interval [leftOffset, rightOffset).
-    return _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
-  } else {
-    return true;
-  }
-}
-
 void CMBitMapRO::mostly_disjoint_range_union(BitMap*   from_bitmap,
                                              size_t    from_start_index,
                                              HeapWord* to_start_word,
@@ -431,8 +419,6 @@
     assert(newOop->is_oop(), "Expected an oop");
     assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
            "only grey objects on this stack");
-    // iterate over the oops in this oop, marking and pushing
-    // the ones in CMS generation.
     newOop->oop_iterate(cl);
     if (yield_after && _cm->do_yield_check()) {
       res = false;
@@ -474,6 +460,84 @@
               && !nextMarkBitMap()->isMarked((HeapWord*)obj)));
 }
 
+CMRootRegions::CMRootRegions() :
+  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
+  _should_abort(false),  _next_survivor(NULL) { }
+
+void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
+  _young_list = g1h->young_list();
+  _cm = cm;
+}
+
+void CMRootRegions::prepare_for_scan() {
+  assert(!scan_in_progress(), "pre-condition");
+
+  // Currently, only survivors can be root regions.
+  assert(_next_survivor == NULL, "pre-condition");
+  _next_survivor = _young_list->first_survivor_region();
+  _scan_in_progress = (_next_survivor != NULL);
+  _should_abort = false;
+}
+
+HeapRegion* CMRootRegions::claim_next() {
+  if (_should_abort) {
+    // If someone has set the should_abort flag, we return NULL to
+    // force the caller to bail out of their loop.
+    return NULL;
+  }
+
+  // Currently, only survivors can be root regions.
+  HeapRegion* res = _next_survivor;
+  if (res != NULL) {
+    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
+    // Read it again in case it changed while we were waiting for the lock.
+    res = _next_survivor;
+    if (res != NULL) {
+      if (res == _young_list->last_survivor_region()) {
+        // We just claimed the last survivor so store NULL to indicate
+        // that we're done.
+        _next_survivor = NULL;
+      } else {
+        _next_survivor = res->get_next_young_region();
+      }
+    } else {
+      // Someone else claimed the last survivor while we were trying
+      // to take the lock so nothing else to do.
+    }
+  }
+  assert(res == NULL || res->is_survivor(), "post-condition");
+
+  return res;
+}
+
+void CMRootRegions::scan_finished() {
+  assert(scan_in_progress(), "pre-condition");
+
+  // Currently, only survivors can be root regions.
+  if (!_should_abort) {
+    assert(_next_survivor == NULL, "we should have claimed all survivors");
+  }
+  _next_survivor = NULL;
+
+  {
+    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
+    _scan_in_progress = false;
+    RootRegionScan_lock->notify_all();
+  }
+}
+
+bool CMRootRegions::wait_until_scan_finished() {
+  if (!scan_in_progress()) return false;
+
+  {
+    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
+    while (scan_in_progress()) {
+      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
+    }
+  }
+  return true;
+}
+
 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif // _MSC_VER
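
CMRootRegions::claim_next() above does a cheap unlocked read of the cursor and then re-reads it under the lock before advancing, so concurrent workers never claim the same survivor region twice. A standalone sketch of that protocol, with std::mutex standing in for HotSpot's MutexLockerEx and a stand-in Region list:

#include <mutex>

// Stand-ins for HeapRegion and the survivor list links.
struct Region { Region* next; bool is_last; };

struct RootRegionClaimer {
  Region*    _next;           // next unclaimed root region, or nullptr when done
  bool       _should_abort;
  std::mutex _lock;

  Region* claim_next() {
    if (_should_abort) return nullptr;     // force callers out of their loop

    Region* res = _next;                   // cheap unlocked read first
    if (res != nullptr) {
      std::lock_guard<std::mutex> x(_lock);
      res = _next;                         // re-read now that we hold the lock
      if (res != nullptr) {
        _next = res->is_last ? nullptr : res->next;
      }
    }
    return res;                            // nullptr if another worker beat us to it
  }
};

int main() {
  Region r2 = {nullptr, true};
  Region r1 = {&r2, false};
  RootRegionClaimer claimer;
  claimer._next = &r1;
  claimer._should_abort = false;
  bool ok = claimer.claim_next() == &r1 &&
            claimer.claim_next() == &r2 &&
            claimer.claim_next() == nullptr;
  return ok ? 0 : 1;
}
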
@@ -498,6 +562,7 @@
   _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),
+
   _prevMarkBitMap(&_markBitMap1),
   _nextMarkBitMap(&_markBitMap2),
   _at_least_one_mark_complete(false),
@@ -526,7 +591,11 @@
   _cleanup_times(),
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),
-  _parallel_workers(NULL) {
+
+  _parallel_workers(NULL),
+
+  _count_card_bitmaps(NULL),
+  _count_marked_bytes(NULL) {
   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
   if (verbose_level < no_verbose) {
     verbose_level = no_verbose;
@@ -557,9 +626,16 @@
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
 
+  _root_regions.init(_g1h, this);
+
   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
 
+  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_task_num);
+  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_task_num);
+
+  BitMap::idx_t card_bm_size = _card_bm.size();
+
   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
   _active_tasks = _max_task_num;
   for (int i = 0; i < (int) _max_task_num; ++i) {
@@ -567,10 +643,26 @@
     task_queue->initialize();
     _task_queues->register_queue(i, task_queue);
 
-    _tasks[i] = new CMTask(i, this, task_queue, _task_queues);
+    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
+    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions);
+
+    _tasks[i] = new CMTask(i, this,
+                           _count_marked_bytes[i],
+                           &_count_card_bitmaps[i],
+                           task_queue, _task_queues);
+
     _accum_task_vtime[i] = 0.0;
   }
 
+  // Calculate the card number for the bottom of the heap. Used
+  // in biasing indexes into the accounting card bitmaps.
+  _heap_bottom_card_num =
+    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
+                                CardTableModRefBS::card_shift);
+
+  // Clear all the liveness counting data
+  clear_all_count_data();
+
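
The _heap_bottom_card_num computed above biases indexes into the per-task counting card bitmaps: a card bitmap index is an address's card number minus the card number of the bottom of the heap. A small standalone sketch of that arithmetic (512-byte cards as in CardTableModRefBS; the heap base here is an arbitrary example value):

#include <cassert>
#include <cstddef>
#include <cstdint>

static const int       card_shift  = 9;            // 512-byte cards
static const uintptr_t heap_bottom = 0x80000000u;  // example reserved-region base

// A card bitmap index is the address's card number minus the card number of
// the heap bottom -- the biasing described in the constructor above.
static size_t card_bitmap_index_for(uintptr_t addr) {
  intptr_t heap_bottom_card_num = (intptr_t)(heap_bottom >> card_shift);
  return (size_t)((intptr_t)(addr >> card_shift) - heap_bottom_card_num);
}

int main() {
  assert(card_bitmap_index_for(heap_bottom) == 0);
  assert(card_bitmap_index_for(heap_bottom + 512) == 1);
  assert(card_bitmap_index_for(heap_bottom + 1024 + 100) == 2);
  return 0;
}
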
   if (ConcGCThreads > ParallelGCThreads) {
     vm_exit_during_initialization("Can't have more ConcGCThreads "
                                   "than ParallelGCThreads.");
@@ -750,11 +842,6 @@
   ShouldNotReachHere();
 }
 
-// This closure is used to mark refs into the g1 generation
-// from external roots in the CMS bit map.
-// Called at the first checkpoint.
-//
-
 void ConcurrentMark::clearNextBitmap() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1p = g1h->g1_policy();
@@ -794,6 +881,9 @@
     assert(!g1h->mark_in_progress(), "invariant");
   }
 
+  // Clear the liveness counting data
+  clear_all_count_data();
+
   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
   guarantee(!g1h->mark_in_progress(), "invariant");
@@ -854,6 +944,8 @@
   satb_mq_set.set_active_all_threads(true, /* new active value */
                                      false /* expected_active */);
 
+  _root_regions.prepare_for_scan();
+
   // update_g1_committed() will be called at the end of an evac pause
   // when marking is on. So, it's also called at the end of the
   // initial-mark pause to update the heap end, if the heap expands
@@ -1147,6 +1239,69 @@
   return 0;
 }
 
+void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
+  // Currently, only survivors can be root regions.
+  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
+  G1RootRegionScanClosure cl(_g1h, this, worker_id);
+
+  const uintx interval = PrefetchScanIntervalInBytes;
+  HeapWord* curr = hr->bottom();
+  const HeapWord* end = hr->top();
+  while (curr < end) {
+    Prefetch::read(curr, interval);
+    oop obj = oop(curr);
+    int size = obj->oop_iterate(&cl);
+    assert(size == obj->size(), "sanity");
+    curr += size;
+  }
+}
+
+class CMRootRegionScanTask : public AbstractGangTask {
+private:
+  ConcurrentMark* _cm;
+
+public:
+  CMRootRegionScanTask(ConcurrentMark* cm) :
+    AbstractGangTask("Root Region Scan"), _cm(cm) { }
+
+  void work(uint worker_id) {
+    assert(Thread::current()->is_ConcurrentGC_thread(),
+           "this should only be done by a conc GC thread");
+
+    CMRootRegions* root_regions = _cm->root_regions();
+    HeapRegion* hr = root_regions->claim_next();
+    while (hr != NULL) {
+      _cm->scanRootRegion(hr, worker_id);
+      hr = root_regions->claim_next();
+    }
+  }
+};
+
+void ConcurrentMark::scanRootRegions() {
+  // scan_in_progress() will have been set to true only if there was
+  // at least one root region to scan. So, if it's false, we
+  // should not attempt to do any further work.
+  if (root_regions()->scan_in_progress()) {
+    _parallel_marking_threads = calc_parallel_marking_threads();
+    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
+           "Maximum number of marking threads exceeded");
+    uint active_workers = MAX2(1U, parallel_marking_threads());
+
+    CMRootRegionScanTask task(this);
+    if (parallel_marking_threads() > 0) {
+      _parallel_workers->set_active_workers((int) active_workers);
+      _parallel_workers->run_task(&task);
+    } else {
+      task.work(0);
+    }
+
+    // It's possible that has_aborted() is true here without actually
+    // aborting the survivor scan earlier. This is OK as it's
+    // mainly used for sanity checking.
+    root_regions()->scan_finished();
+  }
+}
+
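
scanRootRegion() above walks a root (survivor) region from bottom() to top() by asking each object for its size and stepping the cursor forward by that amount. A standalone sketch of that walk over a flat array of variable-sized records; the size-in-first-word layout is illustrative, not the real HotSpot object layout:

#include <cstdio>
#include <vector>

typedef unsigned long word_t;

static void scan_region(const word_t* bottom, const word_t* top) {
  const word_t* curr = bottom;
  while (curr < top) {
    word_t size_in_words = curr[0];       // "object header" holds the size
    printf("object at word offset %ld spans %lu words\n",
           (long)(curr - bottom), (unsigned long)size_in_words);
    curr += size_in_words;                // step over the whole object
  }
}

int main() {
  // Three "objects" of 3, 5 and 2 words.
  std::vector<word_t> region = {3, 0, 0,  5, 0, 0, 0, 0,  2, 0};
  scan_region(region.data(), region.data() + region.size());
  return 0;
}
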
 void ConcurrentMark::markFromRoots() {
   // we might be tempted to assert that:
   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
@@ -1225,6 +1380,10 @@
       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
     }
   } else {
+    // Aggregate the per-task counting data that we have accumulated
+    // while marking.
+    aggregate_count_data();
+
     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
     // We're done with marking.
     // This is the end of  the marking cycle, we're expected all
@@ -1262,48 +1421,41 @@
   g1p->record_concurrent_mark_remark_end();
 }
 
-#define CARD_BM_TEST_MODE 0
-
+// Used to calculate the # live objects per region
+// for verification purposes
 class CalcLiveObjectsClosure: public HeapRegionClosure {
 
   CMBitMapRO* _bm;
   ConcurrentMark* _cm;
-  bool _changed;
-  bool _yield;
-  size_t _words_done;
-  size_t _tot_live;
-  size_t _tot_used;
-  size_t _regions_done;
-  double _start_vtime_sec;
-
   BitMap* _region_bm;
   BitMap* _card_bm;
+
+  // Debugging
+  size_t _tot_words_done;
+  size_t _tot_live;
+  size_t _tot_used;
+
+  size_t _region_marked_bytes;
+
   intptr_t _bottom_card_num;
-  bool _final;
 
   void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) {
-    for (intptr_t i = start_card_num; i <= last_card_num; i++) {
-#if CARD_BM_TEST_MODE
-      guarantee(_card_bm->at(i - _bottom_card_num), "Should already be set.");
-#else
-      _card_bm->par_at_put(i - _bottom_card_num, 1);
-#endif
+    assert(start_card_num <= last_card_num, "sanity");
+    BitMap::idx_t start_idx = start_card_num - _bottom_card_num;
+    BitMap::idx_t last_idx = last_card_num - _bottom_card_num;
+
+    for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
+      _card_bm->par_at_put(i, 1);
     }
   }
 
 public:
-  CalcLiveObjectsClosure(bool final,
-                         CMBitMapRO *bm, ConcurrentMark *cm,
+  CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
                          BitMap* region_bm, BitMap* card_bm) :
-    _bm(bm), _cm(cm), _changed(false), _yield(true),
-    _words_done(0), _tot_live(0), _tot_used(0),
-    _region_bm(region_bm), _card_bm(card_bm),_final(final),
-    _regions_done(0), _start_vtime_sec(0.0)
-  {
-    _bottom_card_num =
-      intptr_t(uintptr_t(G1CollectedHeap::heap()->reserved_region().start()) >>
-               CardTableModRefBS::card_shift);
-  }
+    _bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
+    _region_marked_bytes(0), _tot_words_done(0),
+    _tot_live(0), _tot_used(0),
+    _bottom_card_num(cm->heap_bottom_card_num()) { }
 
   // It takes a region that's not empty (i.e., it has at least one
   // live object in it and sets its corresponding bit on the region
@@ -1319,29 +1471,16 @@
       _region_bm->par_at_put((BitMap::idx_t) index, true);
     } else {
       // Starts humongous case: calculate how many regions are part of
-      // this humongous region and then set the bit range. It might
-      // have been a bit more efficient to look at the object that
-      // spans these humongous regions to calculate their number from
-      // the object's size. However, it's a good idea to calculate
-      // this based on the metadata itself, and not the region
-      // contents, so that this code is not aware of what goes into
-      // the humongous regions (in case this changes in the future).
+      // this humongous region and then set the bit range.
       G1CollectedHeap* g1h = G1CollectedHeap::heap();
-      size_t end_index = index + 1;
-      while (end_index < g1h->n_regions()) {
-        HeapRegion* chr = g1h->region_at(end_index);
-        if (!chr->continuesHumongous()) break;
-        end_index += 1;
-      }
+      HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
+      size_t end_index = last_hr->hrs_index() + 1;
       _region_bm->par_at_put_range((BitMap::idx_t) index,
                                    (BitMap::idx_t) end_index, true);
     }
   }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (!_final && _regions_done == 0) {
-      _start_vtime_sec = os::elapsedVTime();
-    }
 
     if (hr->continuesHumongous()) {
       // We will ignore these here and process them when their
@@ -1355,48 +1494,41 @@
     }
 
     HeapWord* nextTop = hr->next_top_at_mark_start();
-    HeapWord* start   = hr->top_at_conc_mark_count();
-    assert(hr->bottom() <= start && start <= hr->end() &&
-           hr->bottom() <= nextTop && nextTop <= hr->end() &&
-           start <= nextTop,
-           "Preconditions.");
-    // Otherwise, record the number of word's we'll examine.
+    HeapWord* start   = hr->bottom();
+
+    assert(start <= hr->end() && start <= nextTop && nextTop <= hr->end(),
+           err_msg("Preconditions not met - "
+                   "start: "PTR_FORMAT", nextTop: "PTR_FORMAT", end: "PTR_FORMAT,
+                   start, nextTop, hr->end()));
+
+    // Record the number of words we'll examine.
     size_t words_done = (nextTop - start);
+
     // Find the first marked object at or after "start".
     start = _bm->getNextMarkedWordAddress(start, nextTop);
+
     size_t marked_bytes = 0;
 
     // Below, the term "card num" means the result of shifting an address
     // by the card shift -- address 0 corresponds to card number 0.  One
     // must subtract the card num of the bottom of the heap to obtain a
     // card table index.
+
     // The first card num of the sequence of live cards currently being
     // constructed.  -1 ==> no sequence.
     intptr_t start_card_num = -1;
+
     // The last card num of the sequence of live cards currently being
     // constructed.  -1 ==> no sequence.
     intptr_t last_card_num = -1;
 
     while (start < nextTop) {
-      if (_yield && _cm->do_yield_check()) {
-        // We yielded.  It might be for a full collection, in which case
-        // all bets are off; terminate the traversal.
-        if (_cm->has_aborted()) {
-          _changed = false;
-          return true;
-        } else {
-          // Otherwise, it might be a collection pause, and the region
-          // we're looking at might be in the collection set.  We'll
-          // abandon this region.
-          return false;
-        }
-      }
       oop obj = oop(start);
       int obj_sz = obj->size();
+
       // The card num of the start of the current object.
       intptr_t obj_card_num =
         intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift);
-
       HeapWord* obj_last = start + obj_sz - 1;
       intptr_t obj_last_card_num =
         intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift);
@@ -1414,110 +1546,404 @@
             start_card_num = obj_card_num;
           }
         }
-#if CARD_BM_TEST_MODE
-        /*
-        gclog_or_tty->print_cr("Setting bits from %d/%d.",
-                               obj_card_num - _bottom_card_num,
-                               obj_last_card_num - _bottom_card_num);
-        */
-        for (intptr_t j = obj_card_num; j <= obj_last_card_num; j++) {
-          _card_bm->par_at_put(j - _bottom_card_num, 1);
-        }
-#endif
       }
       // In any case, we set the last card num.
       last_card_num = obj_last_card_num;
 
       marked_bytes += (size_t)obj_sz * HeapWordSize;
+
       // Find the next marked object after this one.
       start = _bm->getNextMarkedWordAddress(start + 1, nextTop);
-      _changed = true;
     }
+
     // Handle the last range, if any.
     if (start_card_num != -1) {
       mark_card_num_range(start_card_num, last_card_num);
     }
-    if (_final) {
-      // Mark the allocated-since-marking portion...
-      HeapWord* tp = hr->top();
-      if (nextTop < tp) {
-        start_card_num =
-          intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift);
-        last_card_num =
-          intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift);
-        mark_card_num_range(start_card_num, last_card_num);
-        // This definitely means the region has live objects.
-        set_bit_for_region(hr);
-      }
+
+    // Mark the allocated-since-marking portion...
+    HeapWord* top = hr->top();
+    if (nextTop < top) {
+      start_card_num = intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift);
+      last_card_num = intptr_t(uintptr_t(top) >> CardTableModRefBS::card_shift);
+
+      mark_card_num_range(start_card_num, last_card_num);
+
+      // This definitely means the region has live objects.
+      set_bit_for_region(hr);
     }
 
-    hr->add_to_marked_bytes(marked_bytes);
     // Update the live region bitmap.
     if (marked_bytes > 0) {
       set_bit_for_region(hr);
     }
-    hr->set_top_at_conc_mark_count(nextTop);
+
+    // Set the marked bytes for the current region so that
+    // it can be queried by a calling verification routine
+    _region_marked_bytes = marked_bytes;
+
     _tot_live += hr->next_live_bytes();
     _tot_used += hr->used();
-    _words_done = words_done;
-
-    if (!_final) {
-      ++_regions_done;
-      if (_regions_done % 10 == 0) {
-        double end_vtime_sec = os::elapsedVTime();
-        double elapsed_vtime_sec = end_vtime_sec - _start_vtime_sec;
-        if (elapsed_vtime_sec > (10.0 / 1000.0)) {
-          jlong sleep_time_ms =
-            (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0);
-          os::sleep(Thread::current(), sleep_time_ms, false);
-          _start_vtime_sec = end_vtime_sec;
-        }
-      }
-    }
+    _tot_words_done = words_done;
 
     return false;
   }
 
-  bool changed() { return _changed;  }
-  void reset()   { _changed = false; _words_done = 0; }
-  void no_yield() { _yield = false; }
-  size_t words_done() { return _words_done; }
-  size_t tot_live() { return _tot_live; }
-  size_t tot_used() { return _tot_used; }
+  size_t region_marked_bytes() const { return _region_marked_bytes; }
+
+  // Debugging
+  size_t tot_words_done() const      { return _tot_words_done; }
+  size_t tot_live() const            { return _tot_live; }
+  size_t tot_used() const            { return _tot_used; }
+};
+
+// Heap region closure used for verifying the counting data
+// that was accumulated concurrently and aggregated during
+// the remark pause. This closure is applied to the heap
+// regions during the STW cleanup pause.
+
+class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
+  ConcurrentMark* _cm;
+  CalcLiveObjectsClosure _calc_cl;
+  BitMap* _region_bm;   // Region BM to be verified
+  BitMap* _card_bm;     // Card BM to be verified
+  bool _verbose;        // verbose output?
+
+  BitMap* _exp_region_bm; // Expected Region BM values
+  BitMap* _exp_card_bm;   // Expected card BM values
+
+  int _failures;
+
+public:
+  VerifyLiveObjectDataHRClosure(ConcurrentMark* cm,
+                                BitMap* region_bm,
+                                BitMap* card_bm,
+                                BitMap* exp_region_bm,
+                                BitMap* exp_card_bm,
+                                bool verbose) :
+    _cm(cm),
+    _calc_cl(_cm->nextMarkBitMap(), _cm, exp_region_bm, exp_card_bm),
+    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
+    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
+    _failures(0) { }
+
+  int failures() const { return _failures; }
+
+  bool doHeapRegion(HeapRegion* hr) {
+    if (hr->continuesHumongous()) {
+      // We will ignore these here and process them when their
+      // associated "starts humongous" region is processed (see
+      // set_bit_for_heap_region()). Note that we cannot rely on their
+      // associated "starts humongous" region to have their bit set to
+      // 1 since, due to the region chunking in the parallel region
+      // iteration, a "continues humongous" region might be visited
+      // before its associated "starts humongous".
+      return false;
+    }
+
+    int failures = 0;
+
+    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
+    // this region and set the corresponding bits in the expected region
+    // and card bitmaps.
+    bool res = _calc_cl.doHeapRegion(hr);
+    assert(res == false, "should be continuing");
+
+    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
+                    Mutex::_no_safepoint_check_flag);
+
+    // Verify that _top_at_conc_count == ntams
+    if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) {
+      if (_verbose) {
+        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": top at conc count incorrect: "
+                               "expected " PTR_FORMAT ", actual: " PTR_FORMAT,
+                               hr->hrs_index(), hr->next_top_at_mark_start(),
+                               hr->top_at_conc_mark_count());
+      }
+      failures += 1;
+    }
+
+    // Verify the marked bytes for this region.
+    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
+    size_t act_marked_bytes = hr->next_marked_bytes();
+
+    // We're not OK if expected marked bytes > actual marked bytes. It means
+    // we have missed accounting some objects during the actual marking.
+    if (exp_marked_bytes > act_marked_bytes) {
+      if (_verbose) {
+        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": marked bytes mismatch: "
+                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
+                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
+      }
+      failures += 1;
+    }
+
+    // Verify the bit, for this region, in the actual and expected
+    // (which was just calculated) region bit maps.
+    // We're not OK if the bit in the calculated expected region
+    // bitmap is set and the bit in the actual region bitmap is not.
+    BitMap::idx_t index = (BitMap::idx_t)hr->hrs_index();
+
+    bool expected = _exp_region_bm->at(index);
+    bool actual = _region_bm->at(index);
+    if (expected && !actual) {
+      if (_verbose) {
+        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": region bitmap mismatch: "
+                               "expected: %d, actual: %d",
+                               hr->hrs_index(), expected, actual);
+      }
+      failures += 1;
+    }
+
+    // Verify that the card bit maps for the cards spanned by the current
+    // region match. We have an error if we have a set bit in the expected
+    // bit map and the corresponding bit in the actual bitmap is not set.
+
+    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
+    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
+
+    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
+      expected = _exp_card_bm->at(i);
+      actual = _card_bm->at(i);
+
+      if (expected && !actual) {
+        if (_verbose) {
+          gclog_or_tty->print_cr("Region " SIZE_FORMAT ": card bitmap mismatch at " SIZE_FORMAT ": "
+                                 "expected: %d, actual: %d",
+                                 hr->hrs_index(), i, expected, actual);
+        }
+        failures += 1;
+      }
+    }
+
+    if (failures > 0 && _verbose)  {
+      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
+                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
+                             HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
+                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
+    }
+
+    _failures += failures;
+
+    // We could stop iteration over the heap when we
+    // find the first violating region by returning true.
+    return false;
+  }
 };
 
 
-void ConcurrentMark::calcDesiredRegions() {
-  _region_bm.clear();
-  _card_bm.clear();
-  CalcLiveObjectsClosure calccl(false /*final*/,
-                                nextMarkBitMap(), this,
-                                &_region_bm, &_card_bm);
-  G1CollectedHeap *g1h = G1CollectedHeap::heap();
-  g1h->heap_region_iterate(&calccl);
-
-  do {
-    calccl.reset();
-    g1h->heap_region_iterate(&calccl);
-  } while (calccl.changed());
-}
+class G1ParVerifyFinalCountTask: public AbstractGangTask {
+protected:
+  G1CollectedHeap* _g1h;
+  ConcurrentMark* _cm;
+  BitMap* _actual_region_bm;
+  BitMap* _actual_card_bm;
+
+  uint    _n_workers;
+
+  BitMap* _expected_region_bm;
+  BitMap* _expected_card_bm;
+
+  int  _failures;
+  bool _verbose;
+
+public:
+  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
+                            BitMap* region_bm, BitMap* card_bm,
+                            BitMap* expected_region_bm, BitMap* expected_card_bm)
+    : AbstractGangTask("G1 verify final counting"),
+      _g1h(g1h), _cm(_g1h->concurrent_mark()),
+      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
+      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
+      _failures(0), _verbose(false),
+      _n_workers(0) {
+    assert(VerifyDuringGC, "don't call this otherwise");
+
+    // Use the value already set as the number of active threads
+    // in the call to run_task().
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      assert( _g1h->workers()->active_workers() > 0,
+        "Should have been previously set");
+      _n_workers = _g1h->workers()->active_workers();
+    } else {
+      _n_workers = 1;
+    }
+
+    assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
+    assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
+
+    _verbose = _cm->verbose_medium();
+  }
+
+  void work(uint worker_id) {
+    assert(worker_id < _n_workers, "invariant");
+
+    VerifyLiveObjectDataHRClosure verify_cl(_cm,
+                                            _actual_region_bm, _actual_card_bm,
+                                            _expected_region_bm,
+                                            _expected_card_bm,
+                                            _verbose);
+
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      _g1h->heap_region_par_iterate_chunked(&verify_cl,
+                                            worker_id,
+                                            _n_workers,
+                                            HeapRegion::VerifyCountClaimValue);
+    } else {
+      _g1h->heap_region_iterate(&verify_cl);
+    }
+
+    Atomic::add(verify_cl.failures(), &_failures);
+  }
+
+  int failures() const { return _failures; }
+};
+
+// Final update of count data (during cleanup).
+// Adds [top_at_count, NTAMS) to the marked bytes for each
+// region. Sets the bits in the card bitmap corresponding
+// to the interval [top_at_count, top], and sets the
+// liveness bit for each region containing live data
+// in the region bitmap.
+
+class FinalCountDataUpdateClosure: public HeapRegionClosure {
+  ConcurrentMark* _cm;
+  BitMap* _region_bm;
+  BitMap* _card_bm;
+
+  size_t _total_live_bytes;
+  size_t _total_used_bytes;
+  size_t _total_words_done;
+
+  void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
+    assert(start_idx <= last_idx, "sanity");
+
+    // Set the inclusive bit range [start_idx, last_idx].
+    // For small ranges (up to 8 cards) use a simple loop; otherwise
+    // use par_at_put_range.
+    if ((last_idx - start_idx) <= 8) {
+      for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
+        _card_bm->par_set_bit(i);
+      }
+    } else {
+      assert(last_idx < _card_bm->size(), "sanity");
+      // Note BitMap::par_at_put_range() is exclusive.
+      _card_bm->par_at_put_range(start_idx, last_idx+1, true);
+    }
+  }
+
+  // It takes a region that's not empty (i.e., it has at least one
+  // live object in it and sets its corresponding bit on the region
+  // bitmap to 1. If the region is "starts humongous" it will also set
+  // to 1 the bits on the region bitmap that correspond to its
+  // associated "continues humongous" regions.
+  void set_bit_for_region(HeapRegion* hr) {
+    assert(!hr->continuesHumongous(), "should have filtered those out");
+
+    size_t index = hr->hrs_index();
+    if (!hr->startsHumongous()) {
+      // Normal (non-humongous) case: just set the bit.
+      _region_bm->par_set_bit((BitMap::idx_t) index);
+    } else {
+      // Starts humongous case: calculate how many regions are part of
+      // this humongous region and then set the bit range.
+      G1CollectedHeap* g1h = G1CollectedHeap::heap();
+      HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
+      size_t end_index = last_hr->hrs_index() + 1;
+      _region_bm->par_at_put_range((BitMap::idx_t) index,
+                                   (BitMap::idx_t) end_index, true);
+    }
+  }
+
+ public:
+  FinalCountDataUpdateClosure(ConcurrentMark* cm,
+                              BitMap* region_bm,
+                              BitMap* card_bm) :
+    _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
+    _total_words_done(0), _total_live_bytes(0), _total_used_bytes(0) { }
+
+  bool doHeapRegion(HeapRegion* hr) {
+
+    if (hr->continuesHumongous()) {
+      // We will ignore these here and process them when their
+      // associated "starts humongous" region is processed (see
+      // set_bit_for_heap_region()). Note that we cannot rely on their
+      // associated "starts humongous" region to have their bit set to
+      // 1 since, due to the region chunking in the parallel region
+      // iteration, a "continues humongous" region might be visited
+      // before its associated "starts humongous".
+      return false;
+    }
+
+    HeapWord* start = hr->top_at_conc_mark_count();
+    HeapWord* ntams = hr->next_top_at_mark_start();
+    HeapWord* top   = hr->top();
+
+    assert(hr->bottom() <= start && start <= hr->end() &&
+           hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
+
+    size_t words_done = ntams - hr->bottom();
+
+    if (start < ntams) {
+      // Region was changed between remark and cleanup pauses
+      // We need to add (ntams - start) to the marked bytes
+      // for this region, and set bits for the range
+      // [ card_idx(start), card_idx(ntams) ) in the card bitmap.
+      size_t live_bytes = (ntams - start) * HeapWordSize;
+      hr->add_to_marked_bytes(live_bytes);
+
+      // Record the new top at conc count
+      hr->set_top_at_conc_mark_count(ntams);
+
+      // The setting of the bits in the card bitmap takes place below
+    }
+
+    // Mark the allocated-since-marking portion...
+    if (ntams < top) {
+      // This definitely means the region has live objects.
+      set_bit_for_region(hr);
+    }
+
+    // Now set the bits for [start, top]
+    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
+    BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top);
+    set_card_bitmap_range(start_idx, last_idx);
+
+    // Set the bit for the region if it contains live data
+    if (hr->next_marked_bytes() > 0) {
+      set_bit_for_region(hr);
+    }
+
+    _total_words_done += words_done;
+    _total_used_bytes += hr->used();
+    _total_live_bytes += hr->next_marked_bytes();
+
+    return false;
+  }
+
+  size_t total_words_done() const { return _total_words_done; }
+  size_t total_live_bytes() const { return _total_live_bytes; }
+  size_t total_used_bytes() const { return _total_used_bytes; }
+};
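
set_card_bitmap_range() in the closure above takes an inclusive [start_idx, last_idx] interval, sets small ranges with a plain loop and larger ones with a bulk operation whose right bound is exclusive (hence the last_idx + 1). A standalone sketch of that inclusive-to-exclusive handling, with std::vector<bool> standing in for HotSpot's BitMap:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

static void set_card_bitmap_range(std::vector<bool>& bm,
                                  size_t start_idx, size_t last_idx) {
  assert(start_idx <= last_idx && last_idx < bm.size());
  if (last_idx - start_idx <= 8) {
    for (size_t i = start_idx; i <= last_idx; i++) {
      bm[i] = true;                               // small range: simple loop
    }
  } else {
    // Bulk fill is exclusive on the right, so pass last_idx + 1.
    std::fill(bm.begin() + start_idx, bm.begin() + last_idx + 1, true);
  }
}

int main() {
  std::vector<bool> bm(64, false);
  set_card_bitmap_range(bm, 3, 5);    // loop path
  set_card_bitmap_range(bm, 10, 40);  // bulk path
  assert(bm[5] && bm[40] && !bm[41] && !bm[2]);
  return 0;
}
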
 
 class G1ParFinalCountTask: public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
-  CMBitMap* _bm;
+  ConcurrentMark* _cm;
+  BitMap* _actual_region_bm;
+  BitMap* _actual_card_bm;
+
   uint    _n_workers;
+
   size_t *_live_bytes;
   size_t *_used_bytes;
-  BitMap* _region_bm;
-  BitMap* _card_bm;
+
 public:
-  G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm,
-                      BitMap* region_bm, BitMap* card_bm)
-    : AbstractGangTask("G1 final counting"), _g1h(g1h),
-    _bm(bm), _region_bm(region_bm), _card_bm(card_bm),
-    _n_workers(0)
-  {
+  G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
+    : AbstractGangTask("G1 final counting"),
+      _g1h(g1h), _cm(_g1h->concurrent_mark()),
+      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
+      _n_workers(0) {
     // Use the value already set as the number of active threads
     // in the call to run_task().  Needed for the allocation of
     // _live_bytes and _used_bytes.
@@ -1539,29 +1965,32 @@
   }
 
   void work(uint worker_id) {
-    CalcLiveObjectsClosure calccl(true /*final*/,
-                                  _bm, _g1h->concurrent_mark(),
-                                  _region_bm, _card_bm);
-    calccl.no_yield();
+    assert(worker_id < _n_workers, "invariant");
+
+    FinalCountDataUpdateClosure final_update_cl(_cm,
+                                                _actual_region_bm,
+                                                _actual_card_bm);
+
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&calccl, worker_id,
-                                            (int) _n_workers,
+      _g1h->heap_region_par_iterate_chunked(&final_update_cl,
+                                            worker_id,
+                                            _n_workers,
                                             HeapRegion::FinalCountClaimValue);
     } else {
-      _g1h->heap_region_iterate(&calccl);
+      _g1h->heap_region_iterate(&final_update_cl);
     }
-    assert(calccl.complete(), "Shouldn't have yielded!");
-
-    assert(worker_id < _n_workers, "invariant");
-    _live_bytes[worker_id] = calccl.tot_live();
-    _used_bytes[worker_id] = calccl.tot_used();
-  }
+
+    _live_bytes[worker_id] = final_update_cl.total_live_bytes();
+    _used_bytes[worker_id] = final_update_cl.total_used_bytes();
+  }
+
   size_t live_bytes()  {
     size_t live_bytes = 0;
     for (uint i = 0; i < _n_workers; ++i)
       live_bytes += _live_bytes[i];
     return live_bytes;
   }
+
   size_t used_bytes()  {
     size_t used_bytes = 0;
     for (uint i = 0; i < _n_workers; ++i)
@@ -1724,8 +2153,7 @@
   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
                        BitMap* region_bm, BitMap* card_bm) :
     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
-    _region_bm(region_bm), _card_bm(card_bm)
-  {}
+    _region_bm(region_bm), _card_bm(card_bm) { }
 
   void work(uint worker_id) {
     if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -1772,11 +2200,10 @@
   uint n_workers;
 
   // Do counting once more with the world stopped for good measure.
-  G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
-                                        &_region_bm, &_card_bm);
+  G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
+
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-    assert(g1h->check_heap_region_claim_values(
-                                               HeapRegion::InitialClaimValue),
+    assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
            "sanity check");
 
     g1h->set_par_threads();
@@ -1787,14 +2214,42 @@
     // Done with the parallel phase so reset to 0.
     g1h->set_par_threads(0);
 
-    assert(g1h->check_heap_region_claim_values(
-                                             HeapRegion::FinalCountClaimValue),
+    assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
            "sanity check");
   } else {
     n_workers = 1;
     g1_par_count_task.work(0);
   }
 
+  if (VerifyDuringGC) {
+    // Verify that the counting data accumulated during marking matches
+    // that calculated by walking the marking bitmap.
+
+    // Bitmaps to hold expected values
+    BitMap expected_region_bm(_region_bm.size(), false);
+    BitMap expected_card_bm(_card_bm.size(), false);
+
+    G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
+                                                 &_region_bm,
+                                                 &_card_bm,
+                                                 &expected_region_bm,
+                                                 &expected_card_bm);
+
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      g1h->set_par_threads((int)n_workers);
+      g1h->workers()->run_task(&g1_par_verify_task);
+      // Done with the parallel phase so reset to 0.
+      g1h->set_par_threads(0);
+
+      assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
+             "sanity check");
+    } else {
+      g1_par_verify_task.work(0);
+    }
+
+    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
+  }
+
   size_t known_garbage_bytes =
     g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
   g1p->set_known_garbage_bytes(known_garbage_bytes);
@@ -1905,6 +2360,10 @@
   // races with it goes around and waits for completeCleanup to finish.
   g1h->increment_total_collections();
 
+  // We reclaimed old regions so we should calculate the sizes to make
+  // sure we update the old gen/space data.
+  g1h->g1mm()->update_sizes();
+
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(after)");
@@ -1983,12 +2442,11 @@
 class G1CMKeepAliveClosure: public OopClosure {
   G1CollectedHeap* _g1;
   ConcurrentMark*  _cm;
-  CMBitMap*        _bitMap;
  public:
-  G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
-                       CMBitMap* bitMap) :
-    _g1(g1), _cm(cm),
-    _bitMap(bitMap) {}
+  G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm) :
+    _g1(g1), _cm(cm) {
+    assert(Thread::current()->is_VM_thread(), "otherwise fix worker id");
+  }
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
@@ -2004,26 +2462,25 @@
     }
 
     if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
-      _bitMap->mark(addr);
+      _cm->mark_and_count(obj);
       _cm->mark_stack_push(obj);
     }
   }
 };
 
 class G1CMDrainMarkingStackClosure: public VoidClosure {
+  ConcurrentMark*               _cm;
   CMMarkStack*                  _markStack;
-  CMBitMap*                     _bitMap;
   G1CMKeepAliveClosure*         _oopClosure;
  public:
-  G1CMDrainMarkingStackClosure(CMBitMap* bitMap, CMMarkStack* markStack,
+  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMMarkStack* markStack,
                                G1CMKeepAliveClosure* oopClosure) :
-    _bitMap(bitMap),
+    _cm(cm),
     _markStack(markStack),
-    _oopClosure(oopClosure)
-  {}
+    _oopClosure(oopClosure) { }
 
   void do_void() {
-    _markStack->drain((OopClosure*)_oopClosure, _bitMap, false);
+    _markStack->drain((OopClosure*)_oopClosure, _cm->nextMarkBitMap(), false);
   }
 };
 
@@ -2102,8 +2559,7 @@
   CMTask* _task;
  public:
   G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) :
-    _cm(cm), _task(task)
-  {}
+    _cm(cm), _task(task) { }
 
   void do_void() {
     do {
@@ -2242,9 +2698,9 @@
     rp->setup_policy(clear_all_soft_refs);
     assert(_markStack.isEmpty(), "mark stack should be empty");
 
-    G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
+    G1CMKeepAliveClosure g1_keep_alive(g1h, this);
     G1CMDrainMarkingStackClosure
-      g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
+      g1_drain_mark_stack(this, &_markStack, &g1_keep_alive);
 
     // We use the work gang from the G1CollectedHeap and we utilize all
     // the worker threads.
@@ -2616,18 +3072,6 @@
 // during an evacuation pause). This was a late change to the code and
 // is currently not being taken advantage of.
 
-class CMGlobalObjectClosure : public ObjectClosure {
-private:
-  ConcurrentMark* _cm;
-
-public:
-  void do_object(oop obj) {
-    _cm->deal_with_reference(obj);
-  }
-
-  CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { }
-};
-
 void ConcurrentMark::deal_with_reference(oop obj) {
   if (verbose_high()) {
     gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT,
@@ -2672,6 +3116,18 @@
   }
 }
 
+class CMGlobalObjectClosure : public ObjectClosure {
+private:
+  ConcurrentMark* _cm;
+
+public:
+  void do_object(oop obj) {
+    _cm->deal_with_reference(obj);
+  }
+
+  CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { }
+};
+
 void ConcurrentMark::drainAllSATBBuffers() {
   guarantee(false, "drainAllSATBBuffers(): don't call this any more");
 
@@ -2693,15 +3149,6 @@
   assert(satb_mq_set.completed_buffers_num() == 0, "invariant");
 }
 
-void ConcurrentMark::clear(oop p) {
-  assert(p != NULL && p->is_oop(), "expected an oop");
-  HeapWord* addr = (HeapWord*)p;
-  assert(addr >= _nextMarkBitMap->startWord() ||
-         addr < _nextMarkBitMap->endWord(), "in a region");
-
-  _nextMarkBitMap->clear(addr);
-}
-
 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
   // Note we are overriding the read-only view of the prev map here, via
   // the cast.
@@ -3015,6 +3462,192 @@
   }
 }
 
+// Aggregate the counting data that was constructed concurrently
+// with marking.
+class AggregateCountDataHRClosure: public HeapRegionClosure {
+  ConcurrentMark* _cm;
+  BitMap* _cm_card_bm;
+  size_t _max_task_num;
+
+ public:
+  AggregateCountDataHRClosure(ConcurrentMark *cm,
+                              BitMap* cm_card_bm,
+                              size_t max_task_num) :
+    _cm(cm), _cm_card_bm(cm_card_bm),
+    _max_task_num(max_task_num) { }
+
+  bool is_card_aligned(HeapWord* p) {
+    return ((uintptr_t(p) & (CardTableModRefBS::card_size - 1)) == 0);
+  }
+
+  bool doHeapRegion(HeapRegion* hr) {
+    if (hr->continuesHumongous()) {
+      // We will ignore these here and process them when their
+      // associated "starts humongous" region is processed.
+      // Note that we cannot rely on their associated
+      // "starts humongous" region to have its bit set to 1
+      // since, due to the region chunking in the parallel region
+      // iteration, a "continues humongous" region might be visited
+      // before its associated "starts humongous".
+      return false;
+    }
+
+    HeapWord* start = hr->bottom();
+    HeapWord* limit = hr->next_top_at_mark_start();
+    HeapWord* end = hr->end();
+
+    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
+           err_msg("Preconditions not met - "
+                   "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
+                   "top: "PTR_FORMAT", end: "PTR_FORMAT,
+                   start, limit, hr->top(), hr->end()));
+
+    assert(hr->next_marked_bytes() == 0, "Precondition");
+
+    if (start == limit) {
+      // NTAMS of this region has not been set so nothing to do.
+      return false;
+    }
+
+    assert(is_card_aligned(start), "sanity");
+    assert(is_card_aligned(end), "sanity");
+
+    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
+    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
+    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
+
+    // If ntams is not card aligned then we bump the index for
+    // limit so that we get the card spanning ntams.
+    if (!is_card_aligned(limit)) {
+      limit_idx += 1;
+    }
+
+    assert(limit_idx <= end_idx, "or else use atomics");
+
+    // Aggregate the "stripe" in the count data associated with hr.
+    size_t hrs_index = hr->hrs_index();
+    size_t marked_bytes = 0;
+
+    for (int i = 0; (size_t)i < _max_task_num; i += 1) {
+      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
+      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
+
+      // Fetch the marked_bytes in this region for task i and
+      // add it to the running total for this region.
+      marked_bytes += marked_bytes_array[hrs_index];
+
+      // Now union the bitmaps[0,max_task_num)[start_idx..limit_idx)
+      // into the global card bitmap.
+      BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
+
+      while (scan_idx < limit_idx) {
+        assert(task_card_bm->at(scan_idx) == true, "should be");
+        _cm_card_bm->set_bit(scan_idx);
+        assert(_cm_card_bm->at(scan_idx) == true, "should be");
+
+        // BitMap::get_next_one_offset() can handle the case when
+        // its left_offset parameter is greater than its right_offset
+        // parameter. It does, however, have an early exit if
+        // left_offset == right_offset. So let's limit the value
+        // passed in for left offset here.
+        BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
+        scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
+      }
+    }
+
+    // Update the marked bytes for this region.
+    hr->add_to_marked_bytes(marked_bytes);
+
+    // Now set the top at count to NTAMS.
+    hr->set_top_at_conc_mark_count(limit);
+
+    // Next heap region
+    return false;
+  }
+};
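
The aggregation step above sums each task's per-region marked bytes and unions each task's card bits into the global card bitmap. As a rough, self-contained illustration of that pattern (not the HotSpot code itself), the sketch below uses std::vector<bool> in place of BitMap and tests every bit directly, whereas the real closure skips ahead with BitMap::get_next_one_offset(); all names here are illustrative.

    #include <cstddef>
    #include <vector>

    // Illustrative stand-ins for the per-task liveness counting structures.
    struct TaskCountData {
      std::vector<bool>   card_bm;       // one bit per card
      std::vector<size_t> marked_bytes;  // one slot per heap region
    };

    // Union every task's card bits for [start_idx, limit_idx) into the global
    // card bitmap and accumulate the marked bytes recorded for region hrs_index.
    size_t aggregate_region(const std::vector<TaskCountData>& tasks,
                            std::vector<bool>& global_card_bm,
                            size_t hrs_index,
                            size_t start_idx, size_t limit_idx) {
      size_t marked_bytes = 0;
      for (const TaskCountData& t : tasks) {
        marked_bytes += t.marked_bytes[hrs_index];
        for (size_t i = start_idx; i < limit_idx; ++i) {
          if (t.card_bm[i]) {
            global_card_bm[i] = true;  // one worker per region => no atomics needed
          }
        }
      }
      return marked_bytes;  // the caller adds this to the region's marked-bytes total
    }
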
+
+class G1AggregateCountDataTask: public AbstractGangTask {
+protected:
+  G1CollectedHeap* _g1h;
+  ConcurrentMark* _cm;
+  BitMap* _cm_card_bm;
+  size_t _max_task_num;
+  int _active_workers;
+
+public:
+  G1AggregateCountDataTask(G1CollectedHeap* g1h,
+                           ConcurrentMark* cm,
+                           BitMap* cm_card_bm,
+                           size_t max_task_num,
+                           int n_workers) :
+    AbstractGangTask("Count Aggregation"),
+    _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
+    _max_task_num(max_task_num),
+    _active_workers(n_workers) { }
+
+  void work(uint worker_id) {
+    AggregateCountDataHRClosure cl(_cm, _cm_card_bm, _max_task_num);
+
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
+                                            _active_workers,
+                                            HeapRegion::AggregateCountClaimValue);
+    } else {
+      _g1h->heap_region_iterate(&cl);
+    }
+  }
+};
+
+
+void ConcurrentMark::aggregate_count_data() {
+  int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                        _g1h->workers()->active_workers() :
+                        1);
+
+  G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
+                                           _max_task_num, n_workers);
+
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
+           "sanity check");
+    _g1h->set_par_threads(n_workers);
+    _g1h->workers()->run_task(&g1_par_agg_task);
+    _g1h->set_par_threads(0);
+
+    assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
+           "sanity check");
+    _g1h->reset_heap_region_claim_values();
+  } else {
+    g1_par_agg_task.work(0);
+  }
+}
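
heap_region_par_iterate_chunked() lets several workers walk the heap region array while guaranteeing each region is processed exactly once, by tagging regions with a claim value (InitialClaimValue, AggregateCountClaimValue, and so on, as the asserts above check). The sketch below captures just the claiming idea with std::atomic; the actual implementation additionally chunks the iteration so workers start at different offsets, and none of these names are the HotSpot API.

    #include <atomic>
    #include <cstddef>

    struct Region {
      std::atomic<int> claim_value{0};
    };

    // Every worker scans all regions but only processes the ones it claims by
    // advancing the claim value from 'expected' to 'new_claim'; the CAS ensures
    // exactly one worker wins each region.
    void par_iterate(Region* regions, size_t num_regions,
                     int expected, int new_claim,
                     void (*process)(Region&)) {
      for (size_t i = 0; i < num_regions; ++i) {
        int cur = expected;
        if (regions[i].claim_value.compare_exchange_strong(cur, new_claim)) {
          process(regions[i]);
        }
      }
    }
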
+
+// Clear the per-worker arrays used to store the per-region counting data
+void ConcurrentMark::clear_all_count_data() {
+  // Clear the global card bitmap - it will be filled during
+  // liveness count aggregation (during remark) and the
+  // final counting task.
+  _card_bm.clear();
+
+  // Clear the global region bitmap - it will be filled as part
+  // of the final counting task.
+  _region_bm.clear();
+
+  size_t max_regions = _g1h->max_regions();
+  assert(_max_task_num != 0, "uninitialized");
+
+  for (int i = 0; (size_t) i < _max_task_num; i += 1) {
+    BitMap* task_card_bm = count_card_bitmap_for(i);
+    size_t* marked_bytes_array = count_marked_bytes_array_for(i);
+
+    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
+    assert(marked_bytes_array != NULL, "uninitialized");
+
+    memset(marked_bytes_array, 0, (max_regions * sizeof(size_t)));
+    task_card_bm->clear();
+  }
+}
+
 void ConcurrentMark::print_stats() {
   if (verbose_stats()) {
     gclog_or_tty->print_cr("---------------------------------------------------------------------");
@@ -3350,6 +3983,8 @@
 void ConcurrentMark::abort() {
   // Clear all marks to force marking thread to do nothing
   _nextMarkBitMap->clearAll();
+  // Clear the liveness counting data
+  clear_all_count_data();
   // Empty mark stack
   clear_marking_state();
   for (int i = 0; i < (int)_max_task_num; ++i) {
@@ -3402,23 +4037,15 @@
                          (_init_times.sum() + _remark_times.sum() +
                           _cleanup_times.sum())/1000.0);
   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
-                "(%8.2f s marking, %8.2f s counting).",
+                "(%8.2f s marking).",
                 cmThread()->vtime_accum(),
-                cmThread()->vtime_mark_accum(),
-                cmThread()->vtime_count_accum());
+                cmThread()->vtime_mark_accum());
 }
 
 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
   _parallel_workers->print_worker_threads_on(st);
 }
 
-// Closures
-// XXX: there seems to be a lot of code  duplication here;
-// should refactor and consolidate the shared code.
-
-// This closure is used to mark refs into the CMS generation in
-// the CMS bit map. Called at the first checkpoint.
-
 // We take a break if someone is trying to stop the world.
 bool ConcurrentMark::do_yield_check(uint worker_id) {
   if (should_yield()) {
@@ -4704,6 +5331,8 @@
 
 CMTask::CMTask(int task_id,
                ConcurrentMark* cm,
+               size_t* marked_bytes,
+               BitMap* card_bm,
                CMTaskQueue* task_queue,
                CMTaskQueueSet* task_queues)
   : _g1h(G1CollectedHeap::heap()),
@@ -4713,7 +5342,9 @@
     _task_queue(task_queue),
     _task_queues(task_queues),
     _cm_oop_closure(NULL),
-    _aborted_region(MemRegion()) {
+    _aborted_region(MemRegion()),
+    _marked_bytes_array(marked_bytes),
+    _card_bm(card_bm) {
   guarantee(task_queue != NULL, "invariant");
   guarantee(task_queues != NULL, "invariant");
 
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -84,8 +84,8 @@
   }
 
   // iteration
-  bool iterate(BitMapClosure* cl) { return _bm.iterate(cl); }
-  bool iterate(BitMapClosure* cl, MemRegion mr);
+  inline bool iterate(BitMapClosure* cl, MemRegion mr);
+  inline bool iterate(BitMapClosure* cl);
 
   // Return the address corresponding to the next marked bit at or after
   // "addr", and before "limit", if "limit" is non-NULL.  If there is no
@@ -349,10 +349,62 @@
   high_verbose       // per object verbose
 } CMVerboseLevel;
 
+class YoungList;
+
+// Root Regions are regions that are not empty at the beginning of a
+// marking cycle and which we might collect during an evacuation pause
+// while the cycle is active. Given that, during evacuation pauses, we
+// do not copy objects that are explicitly marked, what we have to do
+// for the root regions is to scan them and mark all objects reachable
+// from them. According to the SATB assumptions, we only need to visit
+// each object once during marking. So, as long as we finish this scan
+// before the next evacuation pause, we can copy the objects from the
+// root regions without having to mark them or do anything else to them.
+//
+// Currently, we only support root region scanning once (at the start
+// of the marking cycle) and the root regions are all the survivor
+// regions populated during the initial-mark pause.
+class CMRootRegions VALUE_OBJ_CLASS_SPEC {
+private:
+  YoungList*           _young_list;
+  ConcurrentMark*      _cm;
+
+  volatile bool        _scan_in_progress;
+  volatile bool        _should_abort;
+  HeapRegion* volatile _next_survivor;
+
+public:
+  CMRootRegions();
+  // We actually do most of the initialization in this method.
+  void init(G1CollectedHeap* g1h, ConcurrentMark* cm);
+
+  // Reset the claiming / scanning of the root regions.
+  void prepare_for_scan();
+
+  // Forces claim_next() to return NULL so that the iteration aborts early.
+  void abort() { _should_abort = true; }
+
+  // Return true if the CM threads are actively scanning root regions,
+  // false otherwise.
+  bool scan_in_progress() { return _scan_in_progress; }
+
+  // Claim the next root region to scan atomically, or return NULL if
+  // all have been claimed.
+  HeapRegion* claim_next();
+
+  // Flag that we're done with root region scanning and notify anyone
+  // who's waiting on it. If aborted is false, assume that all regions
+  // have been claimed.
+  void scan_finished();
+
+  // If CM threads are still scanning root regions, wait until they
+  // are done. Return true if we had to wait, false otherwise.
+  bool wait_until_scan_finished();
+};
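
CMRootRegions above exists so that marking threads can claim the survivor regions one at a time and so that an evacuation pause can block until the whole scan has finished. Purely as an illustration of that claim/wait protocol, the following uses standard C++ primitives; none of these names are HotSpot's, and the real class walks the survivor list and honours _should_abort rather than indexing a vector.

    #include <atomic>
    #include <condition_variable>
    #include <mutex>
    #include <vector>

    class RootRegionScan {
      std::vector<int>        regions_;      // stand-ins for the survivor regions
      std::atomic<size_t>     next_{0};      // index of the next unclaimed region
      std::mutex              mu_;
      std::condition_variable cv_;
      bool                    in_progress_ = false;

     public:
      void prepare_for_scan(std::vector<int> regions) {
        std::lock_guard<std::mutex> lk(mu_);
        regions_ = std::move(regions);
        next_.store(0);
        in_progress_ = !regions_.empty();
      }

      // Marking threads call this until it returns nullptr.
      int* claim_next() {
        size_t i = next_.fetch_add(1);
        return (i < regions_.size()) ? &regions_[i] : nullptr;
      }

      // Called once every region has been scanned (or the scan was aborted).
      void scan_finished() {
        std::lock_guard<std::mutex> lk(mu_);
        in_progress_ = false;
        cv_.notify_all();
      }

      // An evacuation pause calls this; returns true if it actually had to wait.
      bool wait_until_scan_finished() {
        std::unique_lock<std::mutex> lk(mu_);
        if (!in_progress_) return false;
        cv_.wait(lk, [this] { return !in_progress_; });
        return true;
      }
    };
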
 
 class ConcurrentMarkThread;
 
-class ConcurrentMark: public CHeapObj {
+class ConcurrentMark : public CHeapObj {
   friend class ConcurrentMarkThread;
   friend class CMTask;
   friend class CMBitMapClosure;
@@ -386,7 +438,7 @@
 
   FreeRegionList        _cleanup_list;
 
-  // CMS marking support structures
+  // Concurrent marking support structures
   CMBitMap                _markBitMap1;
   CMBitMap                _markBitMap2;
   CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
@@ -400,6 +452,9 @@
   HeapWord*               _heap_start;
   HeapWord*               _heap_end;
 
+  // Root region tracking and claiming.
+  CMRootRegions           _root_regions;
+
   // For gray objects
   CMMarkStack             _markStack; // Grey objects behind global finger.
   CMRegionStack           _regionStack; // Grey regions behind global finger.
@@ -426,7 +481,6 @@
   WorkGangBarrierSync     _first_overflow_barrier_sync;
   WorkGangBarrierSync     _second_overflow_barrier_sync;
 
-
   // this is set by any task, when an overflow on the global data
   // structures is detected.
   volatile bool           _has_overflown;
@@ -554,9 +608,9 @@
   bool has_overflown()           { return _has_overflown; }
   void set_has_overflown()       { _has_overflown = true; }
   void clear_has_overflown()     { _has_overflown = false; }
+  bool restart_for_overflow()    { return _restart_for_overflow; }
 
   bool has_aborted()             { return _has_aborted; }
-  bool restart_for_overflow()    { return _restart_for_overflow; }
 
   // Methods to enter the two overflow sync barriers
   void enter_first_sync_barrier(int task_num);
@@ -578,6 +632,27 @@
     }
   }
 
+  // Live Data Counting data structures...
+  // These data structures are initialized at the start of
+  // marking. They are written to while marking is active.
+  // They are aggregated during remark; the aggregated values
+  // are then used to populate the _region_bm, _card_bm, and
+  // the total live bytes, which are then subsequently updated
+  // during cleanup.
+
+  // An array of bitmaps (one bit map per task). Each bitmap
+  // is used to record the cards spanned by the live objects
+  // marked by that task/worker.
+  BitMap*  _count_card_bitmaps;
+
+  // Used to record the number of marked live bytes
+  // (for each region, by worker thread).
+  size_t** _count_marked_bytes;
+
+  // Card index of the bottom of the G1 heap. Used for biasing indices into
+  // the card bitmaps.
+  intptr_t _heap_bottom_card_num;
+
 public:
   // Manipulation of the global mark stack.
   // Notice that the first mark_stack_push is CAS-based, whereas the
@@ -671,6 +746,8 @@
   // Returns true if there are any aborted memory regions.
   bool has_aborted_regions();
 
+  CMRootRegions* root_regions() { return &_root_regions; }
+
   bool concurrent_marking_in_progress() {
     return _concurrent_marking_in_progress;
   }
@@ -703,6 +780,7 @@
 
   ConcurrentMark(ReservedSpace rs, int max_regions);
   ~ConcurrentMark();
+
   ConcurrentMarkThread* cmThread() { return _cmThread; }
 
   CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
@@ -720,8 +798,17 @@
   // G1CollectedHeap
 
   // This notifies CM that a root during initial-mark needs to be
-  // grayed. It is MT-safe.
-  inline void grayRoot(oop obj, size_t word_size);
+  // grayed. It is MT-safe. word_size is the size of the object in
+  // words. It is passed explicitly as sometimes we cannot calculate
+  // it from the given object because it might be in an inconsistent
+  // state (e.g., in to-space and being copied). So the caller is
+  // responsible for dealing with this issue (e.g., get the size from
+  // the from-space image when the to-space image might be
+  // inconsistent) and always passing the size. hr is the region that
+  // contains the object and it's passed optionally from callers who
+  // might already have it (no point in recalculating it).
+  inline void grayRoot(oop obj, size_t word_size,
+                       uint worker_id, HeapRegion* hr = NULL);
 
   // It's used during evacuation pauses to gray a region, if
   // necessary, and it's MT-safe. It assumes that the caller has
@@ -772,6 +859,13 @@
   void checkpointRootsInitialPre();
   void checkpointRootsInitialPost();
 
+  // Scan all the root regions and mark everything reachable from
+  // them.
+  void scanRootRegions();
+
+  // Scan a single root region and mark everything reachable from it.
+  void scanRootRegion(HeapRegion* hr, uint worker_id);
+
   // Do concurrent phase of marking, to a tentative transitive closure.
   void markFromRoots();
 
@@ -781,15 +875,13 @@
 
   void checkpointRootsFinal(bool clear_all_soft_refs);
   void checkpointRootsFinalWork();
-  void calcDesiredRegions();
   void cleanup();
   void completeCleanup();
 
   // Mark in the previous bitmap.  NB: this is usually read-only, so use
   // this carefully!
   inline void markPrev(oop p);
-  inline void markNext(oop p);
-  void clear(oop p);
+
   // Clears marks for all objects in the given range, for the prev,
   // next, or both bitmaps.  NB: the previous bitmap is usually
   // read-only, so use this carefully!
@@ -913,6 +1005,114 @@
   bool verbose_high() {
     return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
   }
+
+  // Counting data structure accessors
+
+  // Returns the card number of the bottom of the G1 heap.
+  // Used in biasing indices into accounting card bitmaps.
+  intptr_t heap_bottom_card_num() const {
+    return _heap_bottom_card_num;
+  }
+
+  // Returns the card bitmap for a given task or worker id.
+  BitMap* count_card_bitmap_for(uint worker_id) {
+    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
+    assert(_count_card_bitmaps != NULL, "uninitialized");
+    BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
+    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
+    return task_card_bm;
+  }
+
+  // Returns the array containing the marked bytes for each region,
+  // for the given worker or task id.
+  size_t* count_marked_bytes_array_for(uint worker_id) {
+    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
+    assert(_count_marked_bytes != NULL, "uninitialized");
+    size_t* marked_bytes_array = _count_marked_bytes[worker_id];
+    assert(marked_bytes_array != NULL, "uninitialized");
+    return marked_bytes_array;
+  }
+
+  // Returns the index in the liveness accounting card table bitmap
+  // for the given address
+  inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
+
+  // Counts the size of the given memory region in the given
+  // marked_bytes array slot for the given HeapRegion.
+  // Sets the bits in the given card bitmap that are associated with the
+  // cards that are spanned by the memory region.
+  inline void count_region(MemRegion mr, HeapRegion* hr,
+                           size_t* marked_bytes_array,
+                           BitMap* task_card_bm);
+
+  // Counts the given memory region in the task/worker counting
+  // data structures for the given worker id.
+  inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
+
+  // Counts the given memory region in the task/worker counting
+  // data structures for the given worker id.
+  inline void count_region(MemRegion mr, uint worker_id);
+
+  // Counts the given object in the given task/worker counting
+  // data structures.
+  inline void count_object(oop obj, HeapRegion* hr,
+                           size_t* marked_bytes_array,
+                           BitMap* task_card_bm);
+
+  // Counts the given object in the task/worker counting data
+  // structures for the given worker id.
+  inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
+
+  // Attempts to mark the given object and, if successful, counts
+  // the object in the given task/worker counting structures.
+  inline bool par_mark_and_count(oop obj, HeapRegion* hr,
+                                 size_t* marked_bytes_array,
+                                 BitMap* task_card_bm);
+
+  // Attempts to mark the given object and, if successful, counts
+  // the object in the task/worker counting structures for the
+  // given worker id.
+  inline bool par_mark_and_count(oop obj, size_t word_size,
+                                 HeapRegion* hr, uint worker_id);
+
+  // Attempts to mark the given object and, if successful, counts
+  // the object in the task/worker counting structures for the
+  // given worker id.
+  inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
+
+  // Similar to the above routine but we don't know the heap region that
+  // contains the object to be marked/counted, which this routine looks up.
+  inline bool par_mark_and_count(oop obj, uint worker_id);
+
+  // Similar to the above routine but there are times when we cannot
+  // safely calculate the size of obj due to races and we, therefore,
+  // pass the size in as a parameter. It is the caller's responsibility
+  // to ensure that the size passed in for obj is valid.
+  inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
+
+  // Unconditionally mark the given object, and unconditionally count
+  // the object in the counting structures for worker id 0.
+  // Should *not* be called from parallel code.
+  inline bool mark_and_count(oop obj, HeapRegion* hr);
+
+  // Similar to the above routine but we don't know the heap region that
+  // contains the object to be marked/counted, which this routine looks up.
+  // Should *not* be called from parallel code.
+  inline bool mark_and_count(oop obj);
+
+protected:
+  // Clear all the per-task bitmaps and arrays used to store the
+  // counting data.
+  void clear_all_count_data();
+
+  // Aggregates the counting data for each worker/task
+  // that was constructed while marking. Also sets
+  // the amount of marked bytes for each region and
+  // the top at concurrent mark count.
+  void aggregate_count_data();
+
+  // Verification routine
+  void verify_count_data();
 };
 
 // A class representing a marking task.
@@ -1031,6 +1231,12 @@
 
   TruncatedSeq                _marking_step_diffs_ms;
 
+  // Counting data structures. Embedding the task's marked_bytes_array
+  // and card bitmap into the actual task saves having to go through
+  // the ConcurrentMark object.
+  size_t*                     _marked_bytes_array;
+  BitMap*                     _card_bm;
+
   // LOTS of statistics related with this task
 #if _MARKING_STATS_
   NumberSeq                   _all_clock_intervals_ms;
@@ -1196,6 +1402,7 @@
   }
 
   CMTask(int task_num, ConcurrentMark *cm,
+         size_t* marked_bytes, BitMap* card_bm,
          CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
 
   // it prints statistics associated with this task
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -28,6 +28,214 @@
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 
+// Returns the index in the liveness accounting card bitmap
+// for the given address
+inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
+  // Below, the term "card num" means the result of shifting an address
+  // by the card shift -- address 0 corresponds to card number 0.  One
+  // must subtract the card num of the bottom of the heap to obtain a
+  // card table index.
+
+  intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
+  return card_num - heap_bottom_card_num();
+}
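
In other words, the bitmap index is the address's card number minus the card number of the heap's bottom, so the first card of the heap maps to bit 0. A tiny standalone sketch of that arithmetic, assuming HotSpot's usual 512-byte cards (a card shift of 9); the names below are illustrative only.

    #include <cstddef>
    #include <cstdint>

    const int kCardShift = 9;  // 512-byte cards

    // Bias a raw address into a card-bitmap index relative to the heap bottom.
    inline std::ptrdiff_t card_index_for(const void* addr, const void* heap_bottom) {
      std::intptr_t card_num        = std::intptr_t(std::uintptr_t(addr) >> kCardShift);
      std::intptr_t bottom_card_num = std::intptr_t(std::uintptr_t(heap_bottom) >> kCardShift);
      return card_num - bottom_card_num;  // 0 for the first card of the heap
    }

    // Example: an address 4 KB above a card-aligned heap_bottom falls in card
    // index 8 (4096 / 512).
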
+
+// Counts the given memory region in the given task/worker
+// counting data structures.
+inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
+                                         size_t* marked_bytes_array,
+                                         BitMap* task_card_bm) {
+  G1CollectedHeap* g1h = _g1h;
+  HeapWord* start = mr.start();
+  HeapWord* last = mr.last();
+  size_t region_size_bytes = mr.byte_size();
+  size_t index = hr->hrs_index();
+
+  assert(!hr->continuesHumongous(), "should not be HC region");
+  assert(hr == g1h->heap_region_containing(start), "sanity");
+  assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
+  assert(marked_bytes_array != NULL, "pre-condition");
+  assert(task_card_bm != NULL, "pre-condition");
+
+  // Add to the task local marked bytes for this region.
+  marked_bytes_array[index] += region_size_bytes;
+
+  BitMap::idx_t start_idx = card_bitmap_index_for(start);
+  BitMap::idx_t last_idx = card_bitmap_index_for(last);
+
+  // The card bitmap is task/worker specific => no need to use 'par' routines.
+  // Set bits in the inclusive bit range [start_idx, last_idx].
+  //
+  // For small ranges use a simple loop; otherwise use set_range.
+  // The range is the set of cards spanned by the object/region,
+  // so 8 cards will allow objects/regions up to 4K to be handled
+  // using the loop.
+  if ((last_idx - start_idx) <= 8) {
+    for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
+      task_card_bm->set_bit(i);
+    }
+  } else {
+    assert(last_idx < task_card_bm->size(), "sanity");
+    // Note: BitMap::set_range() is exclusive.
+    task_card_bm->set_range(start_idx, last_idx+1);
+  }
+}
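
Because the per-task card bitmap is private to its worker, the bits can be set without atomics, and the only decision is loop versus bulk set. A minimal standalone version of that choice, with std::vector<bool> standing in for BitMap (note the +1 because the bulk call treats the end as exclusive while the input range here is inclusive):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Set the inclusive card range [start_idx, last_idx] in a worker-private bitmap.
    void set_card_range(std::vector<bool>& bm, size_t start_idx, size_t last_idx) {
      if (last_idx - start_idx <= 8) {
        // Spans of at most 8 cards (4 KB with 512-byte cards): a plain loop is cheap.
        for (size_t i = start_idx; i <= last_idx; ++i) {
          bm[i] = true;
        }
      } else {
        // Bulk path; the end iterator is exclusive, hence last_idx + 1.
        std::fill(bm.begin() + start_idx, bm.begin() + last_idx + 1, true);
      }
    }
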
+
+// Counts the given memory region in the task/worker counting
+// data structures for the given worker id.
+inline void ConcurrentMark::count_region(MemRegion mr,
+                                         HeapRegion* hr,
+                                         uint worker_id) {
+  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
+  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
+  count_region(mr, hr, marked_bytes_array, task_card_bm);
+}
+
+// Counts the given memory region, which may be a single object, in the
+// task/worker counting data structures for the given worker id.
+inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
+  HeapWord* addr = mr.start();
+  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
+  count_region(mr, hr, worker_id);
+}
+
+// Counts the given object in the given task/worker counting data structures.
+inline void ConcurrentMark::count_object(oop obj,
+                                         HeapRegion* hr,
+                                         size_t* marked_bytes_array,
+                                         BitMap* task_card_bm) {
+  MemRegion mr((HeapWord*)obj, obj->size());
+  count_region(mr, hr, marked_bytes_array, task_card_bm);
+}
+
+// Counts the given object in the task/worker counting data
+// structures for the given worker id.
+inline void ConcurrentMark::count_object(oop obj,
+                                         HeapRegion* hr,
+                                         uint worker_id) {
+  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
+  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
+  HeapWord* addr = (HeapWord*) obj;
+  count_object(obj, hr, marked_bytes_array, task_card_bm);
+}
+
+// Attempts to mark the given object and, if successful, counts
+// the object in the given task/worker counting structures.
+inline bool ConcurrentMark::par_mark_and_count(oop obj,
+                                               HeapRegion* hr,
+                                               size_t* marked_bytes_array,
+                                               BitMap* task_card_bm) {
+  HeapWord* addr = (HeapWord*)obj;
+  if (_nextMarkBitMap->parMark(addr)) {
+    // Update the task specific count data for the object.
+    count_object(obj, hr, marked_bytes_array, task_card_bm);
+    return true;
+  }
+  return false;
+}
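
The point of the mark-then-count split in these helpers is that the CAS inside parMark() succeeds for exactly one thread per object, so only that winner updates its counting data and every live object is counted once. A stripped-down sketch of that discipline (illustrative names, not the HotSpot API):

    #include <atomic>
    #include <cstddef>

    struct WorkerCounts {
      size_t live_bytes = 0;  // per-worker total, aggregated after marking
    };

    // Returns true only for the single caller that flips the mark from 0 to 1.
    inline bool par_mark(std::atomic<unsigned>& mark_bit) {
      unsigned expected = 0;
      return mark_bit.compare_exchange_strong(expected, 1);
    }

    inline bool par_mark_and_count(std::atomic<unsigned>& mark_bit,
                                   size_t obj_bytes, WorkerCounts& my_counts) {
      if (par_mark(mark_bit)) {
        my_counts.live_bytes += obj_bytes;  // counted exactly once, by the CAS winner
        return true;
      }
      return false;  // some other worker already marked (and counted) the object
    }
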
+
+// Attempts to mark the given object and, if successful, counts
+// the object in the task/worker counting structures for the
+// given worker id.
+inline bool ConcurrentMark::par_mark_and_count(oop obj,
+                                               size_t word_size,
+                                               HeapRegion* hr,
+                                               uint worker_id) {
+  HeapWord* addr = (HeapWord*)obj;
+  if (_nextMarkBitMap->parMark(addr)) {
+    MemRegion mr(addr, word_size);
+    count_region(mr, hr, worker_id);
+    return true;
+  }
+  return false;
+}
+
+// Attempts to mark the given object and, if successful, counts
+// the object in the task/worker counting structures for the
+// given worker id.
+inline bool ConcurrentMark::par_mark_and_count(oop obj,
+                                               HeapRegion* hr,
+                                               uint worker_id) {
+  HeapWord* addr = (HeapWord*)obj;
+  if (_nextMarkBitMap->parMark(addr)) {
+    // Update the task specific count data for the object.
+    count_object(obj, hr, worker_id);
+    return true;
+  }
+  return false;
+}
+
+// As above - but we don't know the heap region containing the
+// object and so have to supply it.
+inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
+  HeapWord* addr = (HeapWord*)obj;
+  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
+  return par_mark_and_count(obj, hr, worker_id);
+}
+
+// Similar to the above routine but we already know the size, in words, of
+// the object that we wish to mark/count
+inline bool ConcurrentMark::par_mark_and_count(oop obj,
+                                               size_t word_size,
+                                               uint worker_id) {
+  HeapWord* addr = (HeapWord*)obj;
+  if (_nextMarkBitMap->parMark(addr)) {
+    // Update the task specific count data for the object.
+    MemRegion mr(addr, word_size);
+    count_region(mr, worker_id);
+    return true;
+  }
+  return false;
+}
+
+// Unconditionally mark the given object, and unconditionally count
+// the object in the counting structures for worker id 0.
+// Should *not* be called from parallel code.
+inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
+  HeapWord* addr = (HeapWord*)obj;
+  _nextMarkBitMap->mark(addr);
+  // Update the task specific count data for the object.
+  count_object(obj, hr, 0 /* worker_id */);
+  return true;
+}
+
+// As above - but we don't have the heap region containing the
+// object, so we have to supply it.
+inline bool ConcurrentMark::mark_and_count(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
+  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
+  return mark_and_count(obj, hr);
+}
+
+inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
+  HeapWord* start_addr = MAX2(startWord(), mr.start());
+  HeapWord* end_addr = MIN2(endWord(), mr.end());
+
+  if (end_addr > start_addr) {
+    // Right-open interval [start-offset, end-offset).
+    BitMap::idx_t start_offset = heapWordToOffset(start_addr);
+    BitMap::idx_t end_offset = heapWordToOffset(end_addr);
+
+    start_offset = _bm.get_next_one_offset(start_offset, end_offset);
+    while (start_offset < end_offset) {
+      HeapWord* obj_addr = offsetToHeapWord(start_offset);
+      oop obj = (oop) obj_addr;
+      if (!cl->do_bit(start_offset)) {
+        return false;
+      }
+      HeapWord* next_addr = MIN2(obj_addr + obj->size(), end_addr);
+      BitMap::idx_t next_offset = heapWordToOffset(next_addr);
+      start_offset = _bm.get_next_one_offset(next_offset, end_offset);
+    }
+  }
+  return true;
+}
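
The iterator above visits each marked object once: it finds the next set bit, applies the closure, and then jumps past the object by its size so interior words are never re-examined. A loose sketch of the same scan over plain arrays (marks[i] true when an object starts at word i, sizes[i] its size in words); the real code instead asks the bitmap for the next set bit with get_next_one_offset().

    #include <cstddef>
    #include <vector>

    // Visit every marked object whose start lies in [start, end), in address order.
    // Returns false if the visitor asked to stop early, mirroring do_bit().
    template <typename Visitor>
    bool iterate_marked(const std::vector<bool>& marks,
                        const std::vector<size_t>& sizes,
                        size_t start, size_t end, Visitor visit) {
      size_t i = start;
      while (i < end) {
        if (marks[i]) {
          if (!visit(i)) {
            return false;
          }
          i += sizes[i];  // skip the interior of the object just visited
        } else {
          ++i;            // the real code jumps straight to the next set bit
        }
      }
      return true;
    }
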
+
+inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
+  MemRegion mr(startWord(), sizeInWords());
+  return iterate(cl, mr);
+}
+
 inline void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
@@ -84,7 +292,7 @@
 
   HeapWord* objAddr = (HeapWord*) obj;
   assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
- if (_g1h->is_in_g1_reserved(objAddr)) {
+  if (_g1h->is_in_g1_reserved(objAddr)) {
     assert(obj != NULL, "null check is implicit");
     if (!_nextMarkBitMap->isMarked(objAddr)) {
       // Only get the containing region if the object is not marked on the
@@ -98,9 +306,9 @@
         }
 
         // we need to mark it first
-        if (_nextMarkBitMap->parMark(objAddr)) {
+        if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
           // No OrderAccess:store_load() is needed. It is implicit in the
-          // CAS done in parMark(objAddr) above
+          // CAS done in CMBitMap::parMark() call in the routine above.
           HeapWord* global_finger = _cm->finger();
 
 #if _CHECK_BOTH_FINGERS_
@@ -160,25 +368,20 @@
   ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
 }
 
-inline void ConcurrentMark::markNext(oop p) {
-  assert(!_nextMarkBitMap->isMarked((HeapWord*) p), "sanity");
-  _nextMarkBitMap->mark((HeapWord*) p);
-}
-
-inline void ConcurrentMark::grayRoot(oop obj, size_t word_size) {
+inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
+                                     uint worker_id, HeapRegion* hr) {
+  assert(obj != NULL, "pre-condition");
   HeapWord* addr = (HeapWord*) obj;
+  if (hr == NULL) {
+    hr = _g1h->heap_region_containing_raw(addr);
+  } else {
+    assert(hr->is_in(addr), "pre-condition");
+  }
+  assert(hr != NULL, "sanity");
+  // Given that we're looking for a region that contains an object
+  // header, it's impossible to get back a HC region.
+  assert(!hr->continuesHumongous(), "sanity");
 
-  // Currently we don't do anything with word_size but we will use it
-  // in the very near future in the liveness calculation piggy-backing
-  // changes.
-
-#ifdef ASSERT
-  HeapRegion* hr = _g1h->heap_region_containing(addr);
-  assert(hr != NULL, "sanity");
-  assert(!hr->is_survivor(), "should not allocate survivors during IM");
-  assert(addr < hr->next_top_at_mark_start(),
-         err_msg("addr: "PTR_FORMAT" hr: "HR_FORMAT" NTAMS: "PTR_FORMAT,
-                 addr, HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start()));
   // We cannot assert that word_size == obj->size() given that obj
   // might not be in a consistent state (another thread might be in
   // the process of copying it). So the best thing we can do is to
@@ -188,10 +391,11 @@
          err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT,
                  word_size * HeapWordSize, hr->capacity(),
                  HR_FORMAT_PARAMS(hr)));
-#endif // ASSERT
 
-  if (!_nextMarkBitMap->isMarked(addr)) {
-    _nextMarkBitMap->parMark(addr);
+  if (addr < hr->next_top_at_mark_start()) {
+    if (!_nextMarkBitMap->isMarked(addr)) {
+      par_mark_and_count(obj, word_size, hr, worker_id);
+    }
   }
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,9 +44,7 @@
   _started(false),
   _in_progress(false),
   _vtime_accum(0.0),
-  _vtime_mark_accum(0.0),
-  _vtime_count_accum(0.0)
-{
+  _vtime_mark_accum(0.0) {
   create_and_start();
 }
 
@@ -94,9 +92,36 @@
       ResourceMark rm;
       HandleMark   hm;
       double cycle_start = os::elapsedVTime();
-      double mark_start_sec = os::elapsedTime();
       char verbose_str[128];
 
+      // We have to ensure that we finish scanning the root regions
+      // before the next GC takes place. To ensure this we have to
+      // make sure that we do not join the STS until the root regions
+      // have been scanned. If we did then it's possible that a
+      // subsequent GC could block us from joining the STS and proceed
+      // without the root regions having been scanned, which would be a
+      // correctness issue.
+
+      double scan_start = os::elapsedTime();
+      if (!cm()->has_aborted()) {
+        if (PrintGC) {
+          gclog_or_tty->date_stamp(PrintGCDateStamps);
+          gclog_or_tty->stamp(PrintGCTimeStamps);
+          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
+        }
+
+        _cm->scanRootRegions();
+
+        double scan_end = os::elapsedTime();
+        if (PrintGC) {
+          gclog_or_tty->date_stamp(PrintGCDateStamps);
+          gclog_or_tty->stamp(PrintGCTimeStamps);
+          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf]",
+                                 scan_end - scan_start);
+        }
+      }
+
+      double mark_start_sec = os::elapsedTime();
       if (PrintGC) {
         gclog_or_tty->date_stamp(PrintGCDateStamps);
         gclog_or_tty->stamp(PrintGCTimeStamps);
@@ -148,36 +173,12 @@
         }
       } while (cm()->restart_for_overflow());
 
-      double counting_start_time = os::elapsedVTime();
-      if (!cm()->has_aborted()) {
-        double count_start_sec = os::elapsedTime();
-        if (PrintGC) {
-          gclog_or_tty->date_stamp(PrintGCDateStamps);
-          gclog_or_tty->stamp(PrintGCTimeStamps);
-          gclog_or_tty->print_cr("[GC concurrent-count-start]");
-        }
-
-        _sts.join();
-        _cm->calcDesiredRegions();
-        _sts.leave();
-
-        if (!cm()->has_aborted()) {
-          double count_end_sec = os::elapsedTime();
-          if (PrintGC) {
-            gclog_or_tty->date_stamp(PrintGCDateStamps);
-            gclog_or_tty->stamp(PrintGCTimeStamps);
-            gclog_or_tty->print_cr("[GC concurrent-count-end, %1.7lf]",
-                                   count_end_sec - count_start_sec);
-          }
-        }
-      }
-
       double end_time = os::elapsedVTime();
-      _vtime_count_accum += (end_time - counting_start_time);
       // Update the total virtual time before doing this, since it will try
       // to measure it to get the vtime for this marking.  We purposely
       // neglect the presumably-short "completeCleanup" phase here.
       _vtime_accum = (end_time - _vtime_start);
+
       if (!cm()->has_aborted()) {
         if (g1_policy->adaptive_young_list_length()) {
           double now = os::elapsedTime();
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,6 @@
   double _vtime_accum;  // Accumulated virtual time.
 
   double _vtime_mark_accum;
-  double _vtime_count_accum;
 
  public:
   virtual void run();
@@ -69,8 +68,6 @@
   double vtime_accum();
   // Marking virtual time so far
   double vtime_mark_accum();
-  // Counting virtual time so far.
-  double vtime_count_accum() { return _vtime_count_accum; }
 
   ConcurrentMark* cm()     { return _cm; }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -174,13 +174,10 @@
   }
 };
 
-YoungList::YoungList(G1CollectedHeap* g1h)
-  : _g1h(g1h), _head(NULL),
-    _length(0),
-    _last_sampled_rs_lengths(0),
-    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
-{
-  guarantee( check_list_empty(false), "just making sure..." );
+YoungList::YoungList(G1CollectedHeap* g1h) :
+    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
+    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
+  guarantee(check_list_empty(false), "just making sure...");
 }
 
 void YoungList::push_region(HeapRegion *hr) {
@@ -1029,6 +1026,15 @@
   assert(isHumongous(word_size), "attempt_allocation_humongous() "
          "should only be called for humongous allocations");
 
+  // Humongous objects can exhaust the heap quickly, so we should check if we
+  // need to start a marking cycle at each humongous object allocation. We do
+  // the check before we do the actual allocation. The reason for doing it
+  // before the allocation is that we avoid having to keep track of the newly
+  // allocated memory while we do a GC.
+  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
+    collect(GCCause::_g1_humongous_allocation);
+  }
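
The policy call above boils down to an occupancy test (see the g1CollectorPolicy.cpp hunk near the end of this change): if the old and humongous space in use, plus the allocation being requested, would exceed InitiatingHeapOccupancyPercent of the heap capacity, and no marking cycle is already running, a concurrent cycle is requested. A back-of-the-envelope sketch of that comparison with illustrative names:

    #include <cstddef>

    // Would this allocation push non-young occupancy past the initiating threshold?
    bool need_to_start_marking(size_t capacity_bytes,
                               size_t non_young_used_bytes,
                               size_t alloc_bytes,
                               unsigned ihop_percent,      // e.g. 45 for 45%
                               bool marking_cycle_running) {
      if (marking_cycle_running) {
        return false;  // never start a second cycle while one is in progress
      }
      size_t threshold = (capacity_bytes / 100) * ihop_percent;
      return (non_young_used_bytes + alloc_bytes) > threshold;
    }

    // Example: a 1 GB heap with IHOP 45 gives a threshold of roughly 460 MB, so a
    // 200 MB humongous allocation on top of 300 MB of old/humongous space would
    // trigger a concurrent cycle.
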
+
   // We will loop until a) we manage to successfully perform the
   // allocation or b) we successfully schedule a collection which
   // fails to perform the allocation. b) is the only case when we'll
@@ -1111,7 +1117,11 @@
     return _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                       false /* bot_updates */);
   } else {
-    return humongous_obj_allocate(word_size);
+    HeapWord* result = humongous_obj_allocate(word_size);
+    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
+      g1_policy()->set_initiate_conc_mark_if_possible();
+    }
+    return result;
   }
 
   ShouldNotReachHere();
@@ -1257,7 +1267,18 @@
     double start = os::elapsedTime();
     g1_policy()->record_full_collection_start();
 
+    // Note: When we have a more flexible GC logging framework that
+    // allows us to add optional attributes to a GC log record we
+    // could consider timing and reporting how long we wait in the
+    // following two methods.
     wait_while_free_regions_coming();
+    // If we start the compaction before the CM threads finish
+    // scanning the root regions we might trip them over as we'll
+    // be moving objects / updating references. So let's wait until
+    // they are done. By telling them to abort, they should complete
+    // early.
+    _cm->root_regions()->abort();
+    _cm->root_regions()->wait_until_scan_finished();
     append_secondary_free_list_if_not_empty_with_lock();
 
     gc_prologue(true);
@@ -1286,7 +1307,8 @@
     ref_processor_cm()->verify_no_references_recorded();
 
     // Abandon current iterations of concurrent marking and concurrent
-    // refinement, if any are in progress.
+    // refinement, if any are in progress. We have to do this before
+    // wait_until_scan_finished() below.
     concurrent_mark()->abort();
 
     // Make sure we'll choose a new allocation region afterwards.
@@ -2295,7 +2317,8 @@
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   return
     ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
-     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
+      cause == GCCause::_g1_humongous_allocation);
 }
 
 #ifndef PRODUCT
@@ -3545,19 +3568,25 @@
   verify_region_sets_optional();
   verify_dirty_young_regions();
 
+  // This call will decide whether this pause is an initial-mark
+  // pause. If it is, during_initial_mark_pause() will return true
+  // for the duration of this pause.
+  g1_policy()->decide_on_conc_mark_initiation();
+
+  // We do not allow initial-mark to be piggy-backed on a mixed GC.
+  assert(!g1_policy()->during_initial_mark_pause() ||
+          g1_policy()->gcs_are_young(), "sanity");
+
+  // We also do not allow mixed GCs during marking.
+  assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
+
+  // Record whether this pause is an initial mark. When the current
+  // thread has completed its logging output and it's safe to signal
+  // the CM thread, the flag's value in the policy has been reset.
+  bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
+
+  // Inner scope for scope based logging, timers, and stats collection
   {
-    // This call will decide whether this pause is an initial-mark
-    // pause. If it is, during_initial_mark_pause() will return true
-    // for the duration of this pause.
-    g1_policy()->decide_on_conc_mark_initiation();
-
-    // We do not allow initial-mark to be piggy-backed on a mixed GC.
-    assert(!g1_policy()->during_initial_mark_pause() ||
-            g1_policy()->gcs_are_young(), "sanity");
-
-    // We also do not allow mixed GCs during marking.
-    assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
-
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->gcs_are_young()) {
@@ -3613,7 +3642,6 @@
         Universe::verify(/* allow dirty */ false,
                          /* silent      */ false,
                          /* option      */ VerifyOption_G1UsePrevMarking);
-
       }
 
       COMPILER2_PRESENT(DerivedPointerTable::clear());
@@ -3656,6 +3684,18 @@
         g1_policy()->record_collection_pause_start(start_time_sec,
                                                    start_used_bytes);
 
+        double scan_wait_start = os::elapsedTime();
+        // We have to wait until the CM threads finish scanning the
+        // root regions as it's the only way to ensure that all the
+        // objects on them have been correctly scanned before we start
+        // moving them during the GC.
+        bool waited = _cm->root_regions()->wait_until_scan_finished();
+        if (waited) {
+          double scan_wait_end = os::elapsedTime();
+          double wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
+          g1_policy()->record_root_region_scan_wait_time(wait_time_ms);
+        }
+
 #if YOUNG_LIST_VERBOSE
         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
         _young_list->print();
@@ -3765,16 +3805,14 @@
         }
 
         if (g1_policy()->during_initial_mark_pause()) {
+          // We have to do this before we notify the CM threads that
+          // they can start working to make sure that all the
+          // appropriate initialization is done on the CM object.
           concurrent_mark()->checkpointRootsInitialPost();
           set_marking_started();
-          // CAUTION: after the doConcurrentMark() call below,
-          // the concurrent marking thread(s) could be running
-          // concurrently with us. Make sure that anything after
-          // this point does not assume that we are the only GC thread
-          // running. Note: of course, the actual marking work will
-          // not start until the safepoint itself is released in
-          // ConcurrentGCThread::safepoint_desynchronize().
-          doConcurrentMark();
+          // Note that we don't actually trigger the CM thread at
+          // this point. We do that later when we're sure that
+          // the current thread has completed its logging output.
         }
 
         allocate_dummy_regions();
@@ -3884,6 +3922,15 @@
     }
   }
 
+  // The closing of the inner scope, immediately above, will complete
+  // the PrintGC logging output. The record_collection_pause_end() call
+  // above will complete the logging output of PrintGCDetails.
+  //
+  // It is not yet safe, however, to tell the concurrent mark to
+  // start as we have some optional output below. We don't want the
+  // output from the concurrent mark thread interfering with this
+  // logging output either.
+
   _hrs.verify_optional();
   verify_region_sets_optional();
 
@@ -3901,6 +3948,21 @@
     g1_rem_set()->print_summary_info();
   }
 
+  // It should now be safe to tell the concurrent mark thread to start
+  // without its logging output interfering with the logging output
+  // that came from the pause.
+
+  if (should_start_conc_mark) {
+    // CAUTION: after the doConcurrentMark() call below,
+    // the concurrent marking thread(s) could be running
+    // concurrently with us. Make sure that anything after
+    // this point does not assume that we are the only GC thread
+    // running. Note: of course, the actual marking work will
+    // not start until the safepoint itself is released in
+    // ConcurrentGCThread::safepoint_desynchronize().
+    doConcurrentMark();
+  }
+
   return true;
 }
 
@@ -4162,7 +4224,7 @@
 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
 
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
     _dcq(&g1h->dirty_card_queue_set()),
@@ -4283,6 +4345,7 @@
                                      G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state),
+  _worker_id(par_scan_state->queue_num()),
   _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
   _mark_in_progress(_g1->mark_in_progress()) { }
 
@@ -4294,7 +4357,7 @@
 #endif // ASSERT
 
   // We know that the object is not moving so it's safe to read its size.
-  _cm->grayRoot(obj, (size_t) obj->size());
+  _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
 }
 
 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
@@ -4316,7 +4379,7 @@
   // worker so we cannot trust that its to-space image is
   // well-formed. So we have to read its size from its from-space
   // image which we know should not be changing.
-  _cm->grayRoot(to_obj, (size_t) from_obj->size());
+  _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
 }
 
 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
@@ -4406,6 +4469,8 @@
   assert(barrier != G1BarrierRS || obj != NULL,
          "Precondition: G1BarrierRS implies obj is non-NULL");
 
+  assert(_worker_id == _par_scan_state->queue_num(), "sanity");
+
   // here the null check is implicit in the cset_fast_test() test
   if (_g1->in_cset_fast_test(obj)) {
     oop forwardee;
@@ -4424,7 +4489,7 @@
 
     // When scanning the RS, we only care about objs in CS.
     if (barrier == G1BarrierRS) {
-      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
+      _par_scan_state->update_rs(_from, p, _worker_id);
     }
   } else {
     // The object is not in collection set. If we're a root scanning
@@ -4436,7 +4501,7 @@
   }
 
   if (barrier == G1BarrierEvac && obj != NULL) {
-    _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
+    _par_scan_state->update_rs(_from, p, _worker_id);
   }
 
   if (do_gen_barrier && obj != NULL) {
@@ -5666,16 +5731,6 @@
 
       // And the region is empty.
       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
-
-      // If marking is in progress then clear any objects marked in
-      // the current region. Note mark_in_progress() returns false,
-      // even during an initial mark pause, until the set_marking_started()
-      // call which takes place later in the pause.
-      if (mark_in_progress()) {
-        assert(!g1_policy()->during_initial_mark_pause(), "sanity");
-        _cm->nextMarkBitMap()->clearRange(used_mr);
-      }
-
       free_region(cur, &pre_used, &local_free_list, false /* par */);
     } else {
       cur->uninstall_surv_rate_group();
@@ -5742,8 +5797,9 @@
 }
 
 void G1CollectedHeap::reset_free_regions_coming() {
+  assert(free_regions_coming(), "pre-condition");
+
   {
-    assert(free_regions_coming(), "pre-condition");
     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
     _free_regions_coming = false;
     SecondaryFreeList_lock->notify_all();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -355,6 +355,7 @@
   // explicitly started if:
   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  // (c) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // Keeps track of how many "full collections" (i.e., Full GCs or
@@ -1172,6 +1173,10 @@
     _old_set.remove(hr);
   }
 
+  size_t non_young_capacity_bytes() {
+    return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
+  }
+
   void set_free_regions_coming();
   void reset_free_regions_coming();
   bool free_regions_coming() { return _free_regions_coming; }
@@ -1904,7 +1909,7 @@
   G1ParScanPartialArrayClosure* _partial_scan_cl;
 
   int _hash_seed;
-  int _queue_num;
+  uint _queue_num;
 
   size_t _term_attempts;
 
@@ -1948,7 +1953,7 @@
   }
 
 public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);
+  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
 
   ~G1ParScanThreadState() {
     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
@@ -2040,7 +2045,7 @@
   }
 
   int* hash_seed() { return &_hash_seed; }
-  int  queue_num() { return _queue_num; }
+  uint queue_num() { return _queue_num; }
 
   size_t term_attempts() const  { return _term_attempts; }
   void note_term_attempt() { _term_attempts++; }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -141,6 +141,7 @@
 
   _cur_clear_ct_time_ms(0.0),
   _mark_closure_time_ms(0.0),
+  _root_region_scan_wait_time_ms(0.0),
 
   _cur_ref_proc_time_ms(0.0),
   _cur_ref_enq_time_ms(0.0),
@@ -213,8 +214,6 @@
   _survivor_bytes_before_gc(0),
   _capacity_before_gc(0),
 
-  _prev_collection_pause_used_at_end_bytes(0),
-
   _eden_cset_region_length(0),
   _survivor_cset_region_length(0),
   _old_cset_region_length(0),
@@ -905,19 +904,10 @@
     gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
   }
 
-  if (!during_initial_mark_pause()) {
-    // We only need to do this here as the policy will only be applied
-    // to the GC we're about to start. so, no point is calculating this
-    // every time we calculate / recalculate the target young length.
-    update_survivors_policy();
-  } else {
-    // The marking phase has a "we only copy implicitly live
-    // objects during marking" invariant. The easiest way to ensure it
-    // holds is not to allocate any survivor regions and tenure all
-    // objects. In the future we might change this and handle survivor
-    // regions specially during marking.
-    tenure_all_objects();
-  }
+  // We only need to do this here as the policy will only be applied
+  // to the GC we're about to start. So, there is no point in calculating this
+  // every time we calculate / recalculate the target young length.
+  update_survivors_policy();
 
   assert(_g1->used() == _g1->recalculate_used(),
          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
@@ -969,6 +959,9 @@
   // This is initialized to zero here and is set during
   // the evacuation pause if marking is in progress.
   _cur_satb_drain_time_ms = 0.0;
+  // This is initialized to zero here and is set during the evacuation
+  // pause if we actually waited for the root region scanning to finish.
+  _root_region_scan_wait_time_ms = 0.0;
 
   _last_gc_was_young = false;
 
@@ -1140,6 +1133,50 @@
   return ret;
 }
 
+bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
+  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
+    return false;
+  }
+
+  size_t marking_initiating_used_threshold =
+    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
+  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
+  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
+
+  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
+    if (gcs_are_young()) {
+      ergo_verbose5(ErgoConcCycles,
+        "request concurrent cycle initiation",
+        ergo_format_reason("occupancy higher than threshold")
+        ergo_format_byte("occupancy")
+        ergo_format_byte("allocation request")
+        ergo_format_byte_perc("threshold")
+        ergo_format_str("source"),
+        cur_used_bytes,
+        alloc_byte_size,
+        marking_initiating_used_threshold,
+        (double) InitiatingHeapOccupancyPercent,
+        source);
+      return true;
+    } else {
+      ergo_verbose5(ErgoConcCycles,
+        "do not request concurrent cycle initiation",
+        ergo_format_reason("still doing mixed collections")
+        ergo_format_byte("occupancy")
+        ergo_format_byte("allocation request")
+        ergo_format_byte_perc("threshold")
+        ergo_format_str("source"),
+        cur_used_bytes,
+        alloc_byte_size,
+        marking_initiating_used_threshold,
+        (double) InitiatingHeapOccupancyPercent,
+        source);
+    }
+  }
+
+  return false;
+}
+
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
 
@@ -1166,44 +1203,16 @@
 #endif // PRODUCT
 
   last_pause_included_initial_mark = during_initial_mark_pause();
-  if (last_pause_included_initial_mark)
+  if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
-
-  size_t marking_initiating_used_threshold =
-    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
-
-  if (!_g1->mark_in_progress() && !_last_young_gc) {
-    assert(!last_pause_included_initial_mark, "invariant");
-    if (cur_used_bytes > marking_initiating_used_threshold) {
-      if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
-        assert(!during_initial_mark_pause(), "we should not see this here");
-
-        ergo_verbose3(ErgoConcCycles,
-                      "request concurrent cycle initiation",
-                      ergo_format_reason("occupancy higher than threshold")
-                      ergo_format_byte("occupancy")
-                      ergo_format_byte_perc("threshold"),
-                      cur_used_bytes,
-                      marking_initiating_used_threshold,
-                      (double) InitiatingHeapOccupancyPercent);
-
-        // Note: this might have already been set, if during the last
-        // pause we decided to start a cycle but at the beginning of
-        // this pause we decided to postpone it. That's OK.
-        set_initiate_conc_mark_if_possible();
-      } else {
-        ergo_verbose2(ErgoConcCycles,
-                  "do not request concurrent cycle initiation",
-                  ergo_format_reason("occupancy lower than previous occupancy")
-                  ergo_format_byte("occupancy")
-                  ergo_format_byte("previous occupancy"),
-                  cur_used_bytes,
-                  _prev_collection_pause_used_at_end_bytes);
-      }
-    }
   }
 
-  _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
+  if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+    // Note: this might have already been set, if during the last
+    // pause we decided to start a cycle but at the beginning of
+    // this pause we decided to postpone it. That's OK.
+    set_initiate_conc_mark_if_possible();
+  }
 
   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                           end_time_sec, false);
@@ -1257,6 +1266,10 @@
   // is in progress.
   other_time_ms -= _cur_satb_drain_time_ms;
 
+  // Subtract the root region scanning wait time. It's initialized to
+  // zero at the start of the pause.
+  other_time_ms -= _root_region_scan_wait_time_ms;
+
   if (parallel) {
     other_time_ms -= _cur_collection_par_time_ms;
   } else {
@@ -1289,6 +1302,8 @@
     // each other. Therefore we unconditionally record the SATB drain
     // time - even if it's zero.
     body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
+    body_summary->record_root_region_scan_wait_time_ms(
+                                               _root_region_scan_wait_time_ms);
 
     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
     body_summary->record_satb_filtering_time_ms(satb_filtering_time);
@@ -1385,6 +1400,9 @@
                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
                            elapsed_ms / 1000.0);
 
+    if (_root_region_scan_wait_time_ms > 0.0) {
+      print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
+    }
     if (parallel) {
       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
       print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
@@ -1988,6 +2006,7 @@
   if (summary->get_total_seq()->num() > 0) {
     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
     if (body_summary != NULL) {
+      print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
       if (parallel) {
         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
         print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
@@ -2029,15 +2048,17 @@
           // parallel
           NumberSeq* other_parts[] = {
             body_summary->get_satb_drain_seq(),
+            body_summary->get_root_region_scan_wait_seq(),
             body_summary->get_parallel_seq(),
             body_summary->get_clear_ct_seq()
           };
           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
-                                                3, other_parts);
+                                          4, other_parts);
         } else {
           // serial
           NumberSeq* other_parts[] = {
             body_summary->get_satb_drain_seq(),
+            body_summary->get_root_region_scan_wait_seq(),
             body_summary->get_update_rs_seq(),
             body_summary->get_ext_root_scan_seq(),
             body_summary->get_satb_filtering_seq(),
@@ -2045,7 +2066,7 @@
             body_summary->get_obj_copy_seq()
           };
           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
-                                                6, other_parts);
+                                          7, other_parts);
         }
         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
       }
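
For illustration, the initiation test added in need_to_start_conc_mark() above reduces to comparing the current non-young occupancy plus the pending allocation against a threshold derived from InitiatingHeapOccupancyPercent. A minimal standalone sketch of that arithmetic, with hypothetical values and names rather than the HotSpot API:

#include <cstddef>
#include <cstdio>

// Simplified stand-in for the comparison performed by need_to_start_conc_mark().
static bool should_start_conc_mark(size_t heap_capacity_bytes,
                                   size_t non_young_used_bytes,
                                   size_t alloc_request_bytes,
                                   unsigned initiating_occupancy_percent) {
  // threshold = capacity / 100 * InitiatingHeapOccupancyPercent
  size_t threshold = (heap_capacity_bytes / 100) * initiating_occupancy_percent;
  // Request a concurrent cycle when occupancy plus the pending allocation
  // would exceed the threshold.
  return (non_young_used_bytes + alloc_request_bytes) > threshold;
}

int main() {
  size_t capacity = 512u * 1024 * 1024;   // hypothetical 512 MB heap
  size_t used     = 200u * 1024 * 1024;   // old + humongous occupancy
  size_t alloc    =  64u * 1024 * 1024;   // pending humongous allocation
  printf("start marking: %s\n",
         should_start_conc_mark(capacity, used, alloc, 45) ? "yes" : "no");
  return 0;
}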
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -65,6 +65,7 @@
 
 class MainBodySummary: public CHeapObj {
   define_num_seq(satb_drain) // optional
+  define_num_seq(root_region_scan_wait)
   define_num_seq(parallel) // parallel only
     define_num_seq(ext_root_scan)
     define_num_seq(satb_filtering)
@@ -177,7 +178,6 @@
   double _cur_collection_start_sec;
   size_t _cur_collection_pause_used_at_start_bytes;
   size_t _cur_collection_pause_used_regions_at_start;
-  size_t _prev_collection_pause_used_at_end_bytes;
   double _cur_collection_par_time_ms;
   double _cur_satb_drain_time_ms;
   double _cur_clear_ct_time_ms;
@@ -716,6 +716,7 @@
   double _mark_remark_start_sec;
   double _mark_cleanup_start_sec;
   double _mark_closure_time_ms;
+  double _root_region_scan_wait_time_ms;
 
   // Update the young list target length either by setting it to the
   // desired fixed value or by calculating it using G1's pause
@@ -800,6 +801,8 @@
 
   GenRemSet::Name  rem_set_name()     { return GenRemSet::CardTable; }
 
+  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
+
   // Update the heuristic info to record a collection pause of the given
   // start time, where the given number of bytes were used at the start.
   // This may involve changing the desired size of a collection set.
@@ -816,6 +819,10 @@
     _mark_closure_time_ms = mark_closure_time_ms;
   }
 
+  void record_root_region_scan_wait_time(double time_ms) {
+    _root_region_scan_wait_time_ms = time_ms;
+  }
+
   void record_concurrent_mark_remark_start();
   void record_concurrent_mark_remark_end();
 
@@ -1146,11 +1153,6 @@
     _survivor_surv_rate_group->stop_adding_regions();
   }
 
-  void tenure_all_objects() {
-    _max_survivor_regions = 0;
-    _tenuring_threshold = 0;
-  }
-
   void record_survivor_regions(size_t      regions,
                                HeapRegion* head,
                                HeapRegion* tail) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -70,16 +70,20 @@
   OopsInHeapRegionClosure *_update_rset_cl;
   bool _during_initial_mark;
   bool _during_conc_mark;
+  uint _worker_id;
+
 public:
   RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                  HeapRegion* hr,
                                  OopsInHeapRegionClosure* update_rset_cl,
                                  bool during_initial_mark,
-                                 bool during_conc_mark) :
+                                 bool during_conc_mark,
+                                 uint worker_id) :
     _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
     _update_rset_cl(update_rset_cl),
     _during_initial_mark(during_initial_mark),
-    _during_conc_mark(during_conc_mark) { }
+    _during_conc_mark(during_conc_mark),
+    _worker_id(worker_id) { }
 
   size_t marked_bytes() { return _marked_bytes; }
 
@@ -123,7 +127,7 @@
         // explicitly and all objects in the CSet are considered
         // (implicitly) live. So, we won't mark them explicitly and
         // we'll leave them over NTAMS.
-        _cm->markNext(obj);
+        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
       }
       _marked_bytes += (obj_size * HeapWordSize);
       obj->set_mark(markOopDesc::prototype());
@@ -155,12 +159,14 @@
   G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
   OopsInHeapRegionClosure *_update_rset_cl;
+  uint _worker_id;
 
 public:
   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
-                                OopsInHeapRegionClosure* update_rset_cl) :
+                                OopsInHeapRegionClosure* update_rset_cl,
+                                uint worker_id) :
     _g1h(g1h), _update_rset_cl(update_rset_cl),
-    _cm(_g1h->concurrent_mark()) { }
+    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }
 
   bool doHeapRegion(HeapRegion *hr) {
     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
@@ -173,7 +179,8 @@
       if (hr->evacuation_failed()) {
         RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
                                             during_initial_mark,
-                                            during_conc_mark);
+                                            during_conc_mark,
+                                            _worker_id);
 
         MemRegion mr(hr->bottom(), hr->end());
         // We'll recreate the prev marking info so we'll first clear
@@ -226,7 +233,7 @@
       update_rset_cl = &immediate_update;
     }
 
-    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl);
+    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);
 
     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -89,16 +89,15 @@
 //
 // * Min Capacity
 //
-//    We set this to 0 for all spaces. We could consider setting the old
-//    min capacity to the min capacity of the heap (see 7078465).
+//    We set this to 0 for all spaces.
 //
 // * Max Capacity
 //
 //    For jstat, we set the max capacity of all spaces to heap_capacity,
-//    given that we don't always have a reasonably upper bound on how big
-//    each space can grow. For the memory pools, we actually make the max
-//    capacity undefined. We could consider setting the old max capacity
-//    to the max capacity of the heap (see 7078465).
+//    given that we don't always have a reasonable upper bound on how big
+//    each space can grow. For the memory pools, we make the max
+//    capacity undefined with the exception of the old memory pool for
+//    which we make the max capacity the same as the max heap capacity.
 //
 // If we had more accurate occupancy / capacity information per
 // region set the above calculations would be greatly simplified and
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -51,6 +51,7 @@
   G1RemSet* _g1_rem;
   ConcurrentMark* _cm;
   G1ParScanThreadState* _par_scan_state;
+  uint _worker_id;
   bool _during_initial_mark;
   bool _mark_in_progress;
 public:
@@ -219,6 +220,7 @@
 
 // Closure for iterating over object fields during concurrent marking
 class G1CMOopClosure : public OopClosure {
+private:
   G1CollectedHeap*   _g1h;
   ConcurrentMark*    _cm;
   CMTask*            _task;
@@ -229,4 +231,92 @@
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
+// Closure to scan the root regions during concurrent marking
+class G1RootRegionScanClosure : public OopClosure {
+private:
+  G1CollectedHeap* _g1h;
+  ConcurrentMark*  _cm;
+  uint _worker_id;
+public:
+  G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm,
+                          uint worker_id) :
+    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(      oop* p) { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+};
+
+// Closure that applies the given two closures in sequence.
+// Used by the RSet refinement code (when updating RSets
+// during an evacuation pause) to record cards containing
+// pointers into the collection set.
+
+class G1Mux2Closure : public OopClosure {
+  OopClosure* _c1;
+  OopClosure* _c2;
+public:
+  G1Mux2Closure(OopClosure *c1, OopClosure *c2);
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)        { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
+};
+
+// A closure that returns true if it is actually applied
+// to a reference
+
+class G1TriggerClosure : public OopClosure {
+  bool _triggered;
+public:
+  G1TriggerClosure();
+  bool triggered() const { return _triggered; }
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)        { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
+};
+
+// A closure which uses a triggering closure to determine
+// whether to apply an oop closure.
+
+class G1InvokeIfNotTriggeredClosure: public OopClosure {
+  G1TriggerClosure* _trigger_cl;
+  OopClosure* _oop_cl;
+public:
+  G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)        { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
+};
+
+class G1UpdateRSOrPushRefOopClosure: public OopClosure {
+  G1CollectedHeap* _g1;
+  G1RemSet* _g1_rem_set;
+  HeapRegion* _from;
+  OopsInHeapRegionClosure* _push_ref_cl;
+  bool _record_refs_into_cset;
+  int _worker_i;
+
+public:
+  G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
+                                G1RemSet* rs,
+                                OopsInHeapRegionClosure* push_ref_cl,
+                                bool record_refs_into_cset,
+                                int worker_i = 0);
+
+  void set_from(HeapRegion* from) {
+    assert(from != NULL, "from region must be non-NULL");
+    _from = from;
+  }
+
+  bool self_forwarded(oop obj) {
+    bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
+    return result;
+  }
+
+  bool apply_to_weak_ref_discovered_field() { return true; }
+
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+  virtual void do_oop(oop* p)       { do_oop_nv(p); }
+};
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
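
The three new closures declared above compose at their call site in g1RemSet.cpp: G1TriggerClosure records that it ran, G1InvokeIfNotTriggeredClosure gates another closure on that flag, and G1Mux2Closure chains two closures. A minimal sketch of the same composition pattern with simplified stand-in types (not the HotSpot OopClosure hierarchy):

#include <cstdio>

struct Ref { int value; };            // stand-in for an oop* / narrowOop*

struct Closure {                      // stand-in for OopClosure
  virtual ~Closure() { }
  virtual void do_ref(Ref* p) = 0;
};

struct TriggerClosure : Closure {     // remembers that it was applied
  bool triggered;
  TriggerClosure() : triggered(false) { }
  virtual void do_ref(Ref*) { triggered = true; }
};

struct InvokeIfNotTriggered : Closure {  // gates a closure on the trigger
  TriggerClosure* trigger;
  Closure* inner;
  InvokeIfNotTriggered(TriggerClosure* t, Closure* c) : trigger(t), inner(c) { }
  virtual void do_ref(Ref* p) { if (!trigger->triggered) inner->do_ref(p); }
};

struct Mux2 : Closure {               // applies two closures in sequence
  Closure* c1;
  Closure* c2;
  Mux2(Closure* a, Closure* b) : c1(a), c2(b) { }
  virtual void do_ref(Ref* p) { c1->do_ref(p); c2->do_ref(p); }
};

struct PrintClosure : Closure {
  virtual void do_ref(Ref* p) { printf("visited %d\n", p->value); }
};

int main() {
  Ref r = { 42 };
  TriggerClosure trigger;
  PrintClosure print;
  InvokeIfNotTriggered gated(&trigger, &print);
  Mux2 mux(&gated, &trigger);  // visit (if not yet triggered), then trip the trigger
  mux.do_ref(&r);              // prints once and sets the trigger
  mux.do_ref(&r);              // trigger already set: the print is skipped
  return 0;
}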
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,8 @@
 // perf-critical inner loop.
 #define FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT 0
 
-template <class T> inline void FilterIntoCSClosure::do_oop_nv(T* p) {
+template <class T>
+inline void FilterIntoCSClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop) &&
       _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
@@ -53,7 +54,8 @@
 
 #define FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT 0
 
-template <class T> inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
+template <class T>
+inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
@@ -67,7 +69,8 @@
 }
 
 // This closure is applied to the fields of the objects that have just been copied.
-template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
+template <class T>
+inline void G1ParScanClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
 
   if (!oopDesc::is_null(heap_oop)) {
@@ -96,7 +99,8 @@
   }
 }
 
-template <class T> inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
+template <class T>
+inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
 
   if (!oopDesc::is_null(heap_oop)) {
@@ -111,7 +115,8 @@
   }
 }
 
-template <class T> inline void G1CMOopClosure::do_oop_nv(T* p) {
+template <class T>
+inline void G1CMOopClosure::do_oop_nv(T* p) {
   assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
   assert(!_g1h->is_on_master_free_list(
                     _g1h->heap_region_containing((HeapWord*) p)), "invariant");
@@ -125,4 +130,97 @@
   _task->deal_with_reference(obj);
 }
 
+template <class T>
+inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
+    if (hr != NULL) {
+      _cm->grayRoot(obj, obj->size(), _worker_id, hr);
+    }
+  }
+}
+
+template <class T>
+inline void G1Mux2Closure::do_oop_nv(T* p) {
+  // Apply first closure; then apply the second.
+  _c1->do_oop(p);
+  _c2->do_oop(p);
+}
+
+template <class T>
+inline void G1TriggerClosure::do_oop_nv(T* p) {
+  // Record that this closure was actually applied (triggered).
+  _triggered = true;
+}
+
+template <class T>
+inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
+  if (!_trigger_cl->triggered()) {
+    _oop_cl->do_oop(p);
+  }
+}
+
+template <class T>
+inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
+  oop obj = oopDesc::load_decode_heap_oop(p);
+#ifdef ASSERT
+  // can't do because of races
+  // assert(obj == NULL || obj->is_oop(), "expected an oop");
+
+  // Do the safe subset of is_oop
+  if (obj != NULL) {
+#ifdef CHECK_UNHANDLED_OOPS
+    oopDesc* o = obj.obj();
+#else
+    oopDesc* o = obj;
+#endif // CHECK_UNHANDLED_OOPS
+    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
+    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
+  }
+#endif // ASSERT
+
+  assert(_from != NULL, "from region must be non-NULL");
+
+  HeapRegion* to = _g1->heap_region_containing(obj);
+  if (to != NULL && _from != to) {
+    // The _record_refs_into_cset flag is true during the RSet
+    // updating part of an evacuation pause. It is false at all
+    // other times:
+    //  * rebuilding the remembered sets after a full GC
+    //  * during concurrent refinement.
+    //  * updating the remembered sets of regions in the collection
+    //    set in the event of an evacuation failure (when deferred
+    //    updates are enabled).
+
+    if (_record_refs_into_cset && to->in_collection_set()) {
+      // We are recording references that point into the collection
+      // set and this particular reference does exactly that...
+      // If the referenced object has already been forwarded
+      // to itself, we are handling an evacuation failure and
+      // we have already visited/tried to copy this object
+      // there is no need to retry.
+      if (!self_forwarded(obj)) {
+        assert(_push_ref_cl != NULL, "should not be null");
+        // Push the reference in the refs queue of the G1ParScanThreadState
+        // instance for this worker thread.
+        _push_ref_cl->do_oop(p);
+      }
+
+      // Deferred updates to the CSet are either discarded (in the normal case),
+      // or processed (if an evacuation failure occurs) at the end
+      // of the collection.
+      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
+    } else {
+      // We either don't care about pushing references that point into the
+      // collection set (i.e. we're not during an evacuation pause) _or_
+      // the reference doesn't point into the collection set. Either way
+      // we add the reference directly to the RSet of the region containing
+      // the referenced object.
+      _g1_rem_set->par_write_ref(_from, p, _worker_i);
+    }
+  }
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
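
The decision that G1UpdateRSOrPushRefOopClosure::do_oop_nv() documents above has three outcomes: do nothing for intra-region references, push references into the collection set onto the worker's queue while recording (unless the object is self-forwarded after an evacuation failure), and otherwise write the reference straight into the destination region's remembered set. A compact sketch of that branching, with boolean stand-ins for the real predicates:

#include <cstdio>

enum Action { kNothing, kPushRef, kUpdateRSet };

// Simplified stand-in for the branch structure of do_oop_nv() above.
static Action classify_reference(bool same_region,
                                 bool recording_refs_into_cset,
                                 bool points_into_cset,
                                 bool self_forwarded) {
  if (same_region) {
    return kNothing;                 // intra-region references never enter an RSet
  }
  if (recording_refs_into_cset && points_into_cset) {
    // RSet-updating phase of an evacuation pause: queue the reference for the
    // worker, unless the object is self-forwarded (already visited during an
    // evacuation failure).
    return self_forwarded ? kNothing : kPushRef;
  }
  return kUpdateRSet;                // everything else goes to the region's RSet
}

int main() {
  printf("%d\n", (int) classify_reference(false, true,  true,  false));  // 1: push
  printf("%d\n", (int) classify_reference(false, false, true,  false));  // 2: update RSet
  printf("%d\n", (int) classify_reference(true,  true,  true,  false));  // 0: nothing
  return 0;
}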
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -569,40 +569,26 @@
 
 static IntHistogram out_of_histo(50, 50);
 
-class TriggerClosure : public OopClosure {
-  bool _trigger;
-public:
-  TriggerClosure() : _trigger(false) { }
-  bool value() const { return _trigger; }
-  template <class T> void do_oop_nv(T* p) { _trigger = true; }
-  virtual void do_oop(oop* p)        { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
-};
+
+G1TriggerClosure::G1TriggerClosure() :
+  _triggered(false) { }
+
+G1InvokeIfNotTriggeredClosure::G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t_cl,
+                                                             OopClosure* oop_cl)  :
+  _trigger_cl(t_cl), _oop_cl(oop_cl) { }
 
-class InvokeIfNotTriggeredClosure: public OopClosure {
-  TriggerClosure* _t;
-  OopClosure* _oc;
-public:
-  InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
-    _t(t), _oc(oc) { }
-  template <class T> void do_oop_nv(T* p) {
-    if (!_t->value()) _oc->do_oop(p);
-  }
-  virtual void do_oop(oop* p)        { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
-};
+G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) :
+  _c1(c1), _c2(c2) { }
 
-class Mux2Closure : public OopClosure {
-  OopClosure* _c1;
-  OopClosure* _c2;
-public:
-  Mux2Closure(OopClosure *c1, OopClosure *c2) : _c1(c1), _c2(c2) { }
-  template <class T> void do_oop_nv(T* p) {
-    _c1->do_oop(p); _c2->do_oop(p);
-  }
-  virtual void do_oop(oop* p)        { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
-};
+G1UpdateRSOrPushRefOopClosure::
+G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
+                              G1RemSet* rs,
+                              OopsInHeapRegionClosure* push_ref_cl,
+                              bool record_refs_into_cset,
+                              int worker_i) :
+  _g1(g1h), _g1_rem_set(rs), _from(NULL),
+  _record_refs_into_cset(record_refs_into_cset),
+  _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
 
 bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                                    bool check_for_refs_into_cset) {
@@ -629,17 +615,17 @@
     assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
     oops_in_heap_closure = _cset_rs_update_cl[worker_i];
   }
-  UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
-                                               _g1->g1_rem_set(),
-                                               oops_in_heap_closure,
-                                               check_for_refs_into_cset,
-                                               worker_i);
+  G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
+                                                 _g1->g1_rem_set(),
+                                                 oops_in_heap_closure,
+                                                 check_for_refs_into_cset,
+                                                 worker_i);
   update_rs_oop_cl.set_from(r);
 
-  TriggerClosure trigger_cl;
+  G1TriggerClosure trigger_cl;
   FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
-  InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
-  Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
+  G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
+  G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
 
   FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                         (check_for_refs_into_cset ?
@@ -688,7 +674,7 @@
     _conc_refine_cards++;
   }
 
-  return trigger_cl.value();
+  return trigger_cl.triggered();
 }
 
 bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -191,44 +191,5 @@
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 };
 
-class UpdateRSOrPushRefOopClosure: public OopClosure {
-  G1CollectedHeap* _g1;
-  G1RemSet* _g1_rem_set;
-  HeapRegion* _from;
-  OopsInHeapRegionClosure* _push_ref_cl;
-  bool _record_refs_into_cset;
-  int _worker_i;
-
-  template <class T> void do_oop_work(T* p);
-
-public:
-  UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
-                              G1RemSet* rs,
-                              OopsInHeapRegionClosure* push_ref_cl,
-                              bool record_refs_into_cset,
-                              int worker_i = 0) :
-    _g1(g1h),
-    _g1_rem_set(rs),
-    _from(NULL),
-    _record_refs_into_cset(record_refs_into_cset),
-    _push_ref_cl(push_ref_cl),
-    _worker_i(worker_i) { }
-
-  void set_from(HeapRegion* from) {
-    assert(from != NULL, "from region must be non-NULL");
-    _from = from;
-  }
-
-  bool self_forwarded(oop obj) {
-    bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
-    return result;
-  }
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(oop* p)       { do_oop_work(p); }
-
-  bool apply_to_weak_ref_discovered_field() { return true; }
-};
-
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,66 +85,4 @@
   }
 }
 
-template <class T>
-inline void UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
-  oop obj = oopDesc::load_decode_heap_oop(p);
-#ifdef ASSERT
-  // can't do because of races
-  // assert(obj == NULL || obj->is_oop(), "expected an oop");
-
-  // Do the safe subset of is_oop
-  if (obj != NULL) {
-#ifdef CHECK_UNHANDLED_OOPS
-    oopDesc* o = obj.obj();
-#else
-    oopDesc* o = obj;
-#endif // CHECK_UNHANDLED_OOPS
-    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
-  }
-#endif // ASSERT
-
-  assert(_from != NULL, "from region must be non-NULL");
-
-  HeapRegion* to = _g1->heap_region_containing(obj);
-  if (to != NULL && _from != to) {
-    // The _record_refs_into_cset flag is true during the RSet
-    // updating part of an evacuation pause. It is false at all
-    // other times:
-    //  * rebuilding the rembered sets after a full GC
-    //  * during concurrent refinement.
-    //  * updating the remembered sets of regions in the collection
-    //    set in the event of an evacuation failure (when deferred
-    //    updates are enabled).
-
-    if (_record_refs_into_cset && to->in_collection_set()) {
-      // We are recording references that point into the collection
-      // set and this particular reference does exactly that...
-      // If the referenced object has already been forwarded
-      // to itself, we are handling an evacuation failure and
-      // we have already visited/tried to copy this object
-      // there is no need to retry.
-      if (!self_forwarded(obj)) {
-        assert(_push_ref_cl != NULL, "should not be null");
-        // Push the reference in the refs queue of the G1ParScanThreadState
-        // instance for this worker thread.
-        _push_ref_cl->do_oop(p);
-      }
-
-      // Deferred updates to the CSet are either discarded (in the normal case),
-      // or processed (if an evacuation failure occurs) at the end
-      // of the collection.
-      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
-    } else {
-      // We either don't care about pushing references that point into the
-      // collection set (i.e. we're not during an evacuation pause) _or_
-      // the reference doesn't point into the collection set. Either way
-      // we add the reference directly to the RSet of the region containing
-      // the referenced object.
-      _g1_rem_set->par_write_ref(_from, p, _worker_i);
-    }
-  }
-}
-
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,14 @@
 
 // Forward declarations.
 enum G1Barrier {
-  G1BarrierNone, G1BarrierRS, G1BarrierEvac
+  G1BarrierNone,
+  G1BarrierRS,
+  G1BarrierEvac
 };
 
-template<bool do_gen_barrier, G1Barrier barrier,
-         bool do_mark_object>
+template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
 class G1ParCopyClosure;
+
 class G1ParScanClosure;
 class G1ParPushHeapRSClosure;
 
@@ -46,6 +48,13 @@
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
 class G1CMOopClosure;
+class G1RootRegionScanClosure;
+
+// Specialized oop closures from g1RemSet.cpp
+class G1Mux2Closure;
+class G1TriggerClosure;
+class G1InvokeIfNotTriggeredClosure;
+class G1UpdateRSOrPushRefOopClosure;
 
 #ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
 #error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."
@@ -57,7 +66,12 @@
       f(G1ParPushHeapRSClosure,_nv)                     \
       f(FilterIntoCSClosure,_nv)                        \
       f(FilterOutOfRegionClosure,_nv)                   \
-      f(G1CMOopClosure,_nv)
+      f(G1CMOopClosure,_nv)                             \
+      f(G1RootRegionScanClosure,_nv)                    \
+      f(G1Mux2Closure,_nv)                              \
+      f(G1TriggerClosure,_nv)                           \
+      f(G1InvokeIfNotTriggeredClosure,_nv)              \
+      f(G1UpdateRSOrPushRefOopClosure,_nv)
 
 #ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
 #error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -659,7 +659,7 @@
   // If we're within a stop-world GC, then we might look at a card in a
   // GC alloc region that extends onto a GC LAB, which may not be
   // parseable.  Stop such at the "saved_mark" of the region.
-  if (G1CollectedHeap::heap()->is_gc_active()) {
+  if (g1h->is_gc_active()) {
     mr = mr.intersection(used_region_at_save_marks());
   } else {
     mr = mr.intersection(used_region());
@@ -688,53 +688,63 @@
     OrderAccess::storeload();
   }
 
+  // Cache the boundaries of the memory region in some const locals
+  HeapWord* const start = mr.start();
+  HeapWord* const end = mr.end();
+
   // We used to use "block_start_careful" here.  But we're actually happy
   // to update the BOT while we do this...
-  HeapWord* cur = block_start(mr.start());
-  assert(cur <= mr.start(), "Postcondition");
+  HeapWord* cur = block_start(start);
+  assert(cur <= start, "Postcondition");
+
+  oop obj;
 
-  while (cur <= mr.start()) {
-    if (oop(cur)->klass_or_null() == NULL) {
+  HeapWord* next = cur;
+  while (next <= start) {
+    cur = next;
+    obj = oop(cur);
+    if (obj->klass_or_null() == NULL) {
       // Ran into an unparseable point.
       return cur;
     }
     // Otherwise...
-    int sz = oop(cur)->size();
-    if (cur + sz > mr.start()) break;
-    // Otherwise, go on.
-    cur = cur + sz;
+    next = (cur + obj->size());
   }
-  oop obj;
-  obj = oop(cur);
-  // If we finish this loop...
-  assert(cur <= mr.start()
-         && obj->klass_or_null() != NULL
-         && cur + obj->size() > mr.start(),
+
+  // If we finish the above loop, we have a parseable object that
+  // begins on or before the start of the memory region, and ends
+  // inside or spans the entire region.
+
+  assert(obj == oop(cur), "sanity");
+  assert(cur <= start &&
+         obj->klass_or_null() != NULL &&
+         (cur + obj->size()) > start,
          "Loop postcondition");
+
   if (!g1h->is_obj_dead(obj)) {
     obj->oop_iterate(cl, mr);
   }
 
-  HeapWord* next;
-  while (cur < mr.end()) {
+  while (cur < end) {
     obj = oop(cur);
     if (obj->klass_or_null() == NULL) {
       // Ran into an unparseable point.
       return cur;
     };
+
     // Otherwise:
     next = (cur + obj->size());
+
     if (!g1h->is_obj_dead(obj)) {
-      if (next < mr.end()) {
+      if (next < end || !obj->is_objArray()) {
+        // This object either does not span the MemRegion
+        // boundary, or, if it does, it is not an array.
+        // Apply closure to whole object.
         obj->oop_iterate(cl);
       } else {
-        // this obj spans the boundary.  If it's an array, stop at the
-        // boundary.
-        if (obj->is_objArray()) {
-          obj->oop_iterate(cl, mr);
-        } else {
-          obj->oop_iterate(cl);
-        }
+        // This obj is an array that spans the boundary.
+        // Stop at the boundary.
+        obj->oop_iterate(cl, mr);
       }
     }
     cur = next;
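
The rewritten loop above first walks forward from the block start to the object covering mr.start(), then applies the closure object by object up to mr.end(), clipping only arrays that span the end of the region. A toy sketch of the same forward walk over a size-prefixed object layout (a hypothetical layout, not HeapRegion's):

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy heap: objects are laid out back to back and each object's first word
// holds its size in words; 'start' must fall inside the laid-out objects.
typedef std::vector<size_t> ToyHeap;

static void walk_objects(const ToyHeap& heap, size_t start, size_t end) {
  size_t cur = 0;                    // block_start() stand-in: scan from the bottom
  size_t next = cur;
  while (next <= start) {            // find the object spanning 'start'
    cur = next;
    next = cur + heap[cur];
  }
  while (cur < end) {                // visit objects until the region end
    size_t size = heap[cur];
    printf("object at %zu, %zu words%s\n", cur, size,
           (cur + size > end) ? " (spans the region end)" : "");
    cur += size;
  }
}

int main() {
  ToyHeap heap;
  size_t sizes[] = { 3, 2, 4, 5 };   // four objects of 3, 2, 4 and 5 words
  for (size_t i = 0; i < 4; i++) {
    heap.push_back(sizes[i]);
    for (size_t j = 1; j < sizes[i]; j++) heap.push_back(0);  // filler words
  }
  walk_objects(heap, 4, 10);         // a "region" covering words [4, 10)
  return 0;
}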
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -374,7 +374,9 @@
     ParVerifyClaimValue        = 4,
     RebuildRSClaimValue        = 5,
     CompleteMarkCSetClaimValue = 6,
-    ParEvacFailureClaimValue   = 7
+    ParEvacFailureClaimValue   = 7,
+    AggregateCountClaimValue   = 8,
+    VerifyCountClaimValue      = 9
   };
 
   inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -72,10 +72,11 @@
 }
 
 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
-  if (during_initial_mark) {
-    if (is_survivor()) {
-      assert(false, "should not allocate survivors during IM");
-    } else {
+  if (is_survivor()) {
+    // This is how we always allocate survivors.
+    assert(_next_top_at_mark_start == bottom(), "invariant");
+  } else {
+    if (during_initial_mark) {
       // During initial-mark we'll explicitly mark any objects on old
       // regions that are pointed to by roots. Given that explicit
       // marks only make sense under NTAMS it'd be nice if we could
@@ -84,11 +85,6 @@
       // NTAMS to the end of the region so all marks will be below
       // NTAMS. We'll set it to the actual top when we retire this region.
       _next_top_at_mark_start = end();
-    }
-  } else {
-    if (is_survivor()) {
-      // This is how we always allocate survivors.
-      assert(_next_top_at_mark_start == bottom(), "invariant");
     } else {
       // We could have re-used this old region as to-space over a
       // couple of GCs since the start of the concurrent marking
@@ -101,19 +97,15 @@
 }
 
 inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
-  if (during_initial_mark) {
-    if (is_survivor()) {
-      assert(false, "should not allocate survivors during IM");
-    } else {
+  if (is_survivor()) {
+    // This is how we always allocate survivors.
+    assert(_next_top_at_mark_start == bottom(), "invariant");
+  } else {
+    if (during_initial_mark) {
       // See the comment for note_start_of_copying() for the details
       // on this.
       assert(_next_top_at_mark_start == end(), "pre-condition");
       _next_top_at_mark_start = top();
-    }
-  } else {
-    if (is_survivor()) {
-      // This is how we always allocate survivors.
-      assert(_next_top_at_mark_start == bottom(), "invariant");
     } else {
       // See the comment for note_start_of_copying() for the details
       // on this.
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,7 @@
 class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
   friend class hrs_ext_msg;
   friend class HRSPhaseSetter;
+  friend class VMStructs;
 
 protected:
   static size_t calculate_region_num(HeapRegion* hr);
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,8 @@
   nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
   nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
+  nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
+  nonstatic_field(G1CollectedHeap, _humongous_set,      HeapRegionSetBase)    \
                                                                               \
   nonstatic_field(G1MonitoringSupport, _eden_committed,     size_t)           \
   nonstatic_field(G1MonitoringSupport, _eden_used,          size_t)           \
@@ -47,6 +49,10 @@
   nonstatic_field(G1MonitoringSupport, _survivor_used,      size_t)           \
   nonstatic_field(G1MonitoringSupport, _old_committed,      size_t)           \
   nonstatic_field(G1MonitoringSupport, _old_used,           size_t)           \
+                                                                              \
+  nonstatic_field(HeapRegionSetBase,   _length,             size_t)           \
+  nonstatic_field(HeapRegionSetBase,   _region_num,         size_t)           \
+  nonstatic_field(HeapRegionSetBase,   _total_used_bytes,   size_t)           \
 
 
 #define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
@@ -55,6 +61,7 @@
                                                                               \
   declare_type(HeapRegion, ContiguousSpace)                                   \
   declare_toplevel_type(HeapRegionSeq)                                        \
+  declare_toplevel_type(HeapRegionSetBase)                                    \
   declare_toplevel_type(G1MonitoringSupport)                                  \
                                                                               \
   declare_toplevel_type(G1CollectedHeap*)                                     \
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,8 +74,9 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-   (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
-         "only a GC locker or a System.gc() induced GC should start a cycle");
+   (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
+    _gc_cause == GCCause::_g1_humongous_allocation),
+         "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,9 @@
     case _g1_inc_collection_pause:
       return "G1 Evacuation Pause";
 
+    case _g1_humongous_allocation:
+      return "G1 Humongous Allocation";
+
     case _last_ditch_collection:
       return "Last ditch collection";
 
--- a/hotspot/src/share/vm/gc_interface/gcCause.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/gc_interface/gcCause.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,6 +66,7 @@
     _adaptive_size_policy,
 
     _g1_inc_collection_pause,
+    _g1_humongous_allocation,
 
     _last_ditch_collection,
     _last_gc_cause
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -859,7 +859,9 @@
   const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
   const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
 
+  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
   nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
+  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
 
   if (osr_nm != NULL) {
     // We may need to do on-stack replacement which requires that no
--- a/hotspot/src/share/vm/oops/klass.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/oops/klass.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -158,6 +158,9 @@
   kl->set_next_sibling(NULL);
   kl->set_alloc_count(0);
   kl->set_alloc_size(0);
+#ifdef TRACE_SET_KLASS_TRACE_ID
+  TRACE_SET_KLASS_TRACE_ID(kl, 0);
+#endif
 
   kl->set_prototype_header(markOopDesc::prototype());
   kl->set_biased_lock_revocation_count(0);
--- a/hotspot/src/share/vm/oops/klass.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/oops/klass.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -33,6 +33,7 @@
 #include "oops/klassPS.hpp"
 #include "oops/oop.hpp"
 #include "runtime/orderAccess.hpp"
+#include "trace/traceMacros.hpp"
 #include "utilities/accessFlags.hpp"
 #ifndef SERIALGC
 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
@@ -80,6 +81,7 @@
 //    [last_biased_lock_bulk_revocation_time] (64 bits)
 //    [prototype_header]
 //    [biased_lock_revocation_count]
+//    [trace_id]
 
 
 // Forward declarations.
@@ -263,6 +265,9 @@
   markOop  _prototype_header;   // Used when biased locking is both enabled and disabled for this type
   jint     _biased_lock_revocation_count;
 
+#ifdef TRACE_DEFINE_KLASS_TRACE_ID
+  TRACE_DEFINE_KLASS_TRACE_ID;
+#endif
  public:
 
   // returns the enclosing klassOop
@@ -683,6 +688,9 @@
   jlong last_biased_lock_bulk_revocation_time() { return _last_biased_lock_bulk_revocation_time; }
   void  set_last_biased_lock_bulk_revocation_time(jlong cur_time) { _last_biased_lock_bulk_revocation_time = cur_time; }
 
+#ifdef TRACE_DEFINE_KLASS_METHODS
+  TRACE_DEFINE_KLASS_METHODS;
+#endif
 
   // garbage collection support
   virtual void follow_weak_klass_links(
--- a/hotspot/src/share/vm/oops/methodKlass.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/oops/methodKlass.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -83,6 +83,7 @@
   m->set_max_stack(0);
   m->set_max_locals(0);
   m->set_intrinsic_id(vmIntrinsics::_none);
+  m->set_jfr_towrite(false);
   m->set_method_data(NULL);
   m->set_interpreter_throwout_count(0);
   m->set_vtable_index(methodOopDesc::garbage_vtable_index);
--- a/hotspot/src/share/vm/oops/methodOop.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/oops/methodOop.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -77,7 +77,7 @@
 // | method_size             | max_stack                  |
 // | max_locals              | size_of_parameters         |
 // |------------------------------------------------------|
-// | intrinsic_id, (unused)  |  throwout_count            |
+// |intrinsic_id|   flags    |  throwout_count            |
 // |------------------------------------------------------|
 // | num_breakpoints         |  (unused)                  |
 // |------------------------------------------------------|
@@ -124,6 +124,8 @@
   u2                _max_locals;                 // Number of local variables used by this method
   u2                _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
   u1                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
+  u1                _jfr_towrite : 1,            // Flags
+                                 : 7;
   u2                _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
   u2                _number_of_breakpoints;      // fullspeed debugging support
   InvocationCounter _invocation_counter;         // Incremented before each activation of the method - used to trigger frequency-based optimizations
@@ -225,6 +227,7 @@
   void clear_number_of_breakpoints()             { _number_of_breakpoints = 0; }
 
   // index into instanceKlass methods() array
+  // note: also used by jfr
   u2 method_idnum() const           { return constMethod()->method_idnum(); }
   void set_method_idnum(u2 idnum)   { constMethod()->set_method_idnum(idnum); }
 
@@ -650,6 +653,9 @@
   void init_intrinsic_id();     // updates from _none if a match
   static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);
 
+  bool jfr_towrite()                 { return _jfr_towrite; }
+  void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; }
+
   // On-stack replacement support
   bool has_osr_nmethod(int level, bool match_level) {
    return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
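
The new _jfr_towrite field above packs a single flag into one byte next to seven reserved bits, exposed through plain getter/setter accessors. A minimal sketch of the same bit-field idiom (a hypothetical struct, not methodOopDesc):

#include <cstdio>

struct MethodFlags {
  unsigned char _jfr_towrite : 1,    // the flag itself
                             : 7;    // reserved for future flags

  bool jfr_towrite() const           { return _jfr_towrite; }
  void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; }
};

int main() {
  MethodFlags m = { 0 };
  printf("%zu byte(s), flag=%d\n", sizeof(MethodFlags), (int) m.jfr_towrite());
  m.set_jfr_towrite(true);
  printf("flag=%d\n", (int) m.jfr_towrite());
  return 0;
}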
--- a/hotspot/src/share/vm/opto/block.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/opto/block.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -284,13 +284,13 @@
   // helper function that adds caller save registers to MachProjNode
   void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
   // Schedule a call next in the block
-  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);
+  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
 
   // Perform basic-block local scheduling
-  Node *select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot);
+  Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
   void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
   void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
-  bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
+  bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
   // Cleanup if any code lands between a Call and his Catch
   void call_catch_cleanup(Block_Array &bbs);
   // Detect implicit-null-check opportunities.  Basically, find NULL checks
--- a/hotspot/src/share/vm/opto/gcm.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1344,8 +1344,8 @@
 
   // Schedule locally.  Right now a simple topological sort.
   // Later, do a real latency aware scheduler.
-  int *ready_cnt = NEW_RESOURCE_ARRAY(int,C->unique());
-  memset( ready_cnt, -1, C->unique() * sizeof(int) );
+  uint max_idx = C->unique();
+  GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
   visited.Clear();
   for (i = 0; i < _num_blocks; i++) {
     if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
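
The change above swaps a raw resource-arena int array plus a memset for a GrowableArray constructed at its full length and pre-filled with -1, so every access goes through the bounds-checked at()/at_put() pair. The closest standard-library analogue, sketched purely for illustration (std::vector is not what HotSpot uses):

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t max_idx = 8;          // stand-in for C->unique()

  // A container created at full length and pre-filled with -1, accessed
  // through checked at(), mirroring GrowableArray::at()/at_put().
  std::vector<int> ready_cnt(max_idx, -1);

  ready_cnt.at(3) = 2;               // at_put(3, 2)
  int n_cnt = ready_cnt.at(3) - 1;   // read-decrement-store pattern from sched_call()
  ready_cnt.at(3) = n_cnt;

  printf("ready_cnt[3] = %d\n", ready_cnt.at(3));
  return 0;
}

The old memset to -1 relied on -1 being an all-ones byte pattern; constructing the container with an explicit -1 fill value expresses the same initialization directly.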
--- a/hotspot/src/share/vm/opto/lcm.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -404,7 +404,7 @@
 // remaining cases (most), choose the instruction with the greatest latency
 // (that is, the most number of pseudo-cycles required to the end of the
 // routine). If there is a tie, choose the instruction with the most inputs.
-Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot) {
+Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
 
   // If only a single entry on the stack, use it
   uint cnt = worklist.size();
@@ -465,7 +465,7 @@
 
         // More than this instruction pending for successor to be ready,
         // don't choose this if other opportunities are ready
-        if (ready_cnt[use->_idx] > 1)
+        if (ready_cnt.at(use->_idx) > 1)
           n_choice = 1;
       }
 
@@ -565,7 +565,7 @@
 
 
 //------------------------------sched_call-------------------------------------
-uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
+uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
   RegMask regs;
 
   // Schedule all the users of the call right now.  All the users are
@@ -574,8 +574,9 @@
   for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
     Node* n = mcall->fast_out(i);
     assert( n->is_MachProj(), "" );
-    --ready_cnt[n->_idx];
-    assert( !ready_cnt[n->_idx], "" );
+    int n_cnt = ready_cnt.at(n->_idx)-1;
+    ready_cnt.at_put(n->_idx, n_cnt);
+    assert( n_cnt == 0, "" );
     // Schedule next to call
     _nodes.map(node_cnt++, n);
     // Collect defined registers
@@ -590,7 +591,9 @@
       Node* m = n->fast_out(j); // Get user
       if( bbs[m->_idx] != this ) continue;
       if( m->is_Phi() ) continue;
-      if( !--ready_cnt[m->_idx] )
+      int m_cnt = ready_cnt.at(m->_idx)-1;
+      ready_cnt.at_put(m->_idx, m_cnt);
+      if( m_cnt == 0 )
         worklist.push(m);
     }
 
@@ -655,7 +658,7 @@
 
 //------------------------------schedule_local---------------------------------
 // Topological sort within a block.  Someday become a real scheduler.
-bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, VectorSet &next_call) {
+bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
   // Already "sorted" are the block start Node (as the first entry), and
   // the block-ending Node and any trailing control projections.  We leave
   // these alone.  PhiNodes and ParmNodes are made to follow the block start
@@ -695,7 +698,7 @@
         if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
           local++;              // One more block-local input
       }
-      ready_cnt[n->_idx] = local; // Count em up
+      ready_cnt.at_put(n->_idx, local); // Count em up
 
 #ifdef ASSERT
       if( UseConcMarkSweepGC || UseG1GC ) {
@@ -729,7 +732,7 @@
     }
   }
   for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
-    ready_cnt[_nodes[i2]->_idx] = 0;
+    ready_cnt.at_put(_nodes[i2]->_idx, 0);
 
   // All the prescheduled guys do not hold back internal nodes
   uint i3;
@@ -737,8 +740,10 @@
     Node *n = _nodes[i3];       // Get pre-scheduled
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j);
-      if( cfg->_bbs[m->_idx] ==this ) // Local-block user
-        ready_cnt[m->_idx]--;   // Fix ready count
+      if( cfg->_bbs[m->_idx] ==this ) { // Local-block user
+        int m_cnt = ready_cnt.at(m->_idx)-1;
+        ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
+      }
     }
   }
 
@@ -747,7 +752,7 @@
   Node_List worklist;
   for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
     Node *m = _nodes[i4];
-    if( !ready_cnt[m->_idx] ) {   // Zero ready count?
+    if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
       if (m->is_iteratively_computed()) {
         // Push induction variable increments last to allow other uses
         // of the phi to be scheduled first. The select() method breaks
@@ -775,14 +780,14 @@
       for (uint j=0; j<_nodes.size(); j++) {
         Node     *n = _nodes[j];
         int     idx = n->_idx;
-        tty->print("#   ready cnt:%3d  ", ready_cnt[idx]);
+        tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
         tty->print("latency:%3d  ", cfg->_node_latency->at_grow(idx));
         tty->print("%4d: %s\n", idx, n->Name());
       }
     }
 #endif
 
-  uint max_idx = matcher.C->unique();
+  uint max_idx = (uint)ready_cnt.length();
   // Pull from worklist and schedule
   while( worklist.size() ) {    // Worklist is not ready
 
@@ -840,11 +845,13 @@
       Node* m = n->fast_out(i5); // Get user
       if( cfg->_bbs[m->_idx] != this ) continue;
       if( m->is_Phi() ) continue;
-      if (m->_idx > max_idx) { // new node, skip it
+      if (m->_idx >= max_idx) { // new node, skip it
         assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
         continue;
       }
-      if( !--ready_cnt[m->_idx] )
+      int m_cnt = ready_cnt.at(m->_idx)-1;
+      ready_cnt.at_put(m->_idx, m_cnt);
+      if( m_cnt == 0 )
         worklist.push(m);
     }
   }
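
The block.hpp / gcm.cpp / lcm.cpp hunks above all serve one change: the per-node ready counts used by local scheduling move from a raw resource-allocated int array (written through unchecked indexing) to a GrowableArray<int> that is pre-filled with -1 and accessed through the bounds-checked at()/at_put() accessors, and the skip test in schedule_local now compares a node's _idx against the table length with >=. Nodes created while scheduling is already running (the MachProj nodes of calls, per the assert) are therefore skipped instead of indexing past the end of the old array. Below is a minimal stand-alone sketch of that access pattern; it uses std::vector<int> purely as a stand-in for GrowableArray, and every name in it is illustrative rather than VM code.

    // Illustrative sketch only (not VM code): std::vector<int> stands in for
    // GrowableArray<int>(max_idx, max_idx, -1); at()/at_put() become
    // bounds-checked vector accesses.
    #include <cstdio>
    #include <vector>

    int main() {
      const unsigned max_idx = 8;               // number of nodes when scheduling starts
      std::vector<int> ready_cnt(max_idx, -1);  // every count starts at -1

      ready_cnt.at(3) = 2;                      // ready_cnt.at_put(3, 2): node 3 waits on 2 inputs

      const unsigned uses[] = {3, 3, 9};        // 9 plays a node created during scheduling
      for (unsigned use_idx : uses) {
        if (use_idx >= max_idx) {               // the new m->_idx >= max_idx skip
          std::printf("skipping late-created node %u\n", use_idx);
          continue;
        }
        int cnt = ready_cnt.at(use_idx) - 1;    // decrement via at()/at_put()
        ready_cnt.at(use_idx) = cnt;
        if (cnt == 0) std::printf("node %u is now ready\n", use_idx);
      }
      return 0;
    }
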
--- a/hotspot/src/share/vm/opto/memnode.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1718,8 +1718,10 @@
   bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
   if (ReduceFieldZeroing || is_instance) {
     Node* value = can_see_stored_value(mem,phase);
-    if (value != NULL && value->is_Con())
+    if (value != NULL && value->is_Con()) {
+      assert(value->bottom_type()->higher_equal(_type),"sanity");
       return value->bottom_type();
+    }
   }
 
   if (is_instance) {
@@ -1759,6 +1761,20 @@
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadBNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit the load's result type,
+    // it must be truncated. We can't delay this until the Ideal call, since
+    // a singleton Value is needed for the split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make((con << 24) >> 24);
+  }
+  return LoadNode::Value(phase);
+}
+
 //--------------------------LoadUBNode::Ideal-------------------------------------
 //
 //  If the previous store is to the same address as this load,
@@ -1775,6 +1791,20 @@
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadUBNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit the load's result type,
+    // it must be truncated. We can't delay this until the Ideal call, since
+    // a singleton Value is needed for the split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make(con & 0xFF);
+  }
+  return LoadNode::Value(phase);
+}
+
 //--------------------------LoadUSNode::Ideal-------------------------------------
 //
 //  If the previous store is to the same address as this load,
@@ -1791,6 +1821,20 @@
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadUSNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit the load's result type,
+    // it must be truncated. We can't delay this until the Ideal call, since
+    // a singleton Value is needed for the split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make(con & 0xFFFF);
+  }
+  return LoadNode::Value(phase);
+}
+
 //--------------------------LoadSNode::Ideal--------------------------------------
 //
 //  If the previous store is to the same address as this load,
@@ -1809,6 +1853,20 @@
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadSNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit the load's result type,
+    // it must be truncated. We can't delay this until the Ideal call, since
+    // a singleton Value is needed for the split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make((con << 16) >> 16);
+  }
+  return LoadNode::Value(phase);
+}
+
 //=============================================================================
 //----------------------------LoadKlassNode::make------------------------------
 // Polymorphic factory method:
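
The new LoadBNode/LoadUBNode/LoadUSNode/LoadSNode::Value() methods above let a load fold a constant it can see through memory even when that constant is wider than the load's memory type; without them the stored constant's type would flow through unchanged and trip the sanity assert added in LoadNode::Value(). The expressions used are the usual narrowing idioms: arithmetic shift pairs for the signed cases and masks for the unsigned ones. The stand-alone check below is not part of the patch; it confirms the expressions match byte/short load semantics. The left shifts go through uint32_t only so the sketch itself avoids signed-overflow UB, and it assumes the usual arithmetic right shift for negative values.

    // Illustrative check only, not VM code.
    #include <cassert>
    #include <cstdint>

    static int fold_load_b (int con) { return (int32_t)((uint32_t)con << 24) >> 24; } // signed byte
    static int fold_load_ub(int con) { return con & 0xFF;   }                         // unsigned byte
    static int fold_load_us(int con) { return con & 0xFFFF; }                         // unsigned short (char)
    static int fold_load_s (int con) { return (int32_t)((uint32_t)con << 16) >> 16; } // signed short

    int main() {
      int con = 0x1F0F0;                            // a constant wider than byte/short
      assert(fold_load_b (con) == (int8_t)con);     // -16
      assert(fold_load_ub(con) == 0xF0);
      assert(fold_load_us(con) == 0xF0F0);
      assert(fold_load_s (con) == (int16_t)con);    // -3856
      return 0;
    }
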
--- a/hotspot/src/share/vm/opto/memnode.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -215,6 +215,7 @@
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
   virtual int store_Opcode() const { return Op_StoreB; }
   virtual BasicType memory_type() const { return T_BYTE; }
 };
@@ -228,6 +229,7 @@
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
   virtual int store_Opcode() const { return Op_StoreB; }
   virtual BasicType memory_type() const { return T_BYTE; }
 };
@@ -241,10 +243,25 @@
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
   virtual int store_Opcode() const { return Op_StoreC; }
   virtual BasicType memory_type() const { return T_CHAR; }
 };
 
+//------------------------------LoadSNode--------------------------------------
+// Load a short (16bits signed) from memory
+class LoadSNode : public LoadNode {
+public:
+  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
+    : LoadNode(c,mem,adr,at,ti) {}
+  virtual int Opcode() const;
+  virtual uint ideal_reg() const { return Op_RegI; }
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
+  virtual int store_Opcode() const { return Op_StoreC; }
+  virtual BasicType memory_type() const { return T_SHORT; }
+};
+
 //------------------------------LoadINode--------------------------------------
 // Load an integer from memory
 class LoadINode : public LoadNode {
@@ -433,19 +450,6 @@
 };
 
 
-//------------------------------LoadSNode--------------------------------------
-// Load a short (16bits signed) from memory
-class LoadSNode : public LoadNode {
-public:
-  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
-    : LoadNode(c,mem,adr,at,ti) {}
-  virtual int Opcode() const;
-  virtual uint ideal_reg() const { return Op_RegI; }
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual int store_Opcode() const { return Op_StoreC; }
-  virtual BasicType memory_type() const { return T_SHORT; }
-};
-
 //------------------------------StoreNode--------------------------------------
 // Store value; requires Store, Address and Value
 class StoreNode : public MemNode {
--- a/hotspot/src/share/vm/opto/parseHelper.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -71,14 +71,14 @@
   // Throw uncommon trap if class is not loaded or the value we are casting
   // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
   // then the checkcast does nothing.
-  const TypeInstPtr *tp = _gvn.type(obj)->isa_instptr();
-  if (!will_link || (tp && !tp->is_loaded())) {
+  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
+  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
     if (C->log() != NULL) {
       if (!will_link) {
         C->log()->elem("assert_null reason='checkcast' klass='%d'",
                        C->log()->identify(klass));
       }
-      if (tp && !tp->is_loaded()) {
+      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
         // %%% Cannot happen?
         C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                        C->log()->identify(tp->klass()));
--- a/hotspot/src/share/vm/prims/jni.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/prims/jni.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -48,6 +48,7 @@
 #include "oops/typeArrayOop.hpp"
 #include "prims/jni.h"
 #include "prims/jniCheck.hpp"
+#include "prims/jniExport.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
@@ -66,6 +67,8 @@
 #include "runtime/signature.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/runtimeService.hpp"
+#include "trace/tracing.hpp"
+#include "trace/traceEventTypes.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
@@ -5139,6 +5142,11 @@
     if (JvmtiExport::should_post_thread_life()) {
        JvmtiExport::post_thread_start(thread);
     }
+
+    EVENT_BEGIN(TraceEventThreadStart, event);
+    EVENT_COMMIT(event,
+        EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
+
     // Check if we should compile all classes on bootclasspath
     NOT_PRODUCT(if (CompileTheWorld) ClassLoader::compile_the_world();)
     // Since this is not a JVM_ENTRY we have to set the thread state manually before leaving.
@@ -5337,6 +5345,10 @@
     JvmtiExport::post_thread_start(thread);
   }
 
+  EVENT_BEGIN(TraceEventThreadStart, event);
+  EVENT_COMMIT(event,
+      EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
+
   *(JNIEnv**)penv = thread->jni_environment();
 
   // Now leaving the VM, so change thread_state. This is normally automatically taken care
@@ -5464,8 +5476,7 @@
     return ret;
   }
 
-  if (JvmtiExport::is_jvmti_version(version)) {
-    ret = JvmtiExport::get_jvmti_interface(vm, penv, version);
+  if (JniExportedInterface::GetExportedInterface(vm, penv, version, &ret)) {
     return ret;
   }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/prims/jniExport.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_PRIMS_JNI_EXPORT_HPP
+#define SHARE_VM_PRIMS_JNI_EXPORT_HPP
+
+#include "prims/jni.h"
+#include "prims/jvmtiExport.hpp"
+
+class JniExportedInterface {
+ public:
+  static bool GetExportedInterface(JavaVM* vm, void** penv, jint version, jint* iface) {
+    if (JvmtiExport::is_jvmti_version(version)) {
+      *iface = JvmtiExport::get_jvmti_interface(vm, penv, version);
+      return true;
+    }
+    return false;
+  }
+};
+
+#endif // SHARE_VM_PRIMS_JNI_EXPORT_HPP
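
Together with the jni.cpp hunk above, the new jniExport.hpp header replaces the inline JvmtiExport::is_jvmti_version() check in GetEnv with a single question to JniExportedInterface, which reports whether the requested version names an exported interface (currently only JVMTI) and fills in the return code if so. A stand-alone mimic of that dispatch shape follows; apart from the pattern itself everything in it is hypothetical, including the version-tag check, which assumes the JVMTI-style version encoding.

    // Hypothetical stand-in for the GetEnv dispatch; not VM code.
    #include <cstdio>

    static bool get_exported_interface(void** penv, int version, int* iface) {
      // assumption: JVMTI-style versions carry 0x3 in the interface-type bits
      if ((version & 0x70000000) == 0x30000000) {
        *penv  = nullptr;   // a real VM would hand out the JVMTI environment here
        *iface = 0;         // JNI_OK: request fully handled
        return true;
      }
      return false;         // not exported; caller falls through to plain JNI checks
    }

    int main() {
      void* env = nullptr;
      int   ret = -3;       // JNI_EVERSION until something claims the request
      if (!get_exported_interface(&env, 0x30010200 /* a JVMTI-style version */, &ret)) {
        // ... ordinary JNI version handling would run here ...
      }
      std::printf("ret=%d\n", ret);
      return 0;
    }
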
--- a/hotspot/src/share/vm/prims/jvmtiThreadState.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiThreadState.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -319,6 +319,15 @@
   // clearing the flag indicates we are done with the PopFrame() dance
   clr_pending_step_for_popframe();
 
+  // If an exception was thrown in this frame, the jvmti thread state needs
+  // to be reset. Single stepping may not get enabled correctly by the agent,
+  // since the exception state is passed in the MethodExit event, which may
+  // be sent at some time in the future. The JDWP agent ignores MethodExit
+  // events caused by an exception.
+  //
+  if (is_exception_detected()) {
+    clear_exception_detected();
+  }
   // If step is pending for popframe then it may not be
   // a repeat step. The new_bci and method_id is same as current_bci
   // and current method_id after pop and step for recursive calls.
@@ -385,6 +394,15 @@
   // the ForceEarlyReturn() dance
   clr_pending_step_for_earlyret();
 
+  // If an exception was thrown in this frame, the jvmti thread state needs
+  // to be reset. Single stepping may not get enabled correctly by the agent,
+  // since the exception state is passed in the MethodExit event, which may
+  // be sent at some time in the future. The JDWP agent ignores MethodExit
+  // events caused by an exception.
+  //
+  if (is_exception_detected()) {
+    clear_exception_detected();
+  }
   // If step is pending for earlyret then it may not be a repeat step.
   // The new_bci and method_id is same as current_bci and current
   // method_id after earlyret and step for recursive calls.
--- a/hotspot/src/share/vm/prims/jvmtiThreadState.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiThreadState.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -165,6 +165,10 @@
   inline bool is_exception_caught()    { return _exception_caught;  }
   inline void set_exception_detected() { _exception_detected = true;
                                          _exception_caught = false; }
+  inline void clear_exception_detected() {
+    _exception_detected = false;
+    assert(_exception_caught == false, "_exception_caught is out of phase");
+  }
   inline void set_exception_caught()   { _exception_caught = true;
                                          _exception_detected = false; }
 
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -194,9 +194,6 @@
 // MethodHandles::generate_adapters
 //
 void MethodHandles::generate_adapters() {
-#ifdef TARGET_ARCH_NYI_6939861
-  if (FLAG_IS_DEFAULT(UseRicochetFrames))  UseRicochetFrames = false;
-#endif
   if (!EnableInvokeDynamic || SystemDictionary::MethodHandle_klass() == NULL)  return;
 
   assert(_adapter_code == NULL, "generate only once");
@@ -230,18 +227,6 @@
 }
 
 
-#ifdef TARGET_ARCH_NYI_6939861
-// these defs belong in methodHandles_<arch>.cpp
-frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
-  ShouldNotCallThis();
-  return fr;
-}
-void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* f, const RegisterMap* reg_map) {
-  ShouldNotCallThis();
-}
-#endif //TARGET_ARCH_NYI_6939861
-
-
 //------------------------------------------------------------------------------
 // MethodHandles::ek_supported
 //
@@ -251,28 +236,11 @@
   case _adapter_unused_13:
     return false;  // not defined yet
   case _adapter_prim_to_ref:
-    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF);
+    return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF);
   case _adapter_collect_args:
-    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS);
+    return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS);
   case _adapter_fold_args:
-    return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS);
-  case _adapter_opt_return_any:
-    return UseRicochetFrames;
-#ifdef TARGET_ARCH_NYI_6939861
-  // ports before 6939861 supported only three kinds of spread ops
-  case _adapter_spread_args:
-    // restrict spreads to three kinds:
-    switch (ek) {
-    case _adapter_opt_spread_0:
-    case _adapter_opt_spread_1:
-    case _adapter_opt_spread_more:
-      break;
-    default:
-      return false;
-      break;
-    }
-    break;
-#endif //TARGET_ARCH_NYI_6939861
+    return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS);
   }
   return true;
 }
@@ -1988,9 +1956,6 @@
     case _adapter_prim_to_ref:    // boxer MH to use
     case _adapter_collect_args:   // method handle which collects the args
     case _adapter_fold_args:      // method handle which collects the args
-      if (!UseRicochetFrames) {
-        { err = "box/collect/fold operators are not supported"; break; }
-      }
       if (!java_lang_invoke_MethodHandle::is_instance(argument()))
         { err = "MethodHandle adapter argument required"; break; }
       arg_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(argument()));
@@ -2370,7 +2335,6 @@
 
   case _adapter_prim_to_ref:
     {
-      assert(UseRicochetFrames, "else don't come here");
       // vminfo will be the location to insert the return value
       vminfo = argslot;
       ek_opt = _adapter_opt_collect_ref;
@@ -2436,20 +2400,6 @@
 
   case _adapter_spread_args:
     {
-#ifdef TARGET_ARCH_NYI_6939861
-      // ports before 6939861 supported only three kinds of spread ops
-      if (!UseRicochetFrames) {
-        int array_size   = slots_pushed + 1;
-        assert(array_size >= 0, "");
-        vminfo = array_size;
-        switch (array_size) {
-        case 0:   ek_opt = _adapter_opt_spread_0;       break;
-        case 1:   ek_opt = _adapter_opt_spread_1;       break;
-        default:  ek_opt = _adapter_opt_spread_more;    break;
-        }
-        break;
-      }
-#endif //TARGET_ARCH_NYI_6939861
       // vminfo will be the required length of the array
       int array_size = (slots_pushed + 1) / (type2size[dest] == 2 ? 2 : 1);
       vminfo = array_size;
@@ -2494,7 +2444,6 @@
 
   case _adapter_collect_args:
     {
-      assert(UseRicochetFrames, "else don't come here");
       int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument()));
       // vminfo will be the location to insert the return value
       vminfo = argslot;
@@ -2563,7 +2512,6 @@
 
   case _adapter_fold_args:
     {
-      assert(UseRicochetFrames, "else don't come here");
       int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument()));
       // vminfo will be the location to insert the return value
       vminfo = argslot + elem_slots;
--- a/hotspot/src/share/vm/prims/methodHandles.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -738,46 +738,6 @@
 #ifdef TARGET_ARCH_ppc
 # include "methodHandles_ppc.hpp"
 #endif
-
-#ifdef TARGET_ARCH_NYI_6939861
-  // Here are some backward compatible declarations until the 6939861 ports are updated.
-  #define _adapter_flyby    (_EK_LIMIT + 10)
-  #define _adapter_ricochet (_EK_LIMIT + 11)
-  #define _adapter_opt_spread_1    _adapter_opt_spread_1_ref
-  #define _adapter_opt_spread_more _adapter_opt_spread_ref
-  enum {
-    _INSERT_NO_MASK   = -1,
-    _INSERT_REF_MASK  = 0,
-    _INSERT_INT_MASK  = 1,
-    _INSERT_LONG_MASK = 3
-  };
-  static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) {
-    arg_type = ek_bound_mh_arg_type(ek);
-    arg_mask = 0;
-    arg_slots = type2size[arg_type];;
-  }
-  static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) {
-    int swap_slots = ek_adapter_opt_swap_slots(ek);
-    rotate = ek_adapter_opt_swap_mode(ek);
-    swap_bytes = swap_slots * Interpreter::stackElementSize;
-  }
-  static int get_ek_adapter_opt_spread_info(EntryKind ek) {
-    return ek_adapter_opt_spread_count(ek);
-  }
-
-  static void insert_arg_slots(MacroAssembler* _masm,
-                               RegisterOrConstant arg_slots,
-                               int arg_mask,
-                               Register argslot_reg,
-                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
-
-  static void remove_arg_slots(MacroAssembler* _masm,
-                               RegisterOrConstant arg_slots,
-                               Register argslot_reg,
-                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
-
-  static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
-#endif //TARGET_ARCH_NYI_6939861
 };
 
 
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -271,13 +271,10 @@
 }
 
 // Create MDO if necessary.
-void AdvancedThresholdPolicy::create_mdo(methodHandle mh, TRAPS) {
+void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
   if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
   if (mh->method_data() == NULL) {
-    methodOopDesc::build_interpreter_method_data(mh, THREAD);
-    if (HAS_PENDING_EXCEPTION) {
-      CLEAR_PENDING_EXCEPTION;
-    }
+    methodOopDesc::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
   }
 }
 
@@ -426,22 +423,22 @@
 }
 
 // Update the rate and submit compile
-void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
+void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
   int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
   update_rate(os::javaTimeMillis(), mh());
-  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
+  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
 }
 
 // Handle the invocation event.
 void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
-                                                      CompLevel level, nmethod* nm, TRAPS) {
+                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
   if (should_create_mdo(mh(), level)) {
-    create_mdo(mh, THREAD);
+    create_mdo(mh, thread);
   }
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
-      compile(mh, InvocationEntryBci, next_level, THREAD);
+      compile(mh, InvocationEntryBci, next_level, thread);
     }
   }
 }
@@ -449,13 +446,13 @@
 // Handle the back branch event. Notice that we can compile the method
 // with a regular entry from here.
 void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
-                                                       int bci, CompLevel level, nmethod* nm, TRAPS) {
+                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   if (should_create_mdo(mh(), level)) {
-    create_mdo(mh, THREAD);
+    create_mdo(mh, thread);
   }
   // Check if MDO should be created for the inlined method
   if (should_create_mdo(imh(), level)) {
-    create_mdo(imh, THREAD);
+    create_mdo(imh, thread);
   }
 
   if (is_compilation_enabled()) {
@@ -463,7 +460,7 @@
     CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
     // At the very least compile the OSR version
     if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
-      compile(imh, bci, next_osr_level, THREAD);
+      compile(imh, bci, next_osr_level, thread);
     }
 
     // Use loop event as an opportunity to also check if there's been
@@ -502,14 +499,14 @@
           next_level = CompLevel_full_profile;
         }
         if (cur_level != next_level) {
-          compile(mh, InvocationEntryBci, next_level, THREAD);
+          compile(mh, InvocationEntryBci, next_level, thread);
         }
       }
     } else {
       cur_level = comp_level(imh());
       next_level = call_event(imh(), cur_level);
       if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
-        compile(imh, InvocationEntryBci, next_level, THREAD);
+        compile(imh, InvocationEntryBci, next_level, thread);
       }
     }
   }
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -197,7 +197,7 @@
   // determines whether we should do that.
   inline bool should_create_mdo(methodOop method, CompLevel cur_level);
   // Create MDO if necessary.
-  void create_mdo(methodHandle mh, TRAPS);
+  void create_mdo(methodHandle mh, JavaThread* thread);
   // Is method profiled enough?
   bool is_method_profiled(methodOop method);
 
@@ -208,12 +208,12 @@
   jlong start_time() const     { return _start_time; }
 
   // Submit a given method for compilation (and update the rate).
-  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS);
+  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
   // event() from SimpleThresholdPolicy would call these.
   virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
-                                       CompLevel level, nmethod* nm, TRAPS);
+                                       CompLevel level, nmethod* nm, JavaThread* thread);
   virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
-                                        int bci, CompLevel level, nmethod* nm, TRAPS);
+                                        int bci, CompLevel level, nmethod* nm, JavaThread* thread);
 public:
   AdvancedThresholdPolicy() : _start_time(0) { }
   // Select task is called by CompileBroker. We should return a task or NULL.
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1040,6 +1040,16 @@
 }
 
 #ifndef KERNEL
+static void disable_adaptive_size_policy(const char* collector_name) {
+  if (UseAdaptiveSizePolicy) {
+    if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
+      warning("disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
+              collector_name);
+    }
+    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
+  }
+}
+
 // If the user has chosen ParallelGCThreads > 0, we set UseParNewGC
 // if it's not explictly set or unset. If the user has chosen
 // UseParNewGC and not explicitly set ParallelGCThreads we
@@ -1049,11 +1059,8 @@
          "control point invariant");
   assert(UseParNewGC, "Error");
 
-  // Turn off AdaptiveSizePolicy by default for parnew until it is
-  // complete.
-  if (FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
-    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
-  }
+  // Turn off AdaptiveSizePolicy for parnew until it is complete.
+  disable_adaptive_size_policy("UseParNewGC");
 
   if (ParallelGCThreads == 0) {
     FLAG_SET_DEFAULT(ParallelGCThreads,
@@ -1110,11 +1117,8 @@
     FLAG_SET_ERGO(bool, UseParNewGC, true);
   }
 
-  // Turn off AdaptiveSizePolicy by default for cms until it is
-  // complete.
-  if (FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
-    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
-  }
+  // Turn off AdaptiveSizePolicy for CMS until it is complete.
+  disable_adaptive_size_policy("UseConcMarkSweepGC");
 
   // In either case, adjust ParallelGCThreads and/or UseParNewGC
   // as needed.
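
A behavioral note on the arguments.cpp hunk above: with ParNew or CMS selected, an explicit -XX:+UseAdaptiveSizePolicy on the command line is no longer left in force silently. disable_adaptive_size_policy() now prints the warning shown (for example "disabling UseAdaptiveSizePolicy; it is incompatible with UseConcMarkSweepGC.") and forces the flag back to false, whereas the old code only changed the default and did not touch an explicit setting.
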
--- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -306,29 +306,27 @@
   return (current >= initial + target);
 }
 
-nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
+nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci,
+                                    int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
   assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
   NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
-  if (JvmtiExport::can_post_interpreter_events()) {
-    assert(THREAD->is_Java_thread(), "Wrong type of thread");
-    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
-      // If certain JVMTI events (e.g. frame pop event) are requested then the
-      // thread is forced to remain in interpreted code. This is
-      // implemented partly by a check in the run_compiled_code
-      // section of the interpreter whether we should skip running
-      // compiled code, and partly by skipping OSR compiles for
-      // interpreted-only threads.
-      if (bci != InvocationEntryBci) {
-        reset_counter_for_back_branch_event(method);
-        return NULL;
-      }
+  if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
+    // If certain JVMTI events (e.g. frame pop event) are requested then the
+    // thread is forced to remain in interpreted code. This is
+    // implemented partly by a check in the run_compiled_code
+    // section of the interpreter whether we should skip running
+    // compiled code, and partly by skipping OSR compiles for
+    // interpreted-only threads.
+    if (bci != InvocationEntryBci) {
+      reset_counter_for_back_branch_event(method);
+      return NULL;
     }
   }
   if (bci == InvocationEntryBci) {
     // when code cache is full, compilation gets switched off, UseCompiler
     // is set to false
     if (!method->has_compiled_code() && UseCompiler) {
-      method_invocation_event(method, CHECK_NULL);
+      method_invocation_event(method, thread);
     } else {
       // Force counter overflow on method entry, even if no compilation
       // happened.  (The method_invocation_event call does this also.)
@@ -344,7 +342,7 @@
     NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
     // when code cache is full, we should not compile any more...
     if (osr_nm == NULL && UseCompiler) {
-      method_back_branch_event(method, bci, CHECK_NULL);
+      method_back_branch_event(method, bci, thread);
       osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
     }
     if (osr_nm == NULL) {
@@ -395,7 +393,7 @@
 
 // SimpleCompPolicy - compile current method
 
-void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
+void SimpleCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
   int hot_count = m->invocation_count();
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
@@ -405,18 +403,18 @@
     if (nm == NULL ) {
       const char* comment = "count";
       CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier,
-                                    m, hot_count, comment, CHECK);
+                                    m, hot_count, comment, thread);
     }
   }
 }
 
-void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
+void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) {
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
   if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
     CompileBroker::compile_method(m, bci, CompLevel_highest_tier,
-                                  m, hot_count, comment, CHECK);
+                                  m, hot_count, comment, thread);
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
   }
 }
@@ -427,14 +425,13 @@
 
 
 // Consider m for compilation
-void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) {
+void StackWalkCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
   int hot_count = m->invocation_count();
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
   if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) {
-    ResourceMark rm(THREAD);
-    JavaThread *thread = (JavaThread*)THREAD;
+    ResourceMark rm(thread);
     frame       fr     = thread->last_frame();
     assert(fr.is_interpreted_frame(), "must be interpreted");
     assert(fr.interpreter_frame_method() == m(), "bad method");
@@ -461,17 +458,17 @@
       assert(top != NULL, "findTopInlinableFrame returned null");
       if (TraceCompilationPolicy) top->print();
       CompileBroker::compile_method(top->top_method(), InvocationEntryBci, CompLevel_highest_tier,
-                                    m, hot_count, comment, CHECK);
+                                    m, hot_count, comment, thread);
     }
   }
 }
 
-void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
+void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) {
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
   if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
-    CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, CHECK);
+    CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, thread);
 
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
   }
--- a/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -64,7 +64,7 @@
   virtual int compiler_count(CompLevel comp_level) = 0;
   // main notification entry, return a pointer to an nmethod if the OSR is required,
   // returns NULL otherwise.
-  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) = 0;
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) = 0;
   // safepoint() is called at the end of the safepoint
   virtual void do_safepoint_work() = 0;
   // reprofile request
@@ -105,15 +105,15 @@
   virtual bool is_mature(methodOop method);
   virtual void initialize();
   virtual CompileTask* select_task(CompileQueue* compile_queue);
-  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
-  virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
-  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS) = 0;
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread);
+  virtual void method_invocation_event(methodHandle m, JavaThread* thread) = 0;
+  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread) = 0;
 };
 
 class SimpleCompPolicy : public NonTieredCompPolicy {
  public:
-  virtual void method_invocation_event(methodHandle m, TRAPS);
-  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
+  virtual void method_invocation_event(methodHandle m, JavaThread* thread);
+  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread);
 };
 
 // StackWalkCompPolicy - existing C2 policy
@@ -121,8 +121,8 @@
 #ifdef COMPILER2
 class StackWalkCompPolicy : public NonTieredCompPolicy {
  public:
-  virtual void method_invocation_event(methodHandle m, TRAPS);
-  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
+  virtual void method_invocation_event(methodHandle m, JavaThread* thread);
+  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread);
 
  private:
   RFrame* findTopInlinableFrame(GrowableArray<RFrame*>* stack);
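
Across the advancedThresholdPolicy and compilationPolicy hunks above, the policy callbacks (event, method_invocation_event, method_back_branch_event, submit_compile, create_mdo) now take an explicit JavaThread* instead of a TRAPS argument, so CompileBroker::compile_method is handed the thread directly rather than a CHECK/CHECK_NULL macro, and the JVMTI interp-only test no longer needs the is_Java_thread() assert and cast. The one call that can raise an exception, building the MDO in create_mdo, uses CHECK_AND_CLEAR so a pending exception is discarded rather than propagated, matching the hand-written HAS_PENDING_EXCEPTION/CLEAR_PENDING_EXCEPTION code it replaces.
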
--- a/hotspot/src/share/vm/runtime/frame.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1315,7 +1315,6 @@
 }
 #endif
 
-
 #ifdef ASSERT
 void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
   assert(is_interpreted_frame(), "Not an interpreted frame");
@@ -1331,27 +1330,35 @@
   guarantee((current - low_mark) % monitor_size  ==  0         , "Misaligned bottom of BasicObjectLock*");
   guarantee( current >= low_mark                               , "Current BasicObjectLock* below than low_mark");
 }
+#endif
 
+#ifndef PRODUCT
+void frame::describe(FrameValues& values, int frame_no) {
+  // boundaries: sp and the 'real' frame pointer
+  values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1);
+  intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()
 
-void frame::describe(FrameValues& values, int frame_no) {
+  // print frame info at the highest boundary
+  intptr_t* info_address = MAX2(sp(), frame_pointer);
+
+  if (info_address != frame_pointer) {
+    // print frame_pointer explicitly if not marked by the frame info
+    values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
+  }
+
   if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
     // Label values common to most frames
     values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
-    values.describe(-1, sp(), err_msg("sp for #%d", frame_no));
-    if (is_compiled_frame()) {
-      values.describe(-1, sp() + _cb->frame_size(), err_msg("computed fp for #%d", frame_no));
-    } else {
-      values.describe(-1, fp(), err_msg("fp for #%d", frame_no));
-    }
   }
+
   if (is_interpreted_frame()) {
     methodOop m = interpreter_frame_method();
     int bci = interpreter_frame_bci();
 
     // Label the method and current bci
-    values.describe(-1, MAX2(sp(), fp()),
+    values.describe(-1, info_address,
                     FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
-    values.describe(-1, MAX2(sp(), fp()),
+    values.describe(-1, info_address,
                     err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
     if (m->max_locals() > 0) {
       intptr_t* l0 = interpreter_frame_local_at(0);
@@ -1383,21 +1390,36 @@
     }
   } else if (is_entry_frame()) {
     // For now just label the frame
-    values.describe(-1, MAX2(sp(), fp()), err_msg("#%d entry frame", frame_no), 2);
+    values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
   } else if (is_compiled_frame()) {
     // For now just label the frame
     nmethod* nm = cb()->as_nmethod_or_null();
-    values.describe(-1, MAX2(sp(), fp()),
+    values.describe(-1, info_address,
                     FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
                                        nm, nm->method()->name_and_sig_as_C_string(),
-                                       is_deoptimized_frame() ? " (deoptimized" : ""), 2);
+                                       (_deopt_state == is_deoptimized) ?
+                                       " (deoptimized)" :
+                                       ((_deopt_state == unknown) ? " (state unknown)" : "")),
+                    2);
   } else if (is_native_frame()) {
     // For now just label the frame
     nmethod* nm = cb()->as_nmethod_or_null();
-    values.describe(-1, MAX2(sp(), fp()),
+    values.describe(-1, info_address,
                     FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
                                        nm, nm->method()->name_and_sig_as_C_string()), 2);
+  } else if (is_ricochet_frame()) {
+      values.describe(-1, info_address, err_msg("#%d ricochet frame", frame_no), 2);
+  } else {
+    // provide default info if not handled before
+    char *info = (char *) "special frame";
+    if ((_cb != NULL) &&
+        (_cb->name() != NULL)) {
+      info = (char *)_cb->name();
+    }
+    values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2);
   }
+
+  // platform dependent additional data
   describe_pd(values, frame_no);
 }
 
@@ -1414,7 +1436,7 @@
 }
 
 
-#ifdef ASSERT
+#ifndef PRODUCT
 
 void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {
   FrameValue fv;
@@ -1427,6 +1449,7 @@
 }
 
 
+#ifdef ASSERT
 void FrameValues::validate() {
   _values.sort(compare);
   bool error = false;
@@ -1452,7 +1475,7 @@
   }
   assert(!error, "invalid layout");
 }
-
+#endif // ASSERT
 
 void FrameValues::print(JavaThread* thread) {
   _values.sort(compare);
@@ -1501,4 +1524,4 @@
   }
 }
 
-#endif
+#endif // ndef PRODUCT
--- a/hotspot/src/share/vm/runtime/frame.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/frame.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -221,6 +221,15 @@
   // returns the stack pointer of the calling frame
   intptr_t* sender_sp() const;
 
+  // Returns the real 'frame pointer' for the current frame.
+  // This is the value expected by the platform ABI when it defines a
+  // frame pointer register. It may differ from the effective value of
+  // the FP register when that register is used in the JVM for other
+  // purposes (like compiled frames on some platforms).
+  // On other platforms, it is defined so that the stack area used by
+  // this frame goes from real_fp() to sp().
+  intptr_t* real_fp() const;
+
   // Deoptimization info, if needed (platform dependent).
   // Stored in the initial_info field of the unroll info, to be used by
   // the platform dependent deoptimization blobs.
@@ -485,7 +494,7 @@
 
 };
 
-#ifdef ASSERT
+#ifndef PRODUCT
 // A simple class to describe a location on the stack
 class FrameValue VALUE_OBJ_CLASS_SPEC {
  public:
@@ -515,7 +524,9 @@
   // Used by frame functions to describe locations.
   void describe(int owner, intptr_t* location, const char* description, int priority = 0);
 
+#ifdef ASSERT
   void validate();
+#endif
   void print(JavaThread* thread);
 };
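
In the frame.cpp/frame.hpp hunks above, FrameValues and frame::describe() move from #ifdef ASSERT to #ifndef PRODUCT, so the stack-describing support is also built into optimized (non-debug) binaries, with only FrameValues::validate() remaining ASSERT-only. describe() additionally anchors its labels at the boundary given by the new real_fp() accessor and sp(), rather than at fp(), and falls back to a generic label (the code blob name, or "special frame") for frames it does not otherwise recognize.
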
 
--- a/hotspot/src/share/vm/runtime/globals.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3477,16 +3477,19 @@
           "    Linux this policy requires root privilege.")                 \
                                                                             \
   product(bool, ThreadPriorityVerbose, false,                               \
-          "print priority changes")                                         \
+          "Print priority changes")                                         \
                                                                             \
   product(intx, DefaultThreadPriority, -1,                                  \
-          "what native priority threads run at if not specified elsewhere (-1 means no change)") \
+          "The native priority at which threads run if not elsewhere "      \
+          "specified (-1 means no change)")                                 \
                                                                             \
   product(intx, CompilerThreadPriority, -1,                                 \
-          "what priority should compiler threads run at (-1 means no change)") \
+          "The native priority at which compiler threads should run "       \
+          "(-1 means no change)")                                           \
                                                                             \
   product(intx, VMThreadPriority, -1,                                       \
-          "what priority should VM threads run at (-1 means no change)")    \
+          "The native priority at which the VM thread should run "          \
+          "(-1 means no change)")                                           \
                                                                             \
   product(bool, CompilerThreadHintNoPreempt, true,                          \
           "(Solaris only) Give compiler threads an extra quanta")           \
@@ -3505,6 +3508,15 @@
   product(intx, JavaPriority9_To_OSPriority, -1, "Map Java priorities to OS priorities") \
   product(intx, JavaPriority10_To_OSPriority,-1, "Map Java priorities to OS priorities") \
                                                                             \
+  experimental(bool, UseCriticalJavaThreadPriority, false,                  \
+          "Java thread priority 10 maps to critical scheduling priority")   \
+                                                                            \
+  experimental(bool, UseCriticalCompilerThreadPriority, false,              \
+          "Compiler thread(s) run at critical scheduling priority")         \
+                                                                            \
+  experimental(bool, UseCriticalCMSThreadPriority, false,                   \
+          "ConcurrentMarkSweep thread runs at critical scheduling priority")\
+                                                                            \
   /* compiler debugging */                                                  \
   notproduct(intx, CompileTheWorldStartAt,     1,                           \
           "First class to consider when using +CompileTheWorld")            \
@@ -3574,7 +3586,7 @@
           "Threshold at which tier 3 compilation is invoked (invocation "   \
           "minimum must be satisfied.")                                     \
                                                                             \
-  product(intx, Tier3BackEdgeThreshold,  7000,                              \
+  product(intx, Tier3BackEdgeThreshold,  60000,                             \
           "Back edge threshold at which tier 3 OSR compilation is invoked") \
                                                                             \
   product(intx, Tier4InvocationThreshold, 5000,                             \
@@ -3826,10 +3838,6 @@
   develop(bool, StressMethodHandleWalk, false,                              \
           "Process all method handles with MethodHandleWalk")               \
                                                                             \
-  diagnostic(bool, UseRicochetFrames, true,                                 \
-          "use ricochet stack frames for method handle combination, "       \
-          "if the platform supports them")                                  \
-                                                                            \
   experimental(bool, TrustFinalNonStaticFields, false,                      \
           "trust final non-static declarations for constant folding")       \
                                                                             \
@@ -3875,7 +3883,7 @@
   product(bool, UseVMInterruptibleIO, false,                                \
           "(Unstable, Solaris-specific) Thread interrupt before or with "   \
           "EINTR for I/O operations results in OS_INTRPT. The default value"\
-          " of this flag is true for JDK 6 and earliers")
+          " of this flag is true for JDK 6 and earlier")
 
 /*
  *  Macros for factoring of globals
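
A usage note on the globals.hpp hunk above: UseCriticalJavaThreadPriority, UseCriticalCompilerThreadPriority and UseCriticalCMSThreadPriority are registered as experimental flags, so on a product build they are expected to require -XX:+UnlockExperimentalVMOptions before they can be set, e.g. java -XX:+UnlockExperimentalVMOptions -XX:+UseCriticalCompilerThreadPriority. They pair with the CriticalPriority = 11 value added to os.hpp further down.
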
--- a/hotspot/src/share/vm/runtime/java.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/java.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -57,6 +57,8 @@
 #include "runtime/task.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/vm_operations.hpp"
+#include "trace/tracing.hpp"
+#include "trace/traceEventTypes.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/histogram.hpp"
@@ -502,6 +504,11 @@
   if (JvmtiExport::should_post_thread_life()) {
     JvmtiExport::post_thread_end(thread);
   }
+
+  EVENT_BEGIN(TraceEventThreadEnd, event);
+  EVENT_COMMIT(event,
+      EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
+
   // Always call even when there are not JVMTI environments yet, since environments
   // may be attached late and JVMTI must track phases of VM execution
   JvmtiExport::post_vm_death();
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -126,13 +126,20 @@
 Mutex*   FreeList_lock                = NULL;
 Monitor* SecondaryFreeList_lock       = NULL;
 Mutex*   OldSets_lock                 = NULL;
+Monitor* RootRegionScan_lock          = NULL;
 Mutex*   MMUTracker_lock              = NULL;
 Mutex*   HotCardCache_lock            = NULL;
 
 Monitor* GCTaskManager_lock           = NULL;
 
 Mutex*   Management_lock              = NULL;
-Monitor* Service_lock               = NULL;
+Monitor* Service_lock                 = NULL;
+Mutex*   Stacktrace_lock              = NULL;
+
+Monitor* JfrQuery_lock                = NULL;
+Monitor* JfrMsg_lock                  = NULL;
+Mutex*   JfrBuffer_lock               = NULL;
+Mutex*   JfrStream_lock               = NULL;
 
 #define MAX_NUM_MUTEX 128
 static Monitor * _mutex_array[MAX_NUM_MUTEX];
@@ -193,6 +200,7 @@
     def(FreeList_lock              , Mutex,   leaf     ,   true );
     def(SecondaryFreeList_lock     , Monitor, leaf     ,   true );
     def(OldSets_lock               , Mutex  , leaf     ,   true );
+    def(RootRegionScan_lock        , Monitor, leaf     ,   true );
     def(MMUTracker_lock            , Mutex  , leaf     ,   true );
     def(HotCardCache_lock          , Mutex  , special  ,   true );
     def(EvacFailureStack_lock      , Mutex  , nonleaf  ,   true );
@@ -207,6 +215,7 @@
   def(Patching_lock                , Mutex  , special,     true ); // used for safepointing and code patching.
   def(ObjAllocPost_lock            , Monitor, special,     false);
   def(Service_lock                 , Monitor, special,     true ); // used for service thread operations
+  def(Stacktrace_lock              , Mutex,   special,     true ); // used for JFR stacktrace database
   def(JmethodIdCreation_lock       , Mutex  , leaf,        true ); // used for creating jmethodIDs.
 
   def(SystemDictionary_lock        , Monitor, leaf,        true ); // lookups done by VM thread
@@ -271,6 +280,11 @@
   def(Debug3_lock                  , Mutex  , nonleaf+4,   true );
   def(ProfileVM_lock               , Monitor, nonleaf+4,   false); // used for profiling of the VMThread
   def(CompileThread_lock           , Monitor, nonleaf+5,   false );
+
+  def(JfrQuery_lock                , Monitor, nonleaf,     true);  // JFR locks, keep these in consecutive order
+  def(JfrMsg_lock                  , Monitor, nonleaf+2,   true);
+  def(JfrBuffer_lock               , Mutex,   nonleaf+3,   true);
+  def(JfrStream_lock               , Mutex,   nonleaf+4,   true);
 }
 
 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,7 +115,7 @@
 
 #ifndef PRODUCT
 extern Mutex*   FullGCALot_lock;                 // a lock to make FullGCALot MT safe
-#endif
+#endif // PRODUCT
 extern Mutex*   Debug1_lock;                     // A bunch of pre-allocated locks that can be used for tracing
 extern Mutex*   Debug2_lock;                     // down synchronization related bugs!
 extern Mutex*   Debug3_lock;
@@ -129,12 +129,19 @@
 extern Mutex*   FreeList_lock;                   // protects the free region list during safepoints
 extern Monitor* SecondaryFreeList_lock;          // protects the secondary free region list
 extern Mutex*   OldSets_lock;                    // protects the old region sets
+extern Monitor* RootRegionScan_lock;             // used to notify that the CM threads have finished scanning the IM snapshot regions
 extern Mutex*   MMUTracker_lock;                 // protects the MMU
                                                  // tracker data structures
 extern Mutex*   HotCardCache_lock;               // protects the hot card cache
 
 extern Mutex*   Management_lock;                 // a lock used to serialize JVM management
 extern Monitor* Service_lock;                    // a lock used for service thread operation
+extern Mutex*   Stacktrace_lock;                 // used to guard access to the stacktrace table
+
+extern Monitor* JfrQuery_lock;                   // protects JFR use
+extern Monitor* JfrMsg_lock;                     // protects JFR messaging
+extern Mutex*   JfrBuffer_lock;                  // protects JFR buffer operations
+extern Mutex*   JfrStream_lock;                  // protects JFR stream access
 
 // A MutexLocker provides mutual exclusion with respect to a given mutex
 // for the scope which contains the locker.  The lock is an OS lock, not
--- a/hotspot/src/share/vm/runtime/os.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/os.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1101,6 +1101,7 @@
         "%/lib/jsse.jar:"
         "%/lib/jce.jar:"
         "%/lib/charsets.jar:"
+        "%/lib/jfr.jar:"
 #ifdef __APPLE__
         "%/lib/JObjC.jar:"
 #endif
--- a/hotspot/src/share/vm/runtime/os.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/os.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,8 +73,9 @@
   MinPriority      =  1,     // Minimum priority
   NormPriority     =  5,     // Normal (non-daemon) priority
   NearMaxPriority  =  9,     // High priority, used for VMThread
-  MaxPriority      = 10      // Highest priority, used for WatcherThread
+  MaxPriority      = 10,     // Highest priority, used for WatcherThread
                              // ensures that VMThread doesn't starve profiler
+  CriticalPriority = 11      // Critical thread priority
 };
 
 // Typedef for structured exception handling support
@@ -733,7 +734,7 @@
   // Thread priority helpers (implemented in OS-specific part)
   static OSReturn set_native_priority(Thread* thread, int native_prio);
   static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
-  static int java_to_os_priority[MaxPriority + 1];
+  static int java_to_os_priority[CriticalPriority + 1];
   // Hint to the underlying OS that a task switch would not be good.
   // Void return because it's a hint and can fail.
   static void hint_no_preempt();
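
Because java_to_os_priority is indexed directly by the Java-level priority value, adding CriticalPriority = 11 requires growing the table from MaxPriority + 1 to CriticalPriority + 1 entries. A small illustrative sketch of that indexing (the OS-level values below are placeholders, not the real per-platform tables):

    #include <cassert>

    // Illustrative sketch, not HotSpot code: a priority enum used as a direct
    // index means the table must have (highest enum value + 1) entries.
    enum ThreadPriority {          // a subset of the values shown in os.hpp
      MinPriority      = 1,
      NormPriority     = 5,
      NearMaxPriority  = 9,
      MaxPriority      = 10,
      CriticalPriority = 11
    };

    // One OS-level priority per Java-level priority, indexed by the enum value;
    // slot 0 is unused and the values are placeholders, not real OS priorities.
    static int java_to_os_priority[CriticalPriority + 1] =
      { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };

    int main() {
      assert(java_to_os_priority[CriticalPriority] == 11);  // needs the extra slot
      return (sizeof(java_to_os_priority) / sizeof(int) == 12) ? 0 : 1;
    }
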
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -121,7 +121,6 @@
 void SharedRuntime::generate_ricochet_blob() {
   if (!EnableInvokeDynamic)  return;  // leave it as a null
 
-#ifndef TARGET_ARCH_NYI_6939861
   // allocate space for the code
   ResourceMark rm;
   // setup code generation tools
@@ -142,7 +141,6 @@
   }
 
   _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words);
-#endif
 }
 
 
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -177,13 +177,11 @@
 }
 
 nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee,
-                                      int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
+                                      int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
   if (comp_level == CompLevel_none &&
-      JvmtiExport::can_post_interpreter_events()) {
-    assert(THREAD->is_Java_thread(), "Should be java thread");
-    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
-      return NULL;
-    }
+      JvmtiExport::can_post_interpreter_events() &&
+      thread->is_interp_only_mode()) {
+    return NULL;
   }
   nmethod *osr_nm = NULL;
 
@@ -197,9 +195,9 @@
   }
 
   if (bci == InvocationEntryBci) {
-    method_invocation_event(method, inlinee, comp_level, nm, THREAD);
+    method_invocation_event(method, inlinee, comp_level, nm, thread);
   } else {
-    method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
+    method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
     // method == inlinee if the event originated in the main method
     int highest_level = inlinee->highest_osr_comp_level();
     if (highest_level > comp_level) {
@@ -210,7 +208,7 @@
 }
 
 // Check if the method can be compiled, change level if necessary
-void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
+void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
   assert(level <= TieredStopAtLevel, "Invalid compilation level");
   if (level == CompLevel_none) {
     return;
@@ -221,7 +219,7 @@
   // pure C1.
   if (!can_be_compiled(mh, level)) {
     if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
-        compile(mh, bci, CompLevel_simple, THREAD);
+        compile(mh, bci, CompLevel_simple, thread);
     }
     return;
   }
@@ -232,14 +230,14 @@
     if (PrintTieredEvents) {
       print_event(COMPILE, mh, mh, bci, level);
     }
-    submit_compile(mh, bci, level, THREAD);
+    submit_compile(mh, bci, level, thread);
   }
 }
 
 // Tell the broker to compile the method
-void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
+void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
   int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
-  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
+  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
 }
 
 // Call and loop predicates determine whether a transition to a higher
@@ -366,11 +364,11 @@
 
 // Handle the invocation event.
 void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
-                                              CompLevel level, nmethod* nm, TRAPS) {
+                                              CompLevel level, nmethod* nm, JavaThread* thread) {
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
-      compile(mh, InvocationEntryBci, next_level, THREAD);
+      compile(mh, InvocationEntryBci, next_level, thread);
     }
   }
 }
@@ -378,7 +376,7 @@
 // Handle the back branch event. Notice that we can compile the method
 // with a regular entry from here.
 void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
-                                                     int bci, CompLevel level, nmethod* nm, TRAPS) {
+                                                     int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   // If the method is already compiling, quickly bail out.
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
     // Use loop event as an opportunity to also check there's been
@@ -391,13 +389,13 @@
                       next_osr_level < CompLevel_full_optimization ? next_osr_level : cur_level);
     bool is_compiling = false;
     if (next_level != cur_level) {
-      compile(mh, InvocationEntryBci, next_level, THREAD);
+      compile(mh, InvocationEntryBci, next_level, thread);
       is_compiling = true;
     }
 
     // Do the OSR version
     if (!is_compiling && next_osr_level != level) {
-      compile(mh, bci, next_osr_level, THREAD);
+      compile(mh, bci, next_osr_level, thread);
     }
   }
 }
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -67,9 +67,9 @@
   // Print policy-specific information if necessary
   virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { }
   // Check if the method can be compiled, change level if necessary
-  void compile(methodHandle mh, int bci, CompLevel level, TRAPS);
+  void compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
   // Submit a given method for compilation
-  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS);
+  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
   // Simple methods are as good being compiled with C1 as C2.
   // This function tells if it's such a function.
   inline bool is_trivial(methodOop method);
@@ -88,9 +88,9 @@
     return CompLevel_none;
   }
   virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
-                                       CompLevel level, nmethod* nm, TRAPS);
+                                       CompLevel level, nmethod* nm, JavaThread* thread);
   virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
-                                        int bci, CompLevel level, nmethod* nm, TRAPS);
+                                        int bci, CompLevel level, nmethod* nm, JavaThread* thread);
 public:
   SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { }
   virtual int compiler_count(CompLevel comp_level) {
@@ -104,7 +104,7 @@
   virtual void disable_compilation(methodOop method) { }
   virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
   virtual nmethod* event(methodHandle method, methodHandle inlinee,
-                         int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
+                         int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread);
   // Select task is called by CompileBroker. We should return a task or NULL.
   virtual CompileTask* select_task(CompileQueue* compile_queue);
   // Tell the runtime if we think a given method is adequately profiled.
--- a/hotspot/src/share/vm/runtime/thread.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -73,6 +73,7 @@
 #include "services/attachListener.hpp"
 #include "services/management.hpp"
 #include "services/threadService.hpp"
+#include "trace/traceEventTypes.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
@@ -232,6 +233,7 @@
   CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
   _jvmti_env_iteration_count = 0;
   set_allocated_bytes(0);
+  set_trace_buffer(NULL);
   _vm_operation_started_count = 0;
   _vm_operation_completed_count = 0;
   _current_pending_monitor = NULL;
@@ -1512,6 +1514,10 @@
     JvmtiExport::post_thread_start(this);
   }
 
+  EVENT_BEGIN(TraceEventThreadStart, event);
+  EVENT_COMMIT(event,
+     EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj())));
+
   // We call another function to do the rest so we are sure that the stack addresses used
   // from there will be lower than the stack base just computed
   thread_main_inner();
@@ -1641,6 +1647,15 @@
       }
     }
 
+    // Called before the java thread exits, since we want to read info
+    // from the java_lang_Thread object
+    EVENT_BEGIN(TraceEventThreadEnd, event);
+    EVENT_COMMIT(event,
+        EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj())));
+
+    // Call after last event on thread
+    EVENT_THREAD_EXIT(this);
+
     // Call Thread.exit(). We try 3 times in case we got another Thread.stop during
     // the execution of the method. If that is not enough, then we don't really care. Thread.stop
     // is deprecated anyhow.
@@ -3186,6 +3201,11 @@
     return status;
   }
 
+  // Must be run after init_ft which initializes ft_enabled
+  if (TRACE_INITIALIZE() != JNI_OK) {
+    vm_exit_during_initialization("Failed to initialize tracing backend");
+  }
+
   // Should be done after the heap is fully created
   main_thread->cache_global_variables();
 
@@ -3423,6 +3443,10 @@
     create_vm_init_libraries();
   }
 
+  if (!TRACE_START()) {
+    vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
+  }
+
   // Notify JVMTI agents that VM initialization is complete - nop if no agents.
   JvmtiExport::post_vm_initialized();
 
--- a/hotspot/src/share/vm/runtime/thread.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -41,6 +41,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #include "runtime/unhandledOops.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/top.hpp"
 #ifndef SERIALGC
@@ -246,6 +247,8 @@
   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                 // the Java heap
 
+  TRACE_BUFFER _trace_buffer;                   // Thread-local buffer for tracing
+
   int   _vm_operation_started_count;            // VM_Operation support
   int   _vm_operation_completed_count;          // VM_Operation support
 
@@ -414,6 +417,9 @@
     return allocated_bytes;
   }
 
+  TRACE_BUFFER trace_buffer()              { return _trace_buffer; }
+  void set_trace_buffer(TRACE_BUFFER buf)  { _trace_buffer = buf; }
+
   // VM operation support
   int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -93,6 +93,7 @@
   template(HeapWalkOperation)                     \
   template(HeapIterateOperation)                  \
   template(ReportJavaOutOfMemory)                 \
+  template(JFRCheckpoint)                         \
   template(Exit)                                  \
 
 class VM_Operation: public CHeapObj {
--- a/hotspot/src/share/vm/services/diagnosticArgument.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/services/diagnosticArgument.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,12 +59,13 @@
 
 template <> void DCmdArgument<bool>::parse_value(const char* str,
                                                  size_t len, TRAPS) {
+  // len is the length of the current token starting at str
   if (len == 0) {
     set_value(true);
   } else {
-    if (strcasecmp(str, "true") == 0) {
+    if (len == strlen("true") && strncasecmp(str, "true", len) == 0) {
        set_value(true);
-    } else if (strcasecmp(str, "false") == 0) {
+    } else if (len == strlen("false") && strncasecmp(str, "false", len) == 0) {
        set_value(false);
     } else {
       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
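
The token passed to parse_value() is a slice of the full command line and is not NUL-terminated at the token boundary, so the old strcasecmp() compared from str up to the next NUL, i.e. past the end of the token, and a well-formed boolean could fail to match whenever more text followed it. The new code compares exactly len characters and additionally requires len to equal the keyword length so that prefixes such as "tru" are not accepted. A standalone sketch of the same bounded comparison (parse_bool_token is a hypothetical helper, not a HotSpot function):

    #include <cassert>
    #include <cstring>
    #include <strings.h>   // strncasecmp (POSIX)

    // Hypothetical helper mirroring the DCmdArgument<bool> specialization above.
    static bool parse_bool_token(const char* str, size_t len, bool* out) {
      if (len == 0) { *out = true; return true; }   // bare flag means "true"
      if (len == strlen("true") && strncasecmp(str, "true", len) == 0) {
        *out = true;
        return true;
      }
      if (len == strlen("false") && strncasecmp(str, "false", len) == 0) {
        *out = false;
        return true;
      }
      return false;                                 // would throw in the VM
    }

    int main() {
      // The token is a slice of a longer command line; it is not NUL-terminated
      // at the token boundary, which is why the compare must be length-bounded.
      const char* cmdline = "TRUE -all=false trueish";
      bool v = false;
      assert(parse_bool_token(cmdline,      4, &v) && v);   // "TRUE"
      assert(parse_bool_token(cmdline + 10, 5, &v) && !v);  // "false"
      assert(!parse_bool_token(cmdline + 16, 7, &v));       // "trueish" rejected
      return 0;
    }
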
--- a/hotspot/src/share/vm/services/diagnosticCommand.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/services/diagnosticCommand.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -31,6 +31,33 @@
 #include "services/heapDumper.hpp"
 #include "services/management.hpp"
 
+void DCmdRegistrant::register_dcmds(){
+  // Registration of the diagnostic commands
+  // First boolean argument specifies if the command is enabled
+  // Second boolean argument specifies if the command is hidden
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HelpDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VersionDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CommandLineDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintSystemPropertiesDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintVMFlagsDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMUptimeDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(true, false));
+#ifndef SERVICES_KERNEL   // Heap dumping not supported
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(true, false));
+#endif // SERVICES_KERNEL
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(true, false));
+
+}
+
+#ifndef HAVE_EXTRA_DCMD
+void DCmdRegistrant::register_dcmds_ext(){
+   // Do nothing here
+}
+#endif
+
+
 HelpDCmd::HelpDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap),
   _all("-all", "Show help for all commands", "BOOLEAN", false, "false"),
   _cmd("command name", "The name of the command for which we want help",
--- a/hotspot/src/share/vm/services/diagnosticCommand.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/services/diagnosticCommand.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -34,6 +34,7 @@
 #include "services/diagnosticArgument.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "services/diagnosticFramework.hpp"
+#include "services/diagnosticCommand_ext.hpp"
 
 class HelpDCmd : public DCmdWithParser {
 protected:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/diagnosticCommand_ext.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_EXT_HPP
+#define SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_EXT_HPP
+
+#undef HAVE_EXTRA_DCMD
+
+#endif // SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_EXT_HPP
--- a/hotspot/src/share/vm/services/diagnosticFramework.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/services/diagnosticFramework.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -387,4 +387,17 @@
   }
 };
 
+// This class provides a convenient way to register DCmds without the need to change
+// management.cpp every time. The bodies of these two methods reside in
+// diagnosticCommand.cpp.
+
+class DCmdRegistrant : public AllStatic {
+
+private:
+    static void register_dcmds();
+    static void register_dcmds_ext();
+
+    friend class Management;
+};
+
 #endif // SHARE_VM_SERVICES_DIAGNOSTICFRAMEWORK_HPP
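
DCmdRegistrant moves the list of built-in command registrations out of management.cpp (see the management.cpp hunk later in this changeset) and leaves register_dcmds_ext() as a hook: the empty default body in diagnosticCommand.cpp is compiled only while HAVE_EXTRA_DCMD stays undefined, and the diagnosticCommand_ext.hpp added above explicitly leaves it undefined, so an alternative extension header can define the macro and ship its own implementation. A simplified standalone sketch of the pattern (the names below are illustrative, not HotSpot's):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Simplified stand-ins; none of these names are HotSpot's.
    static std::vector<std::string> g_registered;

    static void register_command(const std::string& name) {
      g_registered.push_back(name);
    }

    struct Registrant {
      static void register_builtin();   // plays the role of register_dcmds()
      static void register_ext();       // plays the role of register_dcmds_ext()
    };

    void Registrant::register_builtin() {
      register_command("help");
      register_command("version");
      register_command("thread_dump");
    }

    // Default hook: compiled only while no extension header defines
    // HAVE_EXTRA_DCMD, exactly as diagnosticCommand.cpp guards its empty
    // register_dcmds_ext().
    #ifndef HAVE_EXTRA_DCMD
    void Registrant::register_ext() { /* nothing extra in the default build */ }
    #endif

    int main() {
      Registrant::register_builtin();
      Registrant::register_ext();
      for (const std::string& n : g_registered) {
        std::printf("%s\n", n.c_str());
      }
      return 0;
    }
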
--- a/hotspot/src/share/vm/services/g1MemoryPool.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/services/g1MemoryPool.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -78,7 +78,7 @@
   G1MemoryPoolSuper(g1h,
                     "G1 Old Gen",
                     g1h->g1mm()->old_space_committed(), /* init_size */
-                    _undefined_max,
+                    g1h->g1mm()->old_gen_max(),
                     true /* support_usage_threshold */) { }
 
 MemoryUsage G1OldGenPool::get_memory_usage() {
--- a/hotspot/src/share/vm/services/g1MemoryPool.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/services/g1MemoryPool.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -101,7 +101,7 @@
     return _g1mm->old_space_used();
   }
   size_t max_size() const {
-    return _undefined_max;
+    return _g1mm->old_gen_max();
   }
   MemoryUsage get_memory_usage();
 };
--- a/hotspot/src/share/vm/services/gcNotifier.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/services/gcNotifier.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -44,7 +44,8 @@
   // Make a copy of the last GC statistics
   // GC may occur between now and the creation of the notification
   int num_pools = MemoryService::num_memory_pools();
-  GCStatInfo* stat = new GCStatInfo(num_pools);
+  // stat is deallocated inside GCNotificationRequest
+  GCStatInfo* stat = new(ResourceObj::C_HEAP) GCStatInfo(num_pools);
   mgr->get_last_gc_stat(stat);
   GCNotificationRequest *request = new GCNotificationRequest(os::javaTimeMillis(),mgr,action,cause,stat);
   addRequest(request);
--- a/hotspot/src/share/vm/services/management.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/services/management.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -119,21 +119,8 @@
   _optional_support.isThreadAllocatedMemorySupported = 1;
 
   // Registration of the diagnostic commands
-  // First boolean argument specifies if the command is enabled
-  // Second boolean argument specifies if the command is hidden
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HelpDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VersionDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CommandLineDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintSystemPropertiesDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintVMFlagsDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMUptimeDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(true, false));
-#ifndef SERVICES_KERNEL   // Heap dumping not supported
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(true, false));
-#endif // SERVICES_KERNEL
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(true, false));
+  DCmdRegistrant::register_dcmds();
+  DCmdRegistrant::register_dcmds_ext();
 }
 
 void Management::initialize(TRAPS) {
@@ -2047,15 +2034,15 @@
   // Make a copy of the last GC statistics
   // GC may occur while constructing the last GC information
   int num_pools = MemoryService::num_memory_pools();
-  GCStatInfo* stat = new GCStatInfo(num_pools);
-  if (mgr->get_last_gc_stat(stat) == 0) {
+  GCStatInfo stat(num_pools);
+  if (mgr->get_last_gc_stat(&stat) == 0) {
     gc_stat->gc_index = 0;
     return;
   }
 
-  gc_stat->gc_index = stat->gc_index();
-  gc_stat->start_time = Management::ticks_to_ms(stat->start_time());
-  gc_stat->end_time = Management::ticks_to_ms(stat->end_time());
+  gc_stat->gc_index = stat.gc_index();
+  gc_stat->start_time = Management::ticks_to_ms(stat.start_time());
+  gc_stat->end_time = Management::ticks_to_ms(stat.end_time());
 
   // Current implementation does not have GC extension attributes
   gc_stat->num_gc_ext_attributes = 0;
@@ -2073,17 +2060,17 @@
   objArrayHandle usage_after_gc_ah(THREAD, au);
 
   for (int i = 0; i < num_pools; i++) {
-    Handle before_usage = MemoryService::create_MemoryUsage_obj(stat->before_gc_usage_for_pool(i), CHECK);
+    Handle before_usage = MemoryService::create_MemoryUsage_obj(stat.before_gc_usage_for_pool(i), CHECK);
     Handle after_usage;
 
-    MemoryUsage u = stat->after_gc_usage_for_pool(i);
+    MemoryUsage u = stat.after_gc_usage_for_pool(i);
     if (u.max_size() == 0 && u.used() > 0) {
       // If max size == 0, this pool is a survivor space.
       // Set max size = -1 since the pools will be swapped after GC.
       MemoryUsage usage(u.init_size(), u.used(), u.committed(), (size_t)-1);
       after_usage = MemoryService::create_MemoryUsage_obj(usage, CHECK);
     } else {
-      after_usage = MemoryService::create_MemoryUsage_obj(stat->after_gc_usage_for_pool(i), CHECK);
+      after_usage = MemoryService::create_MemoryUsage_obj(stat.after_gc_usage_for_pool(i), CHECK);
     }
     usage_before_gc_ah->obj_at_put(i, before_usage());
     usage_after_gc_ah->obj_at_put(i, after_usage());
--- a/hotspot/src/share/vm/services/memoryManager.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/services/memoryManager.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -214,8 +214,8 @@
 
 void GCMemoryManager::initialize_gc_stat_info() {
   assert(MemoryService::num_memory_pools() > 0, "should have one or more memory pools");
-  _last_gc_stat = new GCStatInfo(MemoryService::num_memory_pools());
-  _current_gc_stat = new GCStatInfo(MemoryService::num_memory_pools());
+  _last_gc_stat = new(ResourceObj::C_HEAP) GCStatInfo(MemoryService::num_memory_pools());
+  _current_gc_stat = new(ResourceObj::C_HEAP) GCStatInfo(MemoryService::num_memory_pools());
   // tracking concurrent collections we need two objects: one to update, and one to
   // hold the publicly available "last (completed) gc" information.
 }
--- a/hotspot/src/share/vm/services/memoryManager.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/services/memoryManager.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -108,7 +108,7 @@
   const char* name()         { return "CodeCacheManager"; }
 };
 
-class GCStatInfo : public CHeapObj {
+class GCStatInfo : public ResourceObj {
 private:
   size_t _index;
   jlong  _start_time;
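
With GCStatInfo now deriving from ResourceObj rather than CHeapObj, the caller chooses the storage: management.cpp above puts a short-lived copy on the stack, while gcNotifier.cpp and GCMemoryManager::initialize_gc_stat_info() allocate long-lived copies explicitly with new(ResourceObj::C_HEAP). A simplified standalone sketch of that idiom (this is not HotSpot's ResourceObj, just the allocation-choice idea):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    // Simplified allocation-choice idiom; not HotSpot's ResourceObj.
    struct AllocTag { enum Type { C_HEAP }; };

    class ChooseableAlloc {
    public:
      // No implicit heap base class, so instances can live on the stack or be
      // embedded; explicit C-heap allocation goes through this tagged operator new.
      void* operator new(std::size_t size, AllocTag::Type) { return std::malloc(size); }
      void  operator delete(void* p)                       { std::free(p); }
      void  operator delete(void* p, AllocTag::Type)       { std::free(p); }
    };

    struct StatInfo : public ChooseableAlloc {
      int pools;
      explicit StatInfo(int n) : pools(n) {}
    };

    int main() {
      StatInfo on_stack(3);                                      // management.cpp style
      StatInfo* long_lived = new (AllocTag::C_HEAP) StatInfo(3); // gcNotifier.cpp style
      int ok = (on_stack.pools == 3 && long_lived->pools == 3);
      delete long_lived;
      return ok ? 0 : 1;
    }
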
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/trace/traceEventTypes.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_TRACE_TRACE_EVENT_TYPES_HPP
+#define SHARE_VM_TRACE_TRACE_EVENT_TYPES_HPP
+
+/* Empty, just a placeholder for tracing events */
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/trace/traceMacros.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_TRACE_TRACE_MACRO_HPP
+#define SHARE_VM_TRACE_TRACE_MACRO_HPP
+
+#define EVENT_BEGIN(type, name)
+#define EVENT_SET(name, field, value)
+#define EVENT_COMMIT(name, ...)
+#define EVENT_STARTED(name, time)
+#define EVENT_ENDED(name, time)
+#define EVENT_THREAD_EXIT(thread)
+
+#define TRACE_ENABLED 0
+
+#define TRACE_INIT_ID(k)
+#define TRACE_BUFFER void*
+
+#define TRACE_START() true
+#define TRACE_INITIALIZE() 0
+
+#endif
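
These placeholder macros expand to nothing, so the EVENT_BEGIN/EVENT_SET/EVENT_COMMIT call sites added earlier in this changeset (thread.cpp and the VM-death path) and the TRACE_INITIALIZE()/TRACE_START() hooks compile away entirely in this build; a build that ships a real tracing backend would supply a different header. A standalone sketch showing that the call-site pattern reduces to no code with these definitions (thread_id() below is a stand-in, not java_lang_Thread::thread_id()):

    // The same no-op definitions as in traceMacros.hpp, repeated here so the
    // sketch is self-contained.
    #define EVENT_BEGIN(type, name)
    #define EVENT_SET(name, field, value)
    #define EVENT_COMMIT(name, ...)
    #define TRACE_INITIALIZE() 0
    #define TRACE_START() true

    long thread_id() { return 42; }   // stand-in, not java_lang_Thread::thread_id()

    int main() {
      // Mirrors the call-site pattern added to JavaThread::run()/exit(); with the
      // no-op macros this whole block compiles to nothing.
      EVENT_BEGIN(TraceEventThreadStart, event);
      EVENT_COMMIT(event,
          EVENT_SET(event, javalangthread, thread_id()));

      if (TRACE_INITIALIZE() != 0) return 1;   // backend init hook, a no-op here
      if (!TRACE_START())          return 1;   // backend start hook, a no-op here
      return 0;
    }
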
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/trace/tracing.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_TRACE_TRACING_HPP
+#define SHARE_VM_TRACE_TRACING_HPP
+
+#include "trace/traceMacros.hpp"
+
+#endif
--- a/hotspot/src/share/vm/utilities/bitMap.inline.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/bitMap.inline.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -178,8 +178,30 @@
     for (; !(res & 1); res_offset++) {
       res = res >> 1;
     }
-    assert(res_offset >= l_offset &&
-           res_offset < r_offset, "just checking");
+
+#ifdef ASSERT
+    // In the following assert, if r_offset is not bitmap word aligned,
+    // checking that res_offset is strictly less than r_offset is too
+    // strong and will trip the assert.
+    //
+    // Consider the case where l_offset is bit 15 and r_offset is bit 17
+    // of the same map word, and where bits [15:16:17:18] == [00:00:00:01].
+    // All the bits in the range [l_offset:r_offset) are 0.
+    // The loop that calculates res_offset, above, would yield the offset
+    // of bit 18 because it's in the same map word as l_offset and there
+    // is a set bit in that map word above l_offset (i.e. res != NoBits).
+    //
+    // In this case, however, all we can assert is that res_offset is strictly
+    // less than size() since we know that there is at least one set bit
+    // at an offset above, but in the same map word as, r_offset.
+    // Otherwise, if r_offset is word aligned then it will not be in the
+    // same map word as l_offset (unless it equals l_offset). So either
+    // there won't be a set bit between l_offset and the end of its map
+    // word (i.e. res == NoBits), or res_offset will be less than r_offset.
+
+    idx_t limit = is_word_aligned(r_offset) ? r_offset : size();
+    assert(res_offset >= l_offset && res_offset < limit, "just checking");
+#endif // ASSERT
     return MIN2(res_offset, r_offset);
   }
   // skip over all word length 0-bit runs
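
The scenario described in the comment can be checked with a few lines of arithmetic: with l_offset = 15, r_offset = 17 and only bit 18 set in the word, the in-word scan stops at 18, which is past r_offset but still below size(), and the final MIN2() still reports "no set bit in the range". A standalone sketch of that calculation (it mirrors the scan loop above but does not use the BitMap class):

    #include <cassert>
    #include <cstdint>

    int main() {
      typedef uint64_t bm_word_t;
      const unsigned l_offset = 15, r_offset = 17, set_bit = 18;

      bm_word_t word = bm_word_t(1) << set_bit;   // bits 15..17 of the word are zero
      bm_word_t res  = word >> l_offset;          // start scanning at l_offset
      unsigned  res_offset = l_offset;
      for (; !(res & 1); res_offset++) {          // same scan loop as in the source
        res = res >> 1;
      }
      assert(res_offset == set_bit);              // 18: beyond r_offset, but < size()

      unsigned found = (res_offset < r_offset) ? res_offset : r_offset;   // MIN2
      return (found == r_offset) ? 0 : 1;         // caller sees "no set bit in range"
    }
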
--- a/hotspot/src/share/vm/utilities/decoder.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/decoder.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -24,80 +24,85 @@
 
 #include "precompiled.hpp"
 #include "prims/jvm.h"
+#include "runtime/mutexLocker.hpp"
 #include "utilities/decoder.hpp"
 
-Decoder::decoder_status  Decoder::_decoder_status = Decoder::no_error;
-bool                     Decoder::_initialized = false;
-
-#if !defined(_WINDOWS) && !defined(__APPLE__)
-
-// Implementation of common functionalities among Solaris and Linux
-#include "utilities/elfFile.hpp"
-
-ElfFile* Decoder::_opened_elf_files = NULL;
-
-bool Decoder::can_decode_C_frame_in_vm() {
-  return true;
-}
+#if defined(_WINDOWS)
+  #include "decoder_windows.hpp"
+#elif defined(__APPLE__)
+  #include "decoder_machO.hpp"
+#else
+  #include "decoder_elf.hpp"
+#endif
 
-void Decoder::initialize() {
-  _initialized = true;
-}
+NullDecoder*  Decoder::_decoder = NULL;
+NullDecoder   Decoder::_do_nothing_decoder;
+Mutex*        Decoder::_decoder_lock = new Mutex(Mutex::safepoint,
+                                                 "DecoderLock");
 
-void Decoder::uninitialize() {
-  if (_opened_elf_files != NULL) {
-    delete _opened_elf_files;
-    _opened_elf_files = NULL;
-  }
-  _initialized = false;
-}
+// _decoder_lock should already be acquired before entering this method
+NullDecoder* Decoder::get_decoder() {
+  assert(_decoder_lock != NULL && _decoder_lock->owned_by_self(),
+    "Require DecoderLock to enter");
 
-Decoder::decoder_status Decoder::decode(address addr, const char* filepath, char *buf, int buflen, int *offset) {
-  if (_decoder_status != no_error) {
-    return _decoder_status;
-  }
-
-  ElfFile* file = get_elf_file(filepath);
-  if (_decoder_status != no_error) {
-    return _decoder_status;
+  if (_decoder != NULL) {
+    return _decoder;
   }
 
-  const char* symbol = file->decode(addr, offset);
-  if (file->get_status() == out_of_memory) {
-    _decoder_status = out_of_memory;
-    return _decoder_status;
-  } else if (symbol != NULL) {
-    if (!demangle(symbol, buf, buflen)) {
-      jio_snprintf(buf, buflen, "%s", symbol);
+  // Decoder is a secondary service. Although it is good to have,
+  // we can live without it.
+#if defined(_WINDOWS)
+  _decoder = new (std::nothrow) WindowsDecoder();
+#elif defined (__APPLE__)
+  _decoder = new (std::nothrow) MachODecoder();
+#else
+  _decoder = new (std::nothrow) ElfDecoder();
+#endif
+
+  if (_decoder == NULL || _decoder->has_error()) {
+    if (_decoder != NULL) {
+      delete _decoder;
     }
-    return no_error;
-  } else {
-    return symbol_not_found;
+    _decoder = &_do_nothing_decoder;
   }
+  return _decoder;
+}
+
+bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
+  assert(_decoder_lock != NULL, "Just check");
+  MutexLockerEx locker(_decoder_lock, true);
+  NullDecoder* decoder = get_decoder();
+  assert(decoder != NULL, "null decoder");
+
+  return decoder->decode(addr, buf, buflen, offset, modulepath);
 }
 
-ElfFile* Decoder::get_elf_file(const char* filepath) {
-  if (_decoder_status != no_error) {
-    return NULL;
-  }
-  ElfFile* file = _opened_elf_files;
-  while (file != NULL) {
-    if (file->same_elf_file(filepath)) {
-      return file;
-    }
-    file = file->m_next;
+bool Decoder::demangle(const char* symbol, char* buf, int buflen) {
+  assert(_decoder_lock != NULL, "Just check");
+  MutexLockerEx locker(_decoder_lock, true);
+  NullDecoder* decoder = get_decoder();
+  assert(decoder != NULL, "null decoder");
+  return decoder->demangle(symbol, buf, buflen);
+}
+
+bool Decoder::can_decode_C_frame_in_vm() {
+  assert(_decoder_lock != NULL, "Just check");
+  MutexLockerEx locker(_decoder_lock, true);
+  NullDecoder* decoder = get_decoder();
+  assert(decoder != NULL, "null decoder");
+  return decoder->can_decode_C_frame_in_vm();
+}
+
+// Shut down the real decoder and replace it with
+// _do_nothing_decoder
+void Decoder::shutdown() {
+  assert(_decoder_lock != NULL, "Just check");
+  MutexLockerEx locker(_decoder_lock, true);
+
+  if (_decoder != NULL && _decoder != &_do_nothing_decoder) {
+    delete _decoder;
   }
 
-  file = new ElfFile(filepath);
-  if (file == NULL) {
-    _decoder_status = out_of_memory;
-  }
-  if (_opened_elf_files != NULL) {
-    file->m_next = _opened_elf_files;
-  }
-
-  _opened_elf_files = file;
-  return file;
+  _decoder = &_do_nothing_decoder;
 }
 
-#endif
--- a/hotspot/src/share/vm/utilities/decoder.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/decoder.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,83 +23,78 @@
  */
 
 
-#ifndef __DECODER_HPP
-#define __DECODER_HPP
+#ifndef SHARE_VM_UTILITIES_DECODER_HPP
+#define SHARE_VM_UTILITIES_DECODER_HPP
 
 #include "memory/allocation.hpp"
-
-#ifdef _WINDOWS
-#include <windows.h>
-#include <imagehlp.h>
-
-// functions needed for decoding symbols
-typedef DWORD (WINAPI *pfn_SymSetOptions)(DWORD);
-typedef BOOL  (WINAPI *pfn_SymInitialize)(HANDLE, PCTSTR, BOOL);
-typedef BOOL  (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
-typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWORD);
+#include "runtime/mutex.hpp"
 
-#elif defined(__APPLE__)
-
-#else
-
-class ElfFile;
-
-#endif // _WINDOWS
-
-
-class Decoder: public StackObj {
-
- public:
+class NullDecoder: public CHeapObj {
+public:
   // status code for decoding native C frame
   enum decoder_status {
-         no_error,             // successfully decoded frames
+         not_available = -10,  // real decoder is not available
+         no_error = 0,         // successfully decoded frames
          out_of_memory,        // out of memory
          file_invalid,         // invalid elf file
          file_not_found,       // could not found symbol file (on windows), such as jvm.pdb or jvm.map
          helper_not_found,     // could not load dbghelp.dll (Windows only)
          helper_func_error,    // decoding functions not found (Windows only)
-         helper_init_error,    // SymInitialize failed (Windows only)
-         symbol_not_found      // could not find the symbol
+         helper_init_error     // SymInitialize failed (Windows only)
   };
 
- public:
-  Decoder() { initialize(); };
-  ~Decoder() { uninitialize(); };
+  NullDecoder() {
+    _decoder_status = not_available;
+  }
+
+  ~NullDecoder() {};
+
+  virtual bool decode(address pc, char* buf, int buflen, int* offset,
+    const char* modulepath = NULL) {
+    return false;
+  }
+
+  virtual bool demangle(const char* symbol, char* buf, int buflen) {
+    return false;
+  }
+
+  virtual bool can_decode_C_frame_in_vm() const {
+    return false;
+  }
 
+  virtual decoder_status status() const {
+    return _decoder_status;
+  }
+
+  virtual bool has_error() const {
+    return is_error(_decoder_status);
+  }
+
+  static bool is_error(decoder_status status) {
+    return (status > 0);
+  }
+
+protected:
+  decoder_status  _decoder_status;
+};
+
+
+class Decoder: AllStatic {
+public:
+  static bool decode(address pc, char* buf, int buflen, int* offset, const char* modulepath = NULL);
+  static bool demangle(const char* symbol, char* buf, int buflen);
   static bool can_decode_C_frame_in_vm();
 
-  static void initialize();
-  static void uninitialize();
-
-#ifdef _WINDOWS
-  static decoder_status    decode(address addr, char *buf, int buflen, int *offset);
-#else
-  static decoder_status    decode(address addr, const char* filepath, char *buf, int buflen, int *offset);
-#endif
-
-  static bool              demangle(const char* symbol, char *buf, int buflen);
-
-  static decoder_status    get_status() { return _decoder_status; };
+  static void shutdown();
+protected:
+  static NullDecoder* get_decoder();
 
-#if !defined(_WINDOWS) && !defined(__APPLE__)
- private:
-  static ElfFile*         get_elf_file(const char* filepath);
-#endif // _WINDOWS
-
-
- private:
-  static decoder_status     _decoder_status;
-  static bool               _initialized;
+private:
+  static NullDecoder*     _decoder;
+  static NullDecoder      _do_nothing_decoder;
 
-#ifdef _WINDOWS
-  static HMODULE                   _dbghelp_handle;
-  static bool                      _can_decode_in_vm;
-  static pfn_SymGetSymFromAddr64   _pfnSymGetSymFromAddr64;
-  static pfn_UndecorateSymbolName  _pfnUndecorateSymbolName;
-#elif __APPLE__
-#else
-  static ElfFile*                  _opened_elf_files;
-#endif // _WINDOWS
+protected:
+  static Mutex*       _decoder_lock;
 };
 
-#endif // __DECODER_HPP
+#endif // SHARE_VM_UTILITIES_DECODER_HPP
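
The refactored Decoder is now a static facade: every entry point takes _decoder_lock, lazily constructs one platform decoder (ElfDecoder, MachODecoder or WindowsDecoder), and falls back to the embedded do-nothing NullDecoder if construction fails, so callers never receive NULL and never have to distinguish the platforms. A simplified standalone sketch of that null-object-plus-lazy-singleton shape (plain std::mutex and made-up backend classes, not the HotSpot types):

    #include <cstdio>
    #include <mutex>
    #include <new>

    // Simplified stand-ins for NullDecoder and the platform decoders.
    class NullBackend {
    public:
      virtual ~NullBackend() {}
      virtual bool decode(const void* pc, char* buf, int buflen) { return false; }
      virtual bool has_error() const { return true; }
    };

    class FakePlatformBackend : public NullBackend {
    public:
      bool decode(const void* pc, char* buf, int buflen) {
        std::snprintf(buf, buflen, "symbol_near_%p", pc);
        return true;
      }
      bool has_error() const { return false; }
    };

    // Stand-in for class Decoder: all-static facade, a lock on every entry point,
    // lazy construction, and a fallback to the do-nothing backend so callers
    // never see NULL.
    class Facade {
    public:
      static bool decode(const void* pc, char* buf, int buflen) {
        std::lock_guard<std::mutex> guard(_lock);
        return backend()->decode(pc, buf, buflen);
      }
    private:
      static NullBackend* backend() {
        if (_backend == NULL) {
          _backend = new (std::nothrow) FakePlatformBackend();
          if (_backend == NULL || _backend->has_error()) {
            delete _backend;
            _backend = &_do_nothing;
          }
        }
        return _backend;
      }
      static NullBackend* _backend;
      static NullBackend  _do_nothing;
      static std::mutex   _lock;
    };

    NullBackend* Facade::_backend = NULL;
    NullBackend  Facade::_do_nothing;
    std::mutex   Facade::_lock;

    int main() {
      int  probe = 0;
      char buf[64];
      if (Facade::decode(&probe, buf, (int) sizeof(buf))) {
        std::printf("%s\n", buf);
      }
      return 0;
    }
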
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/decoder_elf.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#if !defined(_WINDOWS) && !defined(__APPLE__)
+#include "decoder_elf.hpp"
+
+ElfDecoder::~ElfDecoder() {
+  if (_opened_elf_files != NULL) {
+    delete _opened_elf_files;
+    _opened_elf_files = NULL;
+  }
+}
+
+bool ElfDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* filepath) {
+  assert(filepath, "null file path");
+  assert(buf != NULL && buflen > 0, "Invalid buffer");
+  if (has_error()) return false;
+  ElfFile* file = get_elf_file(filepath);
+  if (file == NULL) {
+    return false;
+  }
+
+  if (!file->decode(addr, buf, buflen, offset)) {
+    return false;
+  }
+  if (buf[0] != '\0') {
+    demangle(buf, buf, buflen);
+  }
+  return true;
+}
+
+ElfFile* ElfDecoder::get_elf_file(const char* filepath) {
+  ElfFile* file;
+
+  file = _opened_elf_files;
+  while (file != NULL) {
+    if (file->same_elf_file(filepath)) {
+      return file;
+    }
+    file = file->next();
+  }
+
+  file = new (std::nothrow) ElfFile(filepath);
+  if (file != NULL) {
+    if (_opened_elf_files != NULL) {
+      file->set_next(_opened_elf_files);
+    }
+    _opened_elf_files = file;
+  }
+
+  return file;
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/decoder_elf.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_DECODER_ELF_HPP
+#define SHARE_VM_UTILITIES_DECODER_ELF_HPP
+
+#if !defined(_WINDOWS) && !defined(__APPLE__)
+
+#include "utilities/decoder.hpp"
+#include "utilities/elfFile.hpp"
+
+class ElfDecoder: public NullDecoder {
+
+public:
+  ElfDecoder() {
+    _opened_elf_files = NULL;
+    _decoder_status = no_error;
+  }
+  ~ElfDecoder();
+
+  bool can_decode_C_frame_in_vm() const { return true; }
+
+  bool demangle(const char* symbol, char *buf, int buflen);
+  bool decode(address addr, char *buf, int buflen, int* offset, const char* filepath = NULL);
+
+private:
+  ElfFile*         get_elf_file(const char* filepath);
+
+private:
+  ElfFile*         _opened_elf_files;
+};
+
+#endif
+#endif // SHARE_VM_UTILITIES_DECODER_ELF_HPP
--- a/hotspot/src/share/vm/utilities/elfFile.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/elfFile.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -44,7 +44,7 @@
   m_string_tables = NULL;
   m_symbol_tables = NULL;
   m_next = NULL;
-  m_status = Decoder::no_error;
+  m_status = NullDecoder::no_error;
 
   int len = strlen(filepath) + 1;
   m_filepath = (const char*)os::malloc(len * sizeof(char));
@@ -54,10 +54,10 @@
     if (m_file != NULL) {
       load_tables();
     } else {
-      m_status = Decoder::file_not_found;
+      m_status = NullDecoder::file_not_found;
     }
   } else {
-    m_status = Decoder::out_of_memory;
+    m_status = NullDecoder::out_of_memory;
   }
 }
 
@@ -96,41 +96,41 @@
 
 bool ElfFile::load_tables() {
   assert(m_file, "file not open");
-  assert(m_status == Decoder::no_error, "already in error");
+  assert(!NullDecoder::is_error(m_status), "already in error");
 
   // read elf file header
   if (fread(&m_elfHdr, sizeof(m_elfHdr), 1, m_file) != 1) {
-    m_status = Decoder::file_invalid;
+    m_status = NullDecoder::file_invalid;
     return false;
   }
 
   if (!is_elf_file(m_elfHdr)) {
-    m_status = Decoder::file_invalid;
+    m_status = NullDecoder::file_invalid;
     return false;
   }
 
   // walk elf file's section headers, and load string tables
   Elf_Shdr shdr;
   if (!fseek(m_file, m_elfHdr.e_shoff, SEEK_SET)) {
-    if (m_status != Decoder::no_error) return false;
+    if (NullDecoder::is_error(m_status)) return false;
 
     for (int index = 0; index < m_elfHdr.e_shnum; index ++) {
       if (fread((void*)&shdr, sizeof(Elf_Shdr), 1, m_file) != 1) {
-        m_status = Decoder::file_invalid;
+        m_status = NullDecoder::file_invalid;
         return false;
       }
       // string table
       if (shdr.sh_type == SHT_STRTAB) {
         ElfStringTable* table = new (std::nothrow) ElfStringTable(m_file, shdr, index);
         if (table == NULL) {
-          m_status = Decoder::out_of_memory;
+          m_status = NullDecoder::out_of_memory;
           return false;
         }
         add_string_table(table);
       } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) {
         ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(m_file, shdr);
         if (table == NULL) {
-          m_status = Decoder::out_of_memory;
+          m_status = NullDecoder::out_of_memory;
           return false;
         }
         add_symbol_table(table);
@@ -140,32 +140,33 @@
   return true;
 }
 
-const char* ElfFile::decode(address addr, int* offset) {
+bool ElfFile::decode(address addr, char* buf, int buflen, int* offset) {
   // something already went wrong, just give up
-  if (m_status != Decoder::no_error) {
-    return NULL;
+  if (NullDecoder::is_error(m_status)) {
+    return false;
   }
-
   ElfSymbolTable* symbol_table = m_symbol_tables;
   int string_table_index;
   int pos_in_string_table;
   int off = INT_MAX;
   bool found_symbol = false;
   while (symbol_table != NULL) {
-    if (Decoder::no_error == symbol_table->lookup(addr, &string_table_index, &pos_in_string_table, &off)) {
+    if (symbol_table->lookup(addr, &string_table_index, &pos_in_string_table, &off)) {
       found_symbol = true;
     }
     symbol_table = symbol_table->m_next;
   }
-  if (!found_symbol) return NULL;
+  if (!found_symbol) return false;
 
   ElfStringTable* string_table = get_string_table(string_table_index);
+
   if (string_table == NULL) {
-    m_status = Decoder::file_invalid;
-    return NULL;
+    m_status = NullDecoder::file_invalid;
+    return false;
   }
   if (offset) *offset = off;
-  return string_table->string_at(pos_in_string_table);
+
+  return string_table->string_at(pos_in_string_table, buf, buflen);
 }
 
 
--- a/hotspot/src/share/vm/utilities/elfFile.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/elfFile.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,8 +22,8 @@
  *
  */
 
-#ifndef __ELF_FILE_HPP
-#define __ELF_FILE_HPP
+#ifndef SHARE_VM_UTILITIES_ELF_FILE_HPP
+#define SHARE_VM_UTILITIES_ELF_FILE_HPP
 
 #if !defined(_WINDOWS) && !defined(__APPLE__)
 
@@ -83,12 +83,12 @@
 // part of code to be very defensive, and bail out if anything went wrong.
 
 class ElfFile: public CHeapObj {
-  friend class Decoder;
+  friend class ElfDecoder;
  public:
   ElfFile(const char* filepath);
   ~ElfFile();
 
-  const char* decode(address addr, int* offset);
+  bool decode(address addr, char* buf, int buflen, int* offset);
   const char* filepath() {
     return m_filepath;
   }
@@ -99,7 +99,7 @@
     return (m_filepath && !strcmp(filepath, m_filepath));
   }
 
-  Decoder::decoder_status get_status() {
+  NullDecoder::decoder_status get_status() {
     return m_status;
   }
 
@@ -119,8 +119,9 @@
   // return a string table at specified section index
   ElfStringTable* get_string_table(int index);
 
-  // look up an address and return the nearest symbol
-  const char* look_up(Elf_Shdr shdr, address addr, int* offset);
+ protected:
+  ElfFile*  next() const { return m_next; }
+  void set_next(ElfFile* file) { m_next = file; }
 
  protected:
     ElfFile*         m_next;
@@ -131,17 +132,17 @@
   FILE* m_file;
 
   // Elf header
-  Elf_Ehdr            m_elfHdr;
+  Elf_Ehdr                     m_elfHdr;
 
   // symbol tables
-  ElfSymbolTable*     m_symbol_tables;
+  ElfSymbolTable*              m_symbol_tables;
 
   // string tables
-  ElfStringTable*     m_string_tables;
+  ElfStringTable*              m_string_tables;
 
-  Decoder::decoder_status  m_status;
+  NullDecoder::decoder_status  m_status;
 };
 
 #endif // _WINDOWS
 
-#endif // __ELF_FILE_HPP
+#endif // SHARE_VM_UTILITIES_ELF_FILE_HPP
--- a/hotspot/src/share/vm/utilities/elfStringTable.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/elfStringTable.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -38,7 +38,7 @@
   m_index = index;
   m_next = NULL;
   m_file = file;
-  m_status = Decoder::no_error;
+  m_status = NullDecoder::no_error;
 
   // try to load the string table
   long cur_offset = ftell(file);
@@ -48,7 +48,7 @@
     if (fseek(file, shdr.sh_offset, SEEK_SET) ||
       fread((void*)m_table, shdr.sh_size, 1, file) != 1 ||
       fseek(file, cur_offset, SEEK_SET)) {
-      m_status = Decoder::file_invalid;
+      m_status = NullDecoder::file_invalid;
       os::free((void*)m_table);
       m_table = NULL;
     }
@@ -67,22 +67,23 @@
   }
 }
 
-const char* ElfStringTable::string_at(int pos) {
-  if (m_status != Decoder::no_error) {
-    return NULL;
+bool ElfStringTable::string_at(int pos, char* buf, int buflen) {
+  if (NullDecoder::is_error(m_status)) {
+    return false;
   }
   if (m_table != NULL) {
-    return (const char*)(m_table + pos);
+    jio_snprintf(buf, buflen, "%s", (const char*)(m_table + pos));
+    return true;
   } else {
     long cur_pos = ftell(m_file);
     if (cur_pos == -1 ||
       fseek(m_file, m_shdr.sh_offset + pos, SEEK_SET) ||
-      fread(m_symbol, 1, MAX_SYMBOL_LEN, m_file) <= 0 ||
+      fread(buf, 1, buflen, m_file) <= 0 ||
       fseek(m_file, cur_pos, SEEK_SET)) {
-      m_status = Decoder::file_invalid;
-      return NULL;
+      m_status = NullDecoder::file_invalid;
+      return false;
     }
-    return (const char*)m_symbol;
+    return true;
   }
 }
 
--- a/hotspot/src/share/vm/utilities/elfStringTable.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/elfStringTable.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,8 +22,8 @@
  *
  */
 
-#ifndef __ELF_STRING_TABLE_HPP
-#define __ELF_STRING_TABLE_HPP
+#ifndef SHARE_VM_UTILITIES_ELF_STRING_TABLE_HPP
+#define SHARE_VM_UTILITIES_ELF_STRING_TABLE_HPP
 
 #if !defined(_WINDOWS) && !defined(__APPLE__)
 
@@ -35,9 +35,6 @@
 // The string table represents a string table section in an elf file.
 // Whenever there is enough memory, it will load whole string table as
 // one blob. Otherwise, it will load string from file when requested.
-
-#define MAX_SYMBOL_LEN  256
-
 class ElfStringTable: CHeapObj {
   friend class ElfFile;
  public:
@@ -48,10 +45,10 @@
   int index() { return m_index; };
 
   // get string at specified offset
-  const char* string_at(int offset);
+  bool string_at(int offset, char* buf, int buflen);
 
   // get status code
-  Decoder::decoder_status get_status() { return m_status; };
+  NullDecoder::decoder_status get_status() { return m_status; };
 
  protected:
   ElfStringTable*        m_next;
@@ -69,13 +66,10 @@
   // section header
   Elf_Shdr                 m_shdr;
 
-  // buffer for reading individual string
-  char                     m_symbol[MAX_SYMBOL_LEN];
-
   // error code
-  Decoder::decoder_status  m_status;
+  NullDecoder::decoder_status  m_status;
 };
 
-#endif // _WINDOWS
+#endif // !_WINDOWS && !__APPLE__
 
-#endif // __ELF_STRING_TABLE_HPP
+#endif // SHARE_VM_UTILITIES_ELF_STRING_TABLE_HPP
--- a/hotspot/src/share/vm/utilities/elfSymbolTable.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/elfSymbolTable.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -34,7 +34,7 @@
   m_symbols = NULL;
   m_next = NULL;
   m_file = file;
-  m_status = Decoder::no_error;
+  m_status = NullDecoder::no_error;
 
   // try to load the string table
   long cur_offset = ftell(file);
@@ -45,16 +45,16 @@
       if (fseek(file, shdr.sh_offset, SEEK_SET) ||
         fread((void*)m_symbols, shdr.sh_size, 1, file) != 1 ||
         fseek(file, cur_offset, SEEK_SET)) {
-        m_status = Decoder::file_invalid;
+        m_status = NullDecoder::file_invalid;
         os::free(m_symbols);
         m_symbols = NULL;
       }
     }
-    if (m_status == Decoder::no_error) {
+    if (!NullDecoder::is_error(m_status)) {
       memcpy(&m_shdr, &shdr, sizeof(Elf_Shdr));
     }
   } else {
-    m_status = Decoder::file_invalid;
+    m_status = NullDecoder::file_invalid;
   }
 }
 
@@ -68,13 +68,13 @@
   }
 }
 
-Decoder::decoder_status ElfSymbolTable::lookup(address addr, int* stringtableIndex, int* posIndex, int* offset) {
+bool ElfSymbolTable::lookup(address addr, int* stringtableIndex, int* posIndex, int* offset) {
   assert(stringtableIndex, "null string table index pointer");
   assert(posIndex, "null string table offset pointer");
   assert(offset, "null offset pointer");
 
-  if (m_status != Decoder::no_error) {
-    return m_status;
+  if (NullDecoder::is_error(m_status)) {
+    return false;
   }
 
   address pc = 0;
@@ -97,8 +97,8 @@
     long cur_pos;
     if ((cur_pos = ftell(m_file)) == -1 ||
       fseek(m_file, m_shdr.sh_offset, SEEK_SET)) {
-      m_status = Decoder::file_invalid;
-      return m_status;
+      m_status = NullDecoder::file_invalid;
+      return false;
     }
 
     Elf_Sym sym;
@@ -114,13 +114,13 @@
           }
         }
       } else {
-        m_status = Decoder::file_invalid;
-        return m_status;
+        m_status = NullDecoder::file_invalid;
+        return false;
       }
     }
     fseek(m_file, cur_pos, SEEK_SET);
   }
-  return m_status;
+  return true;
 }
 
 #endif // _WINDOWS
--- a/hotspot/src/share/vm/utilities/elfSymbolTable.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/elfSymbolTable.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,8 +22,8 @@
  *
  */
 
-#ifndef __ELF_SYMBOL_TABLE_HPP
-#define __ELF_SYMBOL_TABLE_HPP
+#ifndef SHARE_VM_UTILITIES_ELF_SYMBOL_TABLE_HPP
+#define SHARE_VM_UTILITIES_ELF_SYMBOL_TABLE_HPP
 
 #if !defined(_WINDOWS) && !defined(__APPLE__)
 
@@ -45,9 +45,9 @@
   ~ElfSymbolTable();
 
   // search the symbol that is nearest to the specified address.
-  Decoder::decoder_status lookup(address addr, int* stringtableIndex, int* posIndex, int* offset);
+  bool lookup(address addr, int* stringtableIndex, int* posIndex, int* offset);
 
-  Decoder::decoder_status get_status() { return m_status; };
+  NullDecoder::decoder_status get_status() { return m_status; };
 
  protected:
   ElfSymbolTable*  m_next;
@@ -62,9 +62,9 @@
   // section header
   Elf_Shdr            m_shdr;
 
-  Decoder::decoder_status  m_status;
+  NullDecoder::decoder_status  m_status;
 };
 
-#endif // _WINDOWS
+#endif // !_WINDOWS && !__APPLE__
 
-#endif // __ELF_SYMBOL_TABLE_HPP
+#endif // SHARE_VM_UTILITIES_ELF_SYMBOL_TABLE_HPP
--- a/hotspot/src/share/vm/utilities/exceptions.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/exceptions.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -189,6 +189,13 @@
 #define CHECK_NULL                               CHECK_(NULL)
 #define CHECK_false                              CHECK_(false)
 
+#define CHECK_AND_CLEAR                         THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return;        } (0
+#define CHECK_AND_CLEAR_(result)                THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (0
+#define CHECK_AND_CLEAR_0                       CHECK_AND_CLEAR_(0)
+#define CHECK_AND_CLEAR_NH                      CHECK_AND_CLEAR_(Handle())
+#define CHECK_AND_CLEAR_NULL                    CHECK_AND_CLEAR_(NULL)
+#define CHECK_AND_CLEAR_false                   CHECK_AND_CLEAR_(false)
+
 // The THROW... macros should be used to throw an exception. They require a THREAD variable to be
 // visible within the scope containing the THROW. Usually this is achieved by declaring the function
 // with a TRAPS argument.
@@ -258,7 +265,6 @@
     ShouldNotReachHere();                  \
   } (0
 
-
 // ExceptionMark is a stack-allocated helper class for local exception handling.
 // It is used with the EXCEPTION_MARK macro.
 
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -298,6 +298,11 @@
 const juint   max_juint   = (juint)-1;   // 0xFFFFFFFF largest juint
 const julong  max_julong  = (julong)-1;  // 0xFF....FF largest julong
 
+typedef jbyte  s1;
+typedef jshort s2;
+typedef jint   s4;
+typedef jlong  s8;
+
 //----------------------------------------------------------------------------------------------------
 // JVM spec restrictions
 
--- a/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Sat Jan 28 20:41:27 2012 -0800
@@ -130,6 +130,9 @@
 //----------------------------------------------------------------------------------------------------
 // Non-standard stdlib-like stuff:
 inline int strcasecmp(const char *s1, const char *s2) { return _stricmp(s1,s2); }
+inline int strncasecmp(const char *s1, const char *s2, size_t n) {
+  return _strnicmp(s1,s2,n);
+}
 
 
 //----------------------------------------------------------------------------------------------------
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Sat Jan 28 10:46:46 2012 -0800
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Sat Jan 28 20:41:27 2012 -0800
@@ -571,8 +571,6 @@
        if (fr.pc()) {
           st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
 
-          // initialize decoder to decode C frames
-          Decoder decoder;
 
           int count = 0;
           while (count++ < StackPrintLimit) {
--- a/jaxp/.hgtags	Sat Jan 28 10:46:46 2012 -0800
+++ b/jaxp/.hgtags	Sat Jan 28 20:41:27 2012 -0800
@@ -143,3 +143,5 @@
 dffeb62b1a7fc8b316bf58fe5479323f3661894e jdk8-b19
 f052abb8f37444ba77858913887d0d92795dd6b8 jdk8-b20
 d41eeadf5c1344b88c5051a997aec9e1ad7ce1db jdk8-b21
+cf9d6ec44f891236ad18451021d6dcd57dc82f7b jdk8-b22
+95102fd334183d15dc98a95dd0d749527b6c7300 jdk8-b23
--- a/jaxws/.hgtags	Sat Jan 28 10:46:46 2012 -0800
+++ b/jaxws/.hgtags	Sat Jan 28 20:41:27 2012 -0800
@@ -143,3 +143,5 @@
 b73b733214aa43648d69a2da51e6b48fda902a2d jdk8-b19
 2b2818e3386f4510c390f6aea90d77e1c6a5bf9e jdk8-b20
 c266cab0e3fff05f2048c23046c14d60f7102175 jdk8-b21
+8d3df89b0f2d3c603b2edb0f5e24af1245397cc6 jdk8-b22
+25ce7a0004874273f6aeda14e7c3538cba34bdf1 jdk8-b23
--- a/jdk/.hgtags	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/.hgtags	Sat Jan 28 20:41:27 2012 -0800
@@ -143,3 +143,5 @@
 3778f85773055e81eab6c5ef828935ecca241810 jdk8-b19
 39e938cd1b82ec3aab0a9aa66fd8a0457cd0c9c2 jdk8-b20
 664fa4fb0ee411ef048903c479f8b962fcdb2f4b jdk8-b21
+dda27c73d8db4a9c7a23872b6f0c5106edcb2021 jdk8-b22
+54202e0148ec7d4570cab5bc9b00d216a7677569 jdk8-b23
--- a/jdk/make/javax/sound/jsoundalsa/Makefile	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/make/javax/sound/jsoundalsa/Makefile	Sat Jan 28 20:41:27 2012 -0800
@@ -65,7 +65,7 @@
 	$(MIDIFILES_export) \
 	$(PORTFILES_export)
 
-LDFLAGS += -lasound
+OTHER_LDLIBS += -lasound
 
 CPPFLAGS += \
 	-DUSE_DAUDIO=TRUE \
--- a/jdk/make/sun/awt/Makefile	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/make/sun/awt/Makefile	Sat Jan 28 20:41:27 2012 -0800
@@ -367,7 +367,8 @@
 FONTCONFIGS_SRC	= $(CLOSED_SRC)/solaris/classes/sun/awt/fontconfigs
 _FONTCONFIGS	= \
 	fontconfig.properties				\
-	fontconfig.RedHat.5.5.properties		\
+	fontconfig.RedHat.5.properties			\
+	fontconfig.RedHat.6.properties			\
 	fontconfig.Turbo.properties			\
 	fontconfig.SuSE.10.properties                   \
 	fontconfig.SuSE.11.properties
--- a/jdk/src/share/classes/com/sun/beans/TypeResolver.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/com/sun/beans/TypeResolver.java	Sat Jan 28 20:41:27 2012 -0800
@@ -154,7 +154,7 @@
      * @see #resolve(Type)
      */
     public static Type resolve(Type actual, Type formal) {
-        return new TypeResolver(actual).resolve(formal);
+        return getTypeResolver(actual).resolve(formal);
     }
 
     /**
@@ -169,7 +169,7 @@
      * @see #resolve(Type[])
      */
     public static Type[] resolve(Type actual, Type[] formals) {
-        return new TypeResolver(actual).resolve(formals);
+        return getTypeResolver(actual).resolve(formals);
     }
 
     /**
@@ -228,9 +228,20 @@
         return classes;
     }
 
+    public static TypeResolver getTypeResolver(Type type) {
+        synchronized (CACHE) {
+            TypeResolver resolver = CACHE.get(type);
+            if (resolver == null) {
+                resolver = new TypeResolver(type);
+                CACHE.put(type, resolver);
+            }
+            return resolver;
+        }
+    }
 
-    private final Map<TypeVariable<?>, Type> map
-        = new HashMap<TypeVariable<?>, Type>();
+    private static final WeakCache<Type, TypeResolver> CACHE = new WeakCache<>();
+
+    private final Map<TypeVariable<?>, Type> map = new HashMap<>();
 
     /**
      * Constructs the type resolver for the given actual type.
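Note on the change above: getTypeResolver caches one TypeResolver per Type (in the WeakCache referenced in the patch), so the two public resolve overloads no longer rebuild the type-variable map on every call. Below is a minimal sketch of the same compute-if-absent idiom, with java.util.WeakHashMap standing in for the internal WeakCache; all class and method names here are hypothetical illustrations, not the patched code.

    import java.lang.reflect.Type;
    import java.util.Map;
    import java.util.WeakHashMap;

    final class CachedResolverSketch {
        // Hypothetical stand-in for the expensive object; the real code caches TypeResolver.
        static final class Resolver {
            final Type actual;
            Resolver(Type actual) { this.actual = actual; }
        }

        // Weak keys let a Type and its cached resolver be reclaimed once the Type is unused.
        private static final Map<Type, Resolver> CACHE = new WeakHashMap<>();

        static Resolver get(Type type) {
            synchronized (CACHE) {                  // coarse lock, like getTypeResolver above
                Resolver r = CACHE.get(type);
                if (r == null) {
                    r = new Resolver(type);         // built at most once per live Type
                    CACHE.put(type, r);
                }
                return r;
            }
        }

        public static void main(String[] args) {
            Type t = String.class;                  // Class implements Type
            System.out.println(get(t) == get(t));   // true: the second call hits the cache
        }
    }

As in the patch, the whole lookup runs under one coarse lock; the cost is some contention, the benefit is that a resolver is built at most once per live Type.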
--- a/jdk/src/share/classes/java/awt/AWTKeyStroke.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/java/awt/AWTKeyStroke.java	Sat Jan 28 20:41:27 2012 -0800
@@ -802,8 +802,11 @@
      */
     protected Object readResolve() throws java.io.ObjectStreamException {
         synchronized (AWTKeyStroke.class) {
-            return getCachedStroke(keyChar, keyCode, modifiers, onKeyRelease);
+            if (getClass().equals(getAWTKeyStrokeClass())) {
+                return getCachedStroke(keyChar, keyCode, modifiers, onKeyRelease);
+            }
         }
+        return this;
     }
 
     private static int mapOldModifiers(int modifiers) {
--- a/jdk/src/share/classes/java/lang/Class.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/java/lang/Class.java	Sat Jan 28 20:41:27 2012 -0800
@@ -3118,4 +3118,9 @@
     AnnotationType getAnnotationType() {
         return annotationType;
     }
+
+    /* Backing store of user-defined values pertaining to this class.
+     * Maintained by the ClassValue class.
+     */
+    transient ClassValue.ClassValueMap classValueMap;
 }
--- a/jdk/src/share/classes/java/lang/ClassValue.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/java/lang/ClassValue.java	Sat Jan 28 20:41:27 2012 -0800
@@ -25,9 +25,14 @@
 
 package java.lang;
 
+import java.lang.ClassValue.ClassValueMap;
 import java.util.WeakHashMap;
+import java.lang.ref.WeakReference;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static java.lang.ClassValue.ClassValueMap.probeHomeLocation;
+import static java.lang.ClassValue.ClassValueMap.probeBackupLocations;
+
 /**
  * Lazily associate a computed value with (potentially) every type.
  * For example, if a dynamic language needs to construct a message dispatch
@@ -92,14 +97,22 @@
      * @see #computeValue
      */
     public T get(Class<?> type) {
-        ClassValueMap map = getMap(type);
-        if (map != null) {
-            Object x = map.get(this);
-            if (x != null) {
-                return (T) map.unmaskNull(x);
-            }
-        }
-        return setComputedValue(type);
+        // non-racing this.hashCodeForCache : final int
+        Entry<?>[] cache;
+        Entry<T> e = probeHomeLocation(cache = getCacheCarefully(type), this);
+        // racing e : current value <=> stale value from current cache or from stale cache
+        // invariant:  e is null or an Entry with readable Entry.version and Entry.value
+        if (match(e))
+            // invariant:  No false positive matches.  False negatives are OK if rare.
+            // The key fact that makes this work: if this.version == e.version,
+            // then this thread has a right to observe (final) e.value.
+            return e.value();
+        // The fast path can fail for any of these reasons:
+        // 1. no entry has been computed yet
+        // 2. hash code collision (before or after reduction mod cache.length)
+        // 3. an entry has been removed (either on this type or another)
+        // 4. the GC has somehow managed to delete e.version and clear the reference
+        return getFromBackup(cache, type);
     }
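The comments above describe the cache-then-fallback structure of get; from the caller's side the contract is simply that computeValue runs once per (ClassValue, Class) pair until a remove. A small usage sketch against the public ClassValue API (class and value names are illustrative only):

    import java.util.concurrent.atomic.AtomicInteger;

    public class ClassValueGetDemo {
        static final AtomicInteger computeCalls = new AtomicInteger();

        static final ClassValue<String> SIMPLE_NAMES = new ClassValue<String>() {
            @Override protected String computeValue(Class<?> type) {
                computeCalls.incrementAndGet();            // only the slow path reaches computeValue
                return type.getSimpleName().toUpperCase();
            }
        };

        public static void main(String[] args) {
            System.out.println(SIMPLE_NAMES.get(Thread.class));  // THREAD (computed)
            System.out.println(SIMPLE_NAMES.get(Thread.class));  // THREAD (from the per-Class cache)
            System.out.println(computeCalls.get());              // 1
        }
    }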
 
     /**
@@ -157,83 +170,582 @@
      */
     public void remove(Class<?> type) {
         ClassValueMap map = getMap(type);
-        if (map != null) {
-            synchronized (map) {
-                map.remove(this);
+        map.removeEntry(this);
+    }
+
+    // Possible functionality for JSR 292 MR 1
+    /*public*/ void put(Class<?> type, T value) {
+        ClassValueMap map = getMap(type);
+        map.changeEntry(this, value);
+    }
+
+    /// --------
+    /// Implementation...
+    /// --------
+
+    /** Return the cache, if it exists, else a dummy empty cache. */
+    private static Entry<?>[] getCacheCarefully(Class<?> type) {
+        // racing type.classValueMap{.cacheArray} : null => new Entry[X] <=> new Entry[Y]
+        ClassValueMap map = type.classValueMap;
+        if (map == null)  return EMPTY_CACHE;
+        Entry<?>[] cache = map.getCache();
+        return cache;
+        // invariant:  returned value is safe to dereference and check for an Entry
+    }
+
+    /** Initial, one-element, empty cache used by all Class instances.  Must never be filled. */
+    private static final Entry<?>[] EMPTY_CACHE = { null };
+
+    /**
+     * Slow tail of ClassValue.get to retry at nearby locations in the cache,
+     * or take a slow lock and check the hash table.
+     * Called only if the first probe was empty or a collision.
+     * This is a separate method, so compilers can process it independently.
+     */
+    private T getFromBackup(Entry<?>[] cache, Class<?> type) {
+        Entry<T> e = probeBackupLocations(cache, this);
+        if (e != null)
+            return e.value();
+        return getFromHashMap(type);
+    }
+
+    // Hack to suppress warnings on the (T) cast, which is a no-op.
+    @SuppressWarnings("unchecked")
+    Entry<T> castEntry(Entry<?> e) { return (Entry<T>) e; }
+
+    /** Called when the fast path of get fails, and cache reprobe also fails.
+     */
+    private T getFromHashMap(Class<?> type) {
+        // The fail-safe recovery is to fall back to the underlying classValueMap.
+        ClassValueMap map = getMap(type);
+        for (;;) {
+            Entry<T> e = map.startEntry(this);
+            if (!e.isPromise())
+                return e.value();
+            try {
+                // Try to make a real entry for the promised version.
+                e = makeEntry(e.version(), computeValue(type));
+            } finally {
+                // Whether computeValue throws or returns normally,
+                // be sure to remove the empty entry.
+                e = map.finishEntry(this, e);
             }
+            if (e != null)
+                return e.value();
+            // else try again, in case a racing thread called remove (so e == null)
         }
     }
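As the try/finally above notes, a computeValue that throws leaves only its promise in the backing map, and finishEntry discards it, so the failure itself is not cached. A short sketch of the resulting retry behavior (names are illustrative):

    public class ClassValueRetryDemo {
        static int attempts = 0;

        static final ClassValue<String> FLAKY = new ClassValue<String>() {
            @Override protected String computeValue(Class<?> type) {
                if (++attempts == 1) {
                    throw new IllegalStateException("first attempt fails");
                }
                return "ok after " + attempts + " attempts";
            }
        };

        public static void main(String[] args) {
            try {
                FLAKY.get(Object.class);                  // computeValue throws; nothing is cached
            } catch (IllegalStateException expected) {
                System.out.println("first get threw: " + expected.getMessage());
            }
            System.out.println(FLAKY.get(Object.class));  // "ok after 2 attempts"
        }
    }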
 
-    /// Implementation...
-    // FIXME: Use a data structure here similar that of ThreadLocal (7030453).
+    /** Check that e is non-null, matches this ClassValue, and is live. */
+    boolean match(Entry<?> e) {
+        // racing e.version : null (blank) => unique Version token => null (GC-ed version)
+        // non-racing this.version : v1 => v2 => ... (updates are read faithfully from volatile)
+        return (e != null && e.get() == this.version);
+        // invariant:  No false positives on version match.  Null is OK for false negative.
+        // invariant:  If version matches, then e.value is readable (final set in Entry.<init>)
+    }
+
+    /** Internal hash code for accessing Class.classValueMap.cacheArray. */
+    final int hashCodeForCache = nextHashCode.getAndAdd(HASH_INCREMENT) & HASH_MASK;
+
+    /** Value stream for hashCodeForCache.  See similar structure in ThreadLocal. */
+    private static final AtomicInteger nextHashCode = new AtomicInteger();
 
-    private static final AtomicInteger STORE_BARRIER = new AtomicInteger();
+    /** Good for power-of-two tables.  See similar structure in ThreadLocal. */
+    private static final int HASH_INCREMENT = 0x61c88647;
+
+    /** Mask a hash code to be positive but not too large, to prevent wraparound. */
+    static final int HASH_MASK = (-1 >>> 2);
+
+    /**
+     * Private key for retrieval of this object from ClassValueMap.
+     */
+    static class Identity {
+    }
+    /**
+     * This ClassValue's identity, expressed as an opaque object.
+     * The main object {@code ClassValue.this} is incorrect since
+     * subclasses may override {@code ClassValue.equals}, which
+     * could confuse keys in the ClassValueMap.
+     */
+    final Identity identity = new Identity();
 
-    /** Slow path for {@link #get}. */
-    private T setComputedValue(Class<?> type) {
-        ClassValueMap map = getMap(type);
-        if (map == null) {
-            map = initializeMap(type);
+    /**
+     * Current version for retrieving this class value from the cache.
+     * Any number of computeValue calls can be cached in association with one version.
+     * But the version changes when a remove (on any type) is executed.
+     * A version change invalidates all cache entries for the affected ClassValue,
+     * by marking them as stale.  Stale cache entries do not force another call
+     * to computeValue, but they do require a synchronized visit to a backing map.
+     * <p>
+     * All user-visible state changes on the ClassValue take place under
+     * a lock inside the synchronized methods of ClassValueMap.
+     * Readers (of ClassValue.get) are notified of such state changes
+     * when this.version is bumped to a new token.
+     * This variable must be volatile so that an unsynchronized reader
+     * will receive the notification without delay.
+     * <p>
+     * If version were not volatile, one thread T1 could persistently hold onto
+     * a stale value this.value == V1, while another thread T2 advances
+     * (under a lock) to this.value == V2.  This will typically be harmless,
+     * but if T1 and T2 interact causally via some other channel, such that
+     * T1's further actions are constrained (in the JMM) to happen after
+     * the V2 event, then T1's observation of V1 will be an error.
+     * <p>
+     * The practical effect of making this.version be volatile is that it cannot
+     * be hoisted out of a loop (by an optimizing JIT) or otherwise cached.
+     * Some machines may also require a barrier instruction to execute
+     * before this.version.
+     */
+    private volatile Version<T> version = new Version<>(this);
+    Version<T> version() { return version; }
+    void bumpVersion() { version = new Version<>(this); }
+    static class Version<T> {
+        private final ClassValue<T> classValue;
+        private final Entry<T> promise = new Entry<>(this);
+        Version(ClassValue<T> classValue) { this.classValue = classValue; }
+        ClassValue<T> classValue() { return classValue; }
+        Entry<T> promise() { return promise; }
+        boolean isLive() { return classValue.version() == this; }
+    }
+
+    /** One binding of a value to a class via a ClassValue.
+     *  States are:<ul>
+     *  <li> promise if value == Entry.this
+     *  <li> else dead if version == null
+     *  <li> else stale if version != classValue.version
+     *  <li> else live </ul>
+     *  Promises are never put into the cache; they only live in the
+     *  backing map while a computeValue call is in flight.
+     *  Once an entry goes stale, it can be reset at any time
+     *  into the dead state.
+     */
+    static class Entry<T> extends WeakReference<Version<T>> {
+        final Object value;  // usually of type T, but sometimes (Entry)this
+        Entry(Version<T> version, T value) {
+            super(version);
+            this.value = value;  // for a regular entry, value is of type T
         }
-        T value = computeValue(type);
-        STORE_BARRIER.lazySet(0);
-        // All stores pending from computeValue are completed.
-        synchronized (map) {
-            // Warm up the table with a null entry.
-            map.preInitializeEntry(this);
+        private void assertNotPromise() { assert(!isPromise()); }
+        /** For creating a promise. */
+        Entry(Version<T> version) {
+            super(version);
+            this.value = this;  // for a promise, value is not of type T, but Entry!
+        }
+        /** Fetch the value.  This entry must not be a promise. */
+        @SuppressWarnings("unchecked")  // if !isPromise, type is T
+        T value() { assertNotPromise(); return (T) value; }
+        boolean isPromise() { return value == this; }
+        Version<T> version() { return get(); }
+        ClassValue<T> classValueOrNull() {
+            Version<T> v = version();
+            return (v == null) ? null : v.classValue();
+        }
+        boolean isLive() {
+            Version<T> v = version();
+            if (v == null)  return false;
+            if (v.isLive())  return true;
+            clear();
+            return false;
+        }
+        Entry<T> refreshVersion(Version<T> v2) {
+            assertNotPromise();
+            @SuppressWarnings("unchecked")  // if !isPromise, type is T
+            Entry<T> e2 = new Entry<>(v2, (T) value);
+            clear();
+            // value = null -- caller must drop
+            return e2;
         }
-        STORE_BARRIER.lazySet(0);
-        // All stores pending from table expansion are completed.
-        synchronized (map) {
-            value = (T) map.initializeEntry(this, value);
-            // One might fear a possible race condition here
-            // if the code for map.put has flushed the write
-            // to map.table[*] before the writes to the Map.Entry
-            // are done.  This is not possible, since we have
-            // warmed up the table with an empty entry.
+        static final Entry<?> DEAD_ENTRY = new Entry<>(null, null);
+    }
+
+    /** Return the backing map associated with this type. */
+    private static ClassValueMap getMap(Class<?> type) {
+        // racing type.classValueMap : null (blank) => unique ClassValueMap
+        // if a null is observed, a map is created (lazily, synchronously, uniquely)
+        // all further access to that map is synchronized
+        ClassValueMap map = type.classValueMap;
+        if (map != null)  return map;
+        return initializeMap(type);
+    }
+
+    private static final Object CRITICAL_SECTION = new Object();
+    private static ClassValueMap initializeMap(Class<?> type) {
+        ClassValueMap map;
+        synchronized (CRITICAL_SECTION) {  // private object to avoid deadlocks
+            // happens about once per type
+            if ((map = type.classValueMap) == null)
+                type.classValueMap = map = new ClassValueMap(type);
         }
-        return value;
+        return map;
+    }
+
+    static <T> Entry<T> makeEntry(Version<T> explicitVersion, T value) {
+        // Note that explicitVersion might be different from this.version.
+        return new Entry<>(explicitVersion, value);
+
+        // As soon as the Entry is put into the cache, the value will be
+        // reachable via a data race (as defined by the Java Memory Model).
+        // This race is benign, assuming the value object itself can be
+        // read safely by multiple threads.  This is up to the user.
+        //
+        // The entry and version fields themselves can be safely read via
+        // a race because they are either final or have controlled states.
+        // If the pointer from the entry to the version is still null,
+        // or if the version goes immediately dead and is nulled out,
+        // the reader will take the slow path and retry under a lock.
     }
 
-    // Replace this map by a per-class slot.
-    private static final WeakHashMap<Class<?>, ClassValueMap> ROOT
-        = new WeakHashMap<Class<?>, ClassValueMap>();
+    // The following class could also be top level and non-public:
+
+    /** A backing map for all ClassValues, relative to a single given type.
+     *  Gives a fully serialized "true state" for each pair (ClassValue cv, Class type).
+     *  Also manages an unserialized fast-path cache.
+     */
+    static class ClassValueMap extends WeakHashMap<ClassValue.Identity, Entry<?>> {
+        private final Class<?> type;
+        private Entry<?>[] cacheArray;
+        private int cacheLoad, cacheLoadLimit;
+
+        /** Number of entries initially allocated to each type when first used with any ClassValue.
+         *  It would be pointless to make this much smaller than the Class and ClassValueMap objects themselves.
+         *  Must be a power of 2.
+         */
+        private static final int INITIAL_ENTRIES = 32;
+
+        /** Build a backing map for ClassValues, relative to the given type.
+         *  Also, create an empty cache array and install it on the class.
+         */
+        ClassValueMap(Class<?> type) {
+            this.type = type;
+            sizeCache(INITIAL_ENTRIES);
+        }
+
+        Entry<?>[] getCache() { return cacheArray; }
 
-    private static ClassValueMap getMap(Class<?> type) {
-        type.getClass();  // test for null
-        return ROOT.get(type);
-    }
-
-    private static ClassValueMap initializeMap(Class<?> type) {
-        synchronized (ClassValue.class) {
-            ClassValueMap map = ROOT.get(type);
-            if (map == null)
-                ROOT.put(type, map = new ClassValueMap());
-            return map;
+        /** Initiate a query.  Store a promise (placeholder) if there is no value yet. */
+        synchronized
+        <T> Entry<T> startEntry(ClassValue<T> classValue) {
+            @SuppressWarnings("unchecked")  // one map has entries for all value types <T>
+            Entry<T> e = (Entry<T>) get(classValue.identity);
+            Version<T> v = classValue.version();
+            if (e == null) {
+                e = v.promise();
+                // The presence of a promise means that a value is pending for v.
+                // Eventually, finishEntry will overwrite the promise.
+                put(classValue.identity, e);
+                // Note that the promise is never entered into the cache!
+                return e;
+            } else if (e.isPromise()) {
+                // Somebody else has asked the same question.
+                // Let the races begin!
+                if (e.version() != v) {
+                    e = v.promise();
+                    put(classValue.identity, e);
+                }
+                return e;
+            } else {
+                // there is already a completed entry here; report it
+                if (e.version() != v) {
+                    // There is a stale but valid entry here; make it fresh again.
+                    // Once an entry is in the hash table, we don't care what its version is.
+                    e = e.refreshVersion(v);
+                    put(classValue.identity, e);
+                }
+                // Add to the cache, to enable the fast path, next time.
+                checkCacheLoad();
+                addToCache(classValue, e);
+                return e;
+            }
         }
-    }
 
-    static class ClassValueMap extends WeakHashMap<ClassValue, Object> {
-        /** Make sure this table contains an Entry for the given key, even if it is empty. */
-        void preInitializeEntry(ClassValue key) {
-            if (!this.containsKey(key))
-                this.put(key, null);
+        /** Finish a query.  Overwrite a matching placeholder.  Drop stale incoming values. */
+        synchronized
+        <T> Entry<T> finishEntry(ClassValue<T> classValue, Entry<T> e) {
+            @SuppressWarnings("unchecked")  // one map has entries for all value types <T>
+            Entry<T> e0 = (Entry<T>) get(classValue.identity);
+            if (e == e0) {
+                // We can get here during exception processing, unwinding from computeValue.
+                assert(e.isPromise());
+                remove(classValue.identity);
+                return null;
+            } else if (e0 != null && e0.isPromise() && e0.version() == e.version()) {
+                // If e0 matches the intended entry, there has not been a remove call
+                // between the previous startEntry and now.  So now overwrite e0.
+                Version<T> v = classValue.version();
+                if (e.version() != v)
+                    e = e.refreshVersion(v);
+                put(classValue.identity, e);
+                // Add to the cache, to enable the fast path, next time.
+                checkCacheLoad();
+                addToCache(classValue, e);
+                return e;
+            } else {
+                // Some sort of mismatch; caller must try again.
+                return null;
+            }
+        }
+
+        /** Remove an entry. */
+        synchronized
+        void removeEntry(ClassValue<?> classValue) {
+            // make all cache elements for this guy go stale:
+            if (remove(classValue.identity) != null) {
+                classValue.bumpVersion();
+                removeStaleEntries(classValue);
+            }
         }
-        /** Make sure this table contains a non-empty Entry for the given key. */
-        Object initializeEntry(ClassValue key, Object value) {
-            Object prior = this.get(key);
-            if (prior != null) {
-                return unmaskNull(prior);
+
+        /** Change the value for an entry. */
+        synchronized
+        <T> void changeEntry(ClassValue<T> classValue, T value) {
+            @SuppressWarnings("unchecked")  // one map has entries for all value types <T>
+            Entry<T> e0 = (Entry<T>) get(classValue.identity);
+            Version<T> version = classValue.version();
+            if (e0 != null) {
+                if (e0.version() == version && e0.value() == value)
+                    // no value change => no version change needed
+                    return;
+                classValue.bumpVersion();
+                removeStaleEntries(classValue);
             }
-            this.put(key, maskNull(value));
-            return value;
+            Entry<T> e = makeEntry(version, value);
+            put(classValue.identity, e);
+            // Add to the cache, to enable the fast path, next time.
+            checkCacheLoad();
+            addToCache(classValue, e);
+        }
+
+        /// --------
+        /// Cache management.
+        /// --------
+
+        // Statics do not need synchronization.
+
+        /** Load the cache entry at the given (hashed) location. */
+        static Entry<?> loadFromCache(Entry<?>[] cache, int i) {
+            // non-racing cache.length : constant
+            // racing cache[i & (mask)] : null <=> Entry
+            return cache[i & (cache.length-1)];
+            // invariant:  returned value is null or well-constructed (ready to match)
+        }
+
+        /** Look in the cache, at the home location for the given ClassValue. */
+        static <T> Entry<T> probeHomeLocation(Entry<?>[] cache, ClassValue<T> classValue) {
+            return classValue.castEntry(loadFromCache(cache, classValue.hashCodeForCache));
+        }
+
+        /** Given that first probe was a collision, retry at nearby locations. */
+        static <T> Entry<T> probeBackupLocations(Entry<?>[] cache, ClassValue<T> classValue) {
+            if (PROBE_LIMIT <= 0)  return null;
+            // Probe the cache carefully, in a range of slots.
+            int mask = (cache.length-1);
+            int home = (classValue.hashCodeForCache & mask);
+            Entry<?> e2 = cache[home];  // victim, if we find the real guy
+            if (e2 == null) {
+                return null;   // if nobody is at home, no need to search nearby
+            }
+            // assume !classValue.match(e2), but do not assert, because of races
+            int pos2 = -1;
+            for (int i = home + 1; i < home + PROBE_LIMIT; i++) {
+                Entry<?> e = cache[i & mask];
+                if (e == null) {
+                    break;   // only search within non-null runs
+                }
+                if (classValue.match(e)) {
+                    // relocate colliding entry e2 (from cache[home]) to first empty slot
+                    cache[home] = e;
+                    if (pos2 >= 0) {
+                        cache[i & mask] = Entry.DEAD_ENTRY;
+                    } else {
+                        pos2 = i;
+                    }
+                    cache[pos2 & mask] = ((entryDislocation(cache, pos2, e2) < PROBE_LIMIT)
+                                          ? e2                  // put e2 here if it fits
+                                          : Entry.DEAD_ENTRY);
+                    return classValue.castEntry(e);
+                }
+                // Remember first empty slot, if any:
+                if (!e.isLive() && pos2 < 0)  pos2 = i;
+            }
+            return null;
         }
 
-        Object maskNull(Object x) {
-            return x == null ? this : x;
+        /** How far out of place is e? */
+        private static int entryDislocation(Entry<?>[] cache, int pos, Entry<?> e) {
+            ClassValue<?> cv = e.classValueOrNull();
+            if (cv == null)  return 0;  // entry is not live!
+            int mask = (cache.length-1);
+            return (pos - cv.hashCodeForCache) & mask;
+        }
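probeBackupLocations and entryDislocation above rely on the table length being a power of two: both the home slot and the dislocation of a displaced entry are computed with a mask rather than a modulo. A standalone arithmetic sketch, with hypothetical class and method names:

    public class CacheProbeMath {
        // Home slot: power-of-two table, so masking replaces a modulo.
        static int homeSlot(int hashCodeForCache, int tableLength) {
            return hashCodeForCache & (tableLength - 1);
        }

        // Dislocation: how far a collision pushed an entry past its home slot (0 = at home).
        static int dislocation(int actualPos, int hashCodeForCache, int tableLength) {
            return (actualPos - hashCodeForCache) & (tableLength - 1);
        }

        public static void main(String[] args) {
            int len = 32;                               // INITIAL_ENTRIES in the code above
            int hash = 0x61c88647 & (-1 >>> 2);         // first value of the hash stream
            int home = homeSlot(hash, len);
            System.out.println("home slot      = " + home);                             // 7
            System.out.println("dislocation +2 = " + dislocation(home + 2, hash, len)); // 2
        }
    }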
+
+        /// --------
+        /// Below this line all functions are private, and assume synchronized access.
+        /// --------
+
+        private void sizeCache(int length) {
+            assert((length & (length-1)) == 0);  // must be power of 2
+            cacheLoad = 0;
+            cacheLoadLimit = (int) ((double) length * CACHE_LOAD_LIMIT / 100);
+            cacheArray = new Entry<?>[length];
+        }
+
+        /** Make sure the cache load stays below its limit, if possible. */
+        private void checkCacheLoad() {
+            if (cacheLoad >= cacheLoadLimit) {
+                reduceCacheLoad();
+            }
+        }
+        private void reduceCacheLoad() {
+            removeStaleEntries();
+            if (cacheLoad < cacheLoadLimit)
+                return;  // win
+            Entry<?>[] oldCache = getCache();
+            if (oldCache.length > HASH_MASK)
+                return;  // lose
+            sizeCache(oldCache.length * 2);
+            for (Entry<?> e : oldCache) {
+                if (e != null && e.isLive()) {
+                    addToCache(e);
+                }
+            }
+        }
+
+        /** Remove stale entries in the given range.
+         *  Should be executed under a Map lock.
+         */
+        private void removeStaleEntries(Entry<?>[] cache, int begin, int count) {
+            if (PROBE_LIMIT <= 0)  return;
+            int mask = (cache.length-1);
+            int removed = 0;
+            for (int i = begin; i < begin + count; i++) {
+                Entry<?> e = cache[i & mask];
+                if (e == null || e.isLive())
+                    continue;  // skip null and live entries
+                Entry<?> replacement = null;
+                if (PROBE_LIMIT > 1) {
+                    // avoid breaking up a non-null run
+                    replacement = findReplacement(cache, i);
+                }
+                cache[i & mask] = replacement;
+                if (replacement == null)  removed += 1;
+            }
+            cacheLoad = Math.max(0, cacheLoad - removed);
         }
-        Object unmaskNull(Object x) {
-            return x == this ? null : x;
+
+        /** Clearing a cache slot risks disconnecting following entries
+         *  from the head of a non-null run, which would prevent them
+         *  from being found via reprobes.  Find an entry after cache[begin]
+         *  to plug into the hole, or return null if none is needed.
+         */
+        private Entry<?> findReplacement(Entry<?>[] cache, int home1) {
+            Entry<?> replacement = null;
+            int haveReplacement = -1, replacementPos = 0;
+            int mask = (cache.length-1);
+            for (int i2 = home1 + 1; i2 < home1 + PROBE_LIMIT; i2++) {
+                Entry<?> e2 = cache[i2 & mask];
+                if (e2 == null)  break;  // End of non-null run.
+                if (!e2.isLive())  continue;  // Doomed anyway.
+                int dis2 = entryDislocation(cache, i2, e2);
+                if (dis2 == 0)  continue;  // e2 already optimally placed
+                int home2 = i2 - dis2;
+                if (home2 <= home1) {
+                    // e2 can replace entry at cache[home1]
+                    if (home2 == home1) {
+                        // Put e2 exactly where it belongs.
+                        haveReplacement = 1;
+                        replacementPos = i2;
+                        replacement = e2;
+                    } else if (haveReplacement <= 0) {
+                        haveReplacement = 0;
+                        replacementPos = i2;
+                        replacement = e2;
+                    }
+                    // And keep going, so we can favor larger dislocations.
+                }
+            }
+            if (haveReplacement >= 0) {
+                if (cache[(replacementPos+1) & mask] != null) {
+                    // Be conservative, to avoid breaking up a non-null run.
+                    cache[replacementPos & mask] = (Entry<?>) Entry.DEAD_ENTRY;
+                } else {
+                    cache[replacementPos & mask] = null;
+                    cacheLoad -= 1;
+                }
+            }
+            return replacement;
+        }
+
+        /** Remove stale entries in the range near classValue. */
+        private void removeStaleEntries(ClassValue<?> classValue) {
+            removeStaleEntries(getCache(), classValue.hashCodeForCache, PROBE_LIMIT);
+        }
+
+        /** Remove all stale entries, everywhere. */
+        private void removeStaleEntries() {
+            Entry<?>[] cache = getCache();
+            removeStaleEntries(cache, 0, cache.length + PROBE_LIMIT - 1);
         }
+
+        /** Add the given entry to the cache, in its home location, unless it is out of date. */
+        private <T> void addToCache(Entry<T> e) {
+            ClassValue<T> classValue = e.classValueOrNull();
+            if (classValue != null)
+                addToCache(classValue, e);
+        }
+
+        /** Add the given entry to the cache, in its home location. */
+        private <T> void addToCache(ClassValue<T> classValue, Entry<T> e) {
+            if (PROBE_LIMIT <= 0)  return;  // do not fill cache
+            // Add e to the cache.
+            Entry<?>[] cache = getCache();
+            int mask = (cache.length-1);
+            int home = classValue.hashCodeForCache & mask;
+            Entry<?> e2 = placeInCache(cache, home, e, false);
+            if (e2 == null)  return;  // done
+            if (PROBE_LIMIT > 1) {
+                // try to move e2 somewhere else in its probe range
+                int dis2 = entryDislocation(cache, home, e2);
+                int home2 = home - dis2;
+                for (int i2 = home2; i2 < home2 + PROBE_LIMIT; i2++) {
+                    if (placeInCache(cache, i2 & mask, e2, true) == null) {
+                        return;
+                    }
+                }
+            }
+            // Note:  At this point, e2 is just dropped from the cache.
+        }
+
+        /** Store the given entry.  Update cacheLoad, and return any live victim.
+         *  'Gently' means return self rather than dislocating a live victim.
+         */
+        private Entry<?> placeInCache(Entry<?>[] cache, int pos, Entry<?> e, boolean gently) {
+            Entry<?> e2 = overwrittenEntry(cache[pos]);
+            if (gently && e2 != null) {
+                // do not overwrite a live entry
+                return e;
+            } else {
+                cache[pos] = e;
+                return e2;
+            }
+        }
+
+        /** Note an entry that is about to be overwritten.
+         *  If it is not live, quietly replace it by null.
+         *  If it is an actual null, increment cacheLoad,
+         *  because the caller is going to store something
+         *  in its place.
+         */
+        private <T> Entry<T> overwrittenEntry(Entry<T> e2) {
+            if (e2 == null)  cacheLoad += 1;
+            else if (e2.isLive())  return e2;
+            return null;
+        }
+
+        /** Percent loading of cache before resize. */
+        private static final int CACHE_LOAD_LIMIT = 67;  // 0..100
+        /** Maximum number of probes to attempt. */
+        private static final int PROBE_LIMIT      =  6;       // 1..
+        // N.B.  Set PROBE_LIMIT=0 to disable all fast paths.
     }
 }
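Because removeEntry bumps the ClassValue's version, a remove does not merely clear one cache slot: every cached entry for that ClassValue goes stale, and the next get recomputes under the map lock. A brief sketch of that observable behavior (names are illustrative only):

    import java.util.concurrent.atomic.AtomicInteger;

    public class ClassValueRemoveDemo {
        static final AtomicInteger versions = new AtomicInteger();

        static final ClassValue<Integer> GENERATION = new ClassValue<Integer>() {
            @Override protected Integer computeValue(Class<?> type) {
                return versions.incrementAndGet();     // a new value per (re)computation
            }
        };

        public static void main(String[] args) {
            System.out.println(GENERATION.get(Object.class));  // 1
            System.out.println(GENERATION.get(Object.class));  // 1 (cached)
            GENERATION.remove(Object.class);                   // invalidates the cached entry
            System.out.println(GENERATION.get(Object.class));  // 2 (recomputed)
        }
    }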
--- a/jdk/src/share/classes/java/lang/invoke/AdapterMethodHandle.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/java/lang/invoke/AdapterMethodHandle.java	Sat Jan 28 20:41:27 2012 -0800
@@ -378,6 +378,7 @@
     }
 
     /** Construct an adapter conversion descriptor for a single-argument conversion. */
+    @SuppressWarnings("cast")  // some (int) casts below provide clarity but trigger warnings
     private static long makeConv(int convOp, int argnum, int src, int dest) {
         assert(src  == (src  & CONV_TYPE_MASK));
         assert(dest == (dest & CONV_TYPE_MASK));
@@ -390,6 +391,7 @@
                 insertStackMove(stackMove)
                 );
     }
+    @SuppressWarnings("cast")  // some (int) casts below provide clarity but trigger warnings
     private static long makeDupConv(int convOp, int argnum, int stackMove) {
         // simple argument motion, requiring one slot to specify
         assert(convOp == OP_DUP_ARGS || convOp == OP_DROP_ARGS);
@@ -401,6 +403,7 @@
                 insertStackMove(stackMove)
                 );
     }
+    @SuppressWarnings("cast")  // some (int) casts below provide clarity but trigger warnings
     private static long makeSwapConv(int convOp, int srcArg, byte srcType, int destSlot, byte destType) {
         // more complex argument motion, requiring two slots to specify
         assert(convOp == OP_SWAP_ARGS || convOp == OP_ROT_ARGS);
@@ -411,6 +414,7 @@
                 (int)  destSlot << CONV_VMINFO_SHIFT
                 );
     }
+    @SuppressWarnings("cast")  // some (int) casts below provide clarity but trigger warnings
     private static long makeSpreadConv(int convOp, int argnum, int src, int dest, int stackMove) {
         // spreading or collecting, at a particular slot location
         assert(convOp == OP_SPREAD_ARGS || convOp == OP_COLLECT_ARGS || convOp == OP_FOLD_ARGS);
--- a/jdk/src/share/classes/java/lang/invoke/MemberName.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/java/lang/invoke/MemberName.java	Sat Jan 28 20:41:27 2012 -0800
@@ -353,7 +353,7 @@
         assert(isResolved());
     }
     /** Create a name for the given reflected constructor.  The resulting name will be in a resolved state. */
-    public MemberName(Constructor ctor) {
+    public MemberName(Constructor<?> ctor) {
         Object[] typeInfo = { void.class, ctor.getParameterTypes() };
         init(ctor.getDeclaringClass(), CONSTRUCTOR_NAME, typeInfo, flagsMods(IS_CONSTRUCTOR, ctor.getModifiers()));
         // fill in vmtarget, vmindex while we have ctor in hand:
--- a/jdk/src/share/classes/java/lang/invoke/MethodHandleImpl.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/java/lang/invoke/MethodHandleImpl.java	Sat Jan 28 20:41:27 2012 -0800
@@ -112,7 +112,7 @@
         assert(cookedConstructor.type().equals(ctype));
         ctype = ctype.dropParameterTypes(0, 1);
         cookedConstructor = AdapterMethodHandle.makeCollectArguments(cookedConstructor, returner, 0, true);
-        MethodHandle allocator = new AllocateObject(allocateClass);
+        AllocateObject allocator = new AllocateObject(allocateClass);
         // allocate() => new C(void)
         assert(allocator.type().equals(MethodType.methodType(allocateClass)));
         ctype = ctype.dropParameterTypes(0, 1);
@@ -120,19 +120,19 @@
         return fold;
     }
 
-    static final class AllocateObject<C> extends BoundMethodHandle {
+    static final class AllocateObject /*<C>*/ extends BoundMethodHandle {
         private static final Unsafe unsafe = Unsafe.getUnsafe();
 
-        private final Class<C> allocateClass;
+        private final Class<?> /*<C>*/ allocateClass;
 
         // for allocation only:
-        private AllocateObject(Class<C> allocateClass) {
+        private AllocateObject(Class<?> /*<C>*/ allocateClass) {
             super(ALLOCATE.asType(MethodType.methodType(allocateClass, AllocateObject.class)));
             this.allocateClass = allocateClass;
         }
         @SuppressWarnings("unchecked")
-        private C allocate() throws InstantiationException {
-            return (C) unsafe.allocateInstance(allocateClass);
+        private Object /*C*/ allocate() throws InstantiationException {
+            return unsafe.allocateInstance(allocateClass);
         }
         static final MethodHandle ALLOCATE;
         static {
@@ -148,8 +148,8 @@
     MethodHandle accessField(MemberName member, boolean isSetter,
                              Class<?> lookupClass) {
         // Use sun.misc.Unsafe to dig up the dirt on the field.
-        MethodHandle mh = new FieldAccessor(member, isSetter);
-        return mh;
+        FieldAccessor accessor = new FieldAccessor(member, isSetter);
+        return accessor;
     }
 
     static
@@ -175,7 +175,7 @@
         return mhs[isSetter ? 1 : 0];
     }
 
-    static final class FieldAccessor<C,V> extends BoundMethodHandle {
+    static final class FieldAccessor /*<C,V>*/ extends BoundMethodHandle {
         private static final Unsafe unsafe = Unsafe.getUnsafe();
         final Object base;  // for static refs only
         final long offset;
@@ -190,26 +190,24 @@
         @Override
         String debugString() { return addTypeString(name, this); }
 
-        int getFieldI(C obj) { return unsafe.getInt(obj, offset); }
-        void setFieldI(C obj, int x) { unsafe.putInt(obj, offset, x); }
-        long getFieldJ(C obj) { return unsafe.getLong(obj, offset); }
-        void setFieldJ(C obj, long x) { unsafe.putLong(obj, offset, x); }
-        float getFieldF(C obj) { return unsafe.getFloat(obj, offset); }
-        void setFieldF(C obj, float x) { unsafe.putFloat(obj, offset, x); }
-        double getFieldD(C obj) { return unsafe.getDouble(obj, offset); }
-        void setFieldD(C obj, double x) { unsafe.putDouble(obj, offset, x); }
-        boolean getFieldZ(C obj) { return unsafe.getBoolean(obj, offset); }
-        void setFieldZ(C obj, boolean x) { unsafe.putBoolean(obj, offset, x); }
-        byte getFieldB(C obj) { return unsafe.getByte(obj, offset); }
-        void setFieldB(C obj, byte x) { unsafe.putByte(obj, offset, x); }
-        short getFieldS(C obj) { return unsafe.getShort(obj, offset); }
-        void setFieldS(C obj, short x) { unsafe.putShort(obj, offset, x); }
-        char getFieldC(C obj) { return unsafe.getChar(obj, offset); }
-        void setFieldC(C obj, char x) { unsafe.putChar(obj, offset, x); }
-        @SuppressWarnings("unchecked")
-        V getFieldL(C obj) { return (V) unsafe.getObject(obj, offset); }
-        @SuppressWarnings("unchecked")
-        void setFieldL(C obj, V x) { unsafe.putObject(obj, offset, x); }
+        int getFieldI(Object /*C*/ obj) { return unsafe.getInt(obj, offset); }
+        void setFieldI(Object /*C*/ obj, int x) { unsafe.putInt(obj, offset, x); }
+        long getFieldJ(Object /*C*/ obj) { return unsafe.getLong(obj, offset); }
+        void setFieldJ(Object /*C*/ obj, long x) { unsafe.putLong(obj, offset, x); }
+        float getFieldF(Object /*C*/ obj) { return unsafe.getFloat(obj, offset); }
+        void setFieldF(Object /*C*/ obj, float x) { unsafe.putFloat(obj, offset, x); }
+        double getFieldD(Object /*C*/ obj) { return unsafe.getDouble(obj, offset); }
+        void setFieldD(Object /*C*/ obj, double x) { unsafe.putDouble(obj, offset, x); }
+        boolean getFieldZ(Object /*C*/ obj) { return unsafe.getBoolean(obj, offset); }
+        void setFieldZ(Object /*C*/ obj, boolean x) { unsafe.putBoolean(obj, offset, x); }
+        byte getFieldB(Object /*C*/ obj) { return unsafe.getByte(obj, offset); }
+        void setFieldB(Object /*C*/ obj, byte x) { unsafe.putByte(obj, offset, x); }
+        short getFieldS(Object /*C*/ obj) { return unsafe.getShort(obj, offset); }
+        void setFieldS(Object /*C*/ obj, short x) { unsafe.putShort(obj, offset, x); }
+        char getFieldC(Object /*C*/ obj) { return unsafe.getChar(obj, offset); }
+        void setFieldC(Object /*C*/ obj, char x) { unsafe.putChar(obj, offset, x); }
+        Object /*V*/ getFieldL(Object /*C*/ obj) { return unsafe.getObject(obj, offset); }
+        void setFieldL(Object /*C*/ obj, Object /*V*/ x) { unsafe.putObject(obj, offset, x); }
         // cast (V) is OK here, since we wrap convertArguments around the MH.
 
         static Object staticBase(final MemberName field) {
@@ -244,8 +242,9 @@
         void setStaticS(short x) { unsafe.putShort(base, offset, x); }
         char getStaticC() { return unsafe.getChar(base, offset); }
         void setStaticC(char x) { unsafe.putChar(base, offset, x); }
-        V getStaticL() { return (V) unsafe.getObject(base, offset); }
-        void setStaticL(V x) { unsafe.putObject(base, offset, x); }
+        @SuppressWarnings("unchecked")  // (V) is for internal clarity but triggers warning
+        Object /*V*/ getStaticL() { return unsafe.getObject(base, offset); }
+        void setStaticL(Object /*V*/ x) { unsafe.putObject(base, offset, x); }
 
         static String fname(Class<?> vclass, boolean isSetter, boolean isStatic) {
             String stem;
--- a/jdk/src/share/classes/java/lang/invoke/MethodHandleProxies.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/java/lang/invoke/MethodHandleProxies.java	Sat Jan 28 20:41:27 2012 -0800
@@ -150,7 +150,7 @@
         }
         return intfc.cast(Proxy.newProxyInstance(
                 intfc.getClassLoader(),
-                new Class[]{ intfc, WrapperInstance.class },
+                new Class<?>[]{ intfc, WrapperInstance.class },
                 new InvocationHandler() {
                     private Object getArg(String name) {
                         if ((Object)name == "getWrapperInstanceTarget")  return target;
--- a/jdk/src/share/classes/java/lang/invoke/MethodHandles.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/java/lang/invoke/MethodHandles.java	Sat Jan 28 20:41:27 2012 -0800
@@ -948,10 +948,11 @@
         public MethodHandle unreflect(Method m) throws IllegalAccessException {
             MemberName method = new MemberName(m);
             assert(method.isMethod());
-            if (!m.isAccessible())  checkMethod(method.getDeclaringClass(), method, method.isStatic());
+            if (m.isAccessible())
+                return MethodHandleImpl.findMethod(method, true, /*no lookupClass*/ null);
+            checkMethod(method.getDeclaringClass(), method, method.isStatic());
             MethodHandle mh = MethodHandleImpl.findMethod(method, true, lookupClassOrNull());
-            if (!m.isAccessible())  mh = restrictProtectedReceiver(method, mh);
-            return mh;
+            return restrictProtectedReceiver(method, mh);
         }
 
         /**
@@ -1006,11 +1007,17 @@
          *                                is set and {@code asVarargsCollector} fails
          * @throws NullPointerException if the argument is null
          */
+        @SuppressWarnings("rawtypes")  // Will be Constructor<?> after JSR 292 MR
         public MethodHandle unreflectConstructor(Constructor c) throws IllegalAccessException {
             MemberName ctor = new MemberName(c);
             assert(ctor.isConstructor());
-            if (!c.isAccessible())  checkAccess(c.getDeclaringClass(), ctor);
-            MethodHandle rawCtor = MethodHandleImpl.findMethod(ctor, false, lookupClassOrNull());
+            MethodHandle rawCtor;
+            if (c.isAccessible()) {
+                rawCtor = MethodHandleImpl.findMethod(ctor, false, /*no lookupClass*/ null);
+            } else {
+                checkAccess(c.getDeclaringClass(), ctor);
+                rawCtor = MethodHandleImpl.findMethod(ctor, false, lookupClassOrNull());
+            }
             MethodHandle allocator = MethodHandleImpl.makeAllocator(rawCtor);
             return fixVarargs(allocator, rawCtor);
         }
@@ -1225,7 +1232,7 @@
                                                 ? "expected a static field"
                                                 : "expected a non-static field", this);
             if (trusted)
-                return MethodHandleImpl.accessField(field, isSetter, lookupClassOrNull());
+                return MethodHandleImpl.accessField(field, isSetter, /*no lookupClass*/ null);
             checkAccess(refc, field);
             MethodHandle mh = MethodHandleImpl.accessField(field, isSetter, lookupClassOrNull());
             return restrictProtectedReceiver(field, mh);
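With this change, unreflect and unreflectConstructor skip the lookup-class access check entirely when the reflected member was already made accessible, rather than checking and then restricting the receiver. A hedged usage sketch against the public MethodHandles API (the chosen method is an arbitrary example):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.reflect.Method;

    public class UnreflectDemo {
        public static void main(String[] args) throws Throwable {
            Method m = String.class.getMethod("length");
            m.setAccessible(true);      // already accessible: the new path bypasses checkMethod
            MethodHandle mh = MethodHandles.lookup().unreflect(m);
            System.out.println((int) mh.invokeExact("hello"));   // 5
        }
    }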
--- a/jdk/src/share/classes/javax/swing/plaf/basic/BasicTreeUI.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/javax/swing/plaf/basic/BasicTreeUI.java	Sat Jan 28 20:41:27 2012 -0800
@@ -1932,20 +1932,22 @@
             else {
                 Rectangle   beginRect = getPathBounds(tree, getPathForRow
                                                       (tree, beginRow));
-                Rectangle   visRect = tree.getVisibleRect();
-                Rectangle   testRect = beginRect;
-                int         beginY = beginRect.y;
-                int         maxY = beginY + visRect.height;
-
-                for(int counter = beginRow + 1; counter <= endRow; counter++) {
-                    testRect = getPathBounds(tree,
-                                             getPathForRow(tree, counter));
-                    if((testRect.y + testRect.height) > maxY)
-                        counter = endRow;
+                if (beginRect != null) {
+                    Rectangle   visRect = tree.getVisibleRect();
+                    Rectangle   testRect = beginRect;
+                    int         beginY = beginRect.y;
+                    int         maxY = beginY + visRect.height;
+
+                    for(int counter = beginRow + 1; counter <= endRow; counter++) {
+                        testRect = getPathBounds(tree,
+                                                 getPathForRow(tree, counter));
+                        if((testRect.y + testRect.height) > maxY)
+                            counter = endRow;
+                    }
+                    tree.scrollRectToVisible(new Rectangle(visRect.x, beginY, 1,
+                                                           testRect.y + testRect.height -
+                                                           beginY));
                 }
-                tree.scrollRectToVisible(new Rectangle(visRect.x, beginY, 1,
-                                                  testRect.y + testRect.height-
-                                                  beginY));
             }
         }
     }
@@ -3485,7 +3487,7 @@
             }
 
             Rectangle bounds = getPathBounds(tree, path);
-            if (y > (bounds.y + bounds.height)) {
+            if (bounds == null || y > (bounds.y + bounds.height)) {
                 return false;
             }
 
--- a/jdk/src/share/classes/sun/invoke/util/ValueConversions.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/sun/invoke/util/ValueConversions.java	Sat Jan 28 20:41:27 2012 -0800
@@ -55,9 +55,9 @@
     private static final Lookup IMPL_LOOKUP = MethodHandles.lookup();
 
     private static EnumMap<Wrapper, MethodHandle>[] newWrapperCaches(int n) {
-        @SuppressWarnings("unchecked")
+        @SuppressWarnings("unchecked")  // generic array creation
         EnumMap<Wrapper, MethodHandle>[] caches
-                = (EnumMap<Wrapper, MethodHandle>[]) new EnumMap[n];  // unchecked warning expected here
+                = (EnumMap<Wrapper, MethodHandle>[]) new EnumMap<?,?>[n];
         for (int i = 0; i < n; i++)
             caches[i] = new EnumMap<>(Wrapper.class);
         return caches;
@@ -1097,7 +1097,7 @@
     }
 
     private static MethodHandle buildNewArray(int nargs) {
-        return MethodHandles.insertArguments(NEW_ARRAY, 0, (int) nargs);
+        return MethodHandles.insertArguments(NEW_ARRAY, 0, nargs);
     }
 
     private static final MethodHandle[] FILLERS = new MethodHandle[MAX_ARITY+1];
@@ -1122,7 +1122,7 @@
         }
         MethodHandle leftFill = filler(leftLen);  // recursive fill
         MethodHandle rightFill = FILL_ARRAYS[rightLen];
-        rightFill = MethodHandles.insertArguments(rightFill, 1, (int) leftLen);  // [leftLen..nargs-1]
+        rightFill = MethodHandles.insertArguments(rightFill, 1, leftLen);  // [leftLen..nargs-1]
 
         // Combine the two fills: right(left(newArray(nargs), x1..x20), x21..x23)
         MethodHandle mh = filler(0);  // identity function produces result
--- a/jdk/src/share/classes/sun/invoke/util/Wrapper.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/share/classes/sun/invoke/util/Wrapper.java	Sat Jan 28 20:41:27 2012 -0800
@@ -31,7 +31,7 @@
     BYTE(Byte.class, byte.class, 'B', (Byte)(byte)0, new byte[0], Format.signed(8)),
     SHORT(Short.class, short.class, 'S', (Short)(short)0, new short[0], Format.signed(16)),
     CHAR(Character.class, char.class, 'C', (Character)(char)0, new char[0], Format.unsigned(16)),
-    INT(Integer.class, int.class, 'I', (Integer)(int)0, new int[0], Format.signed(32)),
+    INT(Integer.class, int.class, 'I', (Integer)/*(int)*/0, new int[0], Format.signed(32)),
     LONG(Long.class, long.class, 'J', (Long)(long)0, new long[0], Format.signed(64)),
     FLOAT(Float.class, float.class, 'F', (Float)(float)0, new float[0], Format.floating(32)),
     DOUBLE(Double.class, double.class, 'D', (Double)(double)0, new double[0], Format.floating(64)),
@@ -539,7 +539,7 @@
         switch (basicTypeChar) {
             case 'L': throw newIllegalArgumentException("cannot wrap to object type");
             case 'V': return null;
-            case 'I': return Integer.valueOf((int)x);
+            case 'I': return Integer.valueOf(x);
             case 'J': return Long.valueOf(x);
             case 'F': return Float.valueOf(x);
             case 'D': return Double.valueOf(x);
--- a/jdk/src/solaris/classes/sun/awt/X11/GtkFileDialogPeer.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/solaris/classes/sun/awt/X11/GtkFileDialogPeer.java	Sat Jan 28 20:41:27 2012 -0800
@@ -76,9 +76,12 @@
             accessor.setFiles(fd, null, null);
         } else {
             // Fix 6987233: add the trailing slash if it's absent
-            accessor.setDirectory(fd, directory +
-                    (directory.endsWith(File.separator) ?
-                     "" : File.separator));
+            String with_separator = directory;
+            if (directory != null) {
+                with_separator = directory.endsWith(File.separator) ?
+                        directory : (directory + File.separator);
+            }
+            accessor.setDirectory(fd, with_separator);
             accessor.setFile(fd, filenames[0]);
             accessor.setFiles(fd, directory, filenames);
         }
--- a/jdk/src/solaris/classes/sun/java2d/xr/XRCompositeManager.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/src/solaris/classes/sun/java2d/xr/XRCompositeManager.java	Sat Jan 28 20:41:27 2012 -0800
@@ -28,6 +28,9 @@
 import java.awt.*;
 import java.awt.geom.*;
 
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
 import sun.font.*;
 import sun.java2d.*;
 import sun.java2d.jules.*;
@@ -83,7 +86,13 @@
         con = new XRBackendNative();
         // con = XRBackendJava.getInstance();
 
-        String gradProp = System.getProperty("sun.java2d.xrgradcache");
+        String gradProp =
+            AccessController.doPrivileged(new PrivilegedAction<String>() {
+                public String run() {
+                    return System.getProperty("sun.java2d.xrgradcache");
+                }
+            });
+
         enableGradCache = gradProp == null ||
                           !(gradProp.equalsIgnoreCase("false") ||
                           gradProp.equalsIgnoreCase("f"));
--- a/jdk/test/java/lang/invoke/CallSiteTest.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/test/java/lang/invoke/CallSiteTest.java	Sat Jan 28 20:41:27 2012 -0800
@@ -43,7 +43,7 @@
 import static java.lang.invoke.MethodType.*;
 
 public class CallSiteTest {
-    private final static Class CLASS = CallSiteTest.class;
+    private final static Class<?> CLASS = CallSiteTest.class;
 
     private static CallSite mcs;
     private static CallSite vcs;
--- a/jdk/test/java/lang/invoke/ClassValueTest.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/test/java/lang/invoke/ClassValueTest.java	Sat Jan 28 20:41:27 2012 -0800
@@ -38,10 +38,6 @@
 
 package test.java.lang.invoke;
 
-import java.util.*;
-
-import java.lang.invoke.*;
-
 import org.junit.*;
 import static org.junit.Assert.*;
 
@@ -61,7 +57,7 @@
         }
     }
 
-    static final Class[] CLASSES = {
+    static final Class<?>[] CLASSES = {
         String.class,
         Integer.class,
         int.class,
@@ -73,11 +69,11 @@
     @Test
     public void testGet() {
         countForCV1 = 0;
-        for (Class c : CLASSES) {
+        for (Class<?> c : CLASSES) {
             assertEquals(nameForCV1(c), CV1.get(c));
         }
         assertEquals(CLASSES.length, countForCV1);
-        for (Class c : CLASSES) {
+        for (Class<?> c : CLASSES) {
             assertEquals(nameForCV1(c), CV1.get(c));
         }
         assertEquals(CLASSES.length, countForCV1);
@@ -85,7 +81,7 @@
 
     @Test
     public void testRemove() {
-        for (Class c : CLASSES) {
+        for (Class<?> c : CLASSES) {
             CV1.get(c);
         }
         countForCV1 = 0;
@@ -94,7 +90,7 @@
             CV1.remove(CLASSES[i]);
         }
         assertEquals(0, countForCV1);  // no change
-        for (Class c : CLASSES) {
+        for (Class<?> c : CLASSES) {
             assertEquals(nameForCV1(c), CV1.get(c));
         }
         assertEquals(REMCOUNT, countForCV1);
@@ -124,7 +120,7 @@
         for (int pass = 0; pass <= 2; pass++) {
             for (int i1 = 0; i1 < CVN_COUNT1; i1++) {
                 eachClass:
-                for (Class c : CLASSES) {
+                for (Class<?> c : CLASSES) {
                     for (int i2 = 0; i2 < CVN_COUNT2; i2++) {
                         int n = i1*CVN_COUNT2 + i2;
                         assertEquals(0, countForCVN);
@@ -156,8 +152,10 @@
             }
         }
         assertEquals(countForCVN, 0);
-        for (int n = 0; n < cvns.length; n++) {
-            for (Class c : CLASSES) {
+        System.out.println("[rechecking values]");
+        for (int i = 0; i < cvns.length * 10; i++) {
+            int n = i % cvns.length;
+            for (Class<?> c : CLASSES) {
                 assertEquals(nameForCVN(c, n), cvns[n].get(c));
             }
         }
--- a/jdk/test/java/lang/invoke/InvokeGenericTest.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/test/java/lang/invoke/InvokeGenericTest.java	Sat Jan 28 20:41:27 2012 -0800
@@ -45,6 +45,7 @@
  *
  * @author jrose
  */
+@SuppressWarnings("cast")  // various casts help emphasize arguments to invokeExact
 public class InvokeGenericTest {
     // How much output?
     static int verbosity = 0;
@@ -129,7 +130,7 @@
         }
     }
 
-    static List<Object> calledLog = new ArrayList<Object>();
+    static List<Object> calledLog = new ArrayList<>();
     static Object logEntry(String name, Object... args) {
         return Arrays.asList(name, Arrays.asList(args));
     }
@@ -237,8 +238,7 @@
         else
             try {
                 return param.newInstance();
-            } catch (InstantiationException ex) {
-            } catch (IllegalAccessException ex) {
+            } catch (InstantiationException | IllegalAccessException ex) {
             }
         return null;  // random class not Object, String, Integer, etc.
     }
@@ -274,9 +274,11 @@
         return zeroArgs(params.toArray(new Class<?>[0]));
     }
 
+    @SafeVarargs @SuppressWarnings("varargs")
     static <T, E extends T> T[] array(Class<T[]> atype, E... a) {
         return Arrays.copyOf(a, a.length, atype);
     }
+    @SafeVarargs @SuppressWarnings("varargs")
     static <T> T[] cat(T[] a, T... b) {
         int alen = a.length, blen = b.length;
         if (blen == 0)  return a;
@@ -311,7 +313,7 @@
             int beg, int end, Class<?> argType) {
         MethodType targetType = target.type();
         end = Math.min(end, targetType.parameterCount());
-        ArrayList<Class<?>> argTypes = new ArrayList<Class<?>>(targetType.parameterList());
+        ArrayList<Class<?>> argTypes = new ArrayList<>(targetType.parameterList());
         Collections.fill(argTypes.subList(beg, end), argType);
         MethodType ttype2 = MethodType.methodType(targetType.returnType(), argTypes);
         return target.asType(ttype2);
@@ -320,7 +322,7 @@
     // This lookup is good for all members in and under InvokeGenericTest.
     static final Lookup LOOKUP = MethodHandles.lookup();
 
-    Map<List<Class<?>>, MethodHandle> CALLABLES = new HashMap<List<Class<?>>, MethodHandle>();
+    Map<List<Class<?>>, MethodHandle> CALLABLES = new HashMap<>();
     MethodHandle callable(List<Class<?>> params) {
         MethodHandle mh = CALLABLES.get(params);
         if (mh == null) {
@@ -353,8 +355,8 @@
         countTest();
         String[] args = { "one", "two" };
         MethodHandle mh = callable(Object.class, String.class);
-        Object res; List resl;
-        res = resl = (List) mh.invoke((String)args[0], (Object)args[1]);
+        Object res; List<?> resl;
+        res = resl = (List<?>) mh.invoke((String)args[0], (Object)args[1]);
         //System.out.println(res);
         assertEquals(Arrays.asList(args), res);
     }
@@ -365,8 +367,8 @@
         countTest();
         int[] args = { 1, 2 };
         MethodHandle mh = callable(Object.class, Object.class);
-        Object res; List resl;
-        res = resl = (List) mh.invoke(args[0], args[1]);
+        Object res; List<?> resl;
+        res = resl = (List<?>) mh.invoke(args[0], args[1]);
         //System.out.println(res);
         assertEquals(Arrays.toString(args), res.toString());
     }
@@ -377,8 +379,8 @@
         countTest();
         String[] args = { "one", "two" };
         MethodHandle mh = callable(Object.class, String.class);
-        Object res; List resl;
-        res = resl = (List) mh.invoke((String)args[0], (Object)args[1]);
+        Object res; List<?> resl;
+        res = resl = (List<?>) mh.invoke((String)args[0], (Object)args[1]);
         //System.out.println(res);
         assertEquals(Arrays.asList(args), res);
     }
@@ -440,9 +442,9 @@
      *  A void return type is possible iff the first type is void.class.
      */
     static List<MethodType> allMethodTypes(int minargc, int maxargc, Class<?>... types) {
-        ArrayList<MethodType> result = new ArrayList<MethodType>();
+        ArrayList<MethodType> result = new ArrayList<>();
         if (types.length > 0) {
-            ArrayList<MethodType> argcTypes = new ArrayList<MethodType>();
+            ArrayList<MethodType> argcTypes = new ArrayList<>();
             // build arity-zero types first
             for (Class<?> rtype : types) {
                 argcTypes.add(MethodType.methodType(rtype));
@@ -456,7 +458,7 @@
                 if (argc >= maxargc)
                     break;
                 ArrayList<MethodType> prevTypes = argcTypes;
-                argcTypes = new ArrayList<MethodType>();
+                argcTypes = new ArrayList<>();
                 for (MethodType prevType : prevTypes) {
                     for (Class<?> ptype : types) {
                         argcTypes.add(prevType.insertParameterTypes(argc, ptype));
@@ -524,8 +526,8 @@
         countTest();
         Object[] args = { 1, 2 };
         MethodHandle mh = callable(Object.class, int.class);
-        Object res; List resl; int resi;
-        res = resl = (List) mh.invoke((int)args[0], (Object)args[1]);
+        Object res; List<?> resl; int resi;
+        res = resl = (List<?>) mh.invoke((int)args[0], (Object)args[1]);
         //System.out.println(res);
         assertEquals(Arrays.asList(args), res);
         mh = MethodHandles.identity(int.class);
--- a/jdk/test/java/lang/invoke/JavaDocExamplesTest.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/test/java/lang/invoke/JavaDocExamplesTest.java	Sat Jan 28 20:41:27 2012 -0800
@@ -54,6 +54,7 @@
 /**
  * @author jrose
  */
+@SuppressWarnings("LocalVariableHidesMemberVariable")
 public class JavaDocExamplesTest {
     /** Wrapper for running the JUnit tests in this module.
      *  Put JUnit on the classpath!
@@ -336,6 +337,7 @@
             }}
     }
 
+    @SuppressWarnings("rawtypes")
     @Test public void testAsVarargsCollector() throws Throwable {
         {{
 {} /// JAVADOC
--- a/jdk/test/java/lang/invoke/MethodHandlesTest.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/test/java/lang/invoke/MethodHandlesTest.java	Sat Jan 28 20:41:27 2012 -0800
@@ -176,7 +176,7 @@
         }
     }
 
-    static List<Object> calledLog = new ArrayList<Object>();
+    static List<Object> calledLog = new ArrayList<>();
     static Object logEntry(String name, Object... args) {
         return Arrays.asList(name, Arrays.asList(args));
     }
@@ -211,6 +211,7 @@
         return dst.cast(value);
     }
 
+    @SuppressWarnings("cast")  // primitive cast to (long) is part of the pattern
     static Object castToWrapperOrNull(long value, Class<?> dst) {
         if (dst == int.class || dst == Integer.class)
             return (int)(value);
@@ -284,8 +285,7 @@
         else
             try {
                 return param.newInstance();
-            } catch (InstantiationException ex) {
-            } catch (IllegalAccessException ex) {
+            } catch (InstantiationException | IllegalAccessException ex) {
             }
         return null;  // random class not Object, String, Integer, etc.
     }
@@ -302,9 +302,11 @@
         return args;
     }
 
+    @SafeVarargs @SuppressWarnings("varargs")
     static <T, E extends T> T[] array(Class<T[]> atype, E... a) {
         return Arrays.copyOf(a, a.length, atype);
     }
+    @SafeVarargs @SuppressWarnings("varargs")
     static <T> T[] cat(T[] a, T... b) {
         int alen = a.length, blen = b.length;
         if (blen == 0)  return a;
@@ -354,14 +356,14 @@
                 try {
                     LIST_TO_STRING = PRIVATE.findStatic(PRIVATE.lookupClass(), "listToString",
                                                         MethodType.methodType(String.class, List.class));
-                } catch (Exception ex) { throw new RuntimeException(ex); }
+                } catch (NoSuchMethodException | IllegalAccessException ex) { throw new RuntimeException(ex); }
             list = MethodHandles.filterReturnValue(list, LIST_TO_STRING);
         } else if (rtype.isPrimitive()) {
             if (LIST_TO_INT == null)
                 try {
                     LIST_TO_INT = PRIVATE.findStatic(PRIVATE.lookupClass(), "listToInt",
                                                      MethodType.methodType(int.class, List.class));
-                } catch (Exception ex) { throw new RuntimeException(ex); }
+                } catch (NoSuchMethodException | IllegalAccessException ex) { throw new RuntimeException(ex); }
             list = MethodHandles.filterReturnValue(list, LIST_TO_INT);
             list = MethodHandles.explicitCastArguments(list, listType);
         } else {
@@ -370,8 +372,8 @@
         return list.asType(listType);
     }
     private static MethodHandle LIST_TO_STRING, LIST_TO_INT;
-    private static String listToString(List x) { return x.toString(); }
-    private static int listToInt(List x) { return x.toString().hashCode(); }
+    private static String listToString(List<?> x) { return x.toString(); }
+    private static int listToInt(List<?> x) { return x.toString().hashCode(); }
 
     static MethodHandle changeArgTypes(MethodHandle target, Class<?> argType) {
         return changeArgTypes(target, 0, 999, argType);
@@ -380,7 +382,7 @@
             int beg, int end, Class<?> argType) {
         MethodType targetType = target.type();
         end = Math.min(end, targetType.parameterCount());
-        ArrayList<Class<?>> argTypes = new ArrayList<Class<?>>(targetType.parameterList());
+        ArrayList<Class<?>> argTypes = new ArrayList<>(targetType.parameterList());
         Collections.fill(argTypes.subList(beg, end), argType);
         MethodType ttype2 = MethodType.methodType(targetType.returnType(), argTypes);
         return target.asType(ttype2);
@@ -405,6 +407,7 @@
         final String name;
         public Example() { name = "Example#"+nextArg(); }
         protected Example(String name) { this.name = name; }
+        @SuppressWarnings("LeakingThisInConstructor")
         protected Example(int x) { this(); called("protected <init>", this, x); }
         @Override public String toString() { return name; }
 
@@ -441,6 +444,7 @@
     static class SubExample extends Example {
         @Override public void  v0()     { called("Sub/v0", this); }
         @Override void         pkg_v0() { called("Sub/pkg_v0", this); }
+        @SuppressWarnings("LeakingThisInConstructor")
         private      SubExample(int x)  { called("<init>", this, x); }
         public SubExample() { super("SubExample#"+nextArg()); }
     }
@@ -912,7 +916,7 @@
 
         static final Object[][] CASES;
         static {
-            ArrayList<Object[]> cases = new ArrayList<Object[]>();
+            ArrayList<Object[]> cases = new ArrayList<>();
             Object types[][] = {
                 {'L',Object.class}, {'R',String.class},
                 {'I',int.class}, {'J',long.class},
@@ -931,12 +935,12 @@
                     Field field;
                         try {
                         field = HasFields.class.getDeclaredField(name);
-                    } catch (Exception ex) {
+                    } catch (NoSuchFieldException | SecurityException ex) {
                         throw new InternalError("no field HasFields."+name);
                     }
                     try {
                         value = field.get(fields);
-                    } catch (Exception ex) {
+                    } catch (IllegalArgumentException | IllegalAccessException ex) {
                         throw new InternalError("cannot fetch field HasFields."+name);
                     }
                     if (type == float.class) {
@@ -1257,7 +1261,7 @@
 
     List<Object> array2list(Object array) {
         int length = Array.getLength(array);
-        ArrayList<Object> model = new ArrayList<Object>(length);
+        ArrayList<Object> model = new ArrayList<>(length);
         for (int i = 0; i < length; i++)
             model.add(Array.get(array, i));
         return model;
@@ -1288,7 +1292,7 @@
             String name = pfx+"id";
             try {
                 return PRIVATE.findStatic(Callee.class, name, type);
-            } catch (Exception ex) {
+            } catch (NoSuchMethodException | IllegalAccessException ex) {
                 throw new RuntimeException(ex);
             }
         }
@@ -1365,7 +1369,7 @@
         MethodHandle vac = vac0.asVarargsCollector(Object[].class);
         testConvert(true, vac.asType(MethodType.genericMethodType(0)), null, "vac");
         testConvert(true, vac.asType(MethodType.genericMethodType(0)), null, "vac");
-        for (Class<?> at : new Class[] { Object.class, String.class, Integer.class }) {
+        for (Class<?> at : new Class<?>[] { Object.class, String.class, Integer.class }) {
             testConvert(true, vac.asType(MethodType.genericMethodType(1)), null, "vac", at);
             testConvert(true, vac.asType(MethodType.genericMethodType(2)), null, "vac", at, at);
         }
@@ -1514,7 +1518,7 @@
     public void testSpreadArguments() throws Throwable {
         if (CAN_SKIP_WORKING)  return;
         startTest("spreadArguments");
-        for (Class<?> argType : new Class[]{Object.class, Integer.class, int.class}) {
+        for (Class<?> argType : new Class<?>[]{Object.class, Integer.class, int.class}) {
             if (verbosity >= 3)
                 System.out.println("spreadArguments "+argType);
             for (int nargs = 0; nargs < 50; nargs++) {
@@ -1538,7 +1542,7 @@
         Object[] args = randomArgs(target2.type().parameterArray());
         // make sure the target does what we think it does:
         if (pos == 0 && nargs < 5 && !argType.isPrimitive()) {
-            Object[] check = (Object[]) (Object) target.invokeWithArguments(args);
+            Object[] check = (Object[]) target.invokeWithArguments(args);
             assertArrayEquals(args, check);
             switch (nargs) {
                 case 0:
@@ -1555,7 +1559,7 @@
                     break;
             }
         }
-        List<Class<?>> newParams = new ArrayList<Class<?>>(target2.type().parameterList());
+        List<Class<?>> newParams = new ArrayList<>(target2.type().parameterList());
         {   // modify newParams in place
             List<Class<?>> spreadParams = newParams.subList(pos, nargs);
             spreadParams.clear(); spreadParams.add(arrayType);
@@ -1608,7 +1612,7 @@
     public void testCollectArguments() throws Throwable {
         if (CAN_SKIP_WORKING)  return;
         startTest("collectArguments");
-        for (Class<?> argType : new Class[]{Object.class, Integer.class, int.class}) {
+        for (Class<?> argType : new Class<?>[]{Object.class, Integer.class, int.class}) {
             if (verbosity >= 3)
                 System.out.println("collectArguments "+argType);
             for (int nargs = 0; nargs < 50; nargs++) {
@@ -1670,12 +1674,13 @@
         MethodHandle target = varargsArray(nargs + ins);
         Object[] args = randomArgs(target.type().parameterArray());
         List<Object> resList = Arrays.asList(args);
-        List<Object> argsToPass = new ArrayList<Object>(resList);
+        List<Object> argsToPass = new ArrayList<>(resList);
         List<Object> argsToInsert = argsToPass.subList(pos, pos + ins);
         if (verbosity >= 3)
             System.out.println("insert: "+argsToInsert+" into "+target);
+        @SuppressWarnings("cast")  // cast to spread Object... is helpful
         MethodHandle target2 = MethodHandles.insertArguments(target, pos,
-                (Object[]) argsToInsert.toArray());
+                (Object[]/*...*/) argsToInsert.toArray());
         argsToInsert.clear();  // remove from argsToInsert
         Object res2 = target2.invokeWithArguments(argsToPass);
         Object res2List = Arrays.asList((Object[])res2);
@@ -1693,7 +1698,7 @@
         Class<?> classOfVCList = varargsList(1).invokeWithArguments(0).getClass();
         assertTrue(List.class.isAssignableFrom(classOfVCList));
         for (int nargs = 0; nargs <= 3; nargs++) {
-            for (Class<?> rtype : new Class[] { Object.class,
+            for (Class<?> rtype : new Class<?>[] { Object.class,
                                                 List.class,
                                                 int.class,
                                                 byte.class,
@@ -1790,7 +1795,7 @@
             System.out.println("fold "+target+" with "+combine);
         MethodHandle target2 = MethodHandles.foldArguments(target, combine);
         // Simulate expected effect of combiner on arglist:
-        List<Object> expected = new ArrayList<Object>(argsToPass);
+        List<Object> expected = new ArrayList<>(argsToPass);
         List<Object> argsToFold = expected.subList(pos, pos + fold);
         if (verbosity >= 3)
             System.out.println("fold: "+argsToFold+" into "+target2);
@@ -1822,9 +1827,9 @@
         MethodHandle target = varargsArray(nargs);
         Object[] args = randomArgs(target.type().parameterArray());
         MethodHandle target2 = MethodHandles.dropArguments(target, pos,
-                Collections.nCopies(drop, Object.class).toArray(new Class[0]));
+                Collections.nCopies(drop, Object.class).toArray(new Class<?>[0]));
         List<Object> resList = Arrays.asList(args);
-        List<Object> argsToDrop = new ArrayList<Object>(resList);
+        List<Object> argsToDrop = new ArrayList<>(resList);
         for (int i = drop; i > 0; i--) {
             argsToDrop.add(pos, "blort#"+i);
         }
@@ -1840,11 +1845,11 @@
         if (CAN_SKIP_WORKING)  return;
         startTest("exactInvoker, genericInvoker, varargsInvoker, dynamicInvoker");
         // exactInvoker, genericInvoker, varargsInvoker[0..N], dynamicInvoker
-        Set<MethodType> done = new HashSet<MethodType>();
+        Set<MethodType> done = new HashSet<>();
         for (int i = 0; i <= 6; i++) {
             if (CAN_TEST_LIGHTLY && i > 3)  break;
             MethodType gtype = MethodType.genericMethodType(i);
-            for (Class<?> argType : new Class[]{Object.class, Integer.class, int.class}) {
+            for (Class<?> argType : new Class<?>[]{Object.class, Integer.class, int.class}) {
                 for (int j = -1; j < i; j++) {
                     MethodType type = gtype;
                     if (j < 0)
@@ -1873,7 +1878,7 @@
         assertTrue(target.isVarargsCollector());
         target = target.asType(type);
         Object[] args = randomArgs(type.parameterArray());
-        List<Object> targetPlusArgs = new ArrayList<Object>(Arrays.asList(args));
+        List<Object> targetPlusArgs = new ArrayList<>(Arrays.asList(args));
         targetPlusArgs.add(0, target);
         int code = (Integer) invokee(args);
         Object log = logEntry("invokee", args);
@@ -1960,7 +1965,7 @@
                                   .appendParameterTypes(Object[].class)
                                   .insertParameterTypes(0, MethodHandle.class));
             assertEquals(expType, inv.type());
-            List<Object> targetPlusVarArgs = new ArrayList<Object>(targetPlusArgs);
+            List<Object> targetPlusVarArgs = new ArrayList<>(targetPlusArgs);
             List<Object> tailList = targetPlusVarArgs.subList(1+k, 1+nargs);
             Object[] tail = tailList.toArray();
             tailList.clear(); tailList.add(tail);
@@ -2191,7 +2196,7 @@
         if (throwMode == THROW_NOTHING) {
             assertSame(arg0, returned);
         } else if (throwMode == THROW_CAUGHT) {
-            List<Object> catchArgs = new ArrayList<Object>(Arrays.asList(args));
+            List<Object> catchArgs = new ArrayList<>(Arrays.asList(args));
             // catcher receives an initial subsequence of target arguments:
             catchArgs.subList(nargs - catchDrops, nargs).clear();
             // catcher also receives the exception, prepended:
@@ -2317,12 +2322,13 @@
                 INT_IDENTITY = PRIVATE.findStatic(
                     Surprise.class, "intIdentity",
                         MethodType.methodType(int.class, int.class));
-            } catch (Exception ex) {
+            } catch (NoSuchMethodException | IllegalAccessException ex) {
                 throw new RuntimeException(ex);
             }
         }
     }
 
+    @SuppressWarnings("ConvertToStringSwitch")
     void testCastFailure(String mode, int okCount) throws Throwable {
         countTest(false);
         if (verbosity > 2)  System.out.println("mode="+mode);
@@ -2418,13 +2424,14 @@
     }
     public interface Fooable {
         // overloads:
-        Object foo(Object x, String y);
-        List   foo(String x, int y);
-        Object foo(String x);
+        Object  foo(Object x, String y);
+        List<?> foo(String x, int y);
+        Object  foo(String x);
     }
     static Object fooForFooable(String x, Object... y) {
         return called("fooForFooable/"+x, y);
     }
+    @SuppressWarnings("serial")  // not really a public API, just a test case
     public static class MyCheckedException extends Exception {
     }
     public interface WillThrow {
@@ -2453,7 +2460,7 @@
         {
             countTest();
             if (verbosity >= 2)  System.out.println("Appendable");
-            ArrayList<List> appendResults = new ArrayList<List>();
+            ArrayList<List<?>> appendResults = new ArrayList<>();
             MethodHandle append = lookup.bind(appendResults, "add", MethodType.methodType(boolean.class, Object.class));
             append = append.asType(MethodType.methodType(void.class, List.class)); // specialize the type
             MethodHandle asList = lookup.findStatic(Arrays.class, "asList", MethodType.methodType(List.class, Object[].class));
@@ -2475,11 +2482,11 @@
             formatter.format(fmt, fmtArgs);
             String actual = "";
             if (verbosity >= 3)  System.out.println("appendResults="+appendResults);
-            for (List l : appendResults) {
+            for (List<?> l : appendResults) {
                 Object x = l.get(0);
                 switch (l.size()) {
                 case 1:  actual += x; continue;
-                case 3:  actual += ((String)x).substring((int)l.get(1), (int)l.get(2)); continue;
+                case 3:  actual += ((String)x).substring((int)(Object)l.get(1), (int)(Object)l.get(2)); continue;
                 }
                 actual += l;
             }
@@ -2551,7 +2558,7 @@
             }
         }
         // Test error checking on bad interfaces:
-        for (Class<?> nonSMI : new Class[] { Object.class,
+        for (Class<?> nonSMI : new Class<?>[] { Object.class,
                                              String.class,
                                              CharSequence.class,
                                              java.io.Serializable.class,
@@ -2579,7 +2586,7 @@
             }
         }
         // Test error checking on interfaces with the wrong method type:
-        for (Class<?> intfc : new Class[] { Runnable.class /*arity 0*/,
+        for (Class<?> intfc : new Class<?>[] { Runnable.class /*arity 0*/,
                                             Fooable.class /*arity 1 & 2*/ }) {
             int badArity = 1;  // known to be incompatible
             if (verbosity > 2)  System.out.println(intfc.getName());
@@ -2657,7 +2664,7 @@
                                   Object a8, Object a9)
                 { return makeArray(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9); }
     static MethodHandle[] makeArrays() {
-        ArrayList<MethodHandle> arrays = new ArrayList<MethodHandle>();
+        ArrayList<MethodHandle> arrays = new ArrayList<>();
         MethodHandles.Lookup lookup = IMPL_LOOKUP;
         for (;;) {
             int nargs = arrays.size();
@@ -2746,7 +2753,7 @@
                                      Object a8, Object a9)
                 { return makeList(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9); }
     static MethodHandle[] makeLists() {
-        ArrayList<MethodHandle> lists = new ArrayList<MethodHandle>();
+        ArrayList<MethodHandle> lists = new ArrayList<>();
         MethodHandles.Lookup lookup = IMPL_LOOKUP;
         for (;;) {
             int nargs = lists.size();
@@ -2769,7 +2776,7 @@
     static {
         try {
             AS_LIST = IMPL_LOOKUP.findStatic(Arrays.class, "asList", MethodType.methodType(List.class, Object[].class));
-        } catch (Exception ex) { throw new RuntimeException(ex); }
+        } catch (NoSuchMethodException | IllegalAccessException ex) { throw new RuntimeException(ex); }
     }
 
     /** Return a method handle that takes the indicated number of Object
--- a/jdk/test/java/lang/invoke/MethodTypeTest.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/test/java/lang/invoke/MethodTypeTest.java	Sat Jan 28 20:41:27 2012 -0800
@@ -29,6 +29,7 @@
 
 package test.java.lang.invoke;
 
+import java.io.IOException;
 import java.lang.invoke.MethodType;
 import java.lang.reflect.Method;
 
@@ -378,7 +379,7 @@
     public void testHashCode() {
         System.out.println("hashCode");
         MethodType instance = mt_viS;
-        ArrayList<Class<?>> types = new ArrayList<Class<?>>();
+        ArrayList<Class<?>> types = new ArrayList<>();
         types.add(instance.returnType());
         types.addAll(instance.parameterList());
         int expResult = types.hashCode();
@@ -556,7 +557,7 @@
             Object decode;
             try {
                 decode = readSerial(wire);
-            } catch (Exception ex) {
+            } catch (IOException | ClassNotFoundException ex) {
                 decode = ex;  // oops!
             }
             assertEquals(mt, decode);
--- a/jdk/test/java/lang/invoke/PermuteArgsTest.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/test/java/lang/invoke/PermuteArgsTest.java	Sat Jan 28 20:41:27 2012 -0800
@@ -45,7 +45,7 @@
 import static java.lang.invoke.MethodType.*;
 
 public class PermuteArgsTest {
-    private static final Class CLASS = PermuteArgsTest.class;
+    private static final Class<?> CLASS = PermuteArgsTest.class;
     private static final int MAX_ARITY = Integer.getInteger(CLASS.getSimpleName()+".MAX_ARITY", 8);
     private static final boolean DRY_RUN = Boolean.getBoolean(CLASS.getSimpleName()+".DRY_RUN");
     private static final boolean VERBOSE = Boolean.getBoolean(CLASS.getSimpleName()+".VERBOSE") || DRY_RUN;
@@ -99,12 +99,12 @@
         return Arrays.asList(w, x, y, z);
     }
     static Object listI_etc(int... va) {
-        ArrayList<Object> res = new ArrayList<Object>();
+        ArrayList<Object> res = new ArrayList<>();
         for (int x : va)  res.add(x);
         return res;
     }
     static Object listIJL_etc(int x, long y, Object z, Object... va) {
-        ArrayList<Object> res = new ArrayList<Object>();
+        ArrayList<Object> res = new ArrayList<>();
         res.addAll(Arrays.asList(x, y, z));
         res.addAll(Arrays.asList(va));
         return res;
@@ -168,7 +168,7 @@
                     mh1 = adjustArity(mh, arity);
                 } catch (IllegalArgumentException ex) {
                     System.out.println("*** mh = "+name+" : "+mh+"; arity = "+arity+" => "+ex);
-                    ex.printStackTrace();
+                    ex.printStackTrace(System.out);
                     break;  // cannot get this arity for this type
                 }
                 test("("+arity+")"+name, mh1);
@@ -213,7 +213,7 @@
     }
 
     static void testPermutations(MethodHandle mh) throws Throwable {
-        HashSet<String> done = new HashSet<String>();
+        HashSet<String> done = new HashSet<>();
         MethodType mt = mh.type();
         int[] perm = nullPerm(mt.parameterCount());
         final int MARGIN = (perm.length <= 10 ? 2 : 0);
@@ -326,8 +326,8 @@
             Class<?> pt = ptypes[i];
             Object arg;
             if (pt == Void.class)       arg = null;
-            else if (pt == int.class)   arg = (int)  i + 101;
-            else if (pt == long.class)  arg = (long) i + 10_000_000_001L;
+            else if (pt == int.class)   arg = i + 101;
+            else if (pt == long.class)  arg = i + 10_000_000_001L;
             else                        arg = "#" + (i + 1);
             args[i] = arg;
         }
--- a/jdk/test/java/lang/invoke/RicochetTest.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/test/java/lang/invoke/RicochetTest.java	Sat Jan 28 20:41:27 2012 -0800
@@ -40,7 +40,6 @@
 import static java.lang.invoke.MethodType.*;
 import static java.lang.invoke.MethodHandles.*;
 import static org.junit.Assert.*;
-import static org.junit.Assume.*;
 
 
 /**
@@ -48,7 +47,7 @@
  * @author jrose
  */
 public class RicochetTest {
-    private static final Class CLASS = RicochetTest.class;
+    private static final Class<?> CLASS = RicochetTest.class;
     private static final int MAX_ARITY = Integer.getInteger(CLASS.getSimpleName()+".MAX_ARITY", 40);
 
     public static void main(String... av) throws Throwable {
@@ -148,7 +147,7 @@
         for (int nargs = 0; nargs <= MAX; nargs++) {
             if (nargs > 30 && nargs < MAX-20)  nargs += 10;
             int[] args = new int[nargs];
-            for (int j = 0; j < args.length; j++)  args[j] = (int)(j + 11);
+            for (int j = 0; j < args.length; j++)  args[j] = j + 11;
             //System.out.println("testIntSpreads "+Arrays.toString(args));
             int[] args1 = (int[]) id.invokeExact(args);
             assertArrayEquals(args, args1);
@@ -388,6 +387,7 @@
         java.util.Random random;
         final MethodHandle[] fns;
         int depth;
+        @SuppressWarnings("LeakingThisInConstructor")
         RFCB(int seed) throws Throwable {
             this.random = new java.util.Random(seed);
             this.fns = new MethodHandle[Math.max(29, (1 << MAX_DEPTH-2)/3)];
@@ -408,7 +408,7 @@
                 case 1:
                     Throwable ex = new RuntimeException();
                     ex.fillInStackTrace();
-                    if (VERBOSITY >= 2) ex.printStackTrace();
+                    if (VERBOSITY >= 2) ex.printStackTrace(System.out);
                     x = "ST; " + x;
                     break;
                 case 2:
@@ -467,7 +467,7 @@
             return mh.invokeWithArguments(args);
         } catch (Throwable ex) {
             System.out.println("threw: "+mh+Arrays.asList(args));
-            ex.printStackTrace();
+            ex.printStackTrace(System.out);
             return ex;
         }
     }
@@ -515,8 +515,8 @@
     private static long opJ(long x) { return (long) opI((int)x); }
     private static Object opL2(Object x, Object y) { return (Object) opI2((int)x, (int)y); }
     private static Object opL(Object x) { return (Object) opI((int)x); }
-    private static int opL2_I(Object x, Object y) { return (int) opI2((int)x, (int)y); }
-    private static int opL_I(Object x) { return (int) opI((int)x); }
+    private static int opL2_I(Object x, Object y) { return opI2((int)x, (int)y); }
+    private static int opL_I(Object x) { return opI((int)x); }
     private static long opL_J(Object x) { return (long) opI((int)x); }
     private static final MethodHandle opI, opI2, opI3, opI4, opI_L, opJ, opJ2, opJ3, opL2, opL, opL2_I, opL_I, opL_J;
     static {
@@ -570,8 +570,8 @@
             INT_LISTERS[i] = lister;
             LONG_LISTERS[i] = llister;
             if (i == 0)  break;
-            lister  = insertArguments(lister,  i-1, (int)0);
-            llister = insertArguments(llister, i-1, (long)0);
+            lister  = insertArguments(lister,  i-1, 0);
+            llister = insertArguments(llister, i-1, 0L);
         }
     }
 
--- a/jdk/test/java/lang/invoke/ThrowExceptionsTest.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/test/java/lang/invoke/ThrowExceptionsTest.java	Sat Jan 28 20:41:27 2012 -0800
@@ -40,7 +40,7 @@
 import static java.lang.invoke.MethodType.*;
 
 public class ThrowExceptionsTest {
-    private static final Class CLASS = ThrowExceptionsTest.class;
+    private static final Class<?> CLASS = ThrowExceptionsTest.class;
     private static final Lookup LOOKUP = lookup();
 
     public static void main(String argv[]) throws Throwable {
@@ -132,9 +132,9 @@
                 int tc = testCases;
                 try {
                     m.invoke(this);
-                } catch (Throwable ex) {
+                } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException ex) {
                     System.out.println("*** "+ex);
-                    ex.printStackTrace();
+                    ex.printStackTrace(System.out);
                 }
                 if (testCases == tc)  testCases++;
             }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/javax/swing/JSplitPane/4885629/bug4885629.java	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 4885629
+ * @summary With JSplitPane in VERTICAL_SPLIT, SplitPaneBorder draws bottom edge of divider
+ * @author Andrey Pikalev
+ */
+
+import sun.awt.SunToolkit;
+
+import javax.swing.*;
+import javax.swing.border.Border;
+import javax.swing.border.EmptyBorder;
+import javax.swing.plaf.basic.BasicBorders;
+import javax.swing.plaf.basic.BasicLookAndFeel;
+import javax.swing.plaf.basic.BasicSplitPaneUI;
+import java.awt.*;
+
+
+public class bug4885629 {
+
+    private static final Color darkShadow = new Color(100,120,200);
+    private static final Color darkHighlight = new Color(200,120,50);
+    private static final Color lightHighlight = darkHighlight.brighter();
+    private static final Color BGCOLOR = Color.blue;
+
+    private static JSplitPane sp;
+
+    public static void main(String[] args) throws Exception {
+        UIManager.setLookAndFeel(new BasicLookAndFeel() {
+                public boolean isSupportedLookAndFeel(){ return true; }
+                public boolean isNativeLookAndFeel(){ return false; }
+                public String getDescription() { return "Foo"; }
+                public String getID() { return "FooID"; }
+                public String getName() { return "FooName"; }
+        });
+
+        SwingUtilities.invokeAndWait(new Runnable() {
+            public void run() {
+                JFrame frame = new JFrame();
+
+                JComponent a = new JPanel();
+                a.setBackground(Color.white);
+                a.setMinimumSize(new Dimension(10, 10));
+
+                JComponent b = new JPanel();
+                b.setBackground(Color.white);
+                b.setMinimumSize(new Dimension(10, 10));
+
+                sp = new JSplitPane(JSplitPane.VERTICAL_SPLIT, a, b);
+                sp.setPreferredSize(new Dimension(20, 20));
+                sp.setBackground(BGCOLOR);
+
+                Border bo = new BasicBorders.SplitPaneBorder(lightHighlight,
+                        Color.red);
+                Border ibo = new EmptyBorder(0, 0, 0, 0);
+                sp.setBorder(bo);
+                sp.setMinimumSize(new Dimension(200, 200));
+
+                ((BasicSplitPaneUI) sp.getUI()).getDivider().setBorder(ibo);
+
+                frame.getContentPane().setLayout(new FlowLayout());
+                frame.getContentPane().setBackground(darkShadow);
+                frame.getContentPane().add(sp);
+
+                frame.setSize(200, 200);
+                frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+                frame.setVisible(true);
+            }
+        });
+
+        ((SunToolkit) SunToolkit.getDefaultToolkit()).realSync();
+
+        final Robot robot = new Robot();
+        robot.delay(1000);
+
+        SwingUtilities.invokeAndWait(new Runnable() {
+            public void run() {
+                Rectangle rect = ((BasicSplitPaneUI) sp.getUI()).getDivider().getBounds();
+
+                Point p = rect.getLocation();
+
+                SwingUtilities.convertPointToScreen(p, sp);
+
+                for (int i = 0; i < rect.width; i++) {
+                    if (!BGCOLOR.equals(robot.getPixelColor(p.x + i, p.y + rect.height - 1))) {
+                        throw new Error("The divider's area has incorrect color.");
+                    }
+                }
+            }
+        });
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/javax/swing/JTextArea/4697612/bug4697612.java	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 4697612 6244705
+ * @author Peter Zhelezniakov
+ * @library ../../regtesthelpers
+ * @build Util
+ * @run main bug4697612
+ */
+import java.io.*;
+import java.awt.*;
+import java.awt.event.*;
+import javax.swing.*;
+
+import javax.swing.text.BadLocationException;
+import sun.awt.SunToolkit;
+
+public class bug4697612 {
+
+    static final int FRAME_WIDTH = 300;
+    static final int FRAME_HEIGHT = 300;
+    static final int FONT_HEIGHT = 16;
+    private static volatile int frameHeight;
+    private static volatile int fontHeight;
+    private static JFrame frame;
+    private static JTextArea text;
+    private static JScrollPane scroller;
+
+    public static void main(String[] args) throws Throwable {
+        SunToolkit toolkit = (SunToolkit) Toolkit.getDefaultToolkit();
+        Robot robot = new Robot();
+        robot.setAutoDelay(100);
+
+        SwingUtilities.invokeAndWait(new Runnable() {
+
+            @Override
+            public void run() {
+                createAndShowGUI();
+            }
+        });
+
+        toolkit.realSync();
+
+        SwingUtilities.invokeAndWait(new Runnable() {
+
+            @Override
+            public void run() {
+                text.requestFocus();
+            }
+        });
+
+        toolkit.realSync();
+
+        // 4697612: pressing PgDn + PgUp should not alter caret position
+        Util.hitKeys(robot, KeyEvent.VK_HOME);
+        Util.hitKeys(robot, KeyEvent.VK_PAGE_DOWN);
+
+
+        int pos0 = getTextCaretPosition();
+        int caretHeight = getTextCaretHeight();
+        fontHeight = FONT_HEIGHT;
+
+        // iterate two times, for different (even and odd) font height
+        for (int i = 0; i < 2; i++) {
+
+            SwingUtilities.invokeAndWait(new Runnable() {
+
+                public void run() {
+                    // derive by size (float overload), not by style (int overload)
+                    text.setFont(text.getFont().deriveFont((float) fontHeight));
+                }
+            });
+
+            frameHeight = FRAME_HEIGHT;
+
+            for (int j = 0; j < caretHeight; j++) {
+                SwingUtilities.invokeAndWait(new Runnable() {
+
+                    public void run() {
+                        frame.setSize(FRAME_WIDTH, frameHeight);
+                    }
+                });
+
+                toolkit.realSync();
+
+                Util.hitKeys(robot, KeyEvent.VK_PAGE_DOWN);
+                Util.hitKeys(robot, KeyEvent.VK_PAGE_UP);
+                toolkit.realSync();
+
+                int pos = getTextCaretPosition();
+                if (pos0 != pos) {
+                    throw new RuntimeException("Failed 4697612: PgDn & PgUp keys scroll by different amounts");
+                }
+                frameHeight++;
+            }
+            fontHeight++;
+        }
+
+
+        // 6244705: pressing PgDn at the very bottom should not scroll
+        LookAndFeel laf = UIManager.getLookAndFeel();
+        if (laf.getID().equals("Aqua")) {
+            Util.hitKeys(robot, KeyEvent.VK_END);
+        } else {
+            Util.hitKeys(robot, KeyEvent.VK_CONTROL, KeyEvent.VK_END);
+        }
+
+        toolkit.realSync();
+
+        pos0 = getScrollerViewPosition();
+        Util.hitKeys(robot, KeyEvent.VK_PAGE_DOWN);
+        toolkit.realSync();
+
+        int pos = getScrollerViewPosition();
+
+        if (pos0 != pos) {
+            throw new RuntimeException("Failed 6244705: PgDn at the bottom causes scrolling");
+        }
+    }
+
+    private static int getTextCaretPosition() throws Exception {
+        final int[] result = new int[1];
+        SwingUtilities.invokeAndWait(new Runnable() {
+
+            @Override
+            public void run() {
+                result[0] = text.getCaretPosition();
+            }
+        });
+
+        return result[0];
+    }
+
+    private static int getTextCaretHeight() throws Exception {
+        final int[] result = new int[1];
+        SwingUtilities.invokeAndWait(new Runnable() {
+
+            @Override
+            public void run() {
+                try {
+                    int pos0 = text.getCaretPosition();
+                    Rectangle dotBounds = text.modelToView(pos0);
+                    result[0] = dotBounds.height;
+                } catch (BadLocationException ex) {
+                    throw new RuntimeException(ex);
+                }
+            }
+        });
+
+        return result[0];
+    }
+
+    private static int getScrollerViewPosition() throws Exception {
+        final int[] result = new int[1];
+        SwingUtilities.invokeAndWait(new Runnable() {
+
+            @Override
+            public void run() {
+                result[0] = scroller.getViewport().getViewPosition().y;
+            }
+        });
+
+        return result[0];
+    }
+
+    private static void createAndShowGUI() {
+        frame = new JFrame();
+        frame.setSize(FRAME_WIDTH, FRAME_HEIGHT);
+        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+
+        text = new JTextArea();
+        try {
+            InputStream is =
+                    bug4697612.class.getResourceAsStream("bug4697612.txt");
+            text.read(new InputStreamReader(is), null);
+        } catch (IOException e) {
+            throw new Error(e);
+        }
+
+        scroller = new JScrollPane(text);
+
+        frame.getContentPane().add(scroller);
+
+        frame.pack();
+        frame.setVisible(true);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/javax/swing/JTextArea/4697612/bug4697612.txt	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,223 @@
+                                   README
+
+                      Java(TM) 2 SDK, Standard Edition
+                             Version 1.4.2 Beta
+
+     For a more extensive HTML version of this file, see README.html.
+
+Contents
+
+   * Introduction
+   * Release Notes
+   * Bug Reports and Feedback
+   * Java 2 SDK Documentation
+   * Redistribution
+   * Web Pages
+
+
+Introduction
+
+     Thank you for downloading this release of the Java(TM) 2 SDK,
+     Standard Edition. The Java 2 SDK is a development environment for
+     building applications, applets, and components that can be
+     deployed on the Java platform.
+
+     The Java 2 SDK software includes tools useful for developing and
+     testing programs written in the Java programming language and
+     running on the Java platform. These tools are designed to be used
+     from the command line. Except for appletviewer, these tools do not
+     provide a graphical user interface.
+
+
+Release Notes
+
+      See the Release Notes on the Java Software web site for additional 
+      information pertaining to this release. 
+ 
+      http://java.sun.com/j2se/1.4.2/relnotes.html
+
+      The on-line release notes will be updated as needed, so you should 
+      check it occasionally for the latest information. 
+
+
+Bug Reports and Feedback
+
+      The Bug Parade Web Page on the Java Developer Connection(SM) web 
+      site lets you search for and examine existing bug reports, submit 
+      your own bug reports, and tell us which bug fixes matter most to you. 
+
+      http://java.sun.com/jdc/bugParade/
+
+      To directly submit a bug or request a feature, fill out this form: 
+
+      http://java.sun.com/cgi-bin/bugreport.cgi 
+
+      You can also send comments directly to Java Software engineering 
+      team email addresses. 
+
+      http://java.sun.com/mail/
+
+
+Java 2 SDK Documentation
+
+     The on-line Java 2 SDK Documentation contains API specifications,
+     feature descriptions, developer guides, tool reference pages, demos, 
+     and links to related information. It is located at
+
+     http://java.sun.com/j2se/1.4.2/docs/
+
+     The Java 2 SDK documentation is also available in a download bundle 
+     which you can install locally on your machine. See the 
+     Java 2 SDK download page: 
+     
+     http://java.sun.com/j2se/1.4.2/download.html
+
+
+Redistribution
+
+      The term "vendors" used here refers to licensees, developers, 
+      and independent software vendors (ISVs) who license and 
+      distribute the Java 2 Runtime Environment with their programs. 
+      Vendors must follow the terms of the Java 2 SDK, Standard 
+      Edition, Binary Code License agreement. 
+      Required vs. Optional Files
+
+      The files that make up the Java 2 SDK, Standard Edition, are
+      divided into two categories: required and optional. Optional 
+      files may be excluded from redistributions of the Java 2 SDK 
+      at the vendor's discretion. The following section contains a 
+      list of the files and directories that may optionally be 
+      omitted from redistributions of the Java 2 SDK. All files not 
+      in these lists of optional files must be included in 
+      redistributions of the Java 2 SDK. 
+
+      Optional Files and Directories
+
+      The following files may be optionally excluded from 
+      redistributions:
+
+      jre/lib/charsets.jar 
+            Character conversion classes 
+      jre/lib/ext/ 
+            sunjce_provider.jar - the SunJCE provider for Java 
+              Cryptography APIs
+            localedata.jar - contains many of the resources 
+              needed for non US English locales
+            ldapsec.jar - contains security features supported 
+              by the LDAP service provider
+            dnsns.jar - for the InetAddress wrapper of JNDI DNS 
+              provider
+      bin/rmid and jre/bin/rmid 
+            Java RMI Activation System Daemon 
+      bin/rmiregistry and jre/bin/rmiregistry 
+            Java Remote Object Registry 
+      bin/tnameserv and jre/bin/tnameserv 
+            Java IDL Name Server 
+      bin/keytool and jre/bin/keytool 
+            Key and Certificate Management Tool 
+      bin/kinit and jre/bin/kinit
+            Used to obtain and cache Kerberos ticket-granting tickets
+      bin/klist and jre/bin/klist
+            Kerberos display entries in credentials cache and keytab
+      bin/ktab and jre/bin/ktab
+            Kerberos key table manager
+      bin/policytool and jre/bin/policytool 
+            Policy File Creation and Management Tool 
+      bin/orbd and jre/bin/orbd 
+            Object Request Broker Daemon 
+      bin/servertool and jre/bin/servertool 
+            Java IDL Server Tool 
+      src.zip 
+            Archive of source files 
+
+      In addition, the Java Web Start product may be excluded from
+      redistributions. The Java Web Start product is contained in a 
+      file named javaws-1_2-solaris-sparc-i.zip,
+      javaws-1_2-solaris-i586-i.zip,
+      javaws-1_2-linux-i586-i.zip, or
+      javaws-1_2-windows-i586-i.exe, depending on the platform. 
+
+
+      Unlimited Strength Java Cryptography Extension
+
+      Due to import control restrictions for some countries, the 
+      Java Cryptography Extension (JCE) policy files shipped with 
+      the Java 2 SDK, Standard Edition and the Java 2 Runtime 
+      Environment allow strong but limited cryptography to be 
+      used.  These files are located at
+
+      <java-home>/lib/security/local_policy.jar
+      <java-home>/lib/security/US_export_policy.jar
+
+      where <java-home> is the jre directory of the Java 2 
+      SDK or the top-level directory of the Java 2 Runtime 
+      Environment.
+
+      An unlimited strength version of these files, which places 
+      no restrictions on cryptographic strength, is available on 
+      the Java 2 SDK web site.  Users in eligible countries may 
+      download the unlimited strength version and use it to 
+      replace the strong cryptography jar files.
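+
+      As an illustration only, a small program along the following 
+      lines can be used to check which policy files are in effect; 
+      the class name and the 256-bit key size are just examples:
+
+          import javax.crypto.Cipher;
+          import javax.crypto.spec.SecretKeySpec;
+
+          public class CryptoPolicyCheck {
+              public static void main(String[] args) throws Exception {
+                  // The default policy files limit the key sizes that may
+                  // be used (for AES, typically 128 bits), so initializing
+                  // a cipher with a 256-bit key succeeds only when the
+                  // unlimited strength policy files are installed.
+                  SecretKeySpec key = new SecretKeySpec(new byte[32], "AES");
+                  Cipher cipher = Cipher.getInstance("AES");
+                  try {
+                      cipher.init(Cipher.ENCRYPT_MODE, key);
+                      System.out.println("Unlimited strength policy files installed.");
+                  } catch (java.security.InvalidKeyException e) {
+                      System.out.println("Default (limited) policy files in effect.");
+                  }
+              }
+          }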
+
+
+      Endorsed Standards Override Mechanism
+
+      An endorsed standard is a Java API defined through a standards
+      process other than the Java Community Process(SM) (JCP(SM)).
+      Because endorsed standards are defined outside the JCP, it is
+      anticipated that such standards will be revised between 
+      releases of the Java 2 Platform. In order to take advantage of 
+      new revisions to endorsed standards, developers and software
+      vendors may use the Endorsed Standards Override Mechanism to
+      provide newer versions of an endorsed standard than those
+      included in the Java 2 Platform as released by Sun Microsystems.
+
+      For more information on the Endorsed Standards Override
+      Mechanism, including the list of platform packages that it may 
+      be used to override, see 
+
+            http://java.sun.com/j2se/1.4.2/docs/guide/standards/ 
+
+      Classes in the packages listed on that web page may be replaced
+      only by classes implementing a more recent version of the API 
+      as defined by the appropriate standards body. 
+
+      In addition to the packages listed in the document at the above 
+      URL, which are part of the Java 2 Platform, Standard Edition 
+      (J2SE(TM)) specification, redistributors of Sun's J2SE 
+      Reference Implementation are allowed to override classes whose 
+      sole purpose is to implement the functionality provided by 
+      public APIs defined in these Endorsed Standards packages.  
+      Redistributors may also override classes in the org.w3c.dom.* 
+      packages, or other classes whose sole purpose is to implement 
+      these APIs. 
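+
+      As a usage sketch, the overriding jar files are picked up from 
+      the directories named by the java.endorsed.dirs system property 
+      (by default <java-home>/lib/endorsed); the directory and class 
+      names below are illustrative only:
+
+          java -Djava.endorsed.dirs=/opt/myapp/endorsed MyApp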
+
+
+Sun Java Web Pages
+
+     For additional information, refer to these Sun Microsystems pages
+     on the World Wide Web:
+
+     http://java.sun.com/
+          The Java Software web site, with the latest information on
+          Java technology, product information, news, and features.
+     http://java.sun.com/docs
+          Java Platform Documentation provides access to white papers,
+          the Java Tutorial and other documents.
+     http://java.sun.com/jdc
+          The Java Developer Connection(SM) web site. (Free registration
+          required.) Additional technical information, news, and
+          features; user forums; support information; and much more.
+     http://java.sun.com/products/
+          Java Technology Products & API
+
+
+------------------------------------------------------------------------
+The Java 2 SDK, Standard Edition, is a product of Sun Microsystems(TM), 
+Inc.  This product includes code licensed from RSA Security.
+
+Copyright 2003 Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 
+California 95054, U.S.A.  All rights reserved.
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/javax/swing/JTree/6505523/bug6505523.java	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 6505523
+ * @summary NullPointerException in BasicTreeUI when a node is removed by expansion listener
+ * @author Alexandr Scherbatiy
+ * @run main bug6505523
+ */
+import java.awt.Point;
+import java.awt.Rectangle;
+import java.awt.Robot;
+import java.awt.Toolkit;
+import java.awt.event.InputEvent;
+import javax.swing.JFrame;
+import javax.swing.JScrollPane;
+import javax.swing.JTree;
+import javax.swing.SwingUtilities;
+import javax.swing.event.TreeExpansionEvent;
+import javax.swing.event.TreeExpansionListener;
+import javax.swing.tree.DefaultMutableTreeNode;
+import javax.swing.tree.DefaultTreeModel;
+import javax.swing.tree.TreeNode;
+import sun.awt.SunToolkit;
+
+public class bug6505523 {
+
+    private static JTree tree;
+
+    public static void main(String[] args) throws Exception {
+        SunToolkit toolkit = (SunToolkit) Toolkit.getDefaultToolkit();
+        Robot robot = new Robot();
+        robot.setAutoDelay(50);
+
+        SwingUtilities.invokeAndWait(new Runnable() {
+
+            @Override
+            public void run() {
+                createAndShowGUI();
+            }
+        });
+
+        toolkit.realSync();
+
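+        // Click just to the left of row 2 (the "Expand me ..." node), where
+        // the tree paints its expand control, so that the press expands the
+        // node and lets the expansion listener remove it from the model.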
+        Point point = getRowPointToClick(2);
+        robot.mouseMove(point.x, point.y);
+        robot.mousePress(InputEvent.BUTTON1_MASK);
+        robot.mouseRelease(InputEvent.BUTTON1_MASK);
+
+        toolkit.realSync();
+
+    }
+
+    private static Point getRowPointToClick(final int row) throws Exception {
+
+        final Point[] result = new Point[1];
+
+        SwingUtilities.invokeAndWait(new Runnable() {
+
+            @Override
+            public void run() {
+                Rectangle rect = tree.getRowBounds(row);
+                Point point = new Point(rect.x - 5, rect.y + rect.height / 2);
+                SwingUtilities.convertPointToScreen(point, tree);
+                result[0] = point;
+            }
+        });
+
+        return result[0];
+    }
+
+    private static void createAndShowGUI() {
+        final DefaultMutableTreeNode root = new DefaultMutableTreeNode("Problem with NPE under JDK 1.6");
+        final DefaultMutableTreeNode problematic = new DefaultMutableTreeNode("Expand me and behold a NPE in stderr");
+        problematic.add(new DefaultMutableTreeNode("some content"));
+        root.add(new DefaultMutableTreeNode("irrelevant..."));
+        root.add(problematic);
+
+        final DefaultTreeModel model = new DefaultTreeModel(root);
+        tree = new JTree(model);
+        tree.setRootVisible(true);
+        tree.setShowsRootHandles(true);
+        tree.expandRow(0);
+        tree.collapseRow(2);
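+        // With the root expanded, row 0 is the root, row 1 is the
+        // "irrelevant..." node and row 2 is the problematic node that
+        // getRowPointToClick(2) targets.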
+
+        // This is critical: without setDragEnabled(true) the bug does not reproduce.
+        tree.setDragEnabled(true);
+
+        tree.addTreeExpansionListener(new TreeExpansionListener() {
+
+            @Override
+            public void treeExpanded(TreeExpansionEvent event) {
+                TreeNode parent = problematic.getParent();
+                if (parent instanceof DefaultMutableTreeNode) {
+                    model.removeNodeFromParent(problematic);
+                }
+            }
+
+            @Override
+            public void treeCollapsed(TreeExpansionEvent event) {
+            }
+        });
+
+        JFrame frame = new JFrame("JTree Problem");
+        frame.add(new JScrollPane(tree));
+        frame.setSize(500, 300);
+        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+        frame.setLocationRelativeTo(null);
+        frame.setVisible(true);
+    }
+}
\ No newline at end of file
--- a/jdk/test/sun/invoke/util/ValueConversionsTest.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/jdk/test/sun/invoke/util/ValueConversionsTest.java	Sat Jan 28 20:41:27 2012 -0800
@@ -27,11 +27,9 @@
 import sun.invoke.util.Wrapper;
 import java.lang.invoke.MethodType;
 import java.lang.invoke.MethodHandle;
-import java.lang.invoke.MethodHandles;
 import java.io.Serializable;
 import java.util.Arrays;
 import java.util.Collections;
-import org.junit.Ignore;
 import org.junit.Test;
 import static org.junit.Assert.*;
 
@@ -52,7 +50,7 @@
  * @author jrose
  */
 public class ValueConversionsTest {
-    private static final Class CLASS = ValueConversionsTest.class;
+    private static final Class<?> CLASS = ValueConversionsTest.class;
     private static final int MAX_ARITY = Integer.getInteger(CLASS.getSimpleName()+".MAX_ARITY", 40);
     private static final int START_ARITY = Integer.getInteger(CLASS.getSimpleName()+".START_ARITY", 0);
     private static final boolean EXHAUSTIVE = Boolean.getBoolean(CLASS.getSimpleName()+".EXHAUSTIVE");
@@ -165,7 +163,7 @@
                 Object expResult = box;
                 Object result = null;
                 switch (w) {
-                    case INT:     result = boxer.invokeExact((int)n); break;
+                    case INT:     result = boxer.invokeExact(/*int*/n); break;
                     case LONG:    result = boxer.invokeExact((long)n); break;
                     case FLOAT:   result = boxer.invokeExact((float)n); break;
                     case DOUBLE:  result = boxer.invokeExact((double)n); break;
@@ -361,6 +359,7 @@
             assert(stype == MethodType.methodType(arrayType, arrayType));
             if (nargs <= 5) {
                 // invoke target as a spreader also:
+                @SuppressWarnings("cast")
                 Object res2 = spreader.invokeWithArguments((Object)res);
                 String res2String = toArrayString(res2);
                 assertEquals(Arrays.toString(args), res2String);
--- a/langtools/.hgtags	Sat Jan 28 10:46:46 2012 -0800
+++ b/langtools/.hgtags	Sat Jan 28 20:41:27 2012 -0800
@@ -143,3 +143,5 @@
 77b2c066084cbc75150efc6603a713c558329813 jdk8-b19
 ffd294128a48cbb90ce8f0569f82b61f1f164a18 jdk8-b20
 bcb21abf1c4177baf4574f99709513dcd4474727 jdk8-b21
+390a7828ae18324030c0546b6452d51093ffa451 jdk8-b22
+601ffcc6551d5414ef871be306c3a26396cf16a7 jdk8-b23
--- a/langtools/src/share/classes/com/sun/tools/javac/code/Types.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/langtools/src/share/classes/com/sun/tools/javac/code/Types.java	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3600,39 +3600,44 @@
 
         @Override
         public Type visitCapturedType(CapturedType t, Void s) {
-            Type bound = visitWildcardType(t.wildcard, null);
-            return (bound.contains(t)) ?
-                    erasure(bound) :
-                    bound;
+            Type w_bound = t.wildcard.type;
+            Type bound = w_bound.contains(t) ?
+                        erasure(w_bound) :
+                        visit(w_bound);
+            return rewriteAsWildcardType(visit(bound), t.wildcard.bound, t.wildcard.kind);
         }
 
         @Override
         public Type visitTypeVar(TypeVar t, Void s) {
             if (rewriteTypeVars) {
-                Type bound = high ?
-                    (t.bound.contains(t) ?
+                Type bound = t.bound.contains(t) ?
                         erasure(t.bound) :
-                        visit(t.bound)) :
-                    syms.botType;
-                return rewriteAsWildcardType(bound, t);
+                        visit(t.bound);
+                return rewriteAsWildcardType(bound, t, EXTENDS);
+            } else {
+                return t;
             }
-            else
-                return t;
         }
 
         @Override
         public Type visitWildcardType(WildcardType t, Void s) {
-            Type bound = high ? t.getExtendsBound() :
-                                t.getSuperBound();
-            if (bound == null)
-            bound = high ? syms.objectType : syms.botType;
-            return rewriteAsWildcardType(visit(bound), t.bound);
+            Type bound2 = visit(t.type);
+            return t.type == bound2 ? t : rewriteAsWildcardType(bound2, t.bound, t.kind);
         }
 
-        private Type rewriteAsWildcardType(Type bound, TypeVar formal) {
-            return high ?
-                makeExtendsWildcard(B(bound), formal) :
-                makeSuperWildcard(B(bound), formal);
+        private Type rewriteAsWildcardType(Type bound, TypeVar formal, BoundKind bk) {
+            switch (bk) {
+               case EXTENDS: return high ?
+                       makeExtendsWildcard(B(bound), formal) :
+                       makeExtendsWildcard(syms.objectType, formal);
+               case SUPER: return high ?
+                       makeSuperWildcard(syms.botType, formal) :
+                       makeSuperWildcard(B(bound), formal);
+               case UNBOUND: return makeExtendsWildcard(syms.objectType, formal);
+               default:
+                   Assert.error("Invalid bound kind " + bk);
+                   return null;
+            }
         }
 
         Type B(Type t) {
--- a/langtools/src/share/classes/com/sun/tools/javac/comp/Infer.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/langtools/src/share/classes/com/sun/tools/javac/comp/Infer.java	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -332,25 +332,29 @@
             //replace uninferred type-vars
             targs = types.subst(targs,
                     that.tvars,
-                    instaniateAsUninferredVars(undetvars, that.tvars));
+                    instantiateAsUninferredVars(undetvars, that.tvars));
         }
         return chk.checkType(warn.pos(), that.inst(targs, types), to);
     }
     //where
-    private List<Type> instaniateAsUninferredVars(List<Type> undetvars, List<Type> tvars) {
+    private List<Type> instantiateAsUninferredVars(List<Type> undetvars, List<Type> tvars) {
+        Assert.check(undetvars.length() == tvars.length());
         ListBuffer<Type> new_targs = ListBuffer.lb();
-        //step 1 - create syntethic captured vars
+        //step 1 - create synthetic captured vars
         for (Type t : undetvars) {
             UndetVar uv = (UndetVar)t;
             Type newArg = new CapturedType(t.tsym.name, t.tsym, uv.inst, syms.botType, null);
             new_targs = new_targs.append(newArg);
         }
         //step 2 - replace synthetic vars in their bounds
+        List<Type> formals = tvars;
         for (Type t : new_targs.toList()) {
             CapturedType ct = (CapturedType)t;
             ct.bound = types.subst(ct.bound, tvars, new_targs.toList());
-            WildcardType wt = new WildcardType(ct.bound, BoundKind.EXTENDS, syms.boundClass);
+            WildcardType wt = new WildcardType(syms.objectType, BoundKind.UNBOUND, syms.boundClass);
+            wt.bound = (TypeVar)formals.head;
             ct.wildcard = wt;
+            formals = formals.tail;
         }
         return new_targs.toList();
     }
--- a/langtools/src/share/classes/javax/lang/model/element/Element.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/langtools/src/share/classes/javax/lang/model/element/Element.java	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -214,14 +214,13 @@
      * Returns the elements that are, loosely speaking, directly
      * enclosed by this element.
      *
-     * A class or interface is considered to enclose the fields,
-     * methods, constructors, and member types that it directly
-     * declares.  This includes any (implicit) default constructor and
-     * the implicit {@code values} and {@code valueOf} methods of an
-     * enum type.
+     * A {@linkplain TypeElement#getEnclosedElements class or
+     * interface} is considered to enclose the fields, methods,
+     * constructors, and member types that it directly declares.
      *
-     * A package encloses the top-level classes and interfaces within
-     * it, but is not considered to enclose subpackages.
+     * A {@linkplain PackageElement#getEnclosedElements package}
+     * encloses the top-level classes and interfaces within it, but is
+     * not considered to enclose subpackages.
      *
      * Other kinds of elements are not currently considered to enclose
      * any elements; however, that may change as this API or the
@@ -231,6 +230,8 @@
      * methods in {@link ElementFilter}.
      *
      * @return the enclosed elements, or an empty list if none
+     * @see PackageElement#getEnclosedElements
+     * @see TypeElement#getEnclosedElements
      * @see Elements#getAllMembers
      * @jls 8.8.9 Default Constructor
      * @jls 8.9 Enums
--- a/langtools/src/share/classes/javax/lang/model/element/PackageElement.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/langtools/src/share/classes/javax/lang/model/element/PackageElement.java	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 
 package javax.lang.model.element;
 
+import java.util.List;
+
 /**
  * Represents a package program element.  Provides access to information
  * about the package and its members.
@@ -49,7 +51,7 @@
 
     /**
      * Returns the simple name of this package.  For an unnamed
-     * package, an empty name is returned
+     * package, an empty name is returned.
      *
      * @return the simple name of this package or an empty name if
      * this is an unnamed package
@@ -58,6 +60,18 @@
     Name getSimpleName();
 
     /**
+     * Returns the {@linkplain NestingKind#TOP_LEVEL top-level}
+     * classes and interfaces within this package.  Note that
+     * subpackages are <em>not</em> considered to be enclosed by a
+     * package.
+     *
+     * @return the top-level classes and interfaces within this
+     * package
+     */
+    @Override
+    List<? extends Element> getEnclosedElements();
+
+    /**
      * Returns {@code true} if this is an unnamed package and {@code
      * false} otherwise.
      *
--- a/langtools/src/share/classes/javax/lang/model/element/TypeElement.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/langtools/src/share/classes/javax/lang/model/element/TypeElement.java	Sat Jan 28 20:41:27 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,12 @@
  */
 public interface TypeElement extends Element, Parameterizable, QualifiedNameable {
     /**
-     * {@inheritDoc}
+     * Returns the fields, methods, constructors, and member types
+     * that are directly declared in this class or interface.
+     *
+     * This includes any (implicit) default constructor and
+     * the implicit {@code values} and {@code valueOf} methods of an
+     * enum type.
      *
      * <p> Note that as a particular instance of the {@linkplain
      * javax.lang.model.element general accuracy requirements} and the
@@ -75,6 +80,7 @@
      *
      * @return the enclosed elements in proper order, or an empty list if none
      */
+    @Override
     List<? extends Element> getEnclosedElements();
 
     /**
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/7123100/T7123100a.java	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,16 @@
+/*
+ * @test /nodynamiccopyright/
+ * @bug     7123100
+ * @summary javac fails with java.lang.StackOverflowError
+ * @compile/fail/ref=T7123100a.out -Werror -Xlint:unchecked -XDrawDiagnostics T7123100a.java
+ */
+
+class T7123100a {
+    <E extends Enum<E>> E m() {
+        return null;
+    }
+
+    <Z> void test() {
+        Z z = (Z)m();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/7123100/T7123100a.out	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,4 @@
+T7123100a.java:14:19: compiler.warn.prob.found.req: (compiler.misc.unchecked.cast.to.type), compiler.misc.type.captureof: 1, ?, Z
+- compiler.err.warnings.and.werror
+1 error
+1 warning
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/7123100/T7123100b.java	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,12 @@
+/*
+ * @test /nodynamiccopyright/
+ * @bug     7123100
+ * @summary javac fails with java.lang.StackOverflowError
+ * @compile/fail/ref=T7123100b.out -Werror -Xlint:unchecked -XDrawDiagnostics T7123100b.java
+ */
+
+class T7123100b {
+    <Z> void test(Enum<?> e) {
+        Z z = (Z)e;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/7123100/T7123100b.out	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,4 @@
+T7123100b.java:10:18: compiler.warn.prob.found.req: (compiler.misc.unchecked.cast.to.type), java.lang.Enum<compiler.misc.type.captureof: 1, ?>, Z
+- compiler.err.warnings.and.werror
+1 error
+1 warning
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/7123100/T7123100c.java	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,16 @@
+/*
+ * @test /nodynamiccopyright/
+ * @bug     7123100
+ * @summary javac fails with java.lang.StackOverflowError
+ * @compile/fail/ref=T7123100c.out -Werror -Xlint:unchecked -XDrawDiagnostics T7123100c.java
+ */
+
+class T7123100c {
+    <E> E m(E e) {
+        return null;
+    }
+
+    <Z> void test(Enum<?> e) {
+        Z z = (Z)m(e);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/7123100/T7123100c.out	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,4 @@
+T7123100c.java:14:19: compiler.warn.prob.found.req: (compiler.misc.unchecked.cast.to.type), java.lang.Enum<compiler.misc.type.captureof: 1, ?>, Z
+- compiler.err.warnings.and.werror
+1 error
+1 warning
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/7123100/T7123100d.java	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,16 @@
+/*
+ * @test /nodynamiccopyright/
+ * @bug     7123100
+ * @summary javac fails with java.lang.StackOverflowError
+ * @compile/fail/ref=T7123100d.out -Werror -Xlint:unchecked -XDrawDiagnostics T7123100d.java
+ */
+
+class T7123100d {
+    <E extends Enum<E>> E m(Enum<E> e) {
+        return null;
+    }
+
+    <Z> void test(Enum<?> e) {
+        Z z = (Z)m(e);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/7123100/T7123100d.out	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,4 @@
+T7123100d.java:14:19: compiler.warn.prob.found.req: (compiler.misc.unchecked.cast.to.type), compiler.misc.type.captureof: 1, ?, Z
+- compiler.err.warnings.and.werror
+1 error
+1 warning
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/7126754/T7126754.java	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,14 @@
+/*
+ * @test /nodynamiccopyright/
+ * @author mcimadamore
+ * @bug     7005671
+ * @summary Generics compilation failure casting List<? extends Set...> to List<Set...>
+ * @compile/fail/ref=T7126754.out -Xlint:unchecked -Werror -XDrawDiagnostics T7126754.java
+ */
+
+import java.util.List;
+
+class T7126754 {
+    List<? extends List<? extends String>> c = null;
+    List<List<? extends String>> d = (List<List<? extends String>>)c;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/7126754/T7126754.out	Sat Jan 28 20:41:27 2012 -0800
@@ -0,0 +1,4 @@
+T7126754.java:13:68: compiler.warn.prob.found.req: (compiler.misc.unchecked.cast.to.type), java.util.List<compiler.misc.type.captureof: 1, ? extends java.util.List<? extends java.lang.String>>, java.util.List<java.util.List<? extends java.lang.String>>
+- compiler.err.warnings.and.werror
+1 error
+1 warning
--- a/langtools/test/tools/javac/diags/CheckExamples.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/langtools/test/tools/javac/diags/CheckExamples.java	Sat Jan 28 20:41:27 2012 -0800
@@ -23,10 +23,13 @@
 
 /*
  * @test
- * @bug 6968063
+ * @bug 6968063 7127924
  * @summary provide examples of code that generate diagnostics
  * @build Example CheckExamples
- * @run main CheckExamples
+ * @run main/othervm CheckExamples
+ */
+/*
+ *      See CR 7127924 for info on why othervm is used.
  */
 
 import java.io.*;
--- a/langtools/test/tools/javac/diags/MessageInfo.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/langtools/test/tools/javac/diags/MessageInfo.java	Sat Jan 28 20:41:27 2012 -0800
@@ -23,10 +23,13 @@
 
 /**
  * @test
- * @bug 7013272
+ * @bug 7013272 7127924
  * @summary Automatically generate info about how compiler resource keys are used
  * @build Example ArgTypeCompilerFactory MessageFile MessageInfo
- * @run main MessageInfo
+ * @run main/othervm MessageInfo
+ */
+/*
+ *      See CR 7127924 for info on why othervm is used.
  */
 
 import java.io.*;
--- a/langtools/test/tools/javac/diags/RunExamples.java	Sat Jan 28 10:46:46 2012 -0800
+++ b/langtools/test/tools/javac/diags/RunExamples.java	Sat Jan 28 20:41:27 2012 -0800
@@ -23,10 +23,13 @@
 
 /**
  * @test
- * @bug 6968063
+ * @bug 6968063 7127924
  * @summary provide examples of code that generate diagnostics
  * @build ArgTypeCompilerFactory Example HTMLWriter RunExamples
- * @run main RunExamples
+ * @run main/othervm RunExamples
+ */
+/*
+ *      See CR 7127924 for info on why othervm is used.
  */
 
 import java.io.*;